diff --git a/Documentation/ABI/testing/sysfs-class-net-qmi b/Documentation/ABI/testing/sysfs-class-net-qmi
index fa5a00bb114372bfbc67008e8d0ebe8d1566ed6b..7122d6264c49d6c02c2c0074e145f19b93fc63dd 100644
--- a/Documentation/ABI/testing/sysfs-class-net-qmi
+++ b/Documentation/ABI/testing/sysfs-class-net-qmi
@@ -21,3 +21,30 @@ Description:
 		is responsible for coordination of driver and firmware
 		link framing mode, changing this setting to 'Y' if the
 		firmware is configured for 'raw-ip' mode.
+
+What:		/sys/class/net/<iface>/qmi/add_mux
+Date:		March 2017
+KernelVersion:	4.11
+Contact:	Bjørn Mork <bjorn@mork.no>
+Description:
+		Unsigned integer.
+
+		Write a number ranging from 1 to 127 to add a qmap mux
+		based network device, supported by recent Qualcomm based
+		modems.
+
+		The new network device will be called qmimuxN.
+
+		Userspace is in charge of managing the qmux network device
+		activation and data stream setup on the modem side by
+		using the proper QMI protocol requests.
+
+What:		/sys/class/net/<iface>/qmi/del_mux
+Date:		March 2017
+KernelVersion:	4.11
+Contact:	Bjørn Mork <bjorn@mork.no>
+Description:
+		Unsigned integer.
+
+		Write a number ranging from 1 to 127 to delete a previously
+		created qmap mux based network device.
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 10587bdadbbe5a8e8fe703e23c194420e3701f9f..26c77d985fafe06092467c5a2ea8a0e176d0d460 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
@@ -2,11 +2,14 @@
 
 Required properties:
 - compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
-  "brcm,genet-v3", "brcm,genet-v4".
+  "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5".
 - reg: address and length of the register set for the device
-- interrupts: must be two cells, the first cell is the general purpose
-  interrupt line, while the second cell is the interrupt for the ring
-  RX and TX queues operating in ring mode
+- interrupts and/or interrupts-extended: must be two cells, the first cell
+  is the general purpose interrupt line, while the second cell is the
+  interrupt for the ring RX and TX queues operating in ring mode. An
+  optional third interrupt cell for Wake-on-LAN can be specified.
+  See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+  for information on the property specifics.
 - phy-mode: see ethernet.txt file in the same directory
 - #address-cells: should be 1
 - #size-cells: should be 1
@@ -29,15 +32,15 @@ Optional properties:
 
 Required child nodes:
 
-- mdio bus node: this node should always be present regarless of the PHY
+- mdio bus node: this node should always be present regardless of the PHY
   configuration of the GENET instance
 
 MDIO bus node required properties:
 
 - compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
-  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the
-  parent node compatible property (e.g: brcm,genet-v4 pairs with
-  brcm,genet-mdio-v4)
+  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5", the version
+  has to match the parent node compatible property (e.g.: brcm,genet-v4 pairs
+  with brcm,genet-mdio-v4)
 - reg: address and length relative to the parent node base register address
 - #address-cells: address cell for MDIO bus addressing, should be 1
 - #size-cells: size of the cells for MDIO bus addressing, should be 0
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
index ab0bb4247d14950959044cc138e71e7c736f85b9..4648948f7c3b8f26391292b06aa01743100e8796 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
@@ -2,8 +2,9 @@
 
 Required properties:
 - compatible: should one from "brcm,genet-mdio-v1", "brcm,genet-mdio-v2",
-  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4" or "brcm,unimac-mdio"
-- reg: address and length of the regsiter set for the device, first one is the
+  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5" or
+  "brcm,unimac-mdio"
+- reg: address and length of the register set for the device, first one is the
   base register, and the second one is optional and for indirect accesses to
   larger than 16-bits MDIO transactions
 - reg-names: name(s) of the register must be "mdio" and optional "mdio_indir_rw"
diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
new file mode 100644
index 0000000000000000000000000000000000000000..23aa94eab2076f142dea3a0c878aebb15d46d7fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt
@@ -0,0 +1,24 @@
+* Holt HI-311X stand-alone CAN controller device tree bindings
+
+Required properties:
+ - compatible: Should be one of the following:
+   - "holt,hi3110" for HI-3110
+ - reg: SPI chip select.
+ - clocks: The clock feeding the CAN controller.
+ - interrupt-parent: The parent interrupt controller.
+ - interrupts: Should contain IRQ line for the CAN controller.
+
+Optional properties:
+ - vdd-supply: Regulator that powers the CAN controller.
+ - xceiver-supply: Regulator that powers the CAN transceiver.
+
+Example:
+	can0: can@1 {
+		compatible = "holt,hi3110";
+		reg = <1>;
+		clocks = <&clk32m>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+		vdd-supply = <&reg5v0>;
+		xceiver-supply = <&reg5v0>;
+	};
diff --git a/Documentation/devicetree/bindings/net/can/ti_hecc.txt b/Documentation/devicetree/bindings/net/can/ti_hecc.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e0f0a7cfe3293893ae42fae3ca14571c29288a01
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/ti_hecc.txt
@@ -0,0 +1,32 @@
+Texas Instruments High End CAN Controller (HECC)
+================================================
+
+This file provides information on what the device node
+for the HECC interface contains.
+
+Required properties:
+- compatible: "ti,am3517-hecc"
+- reg: addresses and lengths of the register spaces for 'hecc', 'hecc-ram'
+  and 'mbx'
+- reg-names: "hecc", "hecc-ram", "mbx"
+- interrupts: interrupt mapping for the HECC interrupt sources
+- clocks: clock phandles (see clock bindings for details)
+
+Optional properties:
+- ti,use-hecc1int: if provided configures HECC to produce all interrupts
+		   on HECC1INT interrupt line. By default HECC0INT interrupt
+		   line will be used.
+- xceiver-supply: regulator that powers the CAN transceiver
+
+Example:
+
+For am3517evm board:
+	hecc: can@5c050000 {
+		compatible = "ti,am3517-hecc";
+		reg = <0x5c050000 0x80>,
+		      <0x5c053000 0x180>,
+		      <0x5c052000 0x200>;
+		reg-names = "hecc", "hecc-ram", "mbx";
+		interrupts = <24>;
+		clocks = <&hecc_ck>;
+	};
diff --git a/Documentation/devicetree/bindings/net/dsa/lan9303.txt b/Documentation/devicetree/bindings/net/dsa/lan9303.txt
new file mode 100644
index 0000000000000000000000000000000000000000..04f2965a446768920ae90f846419d438773a633d
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/lan9303.txt
@@ -0,0 +1,105 @@
+SMSC/MicroChip LAN9303 three-port Ethernet switch
+-------------------------------------------------
+
+Required properties:
+
+- compatible: should be
+  - "smsc,lan9303-i2c" for I2C managed mode
+    or
+  - "smsc,lan9303-mdio" for MDIO managed mode
+
+Optional properties:
+
+- reset-gpios: GPIO to be used to reset the whole device
+- reset-duration: reset duration in milliseconds, defaults to 200 ms
+
+Subnodes:
+
+The integrated switch subnode should be specified according to the binding
+described in dsa/dsa.txt. The CPU port of this switch is always port 0.
+
+Note: always use 'reg = <0/1/2>;' for the three DSA ports, even if the device is
+configured to use 1/2/3 instead. This hardware configuration will be
+auto-detected and mapped accordingly.
+
+Example:
+
+I2C managed mode:
+
+	master: masterdevice@X {
+		status = "okay";
+
+		fixed-link { /* RMII fixed link to LAN9303 */
+			speed = <100>;
+			full-duplex;
+		};
+	};
+
+	switch: switch@a {
+		compatible = "smsc,lan9303-i2c";
+		reg = <0xa>;
+		status = "okay";
+		reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
+		reset-duration = <200>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 { /* RMII fixed link to master */
+				reg = <0>;
+				label = "cpu";
+				ethernet = <&master>;
+			};
+
+			port@1 { /* external port 1 */
+				reg = <1>;
+				label = "lan1";
+			};
+
+			port@2 { /* external port 2 */
+				reg = <2>;
+				label = "lan2";
+			};
+		};
+	};
+
+MDIO managed mode:
+
+	master: masterdevice@X {
+		status = "okay";
+		phy-handle = <&switch>;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			switch: switch-phy@0 {
+				compatible = "smsc,lan9303-mdio";
+				reg = <0>;
+				reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
+				reset-duration = <100>;
+
+				ports {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						reg = <0>;
+						label = "cpu";
+						ethernet = <&master>;
+					};
+
+					port@1 { /* external port 1 */
+						reg = <1>;
+						label = "lan1";
+					};
+
+					port@2 { /* external port 2 */
+						reg = <2>;
+						label = "lan2";
+					};
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a9bc27b93ee326fe2ac665ffc143b0e3e345c0a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -0,0 +1,91 @@
+Mediatek MT7530 Ethernet switch
+================================
+
+Required properties:
+
+- compatible: Must be "mediatek,mt7530".
+- #address-cells: Must be 1.
+- #size-cells: Must be 0.
+- mediatek,mcm: Boolean; if defined, indicates that the MT7530 is the part
+  on the multi-chip module inside MT7623A, rather than the standalone chip
+  used on, e.g., the MT7623N reference board.
+- core-supply: Phandle to the regulator node necessary for the core power.
+- io-supply: Phandle to the regulator node necessary for the I/O power.
+  See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
+  for details of the regulator setup on these boards.
+
+If the property mediatek,mcm isn't defined, the following property is required
+
+- reset-gpios: Should be a gpio specifier for a reset line.
+
+Otherwise, the following properties are required
+
+- resets : Phandle pointing to the system reset controller with
+  line index for the ethsys.
+- reset-names : Should be set to "mcm".
+
+Required properties for the child nodes within the ports container:
+
+- reg: Port address; must be 6 for the CPU port and from 0 to 5 for the
+  user ports.
+- phy-mode: String, must be either "trgmii" or "rgmii" for the port labeled
+  "cpu".
+
+See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+required, optional properties and how the integrated switch subnodes must
+be specified.
+
+Example:
+
+	&mdio0 {
+		switch@0 {
+			compatible = "mediatek,mt7530";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			core-supply = <&mt6323_vpa_reg>;
+			io-supply = <&mt6323_vemc3v3_reg>;
+			reset-gpios = <&pio 33 0>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					label = "lan0";
+				};
+
+				port@1 {
+					reg = <1>;
+					label = "lan1";
+				};
+
+				port@2 {
+					reg = <2>;
+					label = "lan2";
+				};
+
+				port@3 {
+					reg = <3>;
+					label = "lan3";
+				};
+
+				port@4 {
+					reg = <4>;
+					label = "wan";
+				};
+
+				port@6 {
+					reg = <6>;
+					label = "cpu";
+					ethernet = <&gmac0>;
+					phy-mode = "trgmii";
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+					};
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/ftgmac100.txt b/Documentation/devicetree/bindings/net/ftgmac100.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c1ce1680246f0134f08a8b1957d29d0f49f655ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ftgmac100.txt
@@ -0,0 +1,35 @@
+* Faraday Technology FTGMAC100 gigabit ethernet controller
+
+Required properties:
+- compatible: "faraday,ftgmac100"
+
+  Must also contain one of these if used as part of an Aspeed AST2400
+  or 2500 family SoC as they have some subtle tweaks to the
+  implementation:
+
+   - "aspeed,ast2400-mac"
+   - "aspeed,ast2500-mac"
+
+- reg: Address and length of the register set for the device
+- interrupts: Should contain ethernet controller interrupt
+
+Optional properties:
+- phy-mode: See ethernet.txt file in the same directory. If the property is
+  absent, "rgmii" is assumed. Supported values are "rgmii*" and "rmii" for
+  aspeed parts. Other (unknown) parts will accept any value.
+- use-ncsi: Use the NC-SI stack instead of an MDIO PHY. Currently assumes
+  rmii (100bT) but kept as a separate property in case NC-SI grows support
+  for a gigabit link.
+- no-hw-checksum: Used to disable HW checksum support. Here for backward
+  compatibility as the driver now should have correct defaults based on
+  the SoC.
+
+Example:
+
+	mac0: ethernet@1e660000 {
+		compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
+		reg = <0x1e660000 0x180>;
+		interrupts = <2>;
+		status = "okay";
+		use-ncsi;
+	};
diff --git a/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt b/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a1046e636fa14b494963f6770ffa578359dcd224
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
@@ -0,0 +1,28 @@
+* CA8210 IEEE 802.15.4 *
+
+Required properties:
+ - compatible:        Should be "cascoda,ca8210"
+ - reg:               Controlling chip select
+ - spi-max-frequency: Maximum clock speed, should be *less than*
+                      4000000
+ - spi-cpol:          Requires inverted clock polarity
+ - reset-gpio:        GPIO attached to reset
+ - irq-gpio:          GPIO attached to IRQ
+Optional properties:
+ - extclock-enable:   Include for the ca8210 to route its 16MHz clock
+                      to an output
+ - extclock-freq:     Frequency in Hz of the external clock
+ - extclock-gpio:     GPIO of the ca8210 to output the clock on
+
+Example:
+	ca8210@0 {
+		compatible = "cascoda,ca8210";
+		reg = <0>;
+		spi-max-frequency = <3000000>;
+		spi-cpol;
+		reset-gpio = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+		irq-gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+		extclock-enable;
+		extclock-freq = <16000000>;
+		extclock-gpio = <2>;
+	};
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
index 9417e54c26c0eba82004047d06fda4f3dd4d02b8..ccdabdcc8618c24e6d6801845aee62c54306466c 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -7,17 +7,20 @@ interface.
 
 Required properties:
 - compatible: "marvell,orion-mdio"
-- reg: address and length of the SMI register
+- reg: address and length of the MDIO registers. When an interrupt is
+  not present, the length is the size of the SMI register (4 bytes);
+  otherwise it must be 0x84 bytes to cover the interrupt control
+  registers.
 
 Optional properties:
- interrupts: interrupt line number for the SMI error/done interrupt
-- clocks: Phandle to the clock control device and gate bit
+- clocks: phandles for up to three required clocks for the MDIO instance
 
 The child nodes of the MDIO driver are the individual PHY devices
 connected to this MDIO bus. They must have a "reg" property given the
 PHY address on the MDIO bus.
 
-Example at the SoC level:
+Example at the SoC level without an interrupt property:
 
 mdio {
 	#address-cells = <1>;
 	#size-cells = <0>;
@@ -26,6 +29,16 @@ mdio {
 	reg = <0xd0072004 0x4>;
 };
 
+Example with an interrupt property:
+
+mdio {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "marvell,orion-mdio";
+	reg = <0xd0072004 0x84>;
+	interrupts = <30>;
+};
+
 And at the board level:
 
 mdio {
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 4754364df4c66adfc53e2546e31e0d124070dca1..6b4956beff8c42c3214906c83d94072fca8e9083 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -1,17 +1,28 @@
-* Marvell Armada 375 Ethernet Controller (PPv2)
+* Marvell Armada 375 Ethernet Controller (PPv2.1)
+  Marvell Armada 7K/8K Ethernet Controller (PPv2.2)
 
 Required properties:
 
-- compatible: should be "marvell,armada-375-pp2"
+- compatible: should be one of:
+    "marvell,armada-375-pp2"
+    "marvell,armada-7k-pp22"
 - reg: addresses and length of the register sets for the device.
-  Must contain the following register sets:
+  For "marvell,armada-375-pp2", must contain the following register
+  sets:
     - common controller registers
     - LMS registers
-  In addition, at least one port register set is required.
-- clocks: a pointer to the reference clocks for this device, consequently:
-  - main controller clock
-  - GOP clock
-- clock-names: names of used clocks, must be "pp_clk" and "gop_clk".
+    - one register area per Ethernet port
+  For "marvell,armada-7k-pp22", must contain the following register
+  sets:
+    - packet processor registers
+    - networking interfaces registers
+
+- clocks: pointers to the reference clocks for this device, consequently:
+  - main controller clock (for both armada-375-pp2 and armada-7k-pp22)
+  - GOP clock (for both armada-375-pp2 and armada-7k-pp22)
+  - MG clock (only for armada-7k-pp22)
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
+  "mg_clk" (the latter only for armada-7k-pp22).
 
 The ethernet ports are represented by subnodes. At least one port is
 required.
@@ -19,8 +30,10 @@ required.
 
 Required properties (port):
 
 - interrupts: interrupt for the port
-- port-id: should be '0' or '1' for ethernet ports, and '2' for the
-  loopback port
+- port-id: ID of the port from the MAC point of view
+- gop-port-id: only for marvell,armada-7k-pp22, ID of the port from the
+  GOP (Group Of Ports) point of view. This ID is used to index the
+  per-port registers in the second register area.
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties (port):
@@ -29,7 +42,7 @@ Optional properties (port):
 - phy: a phandle to a phy node defining the PHY address (as the reg
   property, a single integer).
 
-Example:
+Example for marvell,armada-375-pp2:
 
 ethernet@f0000 {
 	compatible = "marvell,armada-375-pp2";
@@ -57,3 +70,30 @@ ethernet@f0000 {
 		phy-mode = "gmii";
 	};
 };
+
+Example for marvell,armada-7k-pp22:
+
+cpm_ethernet: ethernet@0 {
+	compatible = "marvell,armada-7k-pp22";
+	reg = <0x0 0x100000>, <0x129000 0xb000>;
+	clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
+	clock-names = "pp_clk", "gop_clk", "mg_clk";
+
+	eth0: eth0 {
+		interrupts = ;
+		port-id = <0>;
+		gop-port-id = <0>;
+	};
+
+	eth1: eth1 {
+		interrupts = ;
+		port-id = <1>;
+		gop-port-id = <2>;
+	};
+
+	eth2: eth2 {
+		interrupts = ;
+		port-id = <2>;
+		gop-port-id = <3>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/mdio.txt b/Documentation/devicetree/bindings/net/mdio.txt
new file mode 100644
index 0000000000000000000000000000000000000000..96a53f89aa6e2fa7f9a0d4c6c3b18d03a3b75994
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio.txt
@@ -0,0 +1,37 @@
+Common MDIO bus properties.
+
+These are generic properties that can apply to any MDIO bus.
+
+Optional properties:
+- reset-gpios: One GPIO that controls the RESET lines of all PHYs on that MDIO
+  bus.
+- reset-delay-us: RESET pulse width in microseconds.
+
+A list of child nodes, one per device on the bus is expected. These
+should follow the generic phy.txt, or a device specific binding document.
+
+The 'reset-delay-us' indicates the RESET signal pulse width in microseconds and
+applies to all PHY devices. It must therefore be appropriately determined based
+on all PHY requirements (maximum value of all per-PHY RESET pulse widths).
+
+Example:
+This example shows these optional properties, plus other properties
+required for the TI Davinci MDIO driver.
+
+	davinci_mdio: ethernet@0x5c030000 {
+		compatible = "ti,davinci_mdio";
+		reg = <0x5c030000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
+		reset-delay-us = <2>;
+
+		ethphy0: ethernet-phy@1 {
+			reg = <1>;
+		};
+
+		ethphy1: ethernet-phy@3 {
+			reg = <3>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
index 32b35a07abe4224c298e601eb09ad50f1c0c0b2f..c627bbb3009e5a3f54472d58c2f5ef8f37f871fa 100644
--- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
@@ -5,8 +5,8 @@ Required properties:
 - spi-max-frequency: Maximum SPI frequency (<= 2000000).
 - interrupt-parent: phandle of parent interrupt handler.
 - interrupts: A single interrupt specifier.
-- ti,enable-gpios: Two GPIO entries used for 'EN' and 'EN2' pins on the
-  TRF7970A.
+- ti,enable-gpios: One or two GPIO entries used for 'EN' and 'EN2' pins on the
+  TRF7970A. EN2 is optional.
 - vin-supply: Regulator for supply voltage to VIN pin
 
 Optional SoC Specific Properties:
@@ -21,6 +21,8 @@ Optional SoC Specific Properties:
 - t5t-rmb-extra-byte-quirk: Specify that the trf7970a has the erratum
   where an extra byte is returned by Read Multiple Block commands issued
   to Type 5 tags.
+- vdd-io-supply: Regulator specifying voltage for vdd-io
+- clock-frequency: Set to specify that the input frequency to the trf7970a is 13560000 Hz or 27120000 Hz
 
 Example (for ARM-based BeagleBone with TRF7970A on SPI1):
 
@@ -39,10 +41,12 @@ Example (for ARM-based BeagleBone with TRF7970A on SPI1):
 			<&gpio2 5 GPIO_ACTIVE_LOW>;
 		vin-supply = <&ldo3_reg>;
 		vin-voltage-override = <5000000>;
+		vdd-io-supply = <&ldo2_reg>;
 		autosuspend-delay = <30000>;
 		irq-status-read-quirk;
 		en2-rf-quirk;
 		t5t-rmb-extra-byte-quirk;
+		clock-frequency = <27120000>;
 		status = "okay";
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/nokia-bluetooth.txt b/Documentation/devicetree/bindings/net/nokia-bluetooth.txt
new file mode 100644
index 0000000000000000000000000000000000000000..42be7dc9a70ba7cb0f653270e4941dc85046b6e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nokia-bluetooth.txt
@@ -0,0 +1,51 @@
+Nokia Bluetooth Chips
+---------------------
+
+Nokia phones often come with UART-connected Bluetooth chips from different
+vendors and modified device API. Those devices speak a protocol named H4+
+(also known as h4p) by Nokia, which is similar to the H4 protocol from the
+Bluetooth standard. In addition to the H4 protocol it specifies two more
+UART status lines for wakeup of UART transceivers to improve power management
+and a few new packet types used to negotiate UART speed.
+
+Required properties:
+
+ - compatible: should contain "nokia,h4p-bluetooth" as well as one of the following:
+   * "brcm,bcm2048-nokia"
+   * "ti,wl1271-bluetooth-nokia"
+ - reset-gpios: GPIO specifier, used to reset the BT module (active low)
+ - bluetooth-wakeup-gpios: GPIO specifier, used to wakeup the BT module (active high)
+ - host-wakeup-gpios: GPIO specifier, used to wakeup the host processor (active high)
+ - clock-names: should be "sysclk"
+ - clocks: should contain a clock specifier for every name in clock-names
+
+Optional properties:
+
+ - None
+
+Example:
+
+/ {
+	/* controlled (enabled/disabled) directly by BT module */
+	bluetooth_clk: vctcxo {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <38400000>;
+	};
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+
+	bluetooth {
+		compatible = "ti,wl1271-bluetooth-nokia", "nokia,h4p-bluetooth";
+
+		reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>; /* gpio26 */
+		host-wakeup-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */
+		bluetooth-wakeup-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>; /* gpio37 */
+
+		clocks = <&bluetooth_clk>;
+		clock-names = "sysclk";
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index d3bfc2b30fb5ecc07493b4c5510d5cdc32b3ff3a..c3a7be6615c547a5001c67996e18458b7f8e5b5d 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -7,9 +7,12 @@ Required properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupts: Should contain the STMMAC interrupts
-- interrupt-names: Should contain the interrupt names "macirq"
-  "eth_wake_irq" if this interrupt is supported in the "interrupts"
-  property
+- interrupt-names: Should contain a list of interrupt names corresponding to
+  the interrupts in the interrupts property, if available.
+  Valid interrupt names are:
+  - "macirq" (combined signal for various interrupt events)
+  - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
+  - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
 - phy-mode: See ethernet.txt file in the same directory.
 - snps,reset-gpio 	gpio number for phy reset.
 - snps,reset-active-low boolean flag to indicate if phy reset is active low.
@@ -28,9 +31,9 @@ Optional properties:
   clocks may be specified in derived bindings.
 - clock-names: One name for each entry in the clocks property, the first one
   should be "stmmaceth" and the second one should be "pclk".
-- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
-  available this clock is used for programming the Timestamp Addend Register.
-  If not passed then the system clock will be used and this is fine on some
+- ptp_ref: this is the PTP reference clock; if PTP is available, this clock
+  is used for programming the Timestamp Addend Register. If not passed then
+  the system clock will be used and this is fine on some
 platforms.
 - tx-fifo-depth: See ethernet.txt file in the same directory
 - rx-fifo-depth: See ethernet.txt file in the same directory
@@ -72,7 +75,45 @@ Optional properties:
 - snps,mb: mixed-burst
 - snps,rb: rebuild INCRx Burst
 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
-
+- Multiple RX Queues parameters: below the list of all the parameters to
+  configure the multiple RX queues:
+  - snps,rx-queues-to-use: number of RX queues to be used in the driver
+  - Choose one of these RX scheduling algorithms:
+    - snps,rx-sched-sp: Strict priority
+    - snps,rx-sched-wsp: Weighted Strict priority
+  - For each RX queue
+    - Choose one of these modes:
+      - snps,dcb-algorithm: Queue to be enabled as DCB
+      - snps,avb-algorithm: Queue to be enabled as AVB
+    - snps,map-to-dma-channel: Channel to map
+    - Specify specific packet routing:
+      - snps,route-avcp: AV Untagged Control packets
+      - snps,route-ptp: PTP Packets
+      - snps,route-dcbcp: DCB Control Packets
+      - snps,route-up: Untagged Packets
+      - snps,route-multi-broad: Multicast & Broadcast Packets
+    - snps,priority: RX queue priority (Range: 0x0 to 0xF)
+- Multiple TX Queues parameters: below the list of all the parameters to
+  configure the multiple TX queues:
+  - snps,tx-queues-to-use: number of TX queues to be used in the driver
+  - Choose one of these TX scheduling algorithms:
+    - snps,tx-sched-wrr: Weighted Round Robin
+    - snps,tx-sched-wfq: Weighted Fair Queuing
+    - snps,tx-sched-dwrr: Deficit Weighted Round Robin
+    - snps,tx-sched-sp: Strict priority
+  - For each TX queue
+    - snps,weight: TX queue weight (if using a DCB weight algorithm)
+    - Choose one of these modes:
+      - snps,dcb-algorithm: TX queue will be working in DCB
+      - snps,avb-algorithm: TX queue will be working in AVB
+        [Attention] Queue 0 is reserved for legacy traffic
+        and so no AVB is available in this queue.
+    - Configure Credit-Based Shaper (if AVB Mode selected):
+      - snps,send_slope: send slope value
+      - snps,idle_slope: idle slope value
+      - snps,high_credit: high credit value
+      - snps,low_credit: low credit value
+    - snps,priority: TX queue priority (Range: 0x0 to 0xF)
 
 Examples:
 
	stmmac_axi_setup: stmmac-axi-config {
		snps,wr_osr_lmt = <0xf>;
		snps,rd_osr_lmt = <0xf>;
		snps,blen = <256 128 64 32 0 0 0>;
	};
 
+	mtl_rx_setup: rx-queues-config {
+		snps,rx-queues-to-use = <1>;
+		snps,rx-sched-sp;
+		queue0 {
+			snps,dcb-algorithm;
+			snps,map-to-dma-channel = <0x0>;
+			snps,priority = <0x0>;
+		};
+	};
+
+	mtl_tx_setup: tx-queues-config {
+		snps,tx-queues-to-use = <2>;
+		snps,tx-sched-wrr;
+		queue0 {
+			snps,weight = <0x10>;
+			snps,dcb-algorithm;
+			snps,priority = <0x0>;
+		};
+
+		queue1 {
+			snps,avb-algorithm;
+			snps,send_slope = <0x1000>;
+			snps,idle_slope = <0x1000>;
+			snps,high_credit = <0x3E800>;
+			snps,low_credit = <0xFFC18000>;
+			snps,priority = <0x1>;
+		};
+	};
+
	gmac0: ethernet@e0800000 {
		compatible = "st,spear600-gmac";
		reg = <0xe0800000 0x8000>;
		interrupt-parent = <&vic1>;
-		interrupts = <24 23>;
-		interrupt-names = "macirq", "eth_wake_irq";
+		interrupts = <24 23 22>;
+		interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
		mac-address = [000000000000]; /* Filled in by U-Boot */
		max-frame-size = <3800>;
		phy-mode = "gmii";
@@ -104,4 +174,6 @@ Examples:
		phy1: ethernet-phy@0 {
		};
	};
+	snps,mtl-rx-config = <&mtl_rx_setup>;
+	snps,mtl-tx-config = <&mtl_tx_setup>;
 };
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cbad73a84ac43250865fd6e47dc47a388e082342
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
@@ -0,0 +1,35 @@
+TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
+
+TI WiLink devices have a UART interface for providing Bluetooth, FM radio,
+and GPS over what's called "shared transport". The shared transport is
+standard BT HCI protocol with additional channels for the other functions.
+
+These devices also have a separate WiFi interface as described in
+wireless/ti,wlcore.txt.
+
+This binding follows the UART slave device binding in
+../serial/slave-device.txt.
+
+Required properties:
+ - compatible: should be one of the following:
+    "ti,wl1271-st"
+    "ti,wl1273-st"
+    "ti,wl1831-st"
+    "ti,wl1835-st"
+    "ti,wl1837-st"
+
+Optional properties:
+ - enable-gpios : GPIO signal controlling enabling of BT. Active high.
+ - vio-supply : Vio input supply (1.8V)
+ - vbat-supply : Vbat input supply (2.9-4.8V)
+
+Example:
+
+&serial0 {
+	compatible = "ns16550a";
+	...
+	bluetooth {
+		compatible = "ti,wl1835-st";
+		enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 830c9987fa02bc5fd639b530d619f5474fbfdb93..b685d13539e81f32273cffb3ac3009918fc19677 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -51,6 +51,7 @@ brcm	Broadcom Corporation
 buffalo	Buffalo, Inc.
 calxeda	Calxeda
 capella	Capella Microsystems, Inc
+cascoda	Cascoda, Ltd.
 cavium	Cavium, Inc.
 cdns	Cadence Design Systems Inc.
 ceva	Ceva, Inc.
diff --git a/Documentation/driver-api/80211/cfg80211.rst b/Documentation/driver-api/80211/cfg80211.rst
index eca534ab617259a63bdf0d63f6a14319dedc7c0d..8ffac57e1f5b7ddd0b56547258f604d06816e9bd 100644
--- a/Documentation/driver-api/80211/cfg80211.rst
+++ b/Documentation/driver-api/80211/cfg80211.rst
@@ -2,6 +2,9 @@
 cfg80211 subsystem
 ==================
 
+.. kernel-doc:: include/net/cfg80211.h
+   :doc: Introduction
+
 Device registration
 ===================
@@ -179,6 +182,12 @@ Actions and configuration
 .. kernel-doc:: include/net/cfg80211.h
    :functions: cfg80211_ibss_joined
 
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_connect_resp_params
+
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: cfg80211_connect_done
+
 .. kernel-doc:: include/net/cfg80211.h
    :functions: cfg80211_connect_result
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 683ada5ad81deadd19e84ab91c324de02aa36c68..b69b205501dec6a7571489a51bd9718c65c4270d 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -595,10 +595,9 @@ got from bpf_prog_create(), and 'ctx' the given context (e.g. skb pointer). All
 constraints and restrictions from bpf_check_classic() apply before a
 conversion to the new layout is being done behind the scenes!
 
-Currently, the classic BPF format is being used for JITing on most of the
-architectures. x86-64, aarch64 and s390x perform JIT compilation from eBPF
-instruction set, however, future work will migrate other JIT compilers as well,
-so that they will profit from the very same benefits.
+Currently, the classic BPF format is being used for JITing on most 32-bit
+architectures, whereas x86-64, aarch64, s390x, powerpc64 and sparc64 perform
+JIT compilation from the eBPF instruction set.
 
 Some core changes of the new internal format:
diff --git a/Documentation/networking/i40e.txt b/Documentation/networking/i40e.txt
index a251bf4fe9c9281baf497f17bff8630b4eb21705..57e616ed10b0b54fc8ca2ad1ea405f742db2ab1e 100644
--- a/Documentation/networking/i40e.txt
+++ b/Documentation/networking/i40e.txt
@@ -63,6 +63,78 @@ Additional Configurations
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool
 
+  Flow Director n-tuple traffic filters (FDir)
+  --------------------------------------------
+  The driver utilizes the ethtool interface for configuring ntuple filters,
+  via "ethtool -N <device> <filter>".
+
+  The sctp4, ip4, udp4, and tcp4 flow types are supported with the standard
+  fields including src-ip, dst-ip, src-port and dst-port. The driver only
+  supports fully enabling or fully masking the fields, so use of the mask
+  fields for partial matches is not supported.
+
+  Additionally, the driver supports using the action to specify filters for a
+  Virtual Function. You can specify the action as a 64-bit value, where the
+  lower 32 bits represent the queue number, while the next 8 bits represent
+  which VF. Note that 0 is the PF, so the VF identifier is offset by 1. For
+  example:
+
+    ... action 0x800000002 ...
+
+  would direct traffic for Virtual Function 7 (8 minus 1) to queue 2 of
+  that VF.
+
+  The driver also supports using the user-defined field to specify 2 bytes of
+  arbitrary data to match within the packet payload in addition to the regular
+  fields. The data is specified in the lower 32 bits of the user-def field in
+  the following way:
+
+  +----------------------------+---------------------------+
+  | 31    28    24    20    16 | 15    12    8    4       0|
+  +----------------------------+---------------------------+
+  | offset into packet payload | 2 bytes of flexible data  |
+  +----------------------------+---------------------------+
+
+  As an example,
+
+    ... user-def 0x4FFFF ....
+
+  means to match the value 0xFFFF 4 bytes into the packet payload. Note that
+  the offset is based on the beginning of the payload, and not the beginning
+  of the packet. Thus
+
+    flow-type tcp4 ... user-def 0x8BEAF ....
+
+  would match TCP/IPv4 packets which have the value 0xBEAF 8 bytes into the
+  TCP/IPv4 payload.
+
+  For ICMP, the hardware parses the ICMP header as 4 bytes of header and 4
+  bytes of payload, so if you want to match an ICMP frame's payload you may
+  need to add 4 to the offset in order to match the data.
+
+  Furthermore, the offset can only be up to a value of 64, as the hardware
+  will only read up to 64 bytes of data from the payload. It must also be even
+  as the flexible data is 2 bytes long and must be aligned to byte 0 of the
+  packet payload.
+
+  When programming filters, the hardware is limited to using a single input
+  set for each flow type. This means that it is an error to program two
+  different filters with the same type that don't match on the same fields.
+  Thus the second of the following two commands will fail:
+
+    ethtool -N <device> flow-type tcp4 src-ip 192.168.0.7 action 5
+    ethtool -N <device> flow-type tcp4 dst-ip 192.168.15.18 action 1
+
+  This is because the first filter will be accepted and reprogram the input
+  set for TCPv4 filters, but the second filter will be unable to reprogram the
+  input set until all the conflicting TCPv4 filters are first removed.
+
+  Note that the user-defined flexible offset is also considered part of the
+  input set and cannot be programmed separately for multiple filters of the
+  same type. However, the flexible data is not part of the input set and
+  multiple filters may use the same offset but match against different data.
+
 Data Center Bridging (DCB)
 --------------------------
 DCB configuration is not currently supported.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ab02304613771b6f6e120da96fb677c293d032d2..974ab47ae53a81c27b2b57533db813288139fd7b 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -73,6 +73,14 @@ fib_multipath_use_neigh - BOOLEAN
	0 - disabled
	1 - enabled
 
+fib_multipath_hash_policy - INTEGER
+	Controls which hash policy to use for multipath routes. Only valid
+	for kernels built with CONFIG_IP_ROUTE_MULTIPATH enabled.
+	Default: 0 (Layer 3)
+	Possible values:
+	0 - Layer 3
+	1 - Layer 4
+
 route/max_size - INTEGER
	Maximum number of routes allowed in the kernel.  Increase this when
	using large numbers of interfaces and/or routes.
@@ -594,6 +602,14 @@ tcp_fastopen - INTEGER
	Note that that additional client or server features are only
	effective if the basic support (0x1 and 0x2) are enabled respectively.
 
+tcp_fastopen_blackhole_timeout_sec - INTEGER
+	Initial time period in seconds to disable Fastopen on active TCP sockets
+	when a TFO firewall blackhole issue happens.
+	This time period will grow exponentially when more blackhole issues
+	get detected right after Fastopen is re-enabled and will reset to
+	initial value when the blackhole issue goes away.
+	By default, it is set to 1 hour.
+
 tcp_syn_retries - INTEGER
	Number of times initial SYNs for an active TCP connection attempt
	will be retransmitted. Should not be higher than 127. Default value
@@ -640,11 +656,6 @@ tcp_tso_win_divisor - INTEGER
	building larger TSO frames.
	Default: 3
 
-tcp_tw_recycle - BOOLEAN
-	Enable fast recycling TIME-WAIT sockets. Default value is 0.
-	It should not be changed without advice/request of technical
-	experts.
-
 tcp_tw_reuse - BOOLEAN
	Allow to reuse TIME-WAIT sockets for new connections when it is
	safe from protocol viewpoint. Default value is 0.
@@ -853,12 +864,21 @@ ip_dynaddr - BOOLEAN
 ip_early_demux - BOOLEAN
	Optimize input packet processing down to one demux for
	certain kinds of local sockets.  Currently we only do this
-	for established TCP sockets.
+	for established TCP and connected UDP sockets.
 
	It may add an additional cost for pure routing workloads that
	reduces overall throughput, in such case you should disable it.
	Default: 1
 
+tcp_early_demux - BOOLEAN
+	Enable early demux for established TCP sockets.
+	Default: 1
+
+udp_early_demux - BOOLEAN
+	Enable early demux for connected UDP sockets. Disable this if
+	your system could experience more unconnected load.
+	Default: 1
+
 icmp_echo_ignore_all - BOOLEAN
	If set non-zero, then the kernel will ignore all ICMP ECHO
	requests sent to it.
@@ -1458,11 +1478,20 @@ accept_ra_pinfo - BOOLEAN
	Functional default: enabled if accept_ra is enabled.
			    disabled if accept_ra is disabled.
 
+accept_ra_rt_info_min_plen - INTEGER
+	Minimum prefix length of Route Information in RA.
+
+	Route Information w/ prefix smaller than this variable shall
+	be ignored.
+
+	Functional default: 0 if accept_ra_rtr_pref is enabled.
+			    -1 if accept_ra_rtr_pref is disabled.
+
 accept_ra_rt_info_max_plen - INTEGER
	Maximum prefix length of Route Information in RA.
 
-	Route Information w/ prefix larger than or equal to this
-	variable shall be ignored.
+	Route Information w/ prefix larger than this variable shall
+	be ignored.
 
	Functional default: 0 if accept_ra_rtr_pref is enabled.
			    -1 if accept_ra_rtr_pref is disabled.
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
index e6b1c025fdd89362a40e7aa676859a3a83646e7a..056898685d408e463f1a191f36e2384e9974ab6d 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -175,6 +175,14 @@ nat_icmp_send - BOOLEAN
	for VS/NAT when the load balancer receives packets from real
	servers but the connection entries don't exist.
 
+pmtu_disc - BOOLEAN
+	0 - disabled
+	not 0 - enabled (default)
+
+	By default, reject with FRAG_NEEDED all DF packets that exceed
+	the PMTU, irrespective of the forwarding method. For TUN method
+	the flag can be disabled to fragment such packets.
+
 secure_tcp - INTEGER
	0  - disabled (default)
 
@@ -185,15 +193,59 @@ secure_tcp - INTEGER
	The value definition is the same as that of drop_entry and
	drop_packet.
 
-sync_threshold - INTEGER
-	default 3
+sync_threshold - vector of 2 INTEGERs: sync_threshold, sync_period
+	default 3 50
+
+	It sets synchronization threshold, which is the minimum number
+	of incoming packets that a connection needs to receive before
+	the connection will be synchronized. A connection will be
+	synchronized every time the number of its incoming packets
+	modulus sync_period equals the threshold. The range of the
+	threshold is from 0 to sync_period.
+
+	When sync_period and sync_refresh_period are 0, send sync only
+	for state changes or only once when pkts matches sync_threshold
+
+sync_refresh_period - UNSIGNED INTEGER
+	default 0
+
+	In seconds, difference in reported connection timer that triggers
+	new sync message. It can be used to avoid sync messages for the
+	specified period (or half of the connection timeout if it is lower)
+	if connection state is not changed since last sync.
+
+	This is useful for normal connections with high traffic to reduce
+	sync rate. Additionally, retry sync_retries times with period of
+	sync_refresh_period/8.
+
+sync_retries - INTEGER
+	default 0
+
+	Defines sync retries with period of sync_refresh_period/8. Useful
+	to protect against loss of sync messages. The range of the
+	sync_retries is from 0 to 3.
+
+sync_qlen_max - UNSIGNED LONG
+
+	Hard limit for queued sync messages that are not sent yet. It
+	defaults to 1/32 of the memory pages but actually represents
+	number of messages. It will protect us from allocating large
+	parts of memory when the sending rate is lower than the queuing
+	rate.
+
+sync_sock_size - INTEGER
+	default 0
+
+	Configuration of SNDBUF (master) or RCVBUF (slave) socket limit.
+	Default value is 0 (preserve system defaults).
+
+sync_ports - INTEGER
+	default 1
 
-	It sets synchronization threshold, which is the minimum number
-	of incoming packets that a connection needs to receive before
-	the connection will be synchronized. A connection will be
-	synchronized, every time the number of its incoming packets
-	modulus 50 equals the threshold. The range of the threshold is
-	from 0 to 49.
+	The number of threads that master and backup servers can use for
+	sync traffic. Every thread will use a single UDP port, thread 0 will
+	use the default port 8848 while the last thread will use port
+	8848+sync_ports-1.
 
 snat_reroute - BOOLEAN
	0 - disabled
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 15d8d16934fd13727bb35e9c5078484127bbfa30..2f24a1912a48c73d360416d9889dd47c25bbbe28 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -19,6 +19,25 @@ platform_labels - INTEGER
	Possible values: 0 - 1048575
	Default: 0
 
+ip_ttl_propagate - BOOL
+	Control whether TTL is propagated from the IPv4/IPv6 header to
+	the MPLS header on imposing labels and propagated from the
+	MPLS header to the IPv4/IPv6 header on popping the last label.
+
+	If disabled, the MPLS transport network will appear as a
+	single hop to transit traffic.
+
+	0 - disabled / RFC 3443 [Short] Pipe Model
+	1 - enabled / RFC 3443 Uniform Model (default)
+
+default_ttl - INTEGER
+	Default TTL value to use for MPLS packets where it cannot be
+	propagated from an IP header, either because one isn't present
+	or ip_ttl_propagate has been disabled.
+
+	Possible values: 1 - 255
+	Default: 255
+
 conf/<interface>/input - BOOL
	Control whether packets can be input on this interface.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 2bbac05ab9e26d4b27daf0db23e820a94e23ad6e..3e7b946dea2778441ce0599643535188699e16ca 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -13,43 +13,43 @@ an example setup using a data-center-class switch ASIC chip.  Other setups
 with SR-IOV or soft switches, such as OVS, are possible.
 
-                             User-space tools                                 
-                                                                              
-       user space                   |                                         
-      +-------------------------------------------------------------------+  
-       kernel                       | Netlink                                
-                                    |                                        
-                     +--------------+-------------------------------+        
-                     |         Network stack                        |        
-                     |           (Linux)                            |        
-                     |                                              |        
-                     +----------------------------------------------+        
-                                                                              
+                             User-space tools
+
+       user space                   |
+      +-------------------------------------------------------------------+
+       kernel                       | Netlink
+                                    |
+                     +--------------+-------------------------------+
+                     |         Network stack                        |
+                     |           (Linux)                            |
+                     |                                              |
+                     +----------------------------------------------+
+
                            sw1p2     sw1p4     sw1p6
-                      sw1p1  + sw1p3 +  sw1p5 +         eth1                  
-                        +    |    +    |    +    |            +               
-                        |    |    |    |    |    |            |               
-                     +--+----+----+----+-+--+----+---+  +-----+-----+         
-                     |         Switch driver         |  |    mgmt   |         
-                     |        (this document)        |  |   driver  |         
-                     |                               |  |           |         
-                     +--------------+----------------+  +-----------+         
-                                    |                                         
-       kernel                       | HW bus (eg PCI)                         
-      +-------------------------------------------------------------------+   
-       hardware                     |                                         
-                     +--------------+---+------------+                        
-                     |         Switch device (sw1)   |                        
-                     |  +----+                       +--------+               
-                     |  |    v offloaded data path   | mgmt port              
-                     |  |    |                       |                        
-                     +--|----|----+----+----+----+---+                        
-                        |    |    |    |    |    |                            
-                        +    +    +    +    +    +                            
+                      sw1p1  + sw1p3 +  sw1p5 +         eth1
+                        +    |    +    |    +    |            +
+                        |    |    |    |    |    |            |
+                     +--+----+----+----+-+--+----+---+  +-----+-----+
+                     |         Switch driver         |  |    mgmt   |
+                     |        (this document)        |  |   driver  |
+                     |                               |  |           |
+                     +--------------+----------------+  +-----------+
+                                    |
+       kernel                       | HW bus (eg PCI)
+      +-------------------------------------------------------------------+
+       hardware                     |
+                     +--------------+---+------------+
+                     |         Switch device (sw1)   |
+                     |  +----+                       +--------+
+                     |  |    v offloaded data path   | mgmt port
+                     |  |    |                       |
+                     +--|----|----+----+----+----+---+
+                        |    |    |    |    |    |
+                        +    +    +    +    +    +
                       p1   p2   p3   p4   p5   p6
-                                                                              
-                             front-panel ports                                
-                                                                              
+
+                             front-panel ports
+
 Fig 1.
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 2ebabc93014a2442824d2da7c79b28d53eaa4b41..14db18c970b1b048c0ecc50d75e10efb674715ca 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -188,7 +188,23 @@ netdev_budget
 
 Maximum number of packets taken from all interfaces in one polling cycle (NAPI
 poll). In one polling cycle interfaces which are registered to polling are
-probed in a round-robin manner.
+probed in a round-robin manner. Also, a polling cycle may not exceed
+netdev_budget_usecs microseconds, even if netdev_budget has not been
+exhausted.
+
+netdev_budget_usecs
+-------------------
+
+Maximum number of microseconds in one NAPI polling cycle. Polling
+will exit when either netdev_budget_usecs have elapsed during the
+poll cycle or the number of packets processed reaches netdev_budget.
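+
+A minimal usage sketch (the 8000 below is illustrative, not a recommendation):
+
+	# read the current values of both NAPI budget knobs
+	sysctl net.core.netdev_budget net.core.netdev_budget_usecs
+	# cap one NAPI polling cycle at 8000 microseconds
+	sysctl -w net.core.netdev_budget_usecs=8000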
 netdev_max_backlog
 ------------------
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 756da3f484d12f9353f44e864cf2e16ef3af406b..b1036a7df39e1af2c67f32e1e64618953a6cbbfe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -896,12 +896,19 @@ F:	arch/arm64/boot/dts/apm/
 APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
 M:	Iyappan Subramanian <isubramanian@apm.com>
 M:	Keyur Chudgar <kchudgar@apm.com>
+M:	Quan Nguyen <qnguyen@apm.com>
 S:	Supported
 F:	drivers/net/ethernet/apm/xgene/
 F:	drivers/net/phy/mdio-xgene.c
 F:	Documentation/devicetree/bindings/net/apm-xgene-enet.txt
 F:	Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
 
+APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
+M:	Iyappan Subramanian <isubramanian@apm.com>
+M:	Keyur Chudgar <kchudgar@apm.com>
+S:	Supported
+F:	drivers/net/ethernet/apm/xgene-v2/
+
 APPLIED MICRO (APM) X-GENE SOC PMU
 M:	Tai Nguyen <ttnguyen@apm.com>
 S:	Supported
@@ -2944,6 +2951,15 @@ W:	http://www.linux-c6x.org/wiki/index.php/Main_Page
 S:	Maintained
 F:	arch/c6x/
 
+CA8210 IEEE-802.15.4 RADIO DRIVER
+M:	Harry Morris <h.morris@cascoda.com>
+M:	linuxdev@cascoda.com
+L:	linux-wpan@vger.kernel.org
+W:	https://github.com/Cascoda/ca8210-linux.git
+S:	Maintained
+F:	drivers/net/ieee802154/ca8210.c
+F:	Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
+
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com (moderated for non-subscribers)
@@ -7884,7 +7900,7 @@ S:	Maintained
 F:	drivers/net/ethernet/marvell/mvneta.*
 
 MARVELL MWIFIEX WIRELESS DRIVER
-M:	Amitkumar Karwar <akarwar@marvell.com>
+M:	Amitkumar Karwar <amitkarwar@gmail.com>
 M:	Nishant Sarmukadam <nishants@marvell.com>
 M:	Ganapathi Bhat <gbhat@marvell.com>
 M:	Xinming Hu <huxm@marvell.com>
@@ -8829,12 +8845,12 @@ F:	net/core/flow.c
 F:	net/xfrm/
 F:	net/key/
 F:	net/ipv4/xfrm*
-F:	net/ipv4/esp4.c
+F:	net/ipv4/esp4*
 F:	net/ipv4/ah4.c
 F:	net/ipv4/ipcomp.c
 F:	net/ipv4/ip_vti.c
 F:	net/ipv6/xfrm*
-F:	net/ipv6/esp6.c
+F:	net/ipv6/esp6*
 F:	net/ipv6/ah6.c
 F:	net/ipv6/ipcomp6.c
 F:	net/ipv6/ip6_vti.c
@@ -8888,8 +8904,6 @@ S:	Supported
 F:	drivers/net/ethernet/qlogic/netxen/
 
 NFC SUBSYSTEM
-M:	Lauro Ramos Venancio <lauro.venancio@openbossa.org>
-M:	Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 M:	Samuel Ortiz <sameo@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
 L:	linux-nfc@lists.01.org (subscribers-only)
@@ -11125,6 +11139,12 @@ F:	include/linux/dma/dw.h
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
+M:	Jie Deng <jiedeng@synopsys.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/ethernet/synopsys/
+
 SYNOPSYS DESIGNWARE I2C DRIVER
 M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -12229,12 +12249,19 @@ F:	Documentation/accounting/taskstats*
 F:	include/linux/taskstats*
 F:	kernel/taskstats.c
 
-TC CLASSIFIER
+TC subsystem
 M:	Jamal Hadi Salim <jhs@mojatatu.com>
+M:	Cong Wang <xiyou.wangcong@gmail.com>
+M:	Jiri Pirko <jiri@resnulli.us>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	include/net/pkt_cls.h
+F:	include/net/pkt_sched.h
+F:	include/net/tc_act/
 F:	include/uapi/linux/pkt_cls.h
+F:	include/uapi/linux/pkt_sched.h
+F:	include/uapi/linux/tc_act/
+F:	include/uapi/linux/tc_ematch/
 F:	net/sched/
 
 TCP LOW PRIORITY MODULE
@@ -13330,8 +13357,11 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	include/linux/virtio_vsock.h
 F:	include/uapi/linux/virtio_vsock.h
+F:	include/uapi/linux/vsockmon.h
+F:	net/vmw_vsock/af_vsock_tap.c
 F:	net/vmw_vsock/virtio_transport_common.c
 F:	net/vmw_vsock/virtio_transport.c
+F:	drivers/net/vsockmon.c
 F:	drivers/vhost/vsock.c
 F:	drivers/vhost/vsock.h
 
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index afc901b7a6f6e68c819aec1ab9199806f24fc1c1..148d7a32754e343397e84bb077afee8b82ecf3c6 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -99,4 +99,10 @@
 
 #define SCM_TIMESTAMPING_OPT_STATS	54
 
+#define SO_MEMINFO		55
+
+#define SO_INCOMING_NAPI_ID	56
+
+#define SO_COOKIE		57
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 0b4932cc02a8d8bb66165e40c11113647b92633f..c79c937b0a8aab92f2227deb8bf2d756542b57e8 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -42,18 +42,16 @@
		};
 
		mac0: ethernet@1e660000 {
-			compatible = "faraday,ftgmac100";
+			compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
			reg = <0x1e660000 0x180>;
			interrupts = <2>;
-			no-hw-checksum;
			status = "disabled";
		};
 
		mac1: ethernet@1e680000 {
-			compatible = "faraday,ftgmac100";
+			compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
			reg = <0x1e680000 0x180>;
			interrupts = <3>;
-			no-hw-checksum;
			status = "disabled";
		};
 
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index b664fe380936390a6f1ecdee49f157763e858513..b6596633036cd3a66c29e0c4c28663980fa7c200 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -33,18 +33,16 @@
		};
 
		mac0: ethernet@1e660000 {
-			compatible = "faraday,ftgmac100";
+			compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
			reg = <0x1e660000 0x180>;
			interrupts = <2>;
-			no-hw-checksum;
			status = "disabled";
		};
 
		mac1: ethernet@1e680000 {
-			compatible = "faraday,ftgmac100";
+			compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
			reg = <0x1e680000 0x180>;
			interrupts = <3>;
-			no-hw-checksum;
			status = "disabled";
		};
 
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index dba3c131c62c09625c48cfabb7019e683eb060e7..9b4ba71692101077d5a77a6b18749b7c6c4d26f6 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -98,6 +98,11 @@
		assigned-clocks = <&sys_ctrl HI6220_UART1_SRC>;
		assigned-clock-rates = <150000000>;
		status = "ok";
+
+		bluetooth {
+			compatible = "ti,wl1835-st";
+			enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+		};
	};
 
	uart2: uart@f7112000 {
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index aecc07e09a18bd911a6df59554bf5261aa9f8ec7..29cb2ca756f6eea49ad3fd8a6a95a2cac34a5449 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -80,6 +80,7 @@ enum aarch64_insn_register_type {
	AARCH64_INSN_REGTYPE_RM,
	AARCH64_INSN_REGTYPE_RD,
	AARCH64_INSN_REGTYPE_RA,
+	AARCH64_INSN_REGTYPE_RS,
 };
 
 enum aarch64_insn_register {
@@ -188,6 +189,8 @@ enum aarch64_insn_ldst_type {
	AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+	AARCH64_INSN_LDST_LOAD_EX,
+	AARCH64_INSN_LDST_STORE_EX,
 };
 
 enum aarch64_insn_adsb_type {
@@ -240,6 +243,23 @@ enum aarch64_insn_logic_type {
	AARCH64_INSN_LOGIC_BIC_SETFLAGS
 };
 
+enum aarch64_insn_prfm_type {
+	AARCH64_INSN_PRFM_TYPE_PLD,
+	AARCH64_INSN_PRFM_TYPE_PLI,
+	AARCH64_INSN_PRFM_TYPE_PST,
+};
+
+enum aarch64_insn_prfm_target {
+	AARCH64_INSN_PRFM_TARGET_L1,
+	AARCH64_INSN_PRFM_TARGET_L2,
+	AARCH64_INSN_PRFM_TARGET_L3,
+};
+
+enum aarch64_insn_prfm_policy {
+	AARCH64_INSN_PRFM_POLICY_KEEP,
+	AARCH64_INSN_PRFM_POLICY_STRM,
+};
+
 #define __AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -248,6 +268,7 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 
 __AARCH64_INSN_FUNCS(adr,	0x9F000000, 0x10000000)
 __AARCH64_INSN_FUNCS(adrp,	0x9F000000, 0x90000000)
+__AARCH64_INSN_FUNCS(prfm,	0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) @@ -357,6 +378,11 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, int offset, enum aarch64_insn_variant variant, enum aarch64_insn_ldst_type type); +u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, + enum aarch64_insn_register base, + enum aarch64_insn_register state, + enum aarch64_insn_size_type size, + enum aarch64_insn_ldst_type type); u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, enum aarch64_insn_register src, int imm, enum aarch64_insn_variant variant, @@ -397,6 +423,10 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, int shift, enum aarch64_insn_variant variant, enum aarch64_insn_logic_type type); +u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, + enum aarch64_insn_prfm_type type, + enum aarch64_insn_prfm_target target, + enum aarch64_insn_prfm_policy policy); s32 aarch64_get_branch_offset(u32 insn); u32 aarch64_set_branch_offset(u32 insn, s32 offset); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 3a63954a8b143e75f9ccde84ca783af3942e9726..b884a926a632e534eca9a8e1e0aff161ade5933c 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -474,6 +474,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type, shift = 10; break; case AARCH64_INSN_REGTYPE_RM: + case AARCH64_INSN_REGTYPE_RS: shift = 16; break; default: @@ -757,6 +758,111 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, offset >> shift); } +u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, + enum aarch64_insn_register base, + enum aarch64_insn_register state, + enum aarch64_insn_size_type size, + enum aarch64_insn_ldst_type type) +{ + u32 insn; + + switch (type) { + case AARCH64_INSN_LDST_LOAD_EX: + insn = aarch64_insn_get_load_ex_value(); + break; + case AARCH64_INSN_LDST_STORE_EX: + insn = aarch64_insn_get_store_ex_value(); + break; + default: + pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } + + insn = aarch64_insn_encode_ldst_size(size, insn); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, + reg); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + base); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, + AARCH64_INSN_REG_ZR); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, + state); +} + +static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type, + enum aarch64_insn_prfm_target target, + enum aarch64_insn_prfm_policy policy, + u32 insn) +{ + u32 imm_type = 0, imm_target = 0, imm_policy = 0; + + switch (type) { + case AARCH64_INSN_PRFM_TYPE_PLD: + break; + case AARCH64_INSN_PRFM_TYPE_PLI: + imm_type = BIT(0); + break; + case AARCH64_INSN_PRFM_TYPE_PST: + imm_type = BIT(1); + break; + default: + pr_err("%s: unknown prfm type encoding %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } + + switch (target) { + case AARCH64_INSN_PRFM_TARGET_L1: + break; + case AARCH64_INSN_PRFM_TARGET_L2: + imm_target = BIT(0); + break; + case AARCH64_INSN_PRFM_TARGET_L3: + imm_target = BIT(1); + break; + default: + pr_err("%s: unknown prfm target encoding %d\n", __func__, target); + return AARCH64_BREAK_FAULT; + } + + switch (policy) { + case AARCH64_INSN_PRFM_POLICY_KEEP: + break; + case 
AARCH64_INSN_PRFM_POLICY_STRM: + imm_policy = BIT(0); + break; + default: + pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy); + return AARCH64_BREAK_FAULT; + } + + /* In this case, imm5 is encoded into Rt field. */ + insn &= ~GENMASK(4, 0); + insn |= imm_policy | (imm_target << 1) | (imm_type << 3); + + return insn; +} + +u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, + enum aarch64_insn_prfm_type type, + enum aarch64_insn_prfm_target target, + enum aarch64_insn_prfm_policy policy) +{ + u32 insn = aarch64_insn_get_prfm_value(); + + insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn); + + insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn); + + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + base); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0); +} + u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, enum aarch64_insn_register src, int imm, enum aarch64_insn_variant variant, diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index 7c16e547ccb22d1ef4eaf5cc9c91cc8540ae8581..b02a9268dfbf1093016dc3609e79681fa6da9dba 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -83,6 +83,25 @@ /* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */ #define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX) +/* Load/store exclusive */ +#define A64_SIZE(sf) \ + ((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32) +#define A64_LSX(sf, Rt, Rn, Rs, type) \ + aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \ + AARCH64_INSN_LDST_##type) +/* Rt = [Rn]; (atomic) */ +#define A64_LDXR(sf, Rt, Rn) \ + A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX) +/* [Rn] = Rt; (atomic) Rs = [state] */ +#define A64_STXR(sf, Rt, Rn, Rs) \ + A64_LSX(sf, Rt, Rn, Rs, STORE_EX) + +/* Prefetch */ +#define A64_PRFM(Rn, type, target, policy) \ + aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \ + AARCH64_INSN_PRFM_TARGET_##target, \ + AARCH64_INSN_PRFM_POLICY_##policy) + /* Add/subtract (immediate) */ #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a785554916c06caa412564338f7728d655a899eb..d68abde52740d7b55f89dea8daf84f3109b0e17c 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -321,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; const bool is64 = BPF_CLASS(code) == BPF_ALU64; + const bool isdw = BPF_SIZE(code) == BPF_DW; u8 jmp_cond; s32 jmp_offset; @@ -604,15 +605,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const struct bpf_insn insn1 = insn[1]; u64 imm64; - if (insn1.code != 0 || insn1.src_reg != 0 || - insn1.dst_reg != 0 || insn1.off != 0) { - /* Note: verifier in BPF core must catch invalid - * instructions. 
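The substantive change to the arm64 JIT sits a few hunks below: BPF_STX | BPF_XADD no longer bails out through the notyet label but is emitted as a PRFM prefetch followed by an LDXR/ADD/STXR/CBNZ retry loop. C cannot express exclusive monitors directly, so the closest compilable sketch of the emitted semantics uses a compare-and-swap loop (xadd_dw and the relaxed ordering here are illustrative assumptions, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Shape of the emitted sequence for the BPF_DW case:
 *   prfm pstl1strm, [addr]    A64_PRFM: prefetch for store
 *   1: ldxr x_t, [addr]       A64_LDXR: exclusive load
 *   add  x_t, x_t, x_src
 *   stxr w_s, x_t, [addr]     A64_STXR: store, status into w_s
 *   cbnz w_s, 1b              retry if another CPU intervened
 */
static void xadd_dw(uint64_t *addr, uint64_t src)
{
	uint64_t old, new;

	do {
		old = __atomic_load_n(addr, __ATOMIC_RELAXED);
		new = old + src;
	} while (!__atomic_compare_exchange_n(addr, &old, new, true,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
}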
- */ - pr_err_once("Invalid BPF_LD_IMM64 instruction\n"); - return -EINVAL; - } - imm64 = (u64)insn1.imm << 32 | (u32)imm; emit_a64_mov_i64(dst, imm64, ctx); @@ -690,7 +682,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_STX | BPF_XADD | BPF_W: /* STX XADD: lock *(u64 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_DW: - goto notyet; + emit_a64_mov_i(1, tmp, off, ctx); + emit(A64_ADD(1, tmp, tmp, dst), ctx); + emit(A64_PRFM(tmp, PST, L1, STRM), ctx); + emit(A64_LDXR(isdw, tmp2, tmp), ctx); + emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); + emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx); + jmp_offset = -3; + check_imm19(jmp_offset); + emit(A64_CBNZ(0, tmp2, jmp_offset), ctx); + break; /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ case BPF_LD | BPF_ABS | BPF_W: @@ -757,10 +758,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) } break; } -notyet: - pr_info_once("*** NOT YET: opcode %02x ***\n", code); - return -EFAULT; - default: pr_err_once("unknown opcode %02x\n", code); return -EINVAL; @@ -779,14 +776,14 @@ static int build_body(struct jit_ctx *ctx) int ret; ret = build_insn(insn, ctx); - - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; - if (ret > 0) { i++; + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; continue; } + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; if (ret) return ret; } diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index 81e03530ed39ee7e3b25b7442361f64aa883c179..1ccf45657472a5240ddd815c90de42397effc64d 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -92,5 +92,11 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 57feb0c1f7d707dd51ce20ffba0a418f5b5687ff..2c3f4b48042ae34319a2359b69ffd8fe57cedc21 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -101,4 +101,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 5853f8e92c20cda02450346d839b3f0b466359ee..ae6548d29a1819b4cc75826b881697a753e63a36 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -92,4 +92,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 566ecdcb5b4bcb2cd4d5888a1ce787b8fcbd0b97..3418ec9c1c5016df7c5acd27cdce5e3507ef4175 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -110,4 +110,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index 0e12527c4b0e6de154efaa91fe197eb995a1535c..4526e92301a6768ec714c5d95d501cdfafaf5cfa 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -92,4 +92,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 
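These options land in every architecture's socket.h with per-arch numbering (55/56/57 on most; sparc and parisc keep their own ranges, as the surrounding hunks show). All three are read through getsockopt() at the SOL_SOCKET level; a minimal userspace sketch for SO_COOKIE, which yields the socket's unique 64-bit cookie. The fallback define is an assumption for C libraries that do not ship the constant yet:

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_COOKIE
#define SO_COOKIE 57	/* generic value; sparc/parisc use other numbers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	uint64_t cookie = 0;
	socklen_t len = sizeof(cookie);

	if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_COOKIE,
				 &cookie, &len) < 0) {
		perror("SO_COOKIE");
		return 1;
	}
	printf("socket cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}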
+ +#define SO_COOKIE 57 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 7a109b73ddf7e814f9c29ffa1ec9b99977ead5d1..514701840bd93441c54107d57f10411b64576f13 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -91,4 +91,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 0x402F +#define SO_MEMINFO 0x4030 + +#define SO_INCOMING_NAPI_ID 0x4031 + +#define SO_COOKIE 0x4032 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index 44583a52f882540986928cc48a63971251226a0f..58e2ec0310fc9508b51c39ec6ab00edb7e51c258 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -99,4 +99,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index b24a64cbfeb10a91274a59117f2e76ea3c583e00..e8e5ecf673fdd864cf9a50a6e7c99512ec446c40 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -98,4 +98,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index ed96869c4101db43608e0e1811c76eb12f15f408..58243b0d21c006cfea9c47723221a33b7673fae9 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -31,7 +31,8 @@ config SPARC select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_PCI_IOMAP select HAVE_NMI_WATCHDOG if SPARC64 - select HAVE_CBPF_JIT + select HAVE_CBPF_JIT if SPARC32 + select HAVE_EBPF_JIT if SPARC64 select HAVE_DEBUG_BUGVERBOSE select GENERIC_SMP_IDLE_THREAD select GENERIC_CLOCKEVENTS diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index a25dc32f5d6a163c1b0e7b7ae775f43898ba4d58..3f4ad19d9ec70c7ab080483fa936219ef9be05f6 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -88,6 +88,12 @@ #define SCM_TIMESTAMPING_OPT_STATS 0x0038 +#define SO_MEMINFO 0x0039 + +#define SO_INCOMING_NAPI_ID 0x003a + +#define SO_COOKIE 0x003b + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile index 1306a58ac5413b742e9214cdc4983d34f5fec4fc..76fa8e95b721bbddcbc2f1693729bd7cfa56b0c7 100644 --- a/arch/sparc/net/Makefile +++ b/arch/sparc/net/Makefile @@ -1,4 +1,4 @@ # # Arch-specific network modules # -obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o +obj-$(CONFIG_BPF_JIT) += bpf_jit_asm_$(BITS).o bpf_jit_comp_$(BITS).o diff --git a/arch/sparc/net/bpf_jit.h b/arch/sparc/net/bpf_jit_32.h similarity index 96% rename from arch/sparc/net/bpf_jit.h rename to arch/sparc/net/bpf_jit_32.h index 33d6b375ff12095471171911e0d46dbb626191d5..d5c069bff5f983d2a985029732f460f1fd8ea501 100644 --- a/arch/sparc/net/bpf_jit.h +++ b/arch/sparc/net/bpf_jit_32.h @@ -39,7 +39,7 @@ #define r_TMP2 G2 #define r_OFF G3 -/* assembly code in arch/sparc/net/bpf_jit_asm.S */ +/* assembly code in arch/sparc/net/bpf_jit_asm_32.S */ extern u32 bpf_jit_load_word[]; extern u32 bpf_jit_load_half[]; extern u32 bpf_jit_load_byte[]; diff --git a/arch/sparc/net/bpf_jit_64.h b/arch/sparc/net/bpf_jit_64.h new file 
mode 100644 index 0000000000000000000000000000000000000000..74abd45796ea6e3440afe3842287c42ae9e27ee4 --- /dev/null +++ b/arch/sparc/net/bpf_jit_64.h @@ -0,0 +1,66 @@ +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +#ifndef __ASSEMBLER__ +#define G0 0x00 +#define G1 0x01 +#define G2 0x02 +#define G3 0x03 +#define G6 0x06 +#define G7 0x07 +#define O0 0x08 +#define O1 0x09 +#define O2 0x0a +#define O3 0x0b +#define O4 0x0c +#define O5 0x0d +#define SP 0x0e +#define O7 0x0f +#define L0 0x10 +#define L1 0x11 +#define L2 0x12 +#define L3 0x13 +#define L4 0x14 +#define L5 0x15 +#define L6 0x16 +#define L7 0x17 +#define I0 0x18 +#define I1 0x19 +#define I2 0x1a +#define I3 0x1b +#define I4 0x1c +#define I5 0x1d +#define FP 0x1e +#define I7 0x1f + +#define r_SKB L0 +#define r_HEADLEN L4 +#define r_SKB_DATA L5 +#define r_TMP G1 +#define r_TMP2 G3 + +/* assembly code in arch/sparc/net/bpf_jit_asm_64.S */ +extern u32 bpf_jit_load_word[]; +extern u32 bpf_jit_load_half[]; +extern u32 bpf_jit_load_byte[]; +extern u32 bpf_jit_load_byte_msh[]; +extern u32 bpf_jit_load_word_positive_offset[]; +extern u32 bpf_jit_load_half_positive_offset[]; +extern u32 bpf_jit_load_byte_positive_offset[]; +extern u32 bpf_jit_load_byte_msh_positive_offset[]; +extern u32 bpf_jit_load_word_negative_offset[]; +extern u32 bpf_jit_load_half_negative_offset[]; +extern u32 bpf_jit_load_byte_negative_offset[]; +extern u32 bpf_jit_load_byte_msh_negative_offset[]; + +#else +#define r_RESULT %o0 +#define r_SKB %o0 +#define r_OFF %o1 +#define r_HEADLEN %l4 +#define r_SKB_DATA %l5 +#define r_TMP %g1 +#define r_TMP2 %g3 +#endif + +#endif /* _BPF_JIT_H */ diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm_32.S similarity index 95% rename from arch/sparc/net/bpf_jit_asm.S rename to arch/sparc/net/bpf_jit_asm_32.S index 8c83f4b8eb15a7c314c10f9b44963b2220672d25..dcc402f5738a1ad6a7cdf61b6dfac155a86df7d8 100644 --- a/arch/sparc/net/bpf_jit_asm.S +++ b/arch/sparc/net/bpf_jit_asm_32.S @@ -1,18 +1,11 @@ #include -#include "bpf_jit.h" +#include "bpf_jit_32.h" -#ifdef CONFIG_SPARC64 -#define SAVE_SZ 176 -#define SCRATCH_OFF STACK_BIAS + 128 -#define BE_PTR(label) be,pn %xcc, label -#define SIGN_EXTEND(reg) sra reg, 0, reg -#else #define SAVE_SZ 96 #define SCRATCH_OFF 72 #define BE_PTR(label) be label #define SIGN_EXTEND(reg) -#endif #define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ diff --git a/arch/sparc/net/bpf_jit_asm_64.S b/arch/sparc/net/bpf_jit_asm_64.S new file mode 100644 index 0000000000000000000000000000000000000000..3b3f14655f819b149980e6647ca23883ac145e64 --- /dev/null +++ b/arch/sparc/net/bpf_jit_asm_64.S @@ -0,0 +1,161 @@ +#include + +#include "bpf_jit_64.h" + +#define SAVE_SZ 176 +#define SCRATCH_OFF STACK_BIAS + 128 +#define BE_PTR(label) be,pn %xcc, label +#define SIGN_EXTEND(reg) sra reg, 0, reg + +#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */ + + .text + .globl bpf_jit_load_word +bpf_jit_load_word: + cmp r_OFF, 0 + bl bpf_slow_path_word_neg + nop + .globl bpf_jit_load_word_positive_offset +bpf_jit_load_word_positive_offset: + sub r_HEADLEN, r_OFF, r_TMP + cmp r_TMP, 3 + ble bpf_slow_path_word + add r_SKB_DATA, r_OFF, r_TMP + andcc r_TMP, 3, %g0 + bne load_word_unaligned + nop + retl + ld [r_TMP], r_RESULT +load_word_unaligned: + ldub [r_TMP + 0x0], r_OFF + ldub [r_TMP + 0x1], r_TMP2 + sll r_OFF, 8, r_OFF + or r_OFF, r_TMP2, r_OFF + ldub [r_TMP + 0x2], r_TMP2 + sll r_OFF, 8, r_OFF + or r_OFF, r_TMP2, r_OFF + ldub [r_TMP + 0x3], r_TMP2 + sll r_OFF, 8, r_OFF + retl + or r_OFF, 
r_TMP2, r_RESULT + + .globl bpf_jit_load_half +bpf_jit_load_half: + cmp r_OFF, 0 + bl bpf_slow_path_half_neg + nop + .globl bpf_jit_load_half_positive_offset +bpf_jit_load_half_positive_offset: + sub r_HEADLEN, r_OFF, r_TMP + cmp r_TMP, 1 + ble bpf_slow_path_half + add r_SKB_DATA, r_OFF, r_TMP + andcc r_TMP, 1, %g0 + bne load_half_unaligned + nop + retl + lduh [r_TMP], r_RESULT +load_half_unaligned: + ldub [r_TMP + 0x0], r_OFF + ldub [r_TMP + 0x1], r_TMP2 + sll r_OFF, 8, r_OFF + retl + or r_OFF, r_TMP2, r_RESULT + + .globl bpf_jit_load_byte +bpf_jit_load_byte: + cmp r_OFF, 0 + bl bpf_slow_path_byte_neg + nop + .globl bpf_jit_load_byte_positive_offset +bpf_jit_load_byte_positive_offset: + cmp r_OFF, r_HEADLEN + bge bpf_slow_path_byte + nop + retl + ldub [r_SKB_DATA + r_OFF], r_RESULT + +#define bpf_slow_path_common(LEN) \ + save %sp, -SAVE_SZ, %sp; \ + mov %i0, %o0; \ + mov %i1, %o1; \ + add %fp, SCRATCH_OFF, %o2; \ + call skb_copy_bits; \ + mov (LEN), %o3; \ + cmp %o0, 0; \ + restore; + +bpf_slow_path_word: + bpf_slow_path_common(4) + bl bpf_error + ld [%sp + SCRATCH_OFF], r_RESULT + retl + nop +bpf_slow_path_half: + bpf_slow_path_common(2) + bl bpf_error + lduh [%sp + SCRATCH_OFF], r_RESULT + retl + nop +bpf_slow_path_byte: + bpf_slow_path_common(1) + bl bpf_error + ldub [%sp + SCRATCH_OFF], r_RESULT + retl + nop + +#define bpf_negative_common(LEN) \ + save %sp, -SAVE_SZ, %sp; \ + mov %i0, %o0; \ + mov %i1, %o1; \ + SIGN_EXTEND(%o1); \ + call bpf_internal_load_pointer_neg_helper; \ + mov (LEN), %o2; \ + mov %o0, r_TMP; \ + cmp %o0, 0; \ + BE_PTR(bpf_error); \ + restore; + +bpf_slow_path_word_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_word_negative_offset +bpf_jit_load_word_negative_offset: + bpf_negative_common(4) + andcc r_TMP, 3, %g0 + bne load_word_unaligned + nop + retl + ld [r_TMP], r_RESULT + +bpf_slow_path_half_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_half_negative_offset +bpf_jit_load_half_negative_offset: + bpf_negative_common(2) + andcc r_TMP, 1, %g0 + bne load_half_unaligned + nop + retl + lduh [r_TMP], r_RESULT + +bpf_slow_path_byte_neg: + sethi %hi(SKF_MAX_NEG_OFF), r_TMP + cmp r_OFF, r_TMP + bl bpf_error + nop + .globl bpf_jit_load_byte_negative_offset +bpf_jit_load_byte_negative_offset: + bpf_negative_common(1) + retl + ldub [r_TMP], r_RESULT + +bpf_error: + /* Make the JIT program itself return zero. */ + ret + restore %g0, %g0, %o0 diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp_32.c similarity index 95% rename from arch/sparc/net/bpf_jit_comp.c rename to arch/sparc/net/bpf_jit_comp_32.c index a6d9204a6a0bd352ec6fa4e3c89450a81dee8a37..d193748548e23d3e0d6c115ea0a6cc4a662808a1 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp_32.c @@ -8,7 +8,7 @@ #include #include -#include "bpf_jit.h" +#include "bpf_jit_32.h" int bpf_jit_enable __read_mostly; @@ -17,24 +17,6 @@ static inline bool is_simm13(unsigned int value) return value + 0x1000 < 0x2000; } -static void bpf_flush_icache(void *start_, void *end_) -{ -#ifdef CONFIG_SPARC64 - /* Cheetah's I-cache is fully coherent. 
*/ - if (tlb_type == spitfire) { - unsigned long start = (unsigned long) start_; - unsigned long end = (unsigned long) end_; - - start &= ~7UL; - end = (end + 7UL) & ~7UL; - while (start < end) { - flushi(start); - start += 32; - } - } -#endif -} - #define SEEN_DATAREF 1 /* might call external helpers */ #define SEEN_XREG 2 /* ebx is used */ #define SEEN_MEM 4 /* use mem[] for temporary storage */ @@ -82,11 +64,7 @@ static void bpf_flush_icache(void *start_, void *end_) #define BE (F2(0, 2) | CONDE) #define BNE (F2(0, 2) | CONDNE) -#ifdef CONFIG_SPARC64 -#define BE_PTR (F2(0, 1) | CONDE | (2 << 20)) -#else #define BE_PTR BE -#endif #define SETHI(K, REG) \ (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff)) @@ -116,13 +94,8 @@ static void bpf_flush_icache(void *start_, void *end_) #define LD64 F3(3, 0x0b) #define ST32 F3(3, 0x04) -#ifdef CONFIG_SPARC64 -#define LDPTR LD64 -#define BASE_STACKFRAME 176 -#else #define LDPTR LD32 #define BASE_STACKFRAME 96 -#endif #define LD32I (LD32 | IMMED) #define LD8I (LD8 | IMMED) @@ -234,11 +207,7 @@ do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ __emit_load8(BASE, STRUCT, FIELD, DEST); \ } while (0) -#ifdef CONFIG_SPARC64 -#define BIAS (STACK_BIAS - 4) -#else #define BIAS (-4) -#endif #define emit_ldmem(OFF, DEST) \ do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \ @@ -249,13 +218,8 @@ do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \ } while (0) #ifdef CONFIG_SMP -#ifdef CONFIG_SPARC64 -#define emit_load_cpu(REG) \ - emit_load16(G6, struct thread_info, cpu, REG) -#else #define emit_load_cpu(REG) \ emit_load32(G6, struct thread_info, cpu, REG) -#endif #else #define emit_load_cpu(REG) emit_clear(REG) #endif @@ -486,7 +450,6 @@ void bpf_jit_compile(struct bpf_prog *fp) if (K == 1) break; emit_write_y(G0); -#ifdef CONFIG_SPARC32 /* The Sparc v8 architecture requires * three instructions between a %y * register write and the first use. @@ -494,31 +457,21 @@ void bpf_jit_compile(struct bpf_prog *fp) emit_nop(); emit_nop(); emit_nop(); -#endif emit_alu_K(DIV, K); break; case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ emit_cmpi(r_X, 0); if (pc_ret0 > 0) { t_offset = addrs[pc_ret0 - 1]; -#ifdef CONFIG_SPARC32 emit_branch(BE, t_offset + 20); -#else - emit_branch(BE, t_offset + 8); -#endif emit_nop(); /* delay slot */ } else { emit_branch_off(BNE, 16); emit_nop(); -#ifdef CONFIG_SPARC32 emit_jump(cleanup_addr + 20); -#else - emit_jump(cleanup_addr + 8); -#endif emit_clear(r_A); } emit_write_y(G0); -#ifdef CONFIG_SPARC32 /* The Sparc v8 architecture requires * three instructions between a %y * register write and the first use. 
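Both the surviving 32-bit JIT and the new 64-bit JIT below rely on the same branch-free immediate check, is_simm13(): biasing by 0x1000 maps the signed range [-4096, 4095] onto [0, 0x1fff], and unsigned wrap-around pushes every out-of-range value above the limit. A standalone demonstration (prints "1 1 0 0"):

#include <stdbool.h>
#include <stdio.h>

static bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       is_simm13(4095),			/* top of range */
	       is_simm13((unsigned int)-4096),	/* bottom of range */
	       is_simm13(4096),			/* one past the top */
	       is_simm13((unsigned int)-4097));	/* one past the bottom */
	return 0;
}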
@@ -526,7 +479,6 @@ void bpf_jit_compile(struct bpf_prog *fp) emit_nop(); emit_nop(); emit_nop(); -#endif emit_alu_X(DIV); break; case BPF_ALU | BPF_NEG: @@ -797,7 +749,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; bpf_jit_dump(flen, proglen, pass + 1, image); if (image) { - bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; fp->jited = 1; } diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c new file mode 100644 index 0000000000000000000000000000000000000000..21de77419f484b16939d4ce4bf6abbf734ccf325 --- /dev/null +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -0,0 +1,1566 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "bpf_jit_64.h" + +int bpf_jit_enable __read_mostly; + +static inline bool is_simm13(unsigned int value) +{ + return value + 0x1000 < 0x2000; +} + +static inline bool is_simm10(unsigned int value) +{ + return value + 0x200 < 0x400; +} + +static inline bool is_simm5(unsigned int value) +{ + return value + 0x10 < 0x20; +} + +static inline bool is_sethi(unsigned int value) +{ + return (value & ~0x3fffff) == 0; +} + +static void bpf_flush_icache(void *start_, void *end_) +{ + /* Cheetah's I-cache is fully coherent. */ + if (tlb_type == spitfire) { + unsigned long start = (unsigned long) start_; + unsigned long end = (unsigned long) end_; + + start &= ~7UL; + end = (end + 7UL) & ~7UL; + while (start < end) { + flushi(start); + start += 32; + } + } +} + +#define SEEN_DATAREF 1 /* might call external helpers */ +#define SEEN_XREG 2 /* ebx is used */ +#define SEEN_MEM 4 /* use mem[] for temporary storage */ + +#define S13(X) ((X) & 0x1fff) +#define S5(X) ((X) & 0x1f) +#define IMMED 0x00002000 +#define RD(X) ((X) << 25) +#define RS1(X) ((X) << 14) +#define RS2(X) ((X)) +#define OP(X) ((X) << 30) +#define OP2(X) ((X) << 22) +#define OP3(X) ((X) << 19) +#define COND(X) (((X) & 0xf) << 25) +#define CBCOND(X) (((X) & 0x1f) << 25) +#define F1(X) OP(X) +#define F2(X, Y) (OP(X) | OP2(Y)) +#define F3(X, Y) (OP(X) | OP3(Y)) +#define ASI(X) (((X) & 0xff) << 5) + +#define CONDN COND(0x0) +#define CONDE COND(0x1) +#define CONDLE COND(0x2) +#define CONDL COND(0x3) +#define CONDLEU COND(0x4) +#define CONDCS COND(0x5) +#define CONDNEG COND(0x6) +#define CONDVC COND(0x7) +#define CONDA COND(0x8) +#define CONDNE COND(0x9) +#define CONDG COND(0xa) +#define CONDGE COND(0xb) +#define CONDGU COND(0xc) +#define CONDCC COND(0xd) +#define CONDPOS COND(0xe) +#define CONDVS COND(0xf) + +#define CONDGEU CONDCC +#define CONDLU CONDCS + +#define WDISP22(X) (((X) >> 2) & 0x3fffff) +#define WDISP19(X) (((X) >> 2) & 0x7ffff) + +/* The 10-bit branch displacement for CBCOND is split into two fields */ +static u32 WDISP10(u32 off) +{ + u32 ret = ((off >> 2) & 0xff) << 5; + + ret |= ((off >> (2 + 8)) & 0x03) << 19; + + return ret; +} + +#define CBCONDE CBCOND(0x09) +#define CBCONDLE CBCOND(0x0a) +#define CBCONDL CBCOND(0x0b) +#define CBCONDLEU CBCOND(0x0c) +#define CBCONDCS CBCOND(0x0d) +#define CBCONDN CBCOND(0x0e) +#define CBCONDVS CBCOND(0x0f) +#define CBCONDNE CBCOND(0x19) +#define CBCONDG CBCOND(0x1a) +#define CBCONDGE CBCOND(0x1b) +#define CBCONDGU CBCOND(0x1c) +#define CBCONDCC CBCOND(0x1d) +#define CBCONDPOS CBCOND(0x1e) +#define CBCONDVC CBCOND(0x1f) + +#define CBCONDGEU CBCONDCC +#define CBCONDLU CBCONDCS + +#define ANNUL (1 << 29) +#define XCC (1 << 21) + +#define BRANCH (F2(0, 1) | XCC) +#define CBCOND_OP (F2(0, 3) | XCC) + +#define BA (BRANCH | CONDA) +#define BG (BRANCH | CONDG) +#define BGU 
(BRANCH | CONDGU) +#define BLEU (BRANCH | CONDLEU) +#define BGE (BRANCH | CONDGE) +#define BGEU (BRANCH | CONDGEU) +#define BLU (BRANCH | CONDLU) +#define BE (BRANCH | CONDE) +#define BNE (BRANCH | CONDNE) + +#define SETHI(K, REG) \ + (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff)) +#define OR_LO(K, REG) \ + (F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG)) + +#define ADD F3(2, 0x00) +#define AND F3(2, 0x01) +#define ANDCC F3(2, 0x11) +#define OR F3(2, 0x02) +#define XOR F3(2, 0x03) +#define SUB F3(2, 0x04) +#define SUBCC F3(2, 0x14) +#define MUL F3(2, 0x0a) +#define MULX F3(2, 0x09) +#define UDIVX F3(2, 0x0d) +#define DIV F3(2, 0x0e) +#define SLL F3(2, 0x25) +#define SLLX (F3(2, 0x25)|(1<<12)) +#define SRA F3(2, 0x27) +#define SRAX (F3(2, 0x27)|(1<<12)) +#define SRL F3(2, 0x26) +#define SRLX (F3(2, 0x26)|(1<<12)) +#define JMPL F3(2, 0x38) +#define SAVE F3(2, 0x3c) +#define RESTORE F3(2, 0x3d) +#define CALL F1(1) +#define BR F2(0, 0x01) +#define RD_Y F3(2, 0x28) +#define WR_Y F3(2, 0x30) + +#define LD32 F3(3, 0x00) +#define LD8 F3(3, 0x01) +#define LD16 F3(3, 0x02) +#define LD64 F3(3, 0x0b) +#define LD64A F3(3, 0x1b) +#define ST8 F3(3, 0x05) +#define ST16 F3(3, 0x06) +#define ST32 F3(3, 0x04) +#define ST64 F3(3, 0x0e) + +#define CAS F3(3, 0x3c) +#define CASX F3(3, 0x3e) + +#define LDPTR LD64 +#define BASE_STACKFRAME 176 + +#define LD32I (LD32 | IMMED) +#define LD8I (LD8 | IMMED) +#define LD16I (LD16 | IMMED) +#define LD64I (LD64 | IMMED) +#define LDPTRI (LDPTR | IMMED) +#define ST32I (ST32 | IMMED) + +struct jit_ctx { + struct bpf_prog *prog; + unsigned int *offset; + int idx; + int epilogue_offset; + bool tmp_1_used; + bool tmp_2_used; + bool tmp_3_used; + bool saw_ld_abs_ind; + bool saw_frame_pointer; + bool saw_call; + bool saw_tail_call; + u32 *image; +}; + +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) +#define SKB_HLEN_REG (MAX_BPF_JIT_REG + 2) +#define SKB_DATA_REG (MAX_BPF_JIT_REG + 3) +#define TMP_REG_3 (MAX_BPF_JIT_REG + 4) + +/* Map BPF registers to SPARC registers */ +static const int bpf2sparc[] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = O5, + + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = O0, + [BPF_REG_2] = O1, + [BPF_REG_3] = O2, + [BPF_REG_4] = O3, + [BPF_REG_5] = O4, + + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = L0, + [BPF_REG_7] = L1, + [BPF_REG_8] = L2, + [BPF_REG_9] = L3, + + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = L6, + + [BPF_REG_AX] = G7, + + /* temporary register for internal BPF JIT */ + [TMP_REG_1] = G1, + [TMP_REG_2] = G2, + [TMP_REG_3] = G3, + + [SKB_HLEN_REG] = L4, + [SKB_DATA_REG] = L5, +}; + +static void emit(const u32 insn, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) + ctx->image[ctx->idx] = insn; + + ctx->idx++; +} + +static void emit_call(u32 *func, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) { + void *here = &ctx->image[ctx->idx]; + unsigned int off; + + off = (void *)func - here; + ctx->image[ctx->idx] = CALL | ((off >> 2) & 0x3fffffff); + } + ctx->idx++; +} + +static void emit_nop(struct jit_ctx *ctx) +{ + emit(SETHI(0, G0), ctx); +} + +static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx) +{ + emit(OR | RS1(G0) | RS2(from) | RD(to), ctx); +} + +/* Emit 32-bit constant, zero extended. 
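emit() just above is the core of a two-pass scheme: while ctx->image is still NULL only ctx->idx advances, so one walk over the program measures the image and a second identical walk fills it in, which is exactly how bpf_int_jit_compile() at the end of this file drives it. A minimal standalone rendition of the pattern (the two opcodes are real SPARC words taken from this file; everything else is simplified):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct jit_ctx {
	uint32_t *image;	/* NULL during the sizing pass */
	int idx;
};

static void emit(uint32_t insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = insn;
	ctx->idx++;
}

static void build(struct jit_ctx *ctx)
{
	emit(0x01000000, ctx);	/* sethi 0, %g0: the nop encoding */
	emit(0x91d02005, ctx);	/* ta 5: the fill pattern from jit_fill_hole */
}

int main(void)
{
	struct jit_ctx ctx = { .image = NULL, .idx = 0 };

	build(&ctx);				/* pass 1: size only */
	ctx.image = calloc(ctx.idx, sizeof(uint32_t));
	ctx.idx = 0;
	build(&ctx);				/* pass 2: emit for real */
	printf("emitted %d instructions\n", ctx.idx);
	free(ctx.image);
	return 0;
}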
*/ +static void emit_set_const(s32 K, u32 reg, struct jit_ctx *ctx) +{ + emit(SETHI(K, reg), ctx); + emit(OR_LO(K, reg), ctx); +} + +/* Emit 32-bit constant, sign extended. */ +static void emit_set_const_sext(s32 K, u32 reg, struct jit_ctx *ctx) +{ + if (K >= 0) { + emit(SETHI(K, reg), ctx); + emit(OR_LO(K, reg), ctx); + } else { + u32 hbits = ~(u32) K; + u32 lbits = -0x400 | (u32) K; + + emit(SETHI(hbits, reg), ctx); + emit(XOR | IMMED | RS1(reg) | S13(lbits) | RD(reg), ctx); + } +} + +static void emit_alu(u32 opcode, u32 src, u32 dst, struct jit_ctx *ctx) +{ + emit(opcode | RS1(dst) | RS2(src) | RD(dst), ctx); +} + +static void emit_alu3(u32 opcode, u32 a, u32 b, u32 c, struct jit_ctx *ctx) +{ + emit(opcode | RS1(a) | RS2(b) | RD(c), ctx); +} + +static void emit_alu_K(unsigned int opcode, unsigned int dst, unsigned int imm, + struct jit_ctx *ctx) +{ + bool small_immed = is_simm13(imm); + unsigned int insn = opcode; + + insn |= RS1(dst) | RD(dst); + if (small_immed) { + emit(insn | IMMED | S13(imm), ctx); + } else { + unsigned int tmp = bpf2sparc[TMP_REG_1]; + + ctx->tmp_1_used = true; + + emit_set_const_sext(imm, tmp, ctx); + emit(insn | RS2(tmp), ctx); + } +} + +static void emit_alu3_K(unsigned int opcode, unsigned int src, unsigned int imm, + unsigned int dst, struct jit_ctx *ctx) +{ + bool small_immed = is_simm13(imm); + unsigned int insn = opcode; + + insn |= RS1(src) | RD(dst); + if (small_immed) { + emit(insn | IMMED | S13(imm), ctx); + } else { + unsigned int tmp = bpf2sparc[TMP_REG_1]; + + ctx->tmp_1_used = true; + + emit_set_const_sext(imm, tmp, ctx); + emit(insn | RS2(tmp), ctx); + } +} + +static void emit_loadimm32(s32 K, unsigned int dest, struct jit_ctx *ctx) +{ + if (K >= 0 && is_simm13(K)) { + /* or %g0, K, DEST */ + emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx); + } else { + emit_set_const(K, dest, ctx); + } +} + +static void emit_loadimm(s32 K, unsigned int dest, struct jit_ctx *ctx) +{ + if (is_simm13(K)) { + /* or %g0, K, DEST */ + emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx); + } else { + emit_set_const(K, dest, ctx); + } +} + +static void emit_loadimm_sext(s32 K, unsigned int dest, struct jit_ctx *ctx) +{ + if (is_simm13(K)) { + /* or %g0, K, DEST */ + emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx); + } else { + emit_set_const_sext(K, dest, ctx); + } +} + +static void analyze_64bit_constant(u32 high_bits, u32 low_bits, + int *hbsp, int *lbsp, int *abbasp) +{ + int lowest_bit_set, highest_bit_set, all_bits_between_are_set; + int i; + + lowest_bit_set = highest_bit_set = -1; + i = 0; + do { + if ((lowest_bit_set == -1) && ((low_bits >> i) & 1)) + lowest_bit_set = i; + if ((highest_bit_set == -1) && ((high_bits >> (32 - i - 1)) & 1)) + highest_bit_set = (64 - i - 1); + } while (++i < 32 && (highest_bit_set == -1 || + lowest_bit_set == -1)); + if (i == 32) { + i = 0; + do { + if (lowest_bit_set == -1 && ((high_bits >> i) & 1)) + lowest_bit_set = i + 32; + if (highest_bit_set == -1 && + ((low_bits >> (32 - i - 1)) & 1)) + highest_bit_set = 32 - i - 1; + } while (++i < 32 && (highest_bit_set == -1 || + lowest_bit_set == -1)); + } + + all_bits_between_are_set = 1; + for (i = lowest_bit_set; i <= highest_bit_set; i++) { + if (i < 32) { + if ((low_bits & (1 << i)) != 0) + continue; + } else { + if ((high_bits & (1 << (i - 32))) != 0) + continue; + } + all_bits_between_are_set = 0; + break; + } + *hbsp = highest_bit_set; + *lbsp = lowest_bit_set; + *abbasp = all_bits_between_are_set; +} + +static unsigned long create_simple_focus_bits(unsigned long high_bits, 
+ unsigned long low_bits, + int lowest_bit_set, int shift) +{ + long hi, lo; + + if (lowest_bit_set < 32) { + lo = (low_bits >> lowest_bit_set) << shift; + hi = ((high_bits << (32 - lowest_bit_set)) << shift); + } else { + lo = 0; + hi = ((high_bits >> (lowest_bit_set - 32)) << shift); + } + return hi | lo; +} + +static bool const64_is_2insns(unsigned long high_bits, + unsigned long low_bits) +{ + int highest_bit_set, lowest_bit_set, all_bits_between_are_set; + + if (high_bits == 0 || high_bits == 0xffffffff) + return true; + + analyze_64bit_constant(high_bits, low_bits, + &highest_bit_set, &lowest_bit_set, + &all_bits_between_are_set); + + if ((highest_bit_set == 63 || lowest_bit_set == 0) && + all_bits_between_are_set != 0) + return true; + + if (highest_bit_set - lowest_bit_set < 21) + return true; + + return false; +} + +static void sparc_emit_set_const64_quick2(unsigned long high_bits, + unsigned long low_imm, + unsigned int dest, + int shift_count, struct jit_ctx *ctx) +{ + emit_loadimm32(high_bits, dest, ctx); + + /* Now shift it up into place. */ + emit_alu_K(SLLX, dest, shift_count, ctx); + + /* If there is a low immediate part piece, finish up by + * putting that in as well. + */ + if (low_imm != 0) + emit(OR | IMMED | RS1(dest) | S13(low_imm) | RD(dest), ctx); +} + +static void emit_loadimm64(u64 K, unsigned int dest, struct jit_ctx *ctx) +{ + int all_bits_between_are_set, lowest_bit_set, highest_bit_set; + unsigned int tmp = bpf2sparc[TMP_REG_1]; + u32 low_bits = (K & 0xffffffff); + u32 high_bits = (K >> 32); + + /* These two tests also take care of all of the one + * instruction cases. + */ + if (high_bits == 0xffffffff && (low_bits & 0x80000000)) + return emit_loadimm_sext(K, dest, ctx); + if (high_bits == 0x00000000) + return emit_loadimm32(K, dest, ctx); + + analyze_64bit_constant(high_bits, low_bits, &highest_bit_set, + &lowest_bit_set, &all_bits_between_are_set); + + /* 1) mov -1, %reg + * sllx %reg, shift, %reg + * 2) mov -1, %reg + * srlx %reg, shift, %reg + * 3) mov some_small_const, %reg + * sllx %reg, shift, %reg + */ + if (((highest_bit_set == 63 || lowest_bit_set == 0) && + all_bits_between_are_set != 0) || + ((highest_bit_set - lowest_bit_set) < 12)) { + int shift = lowest_bit_set; + long the_const = -1; + + if ((highest_bit_set != 63 && lowest_bit_set != 0) || + all_bits_between_are_set == 0) { + the_const = + create_simple_focus_bits(high_bits, low_bits, + lowest_bit_set, 0); + } else if (lowest_bit_set == 0) + shift = -(63 - highest_bit_set); + + emit(OR | IMMED | RS1(G0) | S13(the_const) | RD(dest), ctx); + if (shift > 0) + emit_alu_K(SLLX, dest, shift, ctx); + else if (shift < 0) + emit_alu_K(SRLX, dest, -shift, ctx); + + return; + } + + /* Now a range of 22 or less bits set somewhere. + * 1) sethi %hi(focus_bits), %reg + * sllx %reg, shift, %reg + * 2) sethi %hi(focus_bits), %reg + * srlx %reg, shift, %reg + */ + if ((highest_bit_set - lowest_bit_set) < 21) { + unsigned long focus_bits = + create_simple_focus_bits(high_bits, low_bits, + lowest_bit_set, 10); + + emit(SETHI(focus_bits, dest), ctx); + + /* If lowest_bit_set == 10 then a sethi alone could + * have done it. + */ + if (lowest_bit_set < 10) + emit_alu_K(SRLX, dest, 10 - lowest_bit_set, ctx); + else if (lowest_bit_set > 10) + emit_alu_K(SLLX, dest, lowest_bit_set - 10, ctx); + return; + } + + /* Ok, now 3 instruction sequences. 
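The sethi case above deserves a concrete number: whenever highest_bit_set - lowest_bit_set is below 21, the constant is a small "focus" value shifted into place, so a single 22-bit sethi plus one sllx/srlx rebuilds it. A standalone illustration of that property (the sample constant is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t k = 0x00000fff00000000ULL;	/* set bits span 32..43 */
	int low = __builtin_ctzll(k);
	int high = 63 - __builtin_clzll(k);
	uint64_t focus = k >> low;

	/* focus fits a sethi immediate; one shift restores k */
	printf("span %d..%d, focus 0x%llx, ok=%d\n",
	       low, high, (unsigned long long)focus,
	       (focus << low) == k);
	return 0;
}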
*/ + if (low_bits == 0) { + emit_loadimm32(high_bits, dest, ctx); + emit_alu_K(SLLX, dest, 32, ctx); + return; + } + + /* We may be able to do something quick + * when the constant is negated, so try that. + */ + if (const64_is_2insns((~high_bits) & 0xffffffff, + (~low_bits) & 0xfffffc00)) { + /* NOTE: The trailing bits get XOR'd so we need the + * non-negated bits, not the negated ones. + */ + unsigned long trailing_bits = low_bits & 0x3ff; + + if ((((~high_bits) & 0xffffffff) == 0 && + ((~low_bits) & 0x80000000) == 0) || + (((~high_bits) & 0xffffffff) == 0xffffffff && + ((~low_bits) & 0x80000000) != 0)) { + unsigned long fast_int = (~low_bits & 0xffffffff); + + if ((is_sethi(fast_int) && + (~high_bits & 0xffffffff) == 0)) { + emit(SETHI(fast_int, dest), ctx); + } else if (is_simm13(fast_int)) { + emit(OR | IMMED | RS1(G0) | S13(fast_int) | RD(dest), ctx); + } else { + emit_loadimm64(fast_int, dest, ctx); + } + } else { + u64 n = ((~low_bits) & 0xfffffc00) | + (((unsigned long)((~high_bits) & 0xffffffff))<<32); + emit_loadimm64(n, dest, ctx); + } + + low_bits = -0x400 | trailing_bits; + + emit(XOR | IMMED | RS1(dest) | S13(low_bits) | RD(dest), ctx); + return; + } + + /* 1) sethi %hi(xxx), %reg + * or %reg, %lo(xxx), %reg + * sllx %reg, yyy, %reg + */ + if ((highest_bit_set - lowest_bit_set) < 32) { + unsigned long focus_bits = + create_simple_focus_bits(high_bits, low_bits, + lowest_bit_set, 0); + + /* So what we know is that the set bits straddle the + * middle of the 64-bit word. + */ + sparc_emit_set_const64_quick2(focus_bits, 0, dest, + lowest_bit_set, ctx); + return; + } + + /* 1) sethi %hi(high_bits), %reg + * or %reg, %lo(high_bits), %reg + * sllx %reg, 32, %reg + * or %reg, low_bits, %reg + */ + if (is_simm13(low_bits) && ((int)low_bits > 0)) { + sparc_emit_set_const64_quick2(high_bits, low_bits, + dest, 32, ctx); + return; + } + + /* Oh well, we tried... Do a full 64-bit decomposition. 
*/ + ctx->tmp_1_used = true; + + emit_loadimm32(high_bits, tmp, ctx); + emit_loadimm32(low_bits, dest, ctx); + emit_alu_K(SLLX, tmp, 32, ctx); + emit(OR | RS1(dest) | RS2(tmp) | RD(dest), ctx); +} + +static void emit_branch(unsigned int br_opc, unsigned int from_idx, unsigned int to_idx, + struct jit_ctx *ctx) +{ + unsigned int off = to_idx - from_idx; + + if (br_opc & XCC) + emit(br_opc | WDISP19(off << 2), ctx); + else + emit(br_opc | WDISP22(off << 2), ctx); +} + +static void emit_cbcond(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx, + const u8 dst, const u8 src, struct jit_ctx *ctx) +{ + unsigned int off = to_idx - from_idx; + + emit(cb_opc | WDISP10(off << 2) | RS1(dst) | RS2(src), ctx); +} + +static void emit_cbcondi(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx, + const u8 dst, s32 imm, struct jit_ctx *ctx) +{ + unsigned int off = to_idx - from_idx; + + emit(cb_opc | IMMED | WDISP10(off << 2) | RS1(dst) | S5(imm), ctx); +} + +#define emit_read_y(REG, CTX) emit(RD_Y | RD(REG), CTX) +#define emit_write_y(REG, CTX) emit(WR_Y | IMMED | RS1(REG) | S13(0), CTX) + +#define emit_cmp(R1, R2, CTX) \ + emit(SUBCC | RS1(R1) | RS2(R2) | RD(G0), CTX) + +#define emit_cmpi(R1, IMM, CTX) \ + emit(SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX) + +#define emit_btst(R1, R2, CTX) \ + emit(ANDCC | RS1(R1) | RS2(R2) | RD(G0), CTX) + +#define emit_btsti(R1, IMM, CTX) \ + emit(ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX) + +static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, + const s32 imm, bool is_imm, int branch_dst, + struct jit_ctx *ctx) +{ + bool use_cbcond = (sparc64_elf_hwcap & AV_SPARC_CBCOND) != 0; + const u8 tmp = bpf2sparc[TMP_REG_1]; + + branch_dst = ctx->offset[branch_dst]; + + if (!is_simm10(branch_dst - ctx->idx) || + BPF_OP(code) == BPF_JSET) + use_cbcond = false; + + if (is_imm) { + bool fits = true; + + if (use_cbcond) { + if (!is_simm5(imm)) + fits = false; + } else if (!is_simm13(imm)) { + fits = false; + } + if (!fits) { + ctx->tmp_1_used = true; + emit_loadimm_sext(imm, tmp, ctx); + src = tmp; + is_imm = false; + } + } + + if (!use_cbcond) { + u32 br_opcode; + + if (BPF_OP(code) == BPF_JSET) { + if (is_imm) + emit_btsti(dst, imm, ctx); + else + emit_btst(dst, src, ctx); + } else { + if (is_imm) + emit_cmpi(dst, imm, ctx); + else + emit_cmp(dst, src, ctx); + } + switch (BPF_OP(code)) { + case BPF_JEQ: + br_opcode = BE; + break; + case BPF_JGT: + br_opcode = BGU; + break; + case BPF_JGE: + br_opcode = BGEU; + break; + case BPF_JSET: + case BPF_JNE: + br_opcode = BNE; + break; + case BPF_JSGT: + br_opcode = BG; + break; + case BPF_JSGE: + br_opcode = BGE; + break; + default: + /* Make sure we dont leak kernel information to the + * user. + */ + return -EFAULT; + } + emit_branch(br_opcode, ctx->idx, branch_dst, ctx); + emit_nop(ctx); + } else { + u32 cbcond_opcode; + + switch (BPF_OP(code)) { + case BPF_JEQ: + cbcond_opcode = CBCONDE; + break; + case BPF_JGT: + cbcond_opcode = CBCONDGU; + break; + case BPF_JGE: + cbcond_opcode = CBCONDGEU; + break; + case BPF_JNE: + cbcond_opcode = CBCONDNE; + break; + case BPF_JSGT: + cbcond_opcode = CBCONDG; + break; + case BPF_JSGE: + cbcond_opcode = CBCONDGE; + break; + default: + /* Make sure we dont leak kernel information to the + * user. 
+ */ + return -EFAULT; + } + cbcond_opcode |= CBCOND_OP; + if (is_imm) + emit_cbcondi(cbcond_opcode, ctx->idx, branch_dst, + dst, imm, ctx); + else + emit_cbcond(cbcond_opcode, ctx->idx, branch_dst, + dst, src, ctx); + } + return 0; +} + +static void load_skb_regs(struct jit_ctx *ctx, u8 r_skb) +{ + const u8 r_headlen = bpf2sparc[SKB_HLEN_REG]; + const u8 r_data = bpf2sparc[SKB_DATA_REG]; + const u8 r_tmp = bpf2sparc[TMP_REG_1]; + unsigned int off; + + off = offsetof(struct sk_buff, len); + emit(LD32I | RS1(r_skb) | S13(off) | RD(r_headlen), ctx); + + off = offsetof(struct sk_buff, data_len); + emit(LD32I | RS1(r_skb) | S13(off) | RD(r_tmp), ctx); + + emit(SUB | RS1(r_headlen) | RS2(r_tmp) | RD(r_headlen), ctx); + + off = offsetof(struct sk_buff, data); + emit(LDPTRI | RS1(r_skb) | S13(off) | RD(r_data), ctx); +} + +/* Just skip the save instruction and the ctx register move. */ +#define BPF_TAILCALL_PROLOGUE_SKIP 16 +#define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) + +static void build_prologue(struct jit_ctx *ctx) +{ + s32 stack_needed = BASE_STACKFRAME; + + if (ctx->saw_frame_pointer || ctx->saw_tail_call) + stack_needed += MAX_BPF_STACK; + + if (ctx->saw_tail_call) + stack_needed += 8; + + /* save %sp, -176, %sp */ + emit(SAVE | IMMED | RS1(SP) | S13(-stack_needed) | RD(SP), ctx); + + /* tail_call_cnt = 0 */ + if (ctx->saw_tail_call) { + u32 off = BPF_TAILCALL_CNT_SP_OFF; + + emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(G0), ctx); + } else { + emit_nop(ctx); + } + if (ctx->saw_frame_pointer) { + const u8 vfp = bpf2sparc[BPF_REG_FP]; + + emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx); + } + + emit_reg_move(I0, O0, ctx); + /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */ + + if (ctx->saw_ld_abs_ind) + load_skb_regs(ctx, bpf2sparc[BPF_REG_1]); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + ctx->epilogue_offset = ctx->idx; + + /* ret (jmpl %i7 + 8, %g0) */ + emit(JMPL | IMMED | RS1(I7) | S13(8) | RD(G0), ctx); + + /* restore %i5, %g0, %o0 */ + emit(RESTORE | RS1(bpf2sparc[BPF_REG_0]) | RS2(G0) | RD(O0), ctx); +} + +static void emit_tail_call(struct jit_ctx *ctx) +{ + const u8 bpf_array = bpf2sparc[BPF_REG_2]; + const u8 bpf_index = bpf2sparc[BPF_REG_3]; + const u8 tmp = bpf2sparc[TMP_REG_1]; + u32 off; + + ctx->saw_tail_call = true; + + off = offsetof(struct bpf_array, map.max_entries); + emit(LD32 | IMMED | RS1(bpf_array) | S13(off) | RD(tmp), ctx); + emit_cmp(bpf_index, tmp, ctx); +#define OFFSET1 17 + emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET1, ctx); + emit_nop(ctx); + + off = BPF_TAILCALL_CNT_SP_OFF; + emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx); + emit_cmpi(tmp, MAX_TAIL_CALL_CNT, ctx); +#define OFFSET2 13 + emit_branch(BGU, ctx->idx, ctx->idx + OFFSET2, ctx); + emit_nop(ctx); + + emit_alu_K(ADD, tmp, 1, ctx); + off = BPF_TAILCALL_CNT_SP_OFF; + emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx); + + emit_alu3_K(SLL, bpf_index, 3, tmp, ctx); + emit_alu(ADD, bpf_array, tmp, ctx); + off = offsetof(struct bpf_array, ptrs); + emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx); + + emit_cmpi(tmp, 0, ctx); +#define OFFSET3 5 + emit_branch(BE, ctx->idx, ctx->idx + OFFSET3, ctx); + emit_nop(ctx); + + off = offsetof(struct bpf_prog, bpf_func); + emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx); + + off = BPF_TAILCALL_PROLOGUE_SKIP; + emit(JMPL | IMMED | RS1(tmp) | S13(off) | RD(G0), ctx); + emit_nop(ctx); +} + +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; 
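emit_tail_call() above lays down the standard three tail-call guards before the indirect jump, with the branch distances OFFSET1/2/3 counted by hand in instructions. Its control flow, rendered as plain C over mock types (the struct names are stand-ins, not the kernel's; 32 was the kernel's tail-call limit at the time):

#include <stddef.h>

#define MAX_TAIL_CALL_CNT 32

struct mock_prog  { void *bpf_func; };
struct mock_array { unsigned int max_entries; struct mock_prog *ptrs[]; };

/* Returns the address to jump to, or NULL to fall through. */
static void *tail_call_target(struct mock_array *arr, unsigned int index,
			      unsigned int *cnt /* kept on the stack */)
{
	if (index >= arr->max_entries)		/* BGEU out, OFFSET1 */
		return NULL;
	if (*cnt > MAX_TAIL_CALL_CNT)		/* BGU out,  OFFSET2 */
		return NULL;
	(*cnt)++;
	if (arr->ptrs[index] == NULL)		/* BE out,   OFFSET3 */
		return NULL;
	/* land past the callee's save/ctx-move:
	 * BPF_TAILCALL_PROLOGUE_SKIP (16) bytes into bpf_func */
	return (char *)arr->ptrs[index]->bpf_func + 16;
}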
+ const u8 dst = bpf2sparc[insn->dst_reg]; + const u8 src = bpf2sparc[insn->src_reg]; + const int i = insn - ctx->prog->insnsi; + const s16 off = insn->off; + const s32 imm = insn->imm; + u32 *func; + + if (insn->src_reg == BPF_REG_FP) + ctx->saw_frame_pointer = true; + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + emit_alu3_K(SRL, src, 0, dst, ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_X: + emit_reg_move(src, dst, ctx); + break; + /* dst = dst OP src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + emit_alu(ADD, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit_alu(SUB, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit_alu(AND, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit_alu(OR, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit_alu(XOR, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_MUL | BPF_X: + emit_alu(MUL, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_MUL | BPF_X: + emit_alu(MULX, src, dst, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + emit_cmp(src, G0, ctx); + emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx); + emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx); + + emit_write_y(G0, ctx); + emit_alu(DIV, src, dst, ctx); + break; + + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_cmp(src, G0, ctx); + emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx); + emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx); + + emit_alu(UDIVX, src, dst, ctx); + break; + + case BPF_ALU | BPF_MOD | BPF_X: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + + ctx->tmp_1_used = true; + + emit_cmp(src, G0, ctx); + emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx); + emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx); + + emit_write_y(G0, ctx); + emit_alu3(DIV, dst, src, tmp, ctx); + emit_alu3(MULX, tmp, src, tmp, ctx); + emit_alu3(SUB, dst, tmp, dst, ctx); + goto do_alu32_trunc; + } + case BPF_ALU64 | BPF_MOD | BPF_X: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + + ctx->tmp_1_used = true; + + emit_cmp(src, G0, ctx); + emit_branch(BE|ANNUL, ctx->idx, ctx->epilogue_offset, ctx); + emit_loadimm(0, bpf2sparc[BPF_REG_0], ctx); + + emit_alu3(UDIVX, dst, src, tmp, ctx); + emit_alu3(MULX, tmp, src, tmp, ctx); + emit_alu3(SUB, dst, tmp, dst, ctx); + break; + } + case BPF_ALU | BPF_LSH | BPF_X: + emit_alu(SLL, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_LSH | BPF_X: + emit_alu(SLLX, src, dst, ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + emit_alu(SRL, src, dst, ctx); + break; + case BPF_ALU64 | BPF_RSH | BPF_X: + emit_alu(SRLX, src, dst, ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + emit_alu(SRA, src, dst, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_alu(SRAX, src, dst, ctx); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + emit(SUB | RS1(0) | RS2(dst) | RD(dst), ctx); + goto do_alu32_trunc; + + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (imm) { + case 16: + emit_alu_K(SLL, dst, 16, ctx); + emit_alu_K(SRL, dst, 16, ctx); + break; + case 32: + emit_alu_K(SRL, dst, 0, ctx); + break; + case 64: + /* nop */ + break; + + } + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + const u8 tmp2 = bpf2sparc[TMP_REG_2]; + + 
ctx->tmp_1_used = true; + switch (imm) { + case 16: + emit_alu3_K(AND, dst, 0xff, tmp, ctx); + emit_alu3_K(SRL, dst, 8, dst, ctx); + emit_alu3_K(AND, dst, 0xff, dst, ctx); + emit_alu3_K(SLL, tmp, 8, tmp, ctx); + emit_alu(OR, tmp, dst, ctx); + break; + + case 32: + ctx->tmp_2_used = true; + emit_alu3_K(SRL, dst, 24, tmp, ctx); /* tmp = dst >> 24 */ + emit_alu3_K(SRL, dst, 16, tmp2, ctx); /* tmp2 = dst >> 16 */ + emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */ + emit_alu3_K(SLL, tmp2, 8, tmp2, ctx); /* tmp2 = tmp2 << 8 */ + emit_alu(OR, tmp2, tmp, ctx); /* tmp = tmp | tmp2 */ + emit_alu3_K(SRL, dst, 8, tmp2, ctx); /* tmp2 = dst >> 8 */ + emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */ + emit_alu3_K(SLL, tmp2, 16, tmp2, ctx); /* tmp2 = tmp2 << 16 */ + emit_alu(OR, tmp2, tmp, ctx); /* tmp = tmp | tmp2 */ + emit_alu3_K(AND, dst, 0xff, dst, ctx); /* dst = dst & 0xff */ + emit_alu3_K(SLL, dst, 24, dst, ctx); /* dst = dst << 24 */ + emit_alu(OR, tmp, dst, ctx); /* dst = dst | tmp */ + break; + + case 64: + emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx); + emit(ST64 | RS1(tmp) | RS2(G0) | RD(dst), ctx); + emit(LD64A | ASI(ASI_PL) | RS1(tmp) | RS2(G0) | RD(dst), ctx); + break; + } + break; + } + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + emit_loadimm32(imm, dst, ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_loadimm_sext(imm, dst, ctx); + break; + /* dst = dst OP imm */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + emit_alu_K(ADD, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + emit_alu_K(SUB, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + emit_alu_K(AND, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + emit_alu_K(OR, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + emit_alu_K(XOR, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU | BPF_MUL | BPF_K: + emit_alu_K(MUL, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_MUL | BPF_K: + emit_alu_K(MULX, dst, imm, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_K: + if (imm == 0) + return -EINVAL; + + emit_write_y(G0, ctx); + emit_alu_K(DIV, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_DIV | BPF_K: + if (imm == 0) + return -EINVAL; + + emit_alu_K(UDIVX, dst, imm, ctx); + break; + case BPF_ALU64 | BPF_MOD | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: { + const u8 tmp = bpf2sparc[TMP_REG_2]; + unsigned int div; + + if (imm == 0) + return -EINVAL; + + div = (BPF_CLASS(code) == BPF_ALU64) ? 
UDIVX : DIV; + + ctx->tmp_2_used = true; + + if (BPF_CLASS(code) != BPF_ALU64) + emit_write_y(G0, ctx); + if (is_simm13(imm)) { + emit(div | IMMED | RS1(dst) | S13(imm) | RD(tmp), ctx); + emit(MULX | IMMED | RS1(tmp) | S13(imm) | RD(tmp), ctx); + emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx); + } else { + const u8 tmp1 = bpf2sparc[TMP_REG_1]; + + ctx->tmp_1_used = true; + + emit_set_const_sext(imm, tmp1, ctx); + emit(div | RS1(dst) | RS2(tmp1) | RD(tmp), ctx); + emit(MULX | RS1(tmp) | RS2(tmp1) | RD(tmp), ctx); + emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx); + } + goto do_alu32_trunc; + } + case BPF_ALU | BPF_LSH | BPF_K: + emit_alu_K(SLL, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_LSH | BPF_K: + emit_alu_K(SLLX, dst, imm, ctx); + break; + case BPF_ALU | BPF_RSH | BPF_K: + emit_alu_K(SRL, dst, imm, ctx); + break; + case BPF_ALU64 | BPF_RSH | BPF_K: + emit_alu_K(SRLX, dst, imm, ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_K: + emit_alu_K(SRA, dst, imm, ctx); + goto do_alu32_trunc; + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit_alu_K(SRAX, dst, imm, ctx); + break; + + do_alu32_trunc: + if (BPF_CLASS(code) == BPF_ALU) + emit_alu_K(SRL, dst, 0, ctx); + break; + + /* JUMP off */ + case BPF_JMP | BPF_JA: + emit_branch(BA, ctx->idx, ctx->offset[i + off], ctx); + emit_nop(ctx); + break; + /* IF (dst COND src) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: { + int err; + + err = emit_compare_and_branch(code, dst, src, 0, false, i + off, ctx); + if (err) + return err; + break; + } + /* IF (dst COND imm) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: { + int err; + + err = emit_compare_and_branch(code, dst, 0, imm, true, i + off, ctx); + if (err) + return err; + break; + } + + /* function call */ + case BPF_JMP | BPF_CALL: + { + u8 *func = ((u8 *)__bpf_call_base) + imm; + + ctx->saw_call = true; + + emit_call((u32 *)func, ctx); + emit_nop(ctx); + + emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); + + if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind) + load_skb_regs(ctx, bpf2sparc[BPF_REG_6]); + break; + } + + /* tail call */ + case BPF_JMP | BPF_CALL |BPF_X: + emit_tail_call(ctx); + break; + + /* function return */ + case BPF_JMP | BPF_EXIT: + /* Optimization: when last instruction is EXIT, + simply fallthrough to epilogue. 
*/ + if (i == ctx->prog->len - 1) + break; + emit_branch(BA, ctx->idx, ctx->epilogue_offset, ctx); + emit_nop(ctx); + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + const struct bpf_insn insn1 = insn[1]; + u64 imm64; + + imm64 = (u64)insn1.imm << 32 | (u32)imm; + emit_loadimm64(imm64, dst, ctx); + + return 1; + } + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + u32 opcode = 0, rs2; + + ctx->tmp_1_used = true; + switch (BPF_SIZE(code)) { + case BPF_W: + opcode = LD32; + break; + case BPF_H: + opcode = LD16; + break; + case BPF_B: + opcode = LD8; + break; + case BPF_DW: + opcode = LD64; + break; + } + + if (is_simm13(off)) { + opcode |= IMMED; + rs2 = S13(off); + } else { + emit_loadimm(off, tmp, ctx); + rs2 = RS2(tmp); + } + emit(opcode | RS1(src) | rs2 | RD(dst), ctx); + break; + } + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + const u8 tmp2 = bpf2sparc[TMP_REG_2]; + u32 opcode = 0, rs2; + + ctx->tmp_2_used = true; + emit_loadimm(imm, tmp2, ctx); + + switch (BPF_SIZE(code)) { + case BPF_W: + opcode = ST32; + break; + case BPF_H: + opcode = ST16; + break; + case BPF_B: + opcode = ST8; + break; + case BPF_DW: + opcode = ST64; + break; + } + + if (is_simm13(off)) { + opcode |= IMMED; + rs2 = S13(off); + } else { + ctx->tmp_1_used = true; + emit_loadimm(off, tmp, ctx); + rs2 = RS2(tmp); + } + emit(opcode | RS1(dst) | rs2 | RD(tmp2), ctx); + break; + } + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_DW: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + u32 opcode = 0, rs2; + + switch (BPF_SIZE(code)) { + case BPF_W: + opcode = ST32; + break; + case BPF_H: + opcode = ST16; + break; + case BPF_B: + opcode = ST8; + break; + case BPF_DW: + opcode = ST64; + break; + } + if (is_simm13(off)) { + opcode |= IMMED; + rs2 = S13(off); + } else { + ctx->tmp_1_used = true; + emit_loadimm(off, tmp, ctx); + rs2 = RS2(tmp); + } + emit(opcode | RS1(dst) | rs2 | RD(src), ctx); + break; + } + + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + const u8 tmp2 = bpf2sparc[TMP_REG_2]; + const u8 tmp3 = bpf2sparc[TMP_REG_3]; + + ctx->tmp_1_used = true; + ctx->tmp_2_used = true; + ctx->tmp_3_used = true; + emit_loadimm(off, tmp, ctx); + emit_alu3(ADD, dst, tmp, tmp, ctx); + + emit(LD32 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx); + emit_alu3(ADD, tmp2, src, tmp3, ctx); + emit(CAS | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx); + emit_cmp(tmp2, tmp3, ctx); + emit_branch(BNE, 4, 0, ctx); + emit_nop(ctx); + break; + } + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: { + const u8 tmp = bpf2sparc[TMP_REG_1]; + const u8 tmp2 = bpf2sparc[TMP_REG_2]; + const u8 tmp3 = bpf2sparc[TMP_REG_3]; + + ctx->tmp_1_used = true; + ctx->tmp_2_used = true; + ctx->tmp_3_used = true; + emit_loadimm(off, tmp, ctx); + emit_alu3(ADD, dst, tmp, tmp, ctx); + + emit(LD64 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx); + emit_alu3(ADD, tmp2, src, tmp3, ctx); + emit(CASX | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx); + emit_cmp(tmp2, tmp3, ctx); + emit_branch(BNE, 4, 
0, ctx); + emit_nop(ctx); + break; + } +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) + + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ + case BPF_LD | BPF_ABS | BPF_W: + func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_word); + goto common_load; + case BPF_LD | BPF_ABS | BPF_H: + func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_half); + goto common_load; + case BPF_LD | BPF_ABS | BPF_B: + func = CHOOSE_LOAD_FUNC(imm, bpf_jit_load_byte); + goto common_load; + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ + case BPF_LD | BPF_IND | BPF_W: + func = bpf_jit_load_word; + goto common_load; + case BPF_LD | BPF_IND | BPF_H: + func = bpf_jit_load_half; + goto common_load; + + case BPF_LD | BPF_IND | BPF_B: + func = bpf_jit_load_byte; + common_load: + ctx->saw_ld_abs_ind = true; + + emit_reg_move(bpf2sparc[BPF_REG_6], O0, ctx); + emit_loadimm(imm, O1, ctx); + + if (BPF_MODE(code) == BPF_IND) + emit_alu(ADD, src, O1, ctx); + + emit_call(func, ctx); + emit_alu_K(SRA, O1, 0, ctx); + + emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); + break; + + default: + pr_err_once("unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + ret = build_insn(insn, ctx); + + if (ret > 0) { + i++; + ctx->offset[i] = ctx->idx; + continue; + } + ctx->offset[i] = ctx->idx; + if (ret) + return ret; + } + return 0; +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + u32 *ptr; + /* We are guaranteed to have aligned memory. */ + for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) + *ptr++ = 0x91d02005; /* ta 5 */ +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header; + bool tmp_blinded = false; + struct jit_ctx ctx; + u32 image_size; + u8 *image_ptr; + int pass; + + if (!bpf_jit_enable) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. + */ + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL); + if (ctx.offset == NULL) { + prog = orig_prog; + goto out; + } + + /* Fake pass to detect features used, and get an accurate assessment + * of what the final image size will be. + */ + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } + build_prologue(&ctx); + build_epilogue(&ctx); + + /* Now we know the actual image size. */ + image_size = sizeof(u32) * ctx.idx; + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) { + prog = orig_prog; + goto out_off; + } + + ctx.image = (u32 *)image_ptr; + + for (pass = 1; pass < 3; pass++) { + ctx.idx = 0; + + build_prologue(&ctx); + + if (build_body(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + build_epilogue(&ctx); + + if (bpf_jit_enable > 1) + pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c%c]\n", pass, + image_size - (ctx.idx * 4), + ctx.tmp_1_used ? '1' : ' ', + ctx.tmp_2_used ? '2' : ' ', + ctx.tmp_3_used ? '3' : ' ', + ctx.saw_ld_abs_ind ? 
'L' : ' ', + ctx.saw_frame_pointer ? 'F' : ' ', + ctx.saw_call ? 'C' : ' ', + ctx.saw_tail_call ? 'T' : ' '); + } + + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, pass, ctx.image); + + bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE)); + + bpf_jit_binary_lock_ro(header); + + prog->bpf_func = (void *)ctx.image; + prog->jited = 1; + +out_off: + kfree(ctx.offset); +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? + tmp : orig_prog); + return prog; +} diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 32322ce9b405450c1187a8e9403658617293e39e..14f840df1d9505bd43859537b0d88d66206a1608 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -490,13 +490,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, break; case BPF_LD | BPF_IMM | BPF_DW: - if (insn[1].code != 0 || insn[1].src_reg != 0 || - insn[1].dst_reg != 0 || insn[1].off != 0) { - /* verifier must catch invalid insns */ - pr_err("invalid BPF_LD_IMM64 insn\n"); - return -EINVAL; - } - /* optimization: if imm64 is zero, use 'xor ,' * to save 7 bytes. */ diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 9fdbe1fe0473802caaf04782f9a5c05ca813f013..1eb6d2fe70d3483aa191a3f7cf4d3ad3dfe497a9 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -103,4 +103,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* _XTENSA_SOCKET_H */ diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 89acaab1d90951c3b2df9444bf122fc419bdd21f..0dbe2be7f78316b2359f11be09ac24e528f3b8de 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -483,7 +483,8 @@ static const struct crypto_link { [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng }, }; -static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct nlattr *attrs[CRYPTOCFGA_MAX+1]; const struct crypto_link *link; @@ -522,7 +523,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, - crypto_policy); + crypto_policy, extack); if (err < 0) return err; diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 4a610795b585fd41765676529eaaccc0685cda93..906705e5f7763c5ac91f1f00a140f49c54ee7c2a 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2267,9 +2267,8 @@ static int amb_probe(struct pci_dev *pci_dev, dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS; dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS; - init_timer(&dev->housekeeping); - dev->housekeeping.function = do_housekeeping; - dev->housekeeping.data = (unsigned long) dev; + setup_timer(&dev->housekeeping, do_housekeeping, + (unsigned long)dev); mod_timer(&dev->housekeeping, jiffies); // enable host interrupts diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 771a2a253440f736711bd996044d78b6ce3b723a..7bde8d7a2816b5cb42e7d3104c26a960ad32f8b9 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -185,8 +185,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->owner = THIS_MODULE; chip->parent = bcma_bus_get_host_dev(bus); #if IS_BUILTIN(CONFIG_OF) - if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) - chip->of_node = 
cc->core->dev.of_node; + chip->of_node = cc->core->dev.of_node; #endif switch (bus->chipinfo.id) { case BCMA_CHIP_ID_BCM4707: diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 12da68ec48baa751d78899e89ac7da2e60f197f7..e6986c7608f1e836eb8c61bfc3b756ee6d150e80 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -201,9 +201,6 @@ static void bcma_of_fill_device(struct device *parent, { struct device_node *node; - if (!IS_ENABLED(CONFIG_OF_IRQ)) - return; - node = bcma_of_find_child_device(parent, core); if (node) core->dev.of_node = node; @@ -242,19 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); + core->dev.parent = bcma_bus_get_host_dev(bus); + if (core->dev.parent) + bcma_of_fill_device(core->dev.parent, core); switch (bus->hosttype) { case BCMA_HOSTTYPE_PCI: - core->dev.parent = &bus->host_pci->dev; core->dma_dev = &bus->host_pci->dev; core->irq = bus->host_pci->irq; break; case BCMA_HOSTTYPE_SOC: if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) { core->dma_dev = &bus->host_pdev->dev; - core->dev.parent = &bus->host_pdev->dev; - if (core->dev.parent) - bcma_of_fill_device(core->dev.parent, core); } else { core->dev.dma_mask = &core->dev.coherent_dma_mask; core->dma_dev = &core->dev; diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c index b2d4791498a660618aa6f603db35fd0c0b5a94e7..6bf806df60dc6008e6754b74dec4ff9a021ce8ae 100644 --- a/drivers/block/drbd/drbd_nla.c +++ b/drivers/block/drbd/drbd_nla.c @@ -34,7 +34,7 @@ int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, err = drbd_nla_check_mandatory(maxtype, nla); if (!err) - err = nla_parse_nested(tb, maxtype, nla, policy); + err = nla_parse_nested(tb, maxtype, nla, policy, NULL); return err; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 56efb0444b4d6edf71f59ea2447de861635c20cf..9b482baa869ebd480027f1166b07ae0491d7551f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1662,7 +1662,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) goto out; } ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, - nbd_sock_policy); + nbd_sock_policy, info->extack); if (ret != 0) { printk(KERN_ERR "nbd: error processing sock list\n"); ret = -EINVAL; @@ -1818,7 +1818,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) goto out; } ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, - nbd_sock_policy); + nbd_sock_policy, info->extack); if (ret != 0) { printk(KERN_ERR "nbd: error processing sock list\n"); ret = -EINVAL; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 08e054507d0bcd7fc59f5d14d0927ba6ca35291f..737d93ef27c5da8b36552bd09409467c3fb38660 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -76,6 +76,12 @@ config BT_HCIUART Say Y here to compile support for Bluetooth UART devices into the kernel or say M to compile it as module (hci_uart). +config BT_HCIUART_SERDEV + bool + depends on SERIAL_DEV_BUS && BT_HCIUART + depends on SERIAL_DEV_BUS=y || SERIAL_DEV_BUS=BT_HCIUART + default y + config BT_HCIUART_H4 bool "UART (H4) protocol support" depends on BT_HCIUART @@ -86,6 +92,18 @@ config BT_HCIUART_H4 Say Y here to compile support for HCI UART (H4) protocol. 
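As background for the H4-based options below: H4 framing is simply a one-byte packet-type indicator prepended to each HCI packet, and h4_recv_buf() in drivers/bluetooth/hci_h4.c reassembles incoming frames by matching that byte against a table of expected types. A minimal stand-alone sketch of the encode side (illustrative only, not a kernel API; the type values are the standard Bluetooth UART transport indicators):

	#include <stdint.h>
	#include <string.h>

	#define H4_CMD	0x01	/* HCI command */
	#define H4_ACL	0x02	/* ACL data */
	#define H4_SCO	0x03	/* SCO data */
	#define H4_EVT	0x04	/* HCI event */

	/* Prepend the H4 packet-type octet; returns the framed length. */
	static size_t h4_frame(uint8_t *out, uint8_t type,
			       const uint8_t *hci_pkt, size_t len)
	{
		out[0] = type;
		memcpy(out + 1, hci_pkt, len);
		return len + 1;
	}

The Nokia H4+ entry that follows extends this scheme with vendor packet types (negotiation, alive, radio) and word-aligned padding.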
+config BT_HCIUART_NOKIA + tristate "UART Nokia H4+ protocol support" + depends on BT_HCIUART + depends on BT_HCIUART_SERDEV + depends on PM + help + Nokia H4+ is a serial protocol for communication between the Bluetooth + device and the host. This protocol is required for Bluetooth devices + with a UART interface in Nokia devices. + + Say Y here to compile support for Nokia's H4+ protocol. + config BT_HCIUART_BCSP bool "BCSP protocol support" depends on BT_HCIUART @@ -344,7 +362,7 @@ config BT_WILINK config BT_QCOMSMD tristate "Qualcomm SMD based HCI support" - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG || (COMPILE_TEST && RPMSG=n) depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) select BT_QCA help diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 80627187c8b68253c6ba4d101c5ca8c2f43dcfd9..e693ca6eeed9267d53f3eae0386bf90643d25cb3 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -25,10 +25,13 @@ obj-$(CONFIG_BT_BCM) += btbcm.o obj-$(CONFIG_BT_RTL) += btrtl.o obj-$(CONFIG_BT_QCA) += btqca.o +obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o + btmrvl-y := btmrvl_main.o btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o hci_uart-y := hci_ldisc.o +hci_uart-$(CONFIG_BT_HCIUART_SERDEV) += hci_serdev.o hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index c0b3b55769921da4d8bd9c52c12924cfa06908cc..007c0a45f31baa18b9da1b3a51e9b63536e1eba5 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -695,9 +695,8 @@ static int bluecard_open(struct bluecard_info *info) spin_lock_init(&(info->lock)); - init_timer(&(info->timer)); - info->timer.function = &bluecard_activity_led_timeout; - info->timer.data = (u_long)info; + setup_timer(&(info->timer), &bluecard_activity_led_timeout, + (u_long)info); skb_queue_head_init(&(info->txq)); diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 08e01f002bad215c7dc5929ea1a2cf7e42f4a820..eb794f08b2385bfcb2162531cee4fbaa62c818fd 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -60,13 +61,15 @@ static const struct of_device_id btmrvl_sdio_of_match_table[] = { static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv) { - struct btmrvl_plt_wake_cfg *cfg = priv; + struct btmrvl_sdio_card *card = priv; + struct btmrvl_plt_wake_cfg *cfg = card->plt_wake_cfg; - if (cfg->irq_bt >= 0) { - pr_info("%s: wake by bt", __func__); - cfg->wake_by_bt = true; - disable_irq_nosync(irq); - } + pr_info("%s: wake by bt", __func__); + cfg->wake_by_bt = true; + disable_irq_nosync(irq); + + pm_wakeup_event(&card->func->dev, 0); + pm_system_wakeup(); return IRQ_HANDLED; } @@ -101,7 +104,7 @@ static int btmrvl_sdio_probe_of(struct device *dev, } else { ret = devm_request_irq(dev, cfg->irq_bt, btmrvl_wake_irq_bt, - 0, "bt_wake", cfg); + 0, "bt_wake", card); if (ret) { dev_err(dev, "Failed to request irq_bt %d (%d)\n", @@ -1574,7 +1577,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func) MODULE_SHUTDOWN_REQ); btmrvl_sdio_disable_host_int(card); } - BT_DBG("unregester dev"); + BT_DBG("unregister dev"); card->priv->surprise_removed = true; btmrvl_sdio_unregister_dev(card); btmrvl_remove_card(card->priv); @@ -1625,6 +1628,13 @@ static int btmrvl_sdio_suspend(struct device *dev) if
(priv->adapter->hs_state != HS_ACTIVATED) { if (btmrvl_enable_hs(priv)) { BT_ERR("HS not activated, suspend failed!"); + /* Disable platform specific wakeup interrupt */ + if (card->plt_wake_cfg && + card->plt_wake_cfg->irq_bt >= 0) { + disable_irq_wake(card->plt_wake_cfg->irq_bt); + disable_irq(card->plt_wake_cfg->irq_bt); + } + priv->adapter->is_suspending = false; return -EBUSY; } @@ -1637,10 +1647,10 @@ static int btmrvl_sdio_suspend(struct device *dev) if (priv->adapter->hs_state == HS_ACTIVATED) { BT_DBG("suspend with MMC_PM_KEEP_POWER"); return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); - } else { - BT_DBG("suspend without MMC_PM_KEEP_POWER"); - return 0; } + + BT_DBG("suspend without MMC_PM_KEEP_POWER"); + return 0; } static int btmrvl_sdio_resume(struct device *dev) diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 8d4868af9bbd88fff13eb8525dec5310654f54de..ef730c173d4b875063726ccd398b7cded2e6f660 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include @@ -26,8 +26,8 @@ struct btqcomsmd { struct hci_dev *hdev; - struct qcom_smd_channel *acl_channel; - struct qcom_smd_channel *cmd_channel; + struct rpmsg_endpoint *acl_channel; + struct rpmsg_endpoint *cmd_channel; }; static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type, @@ -48,19 +48,19 @@ static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type, return hci_recv_frame(hdev, skb); } -static int btqcomsmd_acl_callback(struct qcom_smd_channel *channel, - const void *data, size_t count) +static int btqcomsmd_acl_callback(struct rpmsg_device *rpdev, void *data, + int count, void *priv, u32 addr) { - struct btqcomsmd *btq = qcom_smd_get_drvdata(channel); + struct btqcomsmd *btq = priv; btq->hdev->stat.byte_rx += count; return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count); } -static int btqcomsmd_cmd_callback(struct qcom_smd_channel *channel, - const void *data, size_t count) +static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data, + int count, void *priv, u32 addr) { - struct btqcomsmd *btq = qcom_smd_get_drvdata(channel); + struct btqcomsmd *btq = priv; return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count); } @@ -72,12 +72,12 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: - ret = qcom_smd_send(btq->acl_channel, skb->data, skb->len); + ret = rpmsg_send(btq->acl_channel, skb->data, skb->len); hdev->stat.acl_tx++; hdev->stat.byte_tx += skb->len; break; case HCI_COMMAND_PKT: - ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len); + ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len); hdev->stat.cmd_tx++; break; default: @@ -114,18 +114,15 @@ static int btqcomsmd_probe(struct platform_device *pdev) wcnss = dev_get_drvdata(pdev->dev.parent); btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL", - btqcomsmd_acl_callback); + btqcomsmd_acl_callback, btq); if (IS_ERR(btq->acl_channel)) return PTR_ERR(btq->acl_channel); btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", - btqcomsmd_cmd_callback); + btqcomsmd_cmd_callback, btq); if (IS_ERR(btq->cmd_channel)) return PTR_ERR(btq->cmd_channel); - qcom_smd_set_drvdata(btq->acl_channel, btq); - qcom_smd_set_drvdata(btq->cmd_channel, btq); - hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; @@ -158,6 +155,9 @@ static int btqcomsmd_remove(struct platform_device *pdev) 
hci_unregister_dev(btq->hdev); hci_free_dev(btq->hdev); + rpmsg_destroy_ept(btq->cmd_channel); + rpmsg_destroy_ept(btq->acl_channel); + return 0; } diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index fc9b25703c67d0ee9bd4acdd335b287781446529..8279094dd713bf8b03d0403e2ebee77313bcde3c 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -275,11 +275,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) BT_INFO("%s: rtl: loading %s", hdev->name, name); ret = request_firmware(&fw, name, &hdev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load %s", hdev->name, name); + if (ret < 0) return ret; - } - ret = fw->size; *buff = kmemdup(fw->data, ret, GFP_KERNEL); @@ -331,6 +328,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, u8 *cfg_buff = NULL; u8 *tbuff; char *cfg_name = NULL; + bool config_needed = false; switch (lmp_subver) { case RTL_ROM_LMP_8723B: @@ -344,6 +342,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, break; case RTL_ROM_LMP_8822B: cfg_name = "rtl_bt/rtl8822b_config.bin"; + config_needed = true; break; default: BT_ERR("%s: rtl: no config according to lmp_subver %04x", @@ -353,8 +352,12 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, if (cfg_name) { cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff); - if (cfg_sz < 0) + if (cfg_sz < 0) { cfg_sz = 0; + if (config_needed) + BT_ERR("Necessary config file %s not found\n", + cfg_name); + } } else cfg_sz = 0; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1c8094ef3f228414bb427c258b9201034e01004d..7fa373b428f8dc081e93271dd95d23a098c243ee 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -262,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, @@ -328,6 +330,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL }, /* Intel Bluetooth devices */ + { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, @@ -2024,13 +2027,18 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) return -EINVAL; } - /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP) - * and 0x0c (WsP) are supported by this firmware loading method. + /* Check for supported iBT hardware variants of this firmware + * loading method. * * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come along. 
*/ - if (ver.hw_variant != 0x0b && ver.hw_variant != 0x0c) { + switch (ver.hw_variant) { + case 0x0b: /* SfP */ + case 0x0c: /* WsP */ + case 0x12: /* ThP */ + break; + default: BT_ERR("%s: Unsupported Intel hardware variant (%u)", hdev->name, ver.hw_variant); return -EINVAL; @@ -2792,6 +2800,7 @@ static irqreturn_t btusb_oob_wake_handler(int irq, void *priv) struct btusb_data *data = priv; pm_wakeup_event(&data->udev->dev, 0); + pm_system_wakeup(); /* Disable only if not already disabled (keep it balanced) */ if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 5262a2077d7a83c217635e83eb66f10fde82b3d6..f87bfdfee4ff5adab798167929650f14f978931a 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -146,13 +146,13 @@ static bool bcm_device_exists(struct bcm_device *device) static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) { if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) - clk_enable(dev->clk); + clk_prepare_enable(dev->clk); gpiod_set_value(dev->shutdown, powered); gpiod_set_value(dev->device_wakeup, powered); if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled) - clk_disable(dev->clk); + clk_disable_unprepare(dev->clk); dev->clk_enabled = powered; @@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu) hu->priv = bcm; + if (!hu->tty->dev) + goto out; + mutex_lock(&bcm_device_lock); list_for_each(p, &bcm_device_list) { struct bcm_device *dev = list_entry(p, struct bcm_device, list); @@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu) } mutex_unlock(&bcm_device_lock); - +out: return 0; } @@ -697,28 +700,14 @@ static int bcm_resource(struct acpi_resource *ares, void *data) /* Always tell the ACPI core to skip this resource */ return 1; } +#endif /* CONFIG_ACPI */ -static int bcm_acpi_probe(struct bcm_device *dev) +static int bcm_platform_probe(struct bcm_device *dev) { struct platform_device *pdev = dev->pdev; - LIST_HEAD(resources); - const struct dmi_system_id *dmi_id; - const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios; - const struct acpi_device_id *id; - int ret; dev->name = dev_name(&pdev->dev); - /* Retrieve GPIO data */ - id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); - if (id) - gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data; - - ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev), - gpio_mapping); - if (ret) - return ret; - dev->clk = devm_clk_get(&pdev->dev, NULL); dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev, @@ -755,6 +744,33 @@ static int bcm_acpi_probe(struct bcm_device *dev) return -EINVAL; } + return 0; +} + +#ifdef CONFIG_ACPI +static int bcm_acpi_probe(struct bcm_device *dev) +{ + struct platform_device *pdev = dev->pdev; + LIST_HEAD(resources); + const struct dmi_system_id *dmi_id; + const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios; + const struct acpi_device_id *id; + int ret; + + /* Retrieve GPIO data */ + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (id) + gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data; + + ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev), + gpio_mapping); + if (ret) + return ret; + + ret = bcm_platform_probe(dev); + if (ret) + return ret; + /* Retrieve UART ACPI info */ ret = acpi_dev_get_resources(ACPI_COMPANION(&dev->pdev->dev), &resources, bcm_resource, dev); @@ -789,7 +805,10 @@ static int bcm_probe(struct platform_device *pdev) 
dev->pdev = pdev; - ret = bcm_acpi_probe(dev); + if (has_acpi_companion(&pdev->dev)) + ret = bcm_acpi_probe(dev); + else + ret = bcm_platform_probe(dev); if (ret) return ret; diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 635597b6e1681f25e8ed6004c6960369dcae3e1b..82e5a32b87a43646cf49721e0f39a1671d498897 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -171,9 +171,20 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, const unsigned char *buffer, int count, const struct h4_recv_pkt *pkts, int pkts_count) { + struct hci_uart *hu = hci_get_drvdata(hdev); + u8 alignment = hu->alignment; + while (count) { int i, len; + /* remove padding bytes from buffer */ + for (; hu->padding && count > 0; hu->padding--) { + count--; + buffer++; + } + if (!count) + break; + if (!skb) { for (i = 0; i < pkts_count; i++) { if (buffer[0] != (&pkts[i])->type) @@ -253,11 +264,17 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, } if (!dlen) { + hu->padding = (skb->len - 1) % alignment; + hu->padding = (alignment - hu->padding) % alignment; + /* No more data, complete frame */ (&pkts[i])->recv(hdev, skb); skb = NULL; } } else { + hu->padding = (skb->len - 1) % alignment; + hu->padding = (alignment - hu->padding) % alignment; + /* Complete frame */ (&pkts[i])->recv(hdev, skb); skb = NULL; diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 9e271286c5e553dbd6112f8a83393c8bf5b5dd43..fa5099986f1b3ca50c14745990a0fe682963b737 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered) struct list_head *p; int err = -ENODEV; + if (!hu->tty->dev) + return err; + mutex_lock(&intel_device_list_lock); list_for_each(p, &intel_device_list) { @@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work) struct intel_data *intel = container_of(work, struct intel_data, busy_work); + if (!intel->hu->tty->dev) + return; + /* Link is busy, delay the suspend */ mutex_lock(&intel_device_list_lock); list_for_each(p, &intel_device_list) { @@ -601,12 +607,18 @@ static int intel_setup(struct hci_uart *hu) return -EINVAL; } - /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is - * supported by this firmware loading method. This check has been - * put in place to ensure correct forward compatibility options - * when newer hardware variants come along. - */ - if (ver.hw_variant != 0x0b) { + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. + */ + switch (ver.hw_variant) { + case 0x0b: /* LnP */ + case 0x0c: /* WsP */ + case 0x12: /* ThP */ + break; + default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", ver.hw_variant); return -EINVAL; @@ -699,11 +711,14 @@ static int intel_setup(struct hci_uart *hu) /* With this Intel bootloader only the hardware variant and device * revision information are used to select the right firmware. * - * Currently this bootloader support is limited to hardware variant - * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b). + * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
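+ * For example, hw_variant 11 (LnP/SfP) with a device revision of 5 + * would select intel/ibt-11-5.sfi (the revision value is illustrative).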
+ * + * Currently the supported hardware variants are: + * 11 (0x0b) for iBT 3.0 (LnP/SfP) */ - snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi", - le16_to_cpu(params->dev_revid)); + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", + le16_to_cpu(ver.hw_variant), + le16_to_cpu(params->dev_revid)); err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -716,8 +731,9 @@ static int intel_setup(struct hci_uart *hu) bt_dev_info(hdev, "Found device firmware: %s", fwname); /* Save the DDC file name for later */ - snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc", - le16_to_cpu(params->dev_revid)); + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", + le16_to_cpu(ver.hw_variant), + le16_to_cpu(params->dev_revid)); kfree_skb(skb); @@ -889,6 +905,8 @@ static int intel_setup(struct hci_uart *hu) list_for_each(p, &intel_device_list) { struct intel_device *dev = list_entry(p, struct intel_device, list); + if (!hu->tty->dev) + break; if (hu->tty->dev->parent == dev->pdev->dev.parent) { if (device_may_wakeup(&dev->pdev->dev)) { set_bit(STATE_LPM_ENABLED, &intel->flags); @@ -1056,6 +1074,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) BT_DBG("hu %p skb %p", hu, skb); + if (!hu->tty->dev) + goto out_enqueue; + /* Be sure our controller is resumed and potential LPM transaction * completed before enqueuing any packet. */ @@ -1072,7 +1093,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) } } mutex_unlock(&intel_device_list_lock); - +out_enqueue: skb_queue_tail(&intel->txq, skb); return 0; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 9497c469efd225cddd8cb53214d18c7310a14539..2edd30556956d83d9087a5b6a78dac198fed6eac 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -113,16 +113,21 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) { struct sk_buff *skb = hu->tx_skb; - if (!skb) - skb = hu->proto->dequeue(hu); - else + if (!skb) { + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + skb = hu->proto->dequeue(hu); + } else { hu->tx_skb = NULL; + } return skb; } int hci_uart_tx_wakeup(struct hci_uart *hu) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + return 0; + if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) { set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); return 0; @@ -134,6 +139,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) return 0; } +EXPORT_SYMBOL_GPL(hci_uart_tx_wakeup); static void hci_uart_write_work(struct work_struct *work) { @@ -176,6 +182,7 @@ static void hci_uart_init_work(struct work_struct *work) { struct hci_uart *hu = container_of(work, struct hci_uart, init_ready); int err; + struct hci_dev *hdev; if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return; @@ -183,9 +190,12 @@ static void hci_uart_init_work(struct work_struct *work) err = hci_register_dev(hu->hdev); if (err < 0) { BT_ERR("Can't register HCI device"); - hci_free_dev(hu->hdev); + hdev = hu->hdev; hu->hdev = NULL; + hci_free_dev(hdev); + clear_bit(HCI_UART_PROTO_READY, &hu->flags); hu->proto->close(hu); + return; } set_bit(HCI_UART_REGISTERED, &hu->flags); @@ -251,6 +261,9 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + return -EUNATCH; + hu->proto->enqueue(hu, skb); hci_uart_tx_wakeup(hu); @@ -318,25 +331,6 @@ void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, 
hu->oper_speed = oper_speed; } -void hci_uart_init_tty(struct hci_uart *hu) -{ - struct tty_struct *tty = hu->tty; - struct ktermios ktermios; - - /* Bring the UART into a known 8 bits no parity hw fc state */ - ktermios = tty->termios; - ktermios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | - INLCR | IGNCR | ICRNL | IXON); - ktermios.c_oflag &= ~OPOST; - ktermios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); - ktermios.c_cflag &= ~(CSIZE | PARENB); - ktermios.c_cflag |= CS8; - ktermios.c_cflag |= CRTSCTS; - - /* tty_set_termios() return not checked as it is always 0 */ - tty_set_termios(tty, &ktermios); -} - void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct tty_struct *tty = hu->tty; @@ -459,6 +453,10 @@ static int hci_uart_tty_open(struct tty_struct *tty) hu->tty = tty; tty->receive_room = 65536; + /* disable alignment support by default */ + hu->alignment = 1; + hu->padding = 0; + INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); @@ -616,6 +614,7 @@ static int hci_uart_register_dev(struct hci_uart *hu) if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); + hu->hdev = NULL; hci_free_dev(hdev); return -ENODEV; } diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 02692fe30279cbea1a82e5445a2a5c0884d6f7a4..adc444f309a3cf847148c99950d8d769fd9f4419 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -34,20 +34,24 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include #include +#include +#include #include +#include #include #include +#include #include "hci_uart.h" @@ -76,6 +80,12 @@ struct hcill_cmd { u8 cmd; } __packed; +struct ll_device { + struct hci_uart hu; + struct serdev_device *serdev; + struct gpio_desc *enable_gpio; +}; + struct ll_struct { unsigned long rx_state; unsigned long rx_count; @@ -136,6 +146,9 @@ static int ll_open(struct hci_uart *hu) hu->priv = ll; + if (hu->serdev) + serdev_device_open(hu->serdev); + return 0; } @@ -164,6 +177,13 @@ static int ll_close(struct hci_uart *hu) kfree_skb(ll->rx_skb); + if (hu->serdev) { + struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev); + gpiod_set_value_cansleep(lldev->enable_gpio, 0); + + serdev_device_close(hu->serdev); + } + hu->priv = NULL; kfree(ll); @@ -505,9 +525,244 @@ static struct sk_buff *ll_dequeue(struct hci_uart *hu) return skb_dequeue(&ll->txq); } +#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS) +static int read_local_version(struct hci_dev *hdev) +{ + int err = 0; + unsigned short version = 0; + struct sk_buff *skb; + struct hci_rp_read_local_version *ver; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading TI version information failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + if (skb->len != sizeof(*ver)) { + err = -EILSEQ; + goto out; + } + + ver = (struct hci_rp_read_local_version *)skb->data; + if (le16_to_cpu(ver->manufacturer) != 13) { + err = -ENODEV; + goto out; + } + + version = le16_to_cpu(ver->lmp_subver); + +out: + if (err) bt_dev_err(hdev, "Failed to read TI version info: %d", err); + kfree_skb(skb); + return err ? 
err : version; +} + +/** + * download_firmware - + * internal function which parses through the .bts firmware + * script file and interprets SEND and DELAY actions only as of now + */ +static int download_firmware(struct ll_device *lldev) +{ + unsigned short chip, min_ver, maj_ver; + int version, err, len; + unsigned char *ptr, *action_ptr; + unsigned char bts_scr_name[40]; /* 40 char long bts script name */ + const struct firmware *fw; + struct sk_buff *skb; + struct hci_command *cmd; + + version = read_local_version(lldev->hu.hdev); + if (version < 0) + return version; + + chip = (version & 0x7C00) >> 10; + min_ver = (version & 0x007F); + maj_ver = (version & 0x0380) >> 7; + if (version & 0x8000) + maj_ver |= 0x0008; + + snprintf(bts_scr_name, sizeof(bts_scr_name), + "ti-connectivity/TIInit_%d.%d.%d.bts", + chip, maj_ver, min_ver); + + err = request_firmware(&fw, bts_scr_name, &lldev->serdev->dev); + if (err || !fw->data || !fw->size) { + bt_dev_err(lldev->hu.hdev, "request_firmware failed (errno %d) for %s", + err, bts_scr_name); + return -EINVAL; + } + ptr = (void *)fw->data; + len = fw->size; + /* skip the bts_header (magic number and + * version) + */ + ptr += sizeof(struct bts_header); + len -= sizeof(struct bts_header); + + while (len > 0 && ptr) { + bt_dev_dbg(lldev->hu.hdev, " action size %d, type %d ", + ((struct bts_action *)ptr)->size, + ((struct bts_action *)ptr)->type); + + action_ptr = &(((struct bts_action *)ptr)->data[0]); + + switch (((struct bts_action *)ptr)->type) { + case ACTION_SEND_COMMAND: /* action send */ + bt_dev_dbg(lldev->hu.hdev, "S"); + cmd = (struct hci_command *)action_ptr; + if (cmd->opcode == 0xff36) { + /* ignore remote change + * baud rate HCI VS command */ + bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware"); + break; + } + if (cmd->prefix != 1) + bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix); + + skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(lldev->hu.hdev, "send command failed"); + goto out_rel_fw; + } + kfree_skb(skb); + break; + case ACTION_WAIT_EVENT: /* wait */ + /* no need to wait as command was synchronous */ + bt_dev_dbg(lldev->hu.hdev, "W"); + break; + case ACTION_DELAY: /* sleep */ + bt_dev_info(lldev->hu.hdev, "sleep command in scr"); + mdelay(((struct bts_action_delay *)action_ptr)->msec); + break; + } + len -= (sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size); + ptr += sizeof(struct bts_action) + + ((struct bts_action *)ptr)->size; + } + +out_rel_fw: + /* fw download complete */ + release_firmware(fw); + return err; +} + +static int ll_setup(struct hci_uart *hu) +{ + int err, retry = 3; + struct ll_device *lldev; + struct serdev_device *serdev = hu->serdev; + u32 speed; + + if (!serdev) + return 0; + + lldev = serdev_device_get_drvdata(serdev); + + serdev_device_set_flow_control(serdev, true); + + do { + /* Configure BT_EN to HIGH state */ + gpiod_set_value_cansleep(lldev->enable_gpio, 0); + msleep(5); + gpiod_set_value_cansleep(lldev->enable_gpio, 1); + msleep(100); + + err = download_firmware(lldev); + if (!err) + break; + + /* Toggle BT_EN and retry */ + bt_dev_err(hu->hdev, "download firmware failed, retrying..."); + } while (retry--); + + if (err) + return err; + + /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + + if (speed) { + struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36,
sizeof(speed), &speed, HCI_INIT_TIMEOUT); + if (!IS_ERR(skb)) { + kfree_skb(skb); + serdev_device_set_baudrate(serdev, speed); + } + } + + return 0; +} + +static const struct hci_uart_proto llp; + +static int hci_ti_probe(struct serdev_device *serdev) +{ + struct hci_uart *hu; + struct ll_device *lldev; + u32 max_speed = 3000000; + + lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL); + if (!lldev) + return -ENOMEM; + hu = &lldev->hu; + + serdev_device_set_drvdata(serdev, lldev); + lldev->serdev = hu->serdev = serdev; + + lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(lldev->enable_gpio)) + return PTR_ERR(lldev->enable_gpio); + + of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed); + hci_uart_set_speeds(hu, 115200, max_speed); + + return hci_uart_register_device(hu, &llp); +} + +static void hci_ti_remove(struct serdev_device *serdev) +{ + struct ll_device *lldev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &lldev->hu; + struct hci_dev *hdev = hu->hdev; + + cancel_work_sync(&hu->write_work); + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + hu->proto->close(hu); +} + +static const struct of_device_id hci_ti_of_match[] = { + { .compatible = "ti,wl1831-st" }, + { .compatible = "ti,wl1835-st" }, + { .compatible = "ti,wl1837-st" }, + {}, +}; +MODULE_DEVICE_TABLE(of, hci_ti_of_match); + +static struct serdev_device_driver hci_ti_drv = { + .driver = { + .name = "hci-ti", + .of_match_table = of_match_ptr(hci_ti_of_match), + }, + .probe = hci_ti_probe, + .remove = hci_ti_remove, +}; +#else +#define ll_setup NULL +#endif + static const struct hci_uart_proto llp = { .id = HCI_UART_LL, .name = "LL", + .setup = ll_setup, .open = ll_open, .close = ll_close, .recv = ll_recv, @@ -518,10 +773,14 @@ static const struct hci_uart_proto llp = { int __init ll_init(void) { + serdev_device_driver_register(&hci_ti_drv); + return hci_uart_register_proto(&llp); } int __exit ll_deinit(void) { + serdev_device_driver_unregister(&hci_ti_drv); + return hci_uart_unregister_proto(&llp); } diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c new file mode 100644 index 0000000000000000000000000000000000000000..a7d687d8d456915cf1acaa8dd94345d12c0bccee --- /dev/null +++ b/drivers/bluetooth/hci_nokia.c @@ -0,0 +1,827 @@ +/* + * Bluetooth HCI UART H4 driver with Nokia Extensions AKA Nokia H4+ + * + * Copyright (C) 2015 Marcel Holtmann + * Copyright (C) 2015-2017 Sebastian Reichel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hci_uart.h" +#include "btbcm.h" + +#define VERSION "0.1" + +#define NOKIA_ID_BCM2048 0x04 +#define NOKIA_ID_TI1271 0x31 + +#define FIRMWARE_BCM2048 "nokia/bcmfw.bin" +#define FIRMWARE_TI1271 "nokia/ti1273.bin" + +#define HCI_NOKIA_NEG_PKT 0x06 +#define HCI_NOKIA_ALIVE_PKT 0x07 +#define HCI_NOKIA_RADIO_PKT 0x08 + +#define HCI_NOKIA_NEG_HDR_SIZE 1 +#define HCI_NOKIA_MAX_NEG_SIZE 255 +#define HCI_NOKIA_ALIVE_HDR_SIZE 1 +#define HCI_NOKIA_MAX_ALIVE_SIZE 255 +#define HCI_NOKIA_RADIO_HDR_SIZE 2 +#define HCI_NOKIA_MAX_RADIO_SIZE 255 + +#define NOKIA_PROTO_PKT 0x44 +#define NOKIA_PROTO_BYTE 0x4c + +#define NOKIA_NEG_REQ 0x00 +#define NOKIA_NEG_ACK 0x20 +#define NOKIA_NEG_NAK 0x40 + +#define H4_TYPE_SIZE 1 + +#define NOKIA_RECV_ALIVE \ + .type = HCI_NOKIA_ALIVE_PKT, \ + .hlen = HCI_NOKIA_ALIVE_HDR_SIZE, \ + .loff = 0, \ + .lsize = 1, \ + .maxlen = HCI_NOKIA_MAX_ALIVE_SIZE \ + +#define NOKIA_RECV_NEG \ + .type = HCI_NOKIA_NEG_PKT, \ + .hlen = HCI_NOKIA_NEG_HDR_SIZE, \ + .loff = 0, \ + .lsize = 1, \ + .maxlen = HCI_NOKIA_MAX_NEG_SIZE \ + +#define NOKIA_RECV_RADIO \ + .type = HCI_NOKIA_RADIO_PKT, \ + .hlen = HCI_NOKIA_RADIO_HDR_SIZE, \ + .loff = 1, \ + .lsize = 1, \ + .maxlen = HCI_NOKIA_MAX_RADIO_SIZE \ + +struct hci_nokia_neg_hdr { + u8 dlen; +} __packed; + +struct hci_nokia_neg_cmd { + u8 ack; + u16 baud; + u16 unused1; + u8 proto; + u16 sys_clk; + u16 unused2; +} __packed; + +#define NOKIA_ALIVE_REQ 0x55 +#define NOKIA_ALIVE_RESP 0xcc + +struct hci_nokia_alive_hdr { + u8 dlen; +} __packed; + +struct hci_nokia_alive_pkt { + u8 mid; + u8 unused; +} __packed; + +struct hci_nokia_neg_evt { + u8 ack; + u16 baud; + u16 unused1; + u8 proto; + u16 sys_clk; + u16 unused2; + u8 man_id; + u8 ver_id; +} __packed; + +#define MAX_BAUD_RATE 3692300 +#define SETUP_BAUD_RATE 921600 +#define INIT_BAUD_RATE 120000 + +struct hci_nokia_radio_hdr { + u8 evt; + u8 dlen; +} __packed; + +struct nokia_bt_dev { + struct hci_uart hu; + struct serdev_device *serdev; + + struct gpio_desc *reset; + struct gpio_desc *wakeup_host; + struct gpio_desc *wakeup_bt; + unsigned long sysclk_speed; + + int wake_irq; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + bdaddr_t bdaddr; + + int init_error; + struct completion init_completion; + + u8 man_id; + u8 ver_id; + + bool initialized; + bool tx_enabled; + bool rx_enabled; +}; + +static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb); + +static void nokia_flow_control(struct serdev_device *serdev, bool enable) +{ + if (enable) { + serdev_device_set_rts(serdev, true); + serdev_device_set_flow_control(serdev, true); + } else { + serdev_device_set_flow_control(serdev, false); + serdev_device_set_rts(serdev, false); + } +} + +static irqreturn_t wakeup_handler(int irq, void *data) +{ + struct nokia_bt_dev *btdev = data; + struct device *dev = &btdev->serdev->dev; + int wake_state = gpiod_get_value(btdev->wakeup_host); + + if (btdev->rx_enabled == wake_state) + return IRQ_HANDLED; + + if (wake_state) + pm_runtime_get(dev); + else + pm_runtime_put(dev); + + btdev->rx_enabled = wake_state; + + return IRQ_HANDLED; +} + +static int nokia_reset(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + int err; + + /* reset routine */ + gpiod_set_value_cansleep(btdev->reset, 1); + gpiod_set_value_cansleep(btdev->wakeup_bt, 1); + + msleep(100); 
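+ /* reset and the BT wakeup request are now asserted and have settled */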
+ + /* safety check */ + err = gpiod_get_value_cansleep(btdev->wakeup_host); + if (err == 1) { + dev_err(dev, "reset: host wakeup not low!"); + return -EPROTO; + } + + /* flush queue */ + serdev_device_write_flush(btdev->serdev); + + /* init uart */ + nokia_flow_control(btdev->serdev, false); + serdev_device_set_baudrate(btdev->serdev, INIT_BAUD_RATE); + + gpiod_set_value_cansleep(btdev->reset, 0); + + /* wait for cts */ + err = serdev_device_wait_for_cts(btdev->serdev, true, 200); + if (err < 0) { + dev_err(dev, "CTS not received: %d", err); + return err; + } + + nokia_flow_control(btdev->serdev, true); + + return 0; +} + +static int nokia_send_alive_packet(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_alive_hdr *hdr; + struct hci_nokia_alive_pkt *pkt; + struct sk_buff *skb; + int len; + + init_completion(&btdev->init_completion); + + len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt); + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_NOKIA_ALIVE_PKT; + memset(skb->data, 0x00, len); + + hdr = (struct hci_nokia_alive_hdr *)skb_put(skb, sizeof(*hdr)); + hdr->dlen = sizeof(*pkt); + pkt = (struct hci_nokia_alive_pkt *)skb_put(skb, sizeof(*pkt)); + pkt->mid = NOKIA_ALIVE_REQ; + + nokia_enqueue(hu, skb); + hci_uart_tx_wakeup(hu); + + dev_dbg(dev, "Alive sent"); + + if (!wait_for_completion_interruptible_timeout(&btdev->init_completion, + msecs_to_jiffies(1000))) { + return -ETIMEDOUT; + } + + if (btdev->init_error < 0) + return btdev->init_error; + + return 0; +} + +static int nokia_send_negotiation(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_neg_cmd *neg_cmd; + struct hci_nokia_neg_hdr *neg_hdr; + struct sk_buff *skb; + int len, err; + u16 baud = DIV_ROUND_CLOSEST(btdev->sysclk_speed * 10, SETUP_BAUD_RATE); + int sysclk = btdev->sysclk_speed / 1000; + + len = H4_TYPE_SIZE + sizeof(*neg_hdr) + sizeof(*neg_cmd); + skb = bt_skb_alloc(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_NOKIA_NEG_PKT; + + neg_hdr = (struct hci_nokia_neg_hdr *)skb_put(skb, sizeof(*neg_hdr)); + neg_hdr->dlen = sizeof(*neg_cmd); + + neg_cmd = (struct hci_nokia_neg_cmd *)skb_put(skb, sizeof(*neg_cmd)); + neg_cmd->ack = NOKIA_NEG_REQ; + neg_cmd->baud = cpu_to_le16(baud); + neg_cmd->unused1 = 0x0000; + neg_cmd->proto = NOKIA_PROTO_BYTE; + neg_cmd->sys_clk = cpu_to_le16(sysclk); + neg_cmd->unused2 = 0x0000; + + btdev->init_error = 0; + init_completion(&btdev->init_completion); + + nokia_enqueue(hu, skb); + hci_uart_tx_wakeup(hu); + + dev_dbg(dev, "Negotiation sent"); + + if (!wait_for_completion_interruptible_timeout(&btdev->init_completion, + msecs_to_jiffies(10000))) { + return -ETIMEDOUT; + } + + if (btdev->init_error < 0) + return btdev->init_error; + + /* Change to previously negotiated speed. Flow Control + * is disabled until bluetooth adapter is ready to avoid + * broken bytes being received. 
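+ * The full operational rate (MAX_BAUD_RATE) is only programmed later, + * in nokia_setup(), once the firmware upload has completed.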
+ */ + nokia_flow_control(btdev->serdev, false); + serdev_device_set_baudrate(btdev->serdev, SETUP_BAUD_RATE); + err = serdev_device_wait_for_cts(btdev->serdev, true, 200); + if (err < 0) { + dev_err(dev, "CTS not received: %d", err); + return err; + } + nokia_flow_control(btdev->serdev, true); + + dev_dbg(dev, "Negotiation successful"); + + return 0; +} + +static int nokia_setup_fw(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + const char *fwname; + const struct firmware *fw; + const u8 *fw_ptr; + size_t fw_size; + int err; + + dev_dbg(dev, "setup firmware"); + + if (btdev->man_id == NOKIA_ID_BCM2048) { + fwname = FIRMWARE_BCM2048; + } else if (btdev->man_id == NOKIA_ID_TI1271) { + fwname = FIRMWARE_TI1271; + } else { + dev_err(dev, "Unsupported bluetooth device!"); + return -ENODEV; + } + + err = request_firmware(&fw, fwname, dev); + if (err < 0) { + dev_err(dev, "%s: Failed to load Nokia firmware file (%d)", + hu->hdev->name, err); + return err; + } + + fw_ptr = fw->data; + fw_size = fw->size; + + while (fw_size >= 4) { + u16 pkt_size = get_unaligned_le16(fw_ptr); + u8 pkt_type = fw_ptr[2]; + const struct hci_command_hdr *cmd; + u16 opcode; + struct sk_buff *skb; + + switch (pkt_type) { + case HCI_COMMAND_PKT: + cmd = (struct hci_command_hdr *)(fw_ptr + 3); + opcode = le16_to_cpu(cmd->opcode); + + skb = __hci_cmd_sync(hu->hdev, opcode, cmd->plen, + fw_ptr + 3 + HCI_COMMAND_HDR_SIZE, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + dev_err(dev, "%s: FW command %04x failed (%d)", + hu->hdev->name, opcode, err); + goto done; + } + kfree_skb(skb); + break; + case HCI_NOKIA_RADIO_PKT: + case HCI_NOKIA_NEG_PKT: + case HCI_NOKIA_ALIVE_PKT: + break; + } + + fw_ptr += pkt_size + 2; + fw_size -= pkt_size + 2; + } + +done: + release_firmware(fw); + return err; +} + +static int nokia_setup(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + int err; + + btdev->initialized = false; + + nokia_flow_control(btdev->serdev, false); + + pm_runtime_get_sync(dev); + + if (btdev->tx_enabled) { + gpiod_set_value_cansleep(btdev->wakeup_bt, 0); + pm_runtime_put(&btdev->serdev->dev); + btdev->tx_enabled = false; + } + + dev_dbg(dev, "protocol setup"); + + /* 0. reset connection */ + err = nokia_reset(hu); + if (err < 0) { + dev_err(dev, "Reset failed: %d", err); + goto out; + } + + /* 1. negotiate speed etc */ + err = nokia_send_negotiation(hu); + if (err < 0) { + dev_err(dev, "Negotiation failed: %d", err); + goto out; + } + + /* 2. verify correct setup using alive packet */ + err = nokia_send_alive_packet(hu); + if (err < 0) { + dev_err(dev, "Alive check failed: %d", err); + goto out; + } + + /* 3. 
send firmware */ + err = nokia_setup_fw(hu); + if (err < 0) { + dev_err(dev, "Could not setup FW: %d", err); + goto out; + } + + nokia_flow_control(btdev->serdev, false); + serdev_device_set_baudrate(btdev->serdev, MAX_BAUD_RATE); + nokia_flow_control(btdev->serdev, true); + + if (btdev->man_id == NOKIA_ID_BCM2048) { + hu->hdev->set_bdaddr = btbcm_set_bdaddr; + set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); + dev_dbg(dev, "bcm2048 has invalid bluetooth address!"); + } + + dev_dbg(dev, "protocol setup done!"); + + gpiod_set_value_cansleep(btdev->wakeup_bt, 0); + pm_runtime_put(dev); + btdev->tx_enabled = false; + btdev->initialized = true; + + return 0; +out: + pm_runtime_put(dev); + + return err; +} + +static int nokia_open(struct hci_uart *hu) +{ + struct device *dev = &hu->serdev->dev; + + dev_dbg(dev, "protocol open"); + + serdev_device_open(hu->serdev); + + pm_runtime_enable(dev); + + return 0; +} + +static int nokia_flush(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + + dev_dbg(&btdev->serdev->dev, "flush device"); + + skb_queue_purge(&btdev->txq); + + return 0; +} + +static int nokia_close(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + + dev_dbg(dev, "close device"); + + btdev->initialized = false; + + skb_queue_purge(&btdev->txq); + + kfree_skb(btdev->rx_skb); + + /* disable module */ + gpiod_set_value(btdev->reset, 1); + gpiod_set_value(btdev->wakeup_bt, 0); + + pm_runtime_disable(&btdev->serdev->dev); + serdev_device_close(btdev->serdev); + + return 0; +} + +/* Enqueue frame for transmission (padding, crc, etc.) */ +static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct nokia_bt_dev *btdev = hu->priv; + int err; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + /* Packets must be word aligned */ + if (skb->len % 2) { + err = skb_pad(skb, 1); + if (err) + return err; + *skb_put(skb, 1) = 0x00; + } + + skb_queue_tail(&btdev->txq, skb); + + return 0; +} + +static int nokia_recv_negotiation_packet(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_neg_hdr *hdr; + struct hci_nokia_neg_evt *evt; + int ret = 0; + + hdr = (struct hci_nokia_neg_hdr *)skb->data; + if (hdr->dlen != sizeof(*evt)) { + btdev->init_error = -EIO; + ret = -EIO; + goto finish_neg; + } + + evt = (struct hci_nokia_neg_evt *)skb_pull(skb, sizeof(*hdr)); + + if (evt->ack != NOKIA_NEG_ACK) { + dev_err(dev, "Negotiation received: wrong reply"); + btdev->init_error = -EINVAL; + ret = -EINVAL; + goto finish_neg; + } + + btdev->man_id = evt->man_id; + btdev->ver_id = evt->ver_id; + + dev_dbg(dev, "Negotiation received: baud=%u:clk=%u:manu=%u:vers=%u", + evt->baud, evt->sys_clk, evt->man_id, evt->ver_id); + +finish_neg: + complete(&btdev->init_completion); + kfree_skb(skb); + return ret; +} + +static int nokia_recv_alive_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct hci_nokia_alive_hdr *hdr; + struct hci_nokia_alive_pkt *pkt; + int ret = 0; + + hdr = (struct hci_nokia_alive_hdr *)skb->data; + if (hdr->dlen != sizeof(*pkt)) { + dev_err(dev, "Corrupted alive message"); + btdev->init_error = -EIO; + ret = -EIO; + goto finish_alive; + } + + pkt = (struct hci_nokia_alive_pkt
*)skb_pull(skb, sizeof(*hdr)); + + if (pkt->mid != NOKIA_ALIVE_RESP) { + dev_err(dev, "Alive received: invalid response: 0x%02x!", + pkt->mid); + btdev->init_error = -EINVAL; + ret = -EINVAL; + goto finish_alive; + } + + dev_dbg(dev, "Alive received"); + +finish_alive: + complete(&btdev->init_completion); + kfree_skb(skb); + return ret; +} + +static int nokia_recv_radio(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* Packets received on the dedicated radio channel are + * HCI events, so feed them back into the core. + */ + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + return hci_recv_frame(hdev, skb); +} + +/* Recv data */ +static const struct h4_recv_pkt nokia_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { NOKIA_RECV_ALIVE, .recv = nokia_recv_alive_packet }, + { NOKIA_RECV_NEG, .recv = nokia_recv_negotiation_packet }, + { NOKIA_RECV_RADIO, .recv = nokia_recv_radio }, +}; + +static int nokia_recv(struct hci_uart *hu, const void *data, int count) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + int err; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count, + nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); + if (IS_ERR(btdev->rx_skb)) { + err = PTR_ERR(btdev->rx_skb); + dev_err(dev, "Frame reassembly failed (%d)", err); + btdev->rx_skb = NULL; + return err; + } + + return count; +} + +static struct sk_buff *nokia_dequeue(struct hci_uart *hu) +{ + struct nokia_bt_dev *btdev = hu->priv; + struct device *dev = &btdev->serdev->dev; + struct sk_buff *result = skb_dequeue(&btdev->txq); + + if (!btdev->initialized) + return result; + + if (btdev->tx_enabled == !!result) + return result; + + if (result) { + pm_runtime_get_sync(dev); + gpiod_set_value_cansleep(btdev->wakeup_bt, 1); + } else { + serdev_device_wait_until_sent(btdev->serdev, 0); + gpiod_set_value_cansleep(btdev->wakeup_bt, 0); + pm_runtime_put(dev); + } + + btdev->tx_enabled = !!result; + + return result; +} + +static const struct hci_uart_proto nokia_proto = { + .id = HCI_UART_NOKIA, + .name = "Nokia", + .open = nokia_open, + .close = nokia_close, + .recv = nokia_recv, + .enqueue = nokia_enqueue, + .dequeue = nokia_dequeue, + .flush = nokia_flush, + .setup = nokia_setup, + .manufacturer = 1, +}; + +static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) +{ + struct device *dev = &serdev->dev; + struct nokia_bt_dev *btdev; + struct clk *sysclk; + int err = 0; + + btdev = devm_kzalloc(dev, sizeof(*btdev), GFP_KERNEL); + if (!btdev) + return -ENOMEM; + + btdev->hu.serdev = btdev->serdev = serdev; + serdev_device_set_drvdata(serdev, btdev); + + btdev->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(btdev->reset)) { + err = PTR_ERR(btdev->reset); + dev_err(dev, "could not get reset gpio: %d", err); + return err; + } + + btdev->wakeup_host = devm_gpiod_get(dev, "host-wakeup", GPIOD_IN); + if (IS_ERR(btdev->wakeup_host)) { + err = PTR_ERR(btdev->wakeup_host); + dev_err(dev, "could not get host wakeup gpio: %d", err); + return err; + } + + btdev->wake_irq = gpiod_to_irq(btdev->wakeup_host); + + err = devm_request_threaded_irq(dev, btdev->wake_irq, NULL, + wakeup_handler, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "wakeup", btdev); + if (err) { + dev_err(dev, "could not request wakeup irq: %d", err); + return err; + } + + btdev->wakeup_bt = devm_gpiod_get(dev,
"bluetooth-wakeup", + GPIOD_OUT_LOW); + if (IS_ERR(btdev->wakeup_bt)) { + err = PTR_ERR(btdev->wakeup_bt); + dev_err(dev, "could not get BT wakeup gpio: %d", err); + return err; + } + + sysclk = devm_clk_get(dev, "sysclk"); + if (IS_ERR(sysclk)) { + err = PTR_ERR(sysclk); + dev_err(dev, "could not get sysclk: %d", err); + return err; + } + + clk_prepare_enable(sysclk); + btdev->sysclk_speed = clk_get_rate(sysclk); + clk_disable_unprepare(sysclk); + + skb_queue_head_init(&btdev->txq); + + btdev->hu.priv = btdev; + btdev->hu.alignment = 2; /* Nokia H4+ is word aligned */ + + err = hci_uart_register_device(&btdev->hu, &nokia_proto); + if (err) { + dev_err(dev, "could not register bluetooth uart: %d", err); + return err; + } + + return 0; +} + +static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) +{ + struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev); + struct hci_uart *hu = &btdev->hu; + struct hci_dev *hdev = hu->hdev; + + cancel_work_sync(&hu->write_work); + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + hu->proto->close(hu); + + pm_runtime_disable(&btdev->serdev->dev); +} + +static int nokia_bluetooth_runtime_suspend(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + + nokia_flow_control(serdev, false); + return 0; +} + +static int nokia_bluetooth_runtime_resume(struct device *dev) +{ + struct serdev_device *serdev = to_serdev_device(dev); + + nokia_flow_control(serdev, true); + return 0; +} + +static const struct dev_pm_ops nokia_bluetooth_pm_ops = { + SET_RUNTIME_PM_OPS(nokia_bluetooth_runtime_suspend, + nokia_bluetooth_runtime_resume, + NULL) +}; + +#ifdef CONFIG_OF +static const struct of_device_id nokia_bluetooth_of_match[] = { + { .compatible = "nokia,h4p-bluetooth", }, + {}, +}; +MODULE_DEVICE_TABLE(of, nokia_bluetooth_of_match); +#endif + +static struct serdev_device_driver nokia_bluetooth_serdev_driver = { + .probe = nokia_bluetooth_serdev_probe, + .remove = nokia_bluetooth_serdev_remove, + .driver = { + .name = "nokia-bluetooth", + .pm = &nokia_bluetooth_pm_ops, + .of_match_table = of_match_ptr(nokia_bluetooth_of_match), + }, +}; + +module_serdev_device_driver(nokia_bluetooth_serdev_driver); + +MODULE_AUTHOR("Sebastian Reichel "); +MODULE_DESCRIPTION("Bluetooth HCI UART Nokia H4+ driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c new file mode 100644 index 0000000000000000000000000000000000000000..7de0edc0ff8cd73bca5d81bc1a96794c35234a17 --- /dev/null +++ b/drivers/bluetooth/hci_serdev.c @@ -0,0 +1,356 @@ +/* + * Bluetooth HCI serdev driver lib + * + * Copyright (C) 2017 Linaro, Ltd., Rob Herring + * + * Based on hci_ldisc.c: + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +struct serdev_device_ops hci_serdev_client_ops; + +static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type) +{ + struct hci_dev *hdev = hu->hdev; + + /* Update HCI stat counters */ + switch (pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } +} + +static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) +{ + struct sk_buff *skb = hu->tx_skb; + + if (!skb) + skb = hu->proto->dequeue(hu); + else + hu->tx_skb = NULL; + + return skb; +} + +static void hci_uart_write_work(struct work_struct *work) +{ + struct hci_uart *hu = container_of(work, struct hci_uart, write_work); + struct serdev_device *serdev = hu->serdev; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + + /* REVISIT: + * should we cope with bad skbs or ->write() returning an error value? + */ + do { + clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + + while ((skb = hci_uart_dequeue(hu))) { + int len; + + len = serdev_device_write_buf(serdev, + skb->data, skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len) { + hu->tx_skb = skb; + break; + } + + hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); + kfree_skb(skb); + } + } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); + + clear_bit(HCI_UART_SENDING, &hu->tx_state); +} + +/* ------- Interface to HCI layer ------ */ + +/* Initialize device */ +static int hci_uart_open(struct hci_dev *hdev) +{ + BT_DBG("%s %p", hdev->name, hdev); + + return 0; +} + +/* Reset device */ +static int hci_uart_flush(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hdev %p serdev %p", hdev, hu->serdev); + + if (hu->tx_skb) { + kfree_skb(hu->tx_skb); hu->tx_skb = NULL; + } + + /* Flush any pending characters in the driver and discipline.
*/ + serdev_device_write_flush(hu->serdev); + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + hu->proto->flush(hu); + + return 0; +} + +/* Close device */ +static int hci_uart_close(struct hci_dev *hdev) +{ + BT_DBG("hdev %p", hdev); + + hci_uart_flush(hdev); + hdev->flush = NULL; + + return 0; +} + +/* Send frames from HCI layer */ +static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); + + hu->proto->enqueue(hu, skb); + + hci_uart_tx_wakeup(hu); + + return 0; +} + +static int hci_uart_setup(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct hci_rp_read_local_version *ver; + struct sk_buff *skb; + unsigned int speed; + int err; + + /* Init speed if any */ + if (hu->init_speed) + speed = hu->init_speed; + else if (hu->proto->init_speed) + speed = hu->proto->init_speed; + else + speed = 0; + + if (speed) + serdev_device_set_baudrate(hu->serdev, speed); + + /* Operational speed if any */ + if (hu->oper_speed) + speed = hu->oper_speed; + else if (hu->proto->oper_speed) + speed = hu->proto->oper_speed; + else + speed = 0; + + if (hu->proto->set_baudrate && speed) { + err = hu->proto->set_baudrate(hu, speed); + if (err) + BT_ERR("%s: failed to set baudrate", hdev->name); + else + serdev_device_set_baudrate(hu->serdev, speed); + } + + if (hu->proto->setup) + return hu->proto->setup(hu); + + if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags)) + return 0; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading local version information failed (%ld)", + hdev->name, PTR_ERR(skb)); + return 0; + } + + if (skb->len != sizeof(*ver)) { + BT_ERR("%s: Event length mismatch for version information", + hdev->name); + } + + kfree_skb(skb); + return 0; +} + +/** hci_uart_write_wakeup - transmit buffer wakeup + * @serdev: serial device + * + * This function is called by the serdev framework when it can accept + * more data to be sent. + */ +static void hci_uart_write_wakeup(struct serdev_device *serdev) +{ + struct hci_uart *hu = serdev_device_get_drvdata(serdev); + + BT_DBG(""); + + if (!hu || serdev != hu->serdev) { + WARN_ON(1); + return; + } + + if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + hci_uart_tx_wakeup(hu); +} + +/** hci_uart_receive_buf - receive buffer wakeup + * @serdev: serial device + * @data: pointer to received data + * @count: count of received data in bytes + * + * This function is called by the serdev framework when it has received data + * in the RX buffer.
+ * + * Return: number of processed bytes + */ +static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data, + size_t count) +{ + struct hci_uart *hu = serdev_device_get_drvdata(serdev); + + if (!hu || serdev != hu->serdev) { + WARN_ON(1); + return 0; + } + + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + return 0; + + /* No lock is needed here; this path is already protected by a mutex in + * the tty caller. + */ + hu->proto->recv(hu, data, count); + + if (hu->hdev) + hu->hdev->stat.byte_rx += count; + + return count; +} + +struct serdev_device_ops hci_serdev_client_ops = { + .receive_buf = hci_uart_receive_buf, + .write_wakeup = hci_uart_write_wakeup, +}; + +int hci_uart_register_device(struct hci_uart *hu, + const struct hci_uart_proto *p) +{ + int err; + struct hci_dev *hdev; + + BT_DBG(""); + + serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); + + err = p->open(hu); + if (err) + return err; + + hu->proto = p; + set_bit(HCI_UART_PROTO_READY, &hu->flags); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + err = -ENOMEM; + goto err_alloc; + } + + hu->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, hu); + + INIT_WORK(&hu->write_work, hci_uart_write_work); + + /* Only when vendor specific setup callback is provided, consider + * the manufacturer information valid. This avoids filling in the + * value for Ericsson when nothing is specified. + */ + if (hu->proto->setup) + hdev->manufacturer = hu->proto->manufacturer; + + hdev->open = hci_uart_open; + hdev->close = hci_uart_close; + hdev->flush = hci_uart_flush; + hdev->send = hci_uart_send_frame; + hdev->setup = hci_uart_setup; + SET_HCIDEV_DEV(hdev, &hu->serdev->dev); + + if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags)) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + + if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) + set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); + + if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) + set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + + if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) + hdev->dev_type = HCI_AMP; + else + hdev->dev_type = HCI_PRIMARY; + + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) + return 0; + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + err = -ENODEV; + goto err_register; + } + + set_bit(HCI_UART_REGISTERED, &hu->flags); + + return 0; + +err_register: + hci_free_dev(hdev); +err_alloc: + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + p->close(hu); + return err; +} +EXPORT_SYMBOL_GPL(hci_uart_register_device); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 070139513e651792c8058f5665227cf1b54bd499..2b05e557fad01ef9ea0177f78ba5ef15850f4016 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -58,6 +58,7 @@ #define HCI_UART_VND_DETECT 5 struct hci_uart; +struct serdev_device; struct hci_uart_proto { unsigned int id; @@ -77,6 +78,7 @@ struct hci_uart_proto { struct hci_uart { struct tty_struct *tty; + struct serdev_device *serdev; struct hci_dev *hdev; unsigned long flags; unsigned long hdev_flags; @@ -92,6 +94,9 @@ struct hci_uart { unsigned int init_speed; unsigned int oper_speed; + + u8 alignment; + u8 padding; }; /* HCI_UART proto flag bits */ @@ -105,9 +110,10 @@ struct hci_uart { int hci_uart_register_proto(const struct hci_uart_proto *p); int hci_uart_unregister_proto(const struct hci_uart_proto *p); +int hci_uart_register_device(struct
hci_uart *hu, const struct hci_uart_proto *p); + int hci_uart_tx_wakeup(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); -void hci_uart_init_tty(struct hci_uart *hu); void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 87799e81af97697cb4879acb227a30ea0bf792bd..c3f1a9e33cef0852a947295fefcb765ffed80a88 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -32,6 +32,8 @@ #include "hyperv_vmbus.h" +#define VMBUS_PKT_TRAILER 8 + /* * When we write to the ring buffer, check if the host needs to * be signaled. Here is the details of this protocol: @@ -336,6 +338,12 @@ int hv_ringbuffer_write(struct vmbus_channel *channel, return 0; } +static inline void +init_cached_read_index(struct hv_ring_buffer_info *rbi) +{ + rbi->cached_read_index = rbi->ring_buffer->read_index; +} + int hv_ringbuffer_read(struct vmbus_channel *channel, void *buffer, u32 buflen, u32 *buffer_actual_len, u64 *requestid, bool raw) @@ -366,7 +374,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, return ret; } - init_cached_read_index(channel); + init_cached_read_index(inring_info); + next_read_location = hv_get_next_read_location(inring_info); next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, sizeof(desc), @@ -410,3 +419,86 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, return ret; } + +/* + * Determine number of bytes available in ring buffer after + * the current iterator (priv_read_index) location. + * + * This is similar to hv_get_bytes_to_read but with private + * read index instead. + */ +static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) +{ + u32 priv_read_loc = rbi->priv_read_index; + u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index); + + if (write_loc >= priv_read_loc) + return write_loc - priv_read_loc; + else + return (rbi->ring_datasize - priv_read_loc) + write_loc; +} + +/* + * Get first vmbus packet from ring buffer after read_index + * + * If ring buffer is empty, returns NULL and no other action needed. + */ +struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *rbi = &channel->inbound; + + /* set state for later hv_signal_on_read() */ + init_cached_read_index(rbi); + + if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor)) + return NULL; + + return hv_get_ring_buffer(rbi) + rbi->priv_read_index; +} +EXPORT_SYMBOL_GPL(hv_pkt_iter_first); + +/* + * Get next vmbus packet from ring buffer. + * + * Advances the current location (priv_read_index) and checks for more + * data. If the end of the ring buffer is reached, then return NULL. + */ +struct vmpacket_descriptor * +__hv_pkt_iter_next(struct vmbus_channel *channel, + const struct vmpacket_descriptor *desc) +{ + struct hv_ring_buffer_info *rbi = &channel->inbound; + u32 packetlen = desc->len8 << 3; + u32 dsize = rbi->ring_datasize; + + /* bump offset to next potential packet */ + rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER; + if (rbi->priv_read_index >= dsize) + rbi->priv_read_index -= dsize; + + /* more data? */ + if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor)) + return NULL; + else + return hv_get_ring_buffer(rbi) + rbi->priv_read_index; +} +EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); + +/* + * Update host ring buffer after iterating over packets. 
+ */ +void hv_pkt_iter_close(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *rbi = &channel->inbound; + + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. + */ + virt_rmb(); + rbi->ring_buffer->read_index = rbi->priv_read_index; + + hv_signal_on_read(channel); +} +EXPORT_SYMBOL_GPL(hv_pkt_iter_close); diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 0f58f46dbad7e0b4059cbab4ffca95623cad8bb9..329d08c884f66b3aa7647cf5b3266b4e053e7ebc 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -88,7 +88,7 @@ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) return false; ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_addr_policy); + nlmsg_len(nlh), ib_nl_addr_policy, NULL); if (ret) return false; diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 3ef51a96bbf1128905c59688ad64d3fc2a34e035..f13870e69ccd869a1c0c25adad55cf98d895c524 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -472,12 +472,14 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max, int ret; const char *err_str = ""; - ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy); + ret = nlmsg_validate(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy, + NULL); if (ret) { err_str = "Invalid attribute"; goto parse_nlmsg_error; } - ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy); + ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max - 1, + nlmsg_policy, NULL); if (ret) { err_str = "Unable to parse the nlmsg"; goto parse_nlmsg_error; diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 10469b0088b500fbe2012bc4bd43d249dad3b705..b784055423c8346b80e00f98d58f6499e7adc8ef 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -146,7 +146,8 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, } EXPORT_SYMBOL(ibnl_put_attr); -static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct ibnl_client *client; int type = nlh->nlmsg_type; @@ -209,7 +210,7 @@ static void ibnl_rcv_reply_skb(struct sk_buff *skb) if (nlh->nlmsg_flags & NLM_F_REQUEST) return; - ibnl_rcv_msg(skb, nlh); + ibnl_rcv_msg(skb, nlh, NULL); msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 81b742ca163911f3cfe6ffc42ea624931e7f3089..ceae153997d0796995d680b7fdd289dc35871352 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -808,7 +808,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, return -EPERM; ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_policy); + nlmsg_len(nlh), ib_nl_policy, NULL); attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; if (ret || !attr) goto settimeout_out; @@ -860,7 +860,7 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) return 0; ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_policy); + nlmsg_len(nlh), ib_nl_policy, NULL); if (ret) return 0; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 
3cd064b5f0bff1d86af9f448c5ecc9f4322280c5..ce8ba617d46e6e93c85678c71255d97cc3ad8e91 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -729,16 +729,6 @@ static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw) return container_of(ibmw, struct mlx5_ib_mw, ibmw); } -struct mlx5_ib_ah { - struct ib_ah ibah; - struct mlx5_av av; -}; - -static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) -{ - return container_of(ibah, struct mlx5_ib_ah, ibah); -} - int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, struct mlx5_db *db); void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ad8a2638e339b4bf0d7e866cfbf0fa08b8f13a1b..ed6320186f8979a4d38616ba5ec7aba9b391fbe1 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -897,6 +897,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_IPOIB_UD_LSO | + IB_QP_CREATE_NETIF_QP | mlx5_ib_create_qp_sqpn_qp1())) return -EINVAL; diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 85acd0843b503807e92eafb26bce1cf60abc55a7..3f9e56e8b3796d3f225e89c67df130d32d55d583 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -36,6 +36,7 @@ #include #include +#include #include #include #include diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index b9b47e5cc8b3bde5a053107c0ba6a4997754c1ee..ced0461d6e9ff822633d60505d4b390450f7a082 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -587,9 +587,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle) #define EVENT_TYPE_CQ 1 #define EVENT_TYPE_QP 2 struct qedr_dev *dev = (struct qedr_dev *)context; - union event_ring_data *data = fw_handle; - u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) + - data->roce_handle.lo; + struct regpair *async_handle = (struct regpair *)fw_handle; + u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo; u8 event_type = EVENT_TYPE_NOT_DEFINED; struct ib_event event; struct ib_cq *ibcq; diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index bb32e4792ec9f022d201c0585bcce7a7cbae179c..5cb9195513bdd4c754bf7e19b2f0c7d0ee30dc08 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -38,7 +38,8 @@ #include #include #include -#include "qedr_hsi.h" +#include +#include "qedr_hsi_rdma.h" #define QEDR_MODULE_VERSION "8.10.10.0" #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 699632893dd9842c3a3b47153f631ae85765a531..a6280ce3e2a54c6cef7617c79862680e425f81d4 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -43,14 +43,11 @@ #include #include -#include "qedr_hsi.h" #include #include #include "qedr.h" -#include "qedr_hsi.h" #include "verbs.h" #include -#include "qedr_hsi.h" #include "qedr_cm.h" void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 6b3bb32803bd8661d9efebd14dcefff0b601f6f3..2091902848e6c47bb1f3dfdc4cc08cec99386dc0 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ 
b/drivers/infiniband/hw/qedr/verbs.c @@ -43,7 +43,8 @@ #include #include -#include "qedr_hsi.h" +#include +#include "qedr_hsi_rdma.h" #include #include "qedr.h" #include "verbs.h" diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c index 50749a70c5cacb99ee2dfbb0a1957b6baf336247..060d357f107f8c7720b08e6791dc352893c08fff 100644 --- a/drivers/isdn/divert/isdn_divert.c +++ b/drivers/isdn/divert/isdn_divert.c @@ -157,10 +157,8 @@ int cf_command(int drvid, int mode, /* allocate mem for information struct */ if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC))) return (-ENOMEM); /* no memory */ - init_timer(&cs->timer); + setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs); cs->info[0] = '\0'; - cs->timer.function = deflect_timer_expire; - cs->timer.data = (ulong) cs; /* pointer to own structure */ cs->ics.driver = drvid; cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */ cs->ics.arg = DSS1_CMD_INVOKE; /* invoke supplementary service */ @@ -452,10 +450,9 @@ static int isdn_divert_icall(isdn_ctrl *ic) return (0); /* no external deflection needed */ if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC))) return (0); /* no memory */ - init_timer(&cs->timer); + setup_timer(&cs->timer, deflect_timer_expire, + (ulong)cs); cs->info[0] = '\0'; - cs->timer.function = deflect_timer_expire; - cs->timer.data = (ulong) cs; /* pointer to own structure */ cs->ics = *ic; /* copy incoming data */ if (!cs->ics.parm.setup.phone[0]) strcpy(cs->ics.parm.setup.phone, "0"); diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c index cb88090f9cea3036af14b8e8bcaa9eaeb4d58434..c61049585cbd7b67f24e057238244b8938500a9e 100644 --- a/drivers/isdn/hardware/eicon/divasi.c +++ b/drivers/isdn/hardware/eicon/divasi.c @@ -300,9 +300,8 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr) p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e); init_waitqueue_head(&p_os->read_wait); init_waitqueue_head(&p_os->close_wait); - init_timer(&p_os->diva_timer_id); - p_os->diva_timer_id.function = (void *) diva_um_timer_function; - p_os->diva_timer_id.data = (unsigned long) p_os; + setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function, + (unsigned long)p_os); p_os->aborted = 0; p_os->adapter_nr = adapter_nr; return (1); diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig index 09df54fc1fef2162bf06228dddc431f2f7a9feeb..fda912b0833ff9dad7c6e04898487074f094b2a3 100644 --- a/drivers/isdn/hardware/mISDN/Kconfig +++ b/drivers/isdn/hardware/mISDN/Kconfig @@ -13,7 +13,7 @@ config MISDN_HFCPCI config MISDN_HFCMULTI tristate "Support for HFC multiport cards (HFC-4S/8S/E1)" - depends on PCI || 8xx + depends on PCI || CPM1 depends on MISDN help Enable support for cards with Cologne Chip AG's HFC multiport @@ -27,8 +27,8 @@ config MISDN_HFCMULTI_8xx bool "Support for XHFC embedded board in HFC multiport driver" depends on MISDN depends on MISDN_HFCMULTI - depends on 8xx - default 8xx + depends on CPM1 + default CPM1 help Enable support for the XHFC embedded solution from Speech Design. 
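The init_timer() conversions running through the ISDN hunks above and below are all the same mechanical transformation. As a minimal sketch in plain C (modeled on the dbusytimer pattern these hunks touch, not an exact hunk), the open-coded three-step initialization collapses into a single setup_timer() call with identical semantics:

	/* before: init the timer, then bind the expiry handler and its cookie by hand */
	init_timer(&cs->dbusytimer);
	cs->dbusytimer.function = (void *)dbusy_timer_handler;
	cs->dbusytimer.data = (long)cs;

	/* after: one helper performs all three assignments */
	setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);

Either way, dbusy_timer_handler() still receives the .data cookie as its unsigned long argument when the timer fires; only the initialization site changes.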
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h index 0eafe9f04fca326a40f6e5bb549b78420bd39e26..8a254747768e9de4181d6b133da05b292830e0ae 100644 --- a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h +++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h @@ -6,7 +6,7 @@ * */ -#include +#include /* Change this to the value used by your board */ #ifndef IMAP_ADDR diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 480c2d7794ebdcfbcb38667a0807a156a1b2fac3..961c07ee47b7337005a2997b557b0539aebff2fa 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -3878,9 +3878,8 @@ hfcmulti_initmode(struct dchannel *dch) if (hc->dnum[pt]) { mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol, -1, 0, -1, 0); - dch->timer.function = (void *) hfcmulti_dbusy_timer; - dch->timer.data = (long) dch; - init_timer(&dch->timer); + setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer, + (long)dch); } for (i = 1; i <= 31; i++) { if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */ @@ -3986,9 +3985,8 @@ hfcmulti_initmode(struct dchannel *dch) hc->chan[i].slot_rx = -1; hc->chan[i].conf = -1; mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0); - dch->timer.function = (void *) hfcmulti_dbusy_timer; - dch->timer.data = (long) dch; - init_timer(&dch->timer); + setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer, + (long)dch); hc->chan[i - 2].slot_tx = -1; hc->chan[i - 2].slot_rx = -1; hc->chan[i - 2].conf = -1; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index ff48da61c94c849bf06cbb9ab9cb149515dcd626..5dc246d71c167d5a69f449603cbd4a329a85fa9c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1717,9 +1717,8 @@ static void inithfcpci(struct hfc_pci *hc) { printk(KERN_DEBUG "inithfcpci: entered\n"); - hc->dch.timer.function = (void *) hfcpci_dbusy_timer; - hc->dch.timer.data = (long) &hc->dch; - init_timer(&hc->dch.timer); + setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer, + (long)&hc->dch); hc->chanlimit = 2; mode_hfcpci(&hc->bch[0], 1, -1); mode_hfcpci(&hc->bch[1], 2, -1); @@ -2044,9 +2043,7 @@ setup_hw(struct hfc_pci *hc) Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); /* At this point the needed PCI config is done */ /* fifos are still not enabled */ - hc->hw.timer.function = (void *) hfcpci_Timer; - hc->hw.timer.data = (long) hc; - init_timer(&hc->hw.timer); + setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc); /* default PCM master */ test_and_set_bit(HFC_CFG_MASTER, &hc->cfg); return 0; diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index 77dec28ba874c7f220944afc07e6ebcb67fa00a0..6742b0dc082115df347955c77c619e5a47ffb2d6 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -796,9 +796,8 @@ isac_init(struct isac_hw *isac) } isac->mon_tx = NULL; isac->mon_rx = NULL; - isac->dch.timer.function = (void *) dbusy_timer_handler; - isac->dch.timer.data = (long)isac; - init_timer(&isac->dch.timer); + setup_timer(&isac->dch.timer, (void *)dbusy_timer_handler, + (long)isac); isac->mocr = 0xaa; if (isac->type & IPAC_TYPE_ISACX) { /* Disable all IRQ */ diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index feafa91c2ed99088c0958059e0de6c04dd832128..5b078591b6ee846455ac25642c25c8d932bde99d 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ 
b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -1635,13 +1635,11 @@ init_isar(struct isar_hw *isar) } if (isar->version != 1) return -EINVAL; - isar->ch[0].ftimer.function = &ftimer_handler; - isar->ch[0].ftimer.data = (long)&isar->ch[0]; - init_timer(&isar->ch[0].ftimer); + setup_timer(&isar->ch[0].ftimer, &ftimer_handler, + (long)&isar->ch[0]); test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags); - isar->ch[1].ftimer.function = &ftimer_handler; - isar->ch[1].ftimer.data = (long)&isar->ch[1]; - init_timer(&isar->ch[1].ftimer); + setup_timer(&isar->ch[1].ftimer, &ftimer_handler, + (long)&isar->ch[1]); test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags); return 0; } diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index 3b067ea656bd9260d0172d7e2387d602d459d170..3052c836b89f70bc441520e439635a6e91dd2248 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -852,9 +852,8 @@ static void initW6692(struct w6692_hw *card) { u8 val; - card->dch.timer.function = (void *)dbusy_timer_handler; - card->dch.timer.data = (u_long)&card->dch; - init_timer(&card->dch.timer); + setup_timer(&card->dch.timer, (void *)dbusy_timer_handler, + (u_long)&card->dch); w6692_mode(&card->bc[0], ISDN_P_NONE); w6692_mode(&card->bc[1], ISDN_P_NONE); WriteW6692(card, W_D_CTL, 0x00); diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c index 36817e0a0b9465df6d1f9f8c4eee7cb0e5d24157..3a4c2f9e19e9adf81c4d93eb7847ed8b549745e2 100644 --- a/drivers/isdn/hisax/amd7930_fn.c +++ b/drivers/isdn/hisax/amd7930_fn.c @@ -789,7 +789,5 @@ void Amd7930_init(struct IsdnCardState *cs) void setup_Amd7930(struct IsdnCardState *cs) { INIT_WORK(&cs->tqueue, Amd7930_bh); - cs->dbusytimer.function = (void *) dbusy_timer_handler; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs); } diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c index 29ec2dfbd155521022438964f24ea91ff333926d..9826bad49e2c1f60f11108661ecb610bcaf878b2 100644 --- a/drivers/isdn/hisax/arcofi.c +++ b/drivers/isdn/hisax/arcofi.c @@ -125,9 +125,7 @@ clear_arcofi(struct IsdnCardState *cs) { void init_arcofi(struct IsdnCardState *cs) { - cs->dc.isac.arcofitimer.function = (void *) arcofi_timer; - cs->dc.isac.arcofitimer.data = (long) cs; - init_timer(&cs->dc.isac.arcofitimer); + setup_timer(&cs->dc.isac.arcofitimer, (void *)arcofi_timer, (long)cs); init_waitqueue_head(&cs->dc.isac.arcofi_wait); test_and_set_bit(HW_ARCOFI, &cs->HW_Flags); } diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c index 4fc90de68d18a46941cfd8c629dd5f6624600173..079336e593f95a2970b1b43606ee92b4b7d7ae0f 100644 --- a/drivers/isdn/hisax/diva.c +++ b/drivers/isdn/hisax/diva.c @@ -976,9 +976,8 @@ static int setup_diva_common(struct IsdnCardState *cs) printk(KERN_INFO "Diva: IPACX Design Id: %x\n", MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F); } else { /* DIVA 2.0 */ - cs->hw.diva.tl.function = (void *) diva_led_handler; - cs->hw.diva.tl.data = (long) cs; - init_timer(&cs->hw.diva.tl); + setup_timer(&cs->hw.diva.tl, (void *)diva_led_handler, + (long)cs); cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c index d8ef64da26f1fe6e0a25c5515401079ed19ff3cb..03bc5d504e2266774ab866c695a52546d72816b4 100644 --- a/drivers/isdn/hisax/elsa.c +++ b/drivers/isdn/hisax/elsa.c @@ -1147,9 +1147,7 @@ static 
int setup_elsa_common(struct IsdnCard *card) init_arcofi(cs); #endif setup_isac(cs); - cs->hw.elsa.tl.function = (void *) elsa_led_handler; - cs->hw.elsa.tl.data = (long) cs; - init_timer(&cs->hw.elsa.tl); + setup_timer(&cs->hw.elsa.tl, (void *)elsa_led_handler, (long)cs); /* Teste Timer */ if (cs->hw.elsa.timer) { byteout(cs->hw.elsa.trig, 0xff); diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c index c7a94713e9ec98fae0c36d61ffdc40cd27884d70..d63266fa8cbdb93bf37e630af6663f511292786d 100644 --- a/drivers/isdn/hisax/fsm.c +++ b/drivers/isdn/hisax/fsm.c @@ -98,13 +98,11 @@ void FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft) { ft->fi = fi; - ft->tl.function = (void *) FsmExpireTimer; - ft->tl.data = (long) ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "FsmInitTimer %lx", (long) ft); #endif - init_timer(&ft->tl); + setup_timer(&ft->tl, (void *)FsmExpireTimer, (long)ft); } void diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c index e034ed847ff32ca1fcf0a55e66eebe2171e3d32f..90f051ce02590f3e70ee2e1227d7aa507441a24c 100644 --- a/drivers/isdn/hisax/hfc4s8s_l1.c +++ b/drivers/isdn/hisax/hfc4s8s_l1.c @@ -1396,9 +1396,8 @@ setup_instance(hfc4s8s_hw *hw) l1p = hw->l1 + i; spin_lock_init(&l1p->lock); l1p->hw = hw; - l1p->l1_timer.function = (void *) hfc_l1_timer; - l1p->l1_timer.data = (long) (l1p); - init_timer(&l1p->l1_timer); + setup_timer(&l1p->l1_timer, (void *)hfc_l1_timer, + (long)(l1p)); l1p->st_num = i; skb_queue_head_init(&l1p->d_tx_queue); l1p->d_if.ifc.priv = hw->l1 + i; diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c index a756e5cb6871cbbee83810875bfa29ed8ca93529..ad8597a1a07efd75e1979054fdcda31ecab8b7a6 100644 --- a/drivers/isdn/hisax/hfc_2bds0.c +++ b/drivers/isdn/hisax/hfc_2bds0.c @@ -1073,8 +1073,6 @@ set_cs_func(struct IsdnCardState *cs) cs->writeisacfifo = &dummyf; cs->BC_Read_Reg = &ReadReg; cs->BC_Write_Reg = &WriteReg; - cs->dbusytimer.function = (void *) hfc_dbusy_timer; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)hfc_dbusy_timer, (long)cs); INIT_WORK(&cs->tqueue, hfcd_bh); } diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 90449e1e91e5a27924da01df6215dfa6c326651e..f9ca35cc32b135bc2ae8d31b7f2c87c05d5cfb73 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c @@ -1582,9 +1582,7 @@ inithfcpci(struct IsdnCardState *cs) cs->bcs[1].BC_SetStack = setstack_2b; cs->bcs[0].BC_Close = close_hfcpci; cs->bcs[1].BC_Close = close_hfcpci; - cs->dbusytimer.function = (void *) hfcpci_dbusy_timer; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)hfcpci_dbusy_timer, (long)cs); mode_hfcpci(cs->bcs, 0, 0); mode_hfcpci(cs->bcs + 1, 0, 1); } @@ -1746,9 +1744,7 @@ setup_hfcpci(struct IsdnCard *card) cs->BC_Write_Reg = NULL; cs->irq_func = &hfcpci_interrupt; cs->irq_flags |= IRQF_SHARED; - cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer; - cs->hw.hfcpci.timer.data = (long) cs; - init_timer(&cs->hw.hfcpci.timer); + setup_timer(&cs->hw.hfcpci.timer, (void *)hfcpci_Timer, (long)cs); cs->cardmsg = &hfcpci_card_msg; cs->auxcmd = &hfcpci_auxcmd; diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index 13b2151c10f54ff9fd5bcec76e1ceef2b99aa472..3aef8e1a90e4455c6b1aa1e5eb935c95cd018210 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c @@ -1495,9 +1495,7 @@ int setup_hfcsx(struct IsdnCard *card) 
} else return (0); /* no valid card type */ - cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)hfcsx_dbusy_timer, (long)cs); INIT_WORK(&cs->tqueue, hfcsx_bh); cs->readisac = NULL; cs->writeisac = NULL; @@ -1507,11 +1505,9 @@ int setup_hfcsx(struct IsdnCard *card) cs->BC_Write_Reg = NULL; cs->irq_func = &hfcsx_interrupt; - cs->hw.hfcsx.timer.function = (void *) hfcsx_Timer; - cs->hw.hfcsx.timer.data = (long) cs; cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */ cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */ - init_timer(&cs->hw.hfcsx.timer); + setup_timer(&cs->hw.hfcsx.timer, (void *)hfcsx_Timer, (long)cs); reset_hfcsx(cs); cs->cardmsg = &hfcsx_card_msg; diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 678bd5224bc338a2767a106fb8057f544f218050..6dbd1f1da14f1e8752caadf6c9f2ca192cc63a3d 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -1165,14 +1165,10 @@ hfc_usb_init(hfcusb_data *hfc) hfc->old_led_state = 0; /* init the t3 timer */ - init_timer(&hfc->t3_timer); - hfc->t3_timer.data = (long) hfc; - hfc->t3_timer.function = (void *) l1_timer_expire_t3; + setup_timer(&hfc->t3_timer, (void *)l1_timer_expire_t3, (long)hfc); /* init the t4 timer */ - init_timer(&hfc->t4_timer); - hfc->t4_timer.data = (long) hfc; - hfc->t4_timer.function = (void *) l1_timer_expire_t4; + setup_timer(&hfc->t4_timer, (void *)l1_timer_expire_t4, (long)hfc); /* init the background machinery for control requests */ hfc->ctrl_read.bRequestType = 0xc0; diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c index 394da646e97b7316e618f054519095f83c338caa..467287096918a6661c93ab552a1759fb83fd45c8 100644 --- a/drivers/isdn/hisax/hfcscard.c +++ b/drivers/isdn/hisax/hfcscard.c @@ -253,9 +253,7 @@ int setup_hfcs(struct IsdnCard *card) outb(0x57, cs->hw.hfcD.addr | 1); } set_cs_func(cs); - cs->hw.hfcD.timer.function = (void *) hfcs_Timer; - cs->hw.hfcD.timer.data = (long) cs; - init_timer(&cs->hw.hfcD.timer); + setup_timer(&cs->hw.hfcD.timer, (void *)hfcs_Timer, (long)cs); cs->cardmsg = &hfcs_card_msg; cs->irq_func = &hfcs_interrupt; return (1); diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c index 96d1df05044fb48ffceb988dd90540db9f125cdd..c7c3797a817ebecc285248985fbb4dedc50d6493 100644 --- a/drivers/isdn/hisax/icc.c +++ b/drivers/isdn/hisax/icc.c @@ -676,7 +676,5 @@ clear_pending_icc_ints(struct IsdnCardState *cs) void setup_icc(struct IsdnCardState *cs) { INIT_WORK(&cs->tqueue, icc_bh); - cs->dbusytimer.function = (void *) dbusy_timer_handler; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs); } diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c index 9cc26b40a43771dee4d670f2416711897bc9ac1a..43effe7082ed9bba9537648eef9d950a0e172785 100644 --- a/drivers/isdn/hisax/ipacx.c +++ b/drivers/isdn/hisax/ipacx.c @@ -424,9 +424,7 @@ dch_init(struct IsdnCardState *cs) cs->setstack_d = dch_setstack; - cs->dbusytimer.function = (void *) dbusy_timer_handler; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs); cs->writeisac(cs, IPACX_TR_CONF0, 0x00); // clear LDD cs->writeisac(cs, IPACX_TR_CONF2, 0x00); // enable transmitter diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c index 
df7e05ca8f9c197acf94040606555fcd45a51067..4273b4548825136624a3687cae20b971fa3e0244 100644 --- a/drivers/isdn/hisax/isac.c +++ b/drivers/isdn/hisax/isac.c @@ -677,7 +677,5 @@ void clear_pending_isac_ints(struct IsdnCardState *cs) void setup_isac(struct IsdnCardState *cs) { INIT_WORK(&cs->tqueue, isac_bh); - cs->dbusytimer.function = (void *) dbusy_timer_handler; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs); } diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index f4956c73aa116de71a99a9ae705c81088f3fbed2..0dc60b287c4b000b94f3bb97183ad10bfc1254b9 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c @@ -1902,10 +1902,8 @@ void initisar(struct IsdnCardState *cs) cs->bcs[1].BC_SetStack = setstack_isar; cs->bcs[0].BC_Close = close_isarstate; cs->bcs[1].BC_Close = close_isarstate; - cs->bcs[0].hw.isar.ftimer.function = (void *) ftimer_handler; - cs->bcs[0].hw.isar.ftimer.data = (long) &cs->bcs[0]; - init_timer(&cs->bcs[0].hw.isar.ftimer); - cs->bcs[1].hw.isar.ftimer.function = (void *) ftimer_handler; - cs->bcs[1].hw.isar.ftimer.data = (long) &cs->bcs[1]; - init_timer(&cs->bcs[1].hw.isar.ftimer); + setup_timer(&cs->bcs[0].hw.isar.ftimer, (void *)ftimer_handler, + (long)&cs->bcs[0]); + setup_timer(&cs->bcs[1].hw.isar.ftimer, (void *)ftimer_handler, + (long)&cs->bcs[1]); } diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c index c754706f83cdc190ca18e299896590430e7b9824..569ce52c567b2beb9f42097bebe88af025d6dbed 100644 --- a/drivers/isdn/hisax/isdnl3.c +++ b/drivers/isdn/hisax/isdnl3.c @@ -169,9 +169,7 @@ void L3InitTimer(struct l3_process *pc, struct L3Timer *t) { t->pc = pc; - t->tl.function = (void *) L3ExpireTimer; - t->tl.data = (long) t; - init_timer(&t->tl); + setup_timer(&t->tl, (void *)L3ExpireTimer, (long)t); } void diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c index bf647545c70c45b9bc5b0e381af27f67f17a3662..950399f066ef109cc558f09e438dc729a0843643 100644 --- a/drivers/isdn/hisax/teleint.c +++ b/drivers/isdn/hisax/teleint.c @@ -278,9 +278,7 @@ int setup_TeleInt(struct IsdnCard *card) cs->bcs[0].hw.hfc.send = NULL; cs->bcs[1].hw.hfc.send = NULL; cs->hw.hfc.fifosize = 7 * 1024 + 512; - cs->hw.hfc.timer.function = (void *) TeleInt_Timer; - cs->hw.hfc.timer.data = (long) cs; - init_timer(&cs->hw.hfc.timer); + setup_timer(&cs->hw.hfc.timer, (void *)TeleInt_Timer, (long)cs); if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) { printk(KERN_WARNING "HiSax: TeleInt config port %x-%x already in use\n", diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index a85895585d906a6367db5cce95b00f031484d05d..c99f0ec58a0189885cdfdea5ae612f73e30c5925 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c @@ -901,9 +901,8 @@ static void initW6692(struct IsdnCardState *cs, int part) if (part & 1) { cs->setstack_d = setstack_W6692; cs->DC_Close = DC_Close_W6692; - cs->dbusytimer.function = (void *) dbusy_timer_handler; - cs->dbusytimer.data = (long) cs; - init_timer(&cs->dbusytimer); + setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, + (long)cs); resetW6692(cs); ph_command(cs, W_L1CMD_RST); cs->dc.w6692.ph_state = W_L1CMD_RST; diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 9c1e8adaf4fc825c54ff84e9c85d36a68ecb5da7..d07dd5196ffca59c11532051fb88e2ecdc7326c9 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -2370,9 +2370,8 @@ static struct 
ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s rs->state = CCPResetIdle; rs->is = is; rs->id = id; - init_timer(&rs->timer); - rs->timer.data = (unsigned long)rs; - rs->timer.function = isdn_ppp_ccp_timer_callback; + setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback, + (unsigned long)rs); is->reset->rs[id] = rs; } return rs; diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 1b169559a240b0c41b9fb8c34d4837ea55d96f77..ddd8207e4e54cf617e76a2f784bc56aeff76b22e 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -1812,9 +1812,8 @@ isdn_tty_modem_init(void) info->isdn_channel = -1; info->drv_index = -1; info->xmit_size = ISDN_SERIAL_XMIT_SIZE; - init_timer(&info->nc_timer); - info->nc_timer.function = isdn_tty_modem_do_ncarrier; - info->nc_timer.data = (unsigned long) info; + setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier, + (unsigned long)info); skb_queue_head_init(&info->xmit_queue); #ifdef CONFIG_ISDN_AUDIO skb_queue_head_init(&info->dtmf_queue); diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 9b85295aa6578f5ac5c86803e0923f29404b2e13..880e9d367a399ae6efc450a4eca70623717b8f51 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1092,9 +1092,7 @@ dspcreate(struct channel_req *crq) ndsp->pcm_bank_tx = -1; ndsp->hfc_conf = -1; /* current conference number */ /* set tone timer */ - ndsp->tone.tl.function = (void *)dsp_tone_timeout; - ndsp->tone.tl.data = (long) ndsp; - init_timer(&ndsp->tone.tl); + setup_timer(&ndsp->tone.tl, (void *)dsp_tone_timeout, (long)ndsp); if (dtmfthreshold < 20 || dtmfthreshold > 500) dtmfthreshold = 200; diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c index 26477d48bbda99cba2697e93f075703047098bab..78fc5d5e90514353b258658da350a85c408b89db 100644 --- a/drivers/isdn/mISDN/fsm.c +++ b/drivers/isdn/mISDN/fsm.c @@ -110,13 +110,11 @@ void mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft) { ft->fi = fi; - ft->tl.function = (void *) FsmExpireTimer; - ft->tl.data = (long) ft; #if FSM_TIMER_DEBUG if (ft->fi->debug) ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft); #endif - init_timer(&ft->tl); + setup_timer(&ft->tl, (void *)FsmExpireTimer, (long)ft); } EXPORT_SYMBOL(mISDN_FsmInitTimer); diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 6ceca7db62ad42c91c10561a2e6f3330ddbefa2a..6be2041248d34832e0ee9af75d753b96c1ca8df2 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -1443,9 +1443,7 @@ init_card(struct l1oip *hc, int pri, int bundle) hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */ add_timer(&hc->keep_tl); - hc->timeout_tl.function = (void *)l1oip_timeout; - hc->timeout_tl.data = (ulong)hc; - init_timer(&hc->timeout_tl); + setup_timer(&hc->timeout_tl, (void *)l1oip_timeout, (ulong)hc); hc->timeout_on = 0; /* state that we have timer off */ return 0; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 100fbdc9b95c8475a2ab42928254996b1b2beef6..83a1616903f8ab05f9e70419482169de746d0a5b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -355,6 +355,14 @@ config NET_VRF This option enables the support for mapping interfaces into VRF's. The support enables VRF devices. +config VSOCKMON + tristate "Virtual vsock monitoring device" + depends on VHOST_VSOCK + ---help--- + This option enables a monitoring net device for vsock sockets. 
It is + mostly intended for developers or support to debug vsock issues. If + unsure, say N. + endif # NET_CORE config SUNGEM_PHY diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 98ed4d96987c87fda074a219a234cbf8e48f2b9a..b2f6556d8848e74b0864b8ec291430bd7dc4a2ca 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_MII) += mii.o obj-$(CONFIG_MDIO) += mdio.o obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_NETCONSOLE) += netconsole.o -obj-$(CONFIG_PHYLIB) += phy/ +obj-y += phy/ obj-$(CONFIG_RIONET) += rionet.o obj-$(CONFIG_NET_TEAM) += team/ obj-$(CONFIG_TUN) += tun.o @@ -30,6 +30,7 @@ obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_GTP) += gtp.o obj-$(CONFIG_NLMON) += nlmon.o obj-$(CONFIG_NET_VRF) += vrf.o +obj-$(CONFIG_VSOCKMON) += vsockmon.o # # Networking Drivers diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index edc70ffad6607ac06d0a40b48316bef554c5f4c2..c5fd4259da331b27503644938ab22787e2eea8ae 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -92,6 +92,7 @@ enum ad_link_speed_type { AD_LINK_SPEED_2500MBPS, AD_LINK_SPEED_10000MBPS, AD_LINK_SPEED_20000MBPS, + AD_LINK_SPEED_25000MBPS, AD_LINK_SPEED_40000MBPS, AD_LINK_SPEED_56000MBPS, AD_LINK_SPEED_100000MBPS, @@ -260,6 +261,7 @@ static inline int __check_agg_selection_timer(struct port *port) * %AD_LINK_SPEED_2500MBPS, * %AD_LINK_SPEED_10000MBPS * %AD_LINK_SPEED_20000MBPS + * %AD_LINK_SPEED_25000MBPS * %AD_LINK_SPEED_40000MBPS * %AD_LINK_SPEED_56000MBPS * %AD_LINK_SPEED_100000MBPS @@ -302,6 +304,10 @@ static u16 __get_link_speed(struct port *port) speed = AD_LINK_SPEED_20000MBPS; break; + case SPEED_25000: + speed = AD_LINK_SPEED_25000MBPS; + break; + case SPEED_40000: speed = AD_LINK_SPEED_40000MBPS; break; @@ -707,6 +713,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator) case AD_LINK_SPEED_20000MBPS: bandwidth = nports * 20000; break; + case AD_LINK_SPEED_25000MBPS: + bandwidth = nports * 25000; + break; case AD_LINK_SPEED_40000MBPS: bandwidth = nports * 40000; break; @@ -1052,8 +1061,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) port->sm_rx_state = AD_RX_INITIALIZE; port->sm_vars |= AD_PORT_CHURNED; /* check if port is not enabled */ - } else if (!(port->sm_vars & AD_PORT_BEGIN) - && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) + } else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled) port->sm_rx_state = AD_RX_PORT_DISABLED; /* check if new lacpdu arrived */ else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || @@ -1081,11 +1089,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) /* if no lacpdu arrived and no timer is on */ switch (port->sm_rx_state) { case AD_RX_PORT_DISABLED: - if (port->sm_vars & AD_PORT_MOVED) - port->sm_rx_state = AD_RX_INITIALIZE; - else if (port->is_enabled - && (port->sm_vars - & AD_PORT_LACP_ENABLED)) + if (port->is_enabled && + (port->sm_vars & AD_PORT_LACP_ENABLED)) port->sm_rx_state = AD_RX_EXPIRED; else if (port->is_enabled && ((port->sm_vars @@ -1115,7 +1120,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) port->sm_vars &= ~AD_PORT_SELECTED; __record_default(port); port->actor_oper_port_state &= ~AD_STATE_EXPIRED; - port->sm_vars &= ~AD_PORT_MOVED; port->sm_rx_state = AD_RX_PORT_DISABLED; /* Fall Through */ @@ -2442,9 +2446,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave) spin_lock_bh(&slave->bond->mode_lock); ad_update_actor_keys(port, false); + 
spin_unlock_bh(&slave->bond->mode_lock); netdev_dbg(slave->bond->dev, "Port %d slave %s changed speed/duplex\n", port->actor_port_number, slave->dev->name); - spin_unlock_bh(&slave->bond->mode_lock); } /** @@ -2488,12 +2492,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) agg = __get_first_agg(port); ad_agg_selection_logic(agg, &dummy); + spin_unlock_bh(&slave->bond->mode_lock); + netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n", port->actor_port_number, link == BOND_LINK_UP ? "UP" : "DOWN"); - spin_unlock_bh(&slave->bond->mode_lock); - /* RTNL is held and mode_lock is released so it's safe * to update slave_array here. */ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c80b023092dde89f6b361e3879e06d70a9d3ad6f..7d7a3cec149a61241715c092a2fff02c0ce30fb0 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -687,7 +687,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) /* the arp must be sent on the selected rx channel */ tx_slave = rlb_choose_channel(skb, bond); if (tx_slave) - ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr); + bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr, + tx_slave->dev->addr_len); netdev_dbg(bond->dev, "Server sent ARP Reply packet\n"); } else if (arp->op_code == htons(ARPOP_REQUEST)) { /* Create an entry in the rx_hashtbl for this client as a @@ -1017,22 +1018,23 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], rcu_read_unlock(); } -static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) +static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], + unsigned int len) { struct net_device *dev = slave->dev; - struct sockaddr s_addr; + struct sockaddr_storage ss; if (BOND_MODE(slave->bond) == BOND_MODE_TLB) { - memcpy(dev->dev_addr, addr, dev->addr_len); + memcpy(dev->dev_addr, addr, len); return 0; } /* for rlb each slave must have a unique hw mac addresses so that * each slave will receive packets destined to a different mac */ - memcpy(s_addr.sa_data, addr, dev->addr_len); - s_addr.sa_family = dev->type; - if (dev_set_mac_address(dev, &s_addr)) { + memcpy(ss.__data, addr, len); + ss.ss_family = dev->type; + if (dev_set_mac_address(dev, (struct sockaddr *)&ss)) { netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n", dev->name); return -EOPNOTSUPP; @@ -1046,11 +1048,14 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) */ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2) { - u8 tmp_mac_addr[ETH_ALEN]; + u8 tmp_mac_addr[MAX_ADDR_LEN]; - ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr); - alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr); - alb_set_slave_mac_addr(slave2, tmp_mac_addr); + bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr, + slave1->dev->addr_len); + alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, + slave2->dev->addr_len); + alb_set_slave_mac_addr(slave2, tmp_mac_addr, + slave1->dev->addr_len); } @@ -1177,7 +1182,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav /* Try setting slave mac to bond address and fall-through * to code handling that situation below... 
*/ - alb_set_slave_mac_addr(slave, bond->dev->dev_addr); + alb_set_slave_mac_addr(slave, bond->dev->dev_addr, + bond->dev->addr_len); } /* The slave's address is equal to the address of the bond. @@ -1202,7 +1208,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav } if (free_mac_slave) { - alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr); + alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr, + free_mac_slave->dev->addr_len); netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n", slave->dev->name, free_mac_slave->dev->name); @@ -1234,8 +1241,8 @@ static int alb_set_mac_address(struct bonding *bond, void *addr) { struct slave *slave, *rollback_slave; struct list_head *iter; - struct sockaddr sa; - char tmp_addr[ETH_ALEN]; + struct sockaddr_storage ss; + char tmp_addr[MAX_ADDR_LEN]; int res; if (bond->alb_info.rlb_enabled) @@ -1243,12 +1250,14 @@ static int alb_set_mac_address(struct bonding *bond, void *addr) bond_for_each_slave(bond, slave, iter) { /* save net_device's current hw address */ - ether_addr_copy(tmp_addr, slave->dev->dev_addr); + bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr, + slave->dev->addr_len); res = dev_set_mac_address(slave->dev, addr); /* restore net_device's hw address */ - ether_addr_copy(slave->dev->dev_addr, tmp_addr); + bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr, + slave->dev->addr_len); if (res) goto unwind; @@ -1257,16 +1266,19 @@ static int alb_set_mac_address(struct bonding *bond, void *addr) return 0; unwind: - memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len); - sa.sa_family = bond->dev->type; + memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len); + ss.ss_family = bond->dev->type; /* unwind from head to the slave that failed */ bond_for_each_slave(bond, rollback_slave, iter) { if (rollback_slave == slave) break; - ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr); - dev_set_mac_address(rollback_slave->dev, &sa); - ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr); + bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr, + rollback_slave->dev->addr_len); + dev_set_mac_address(rollback_slave->dev, + (struct sockaddr *)&ss); + bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr, + rollback_slave->dev->addr_len); } return res; @@ -1582,7 +1594,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) { int res; - res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr); + res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr, + slave->dev->addr_len); if (res) return res; @@ -1696,17 +1709,20 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave * and thus filter bond->dev_addr's packets, so force bond's mac */ if (BOND_MODE(bond) == BOND_MODE_TLB) { - struct sockaddr sa; - u8 tmp_addr[ETH_ALEN]; + struct sockaddr_storage ss; + u8 tmp_addr[MAX_ADDR_LEN]; - ether_addr_copy(tmp_addr, new_slave->dev->dev_addr); + bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr, + new_slave->dev->addr_len); - memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len); - sa.sa_family = bond->dev->type; + bond_hw_addr_copy(ss.__data, bond->dev->dev_addr, + bond->dev->addr_len); + ss.ss_family = bond->dev->type; /* we don't care if it can't change its mac, best effort */ - dev_set_mac_address(new_slave->dev, &sa); + dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss); - ether_addr_copy(new_slave->dev->dev_addr, tmp_addr); + bond_hw_addr_copy(new_slave->dev->dev_addr, 
tmp_addr, + new_slave->dev->addr_len); } /* curr_active_slave must be set before calling alb_swap_mac_addr */ @@ -1716,7 +1732,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave alb_fasten_mac_swap(bond, swap_slave, new_slave); } else { /* set the new_slave to the bond mac address */ - alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); + alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, + bond->dev->addr_len); alb_send_learning_packets(new_slave, bond->dev->dev_addr, false); } @@ -1726,19 +1743,19 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) { struct bonding *bond = netdev_priv(bond_dev); - struct sockaddr *sa = addr; + struct sockaddr_storage *ss = addr; struct slave *curr_active; struct slave *swap_slave; int res; - if (!is_valid_ether_addr(sa->sa_data)) + if (!is_valid_ether_addr(ss->__data)) return -EADDRNOTAVAIL; res = alb_set_mac_address(bond, addr); if (res) return res; - memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len); + bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len); /* If there is no curr_active_slave there is nothing else to do. * Otherwise we'll need to pass the new address to it and handle @@ -1754,7 +1771,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_swap_mac_addr(swap_slave, curr_active); alb_fasten_mac_swap(bond, swap_slave, curr_active); } else { - alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr); + alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr, + bond_dev->addr_len); alb_send_learning_packets(curr_active, bond_dev->dev_addr, false); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 34481c9be1d192137e2dc7e3c8475184dfa3bec0..2be78807fd6e1318487961708394caead76af268 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -201,12 +201,6 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0); unsigned int bond_net_id __read_mostly; -static __be32 arp_target[BOND_MAX_ARP_TARGETS]; -static int arp_ip_count; -static int bond_mode = BOND_MODE_ROUNDROBIN; -static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; -static int lacp_fast; - /*-------------------------- Forward declarations ---------------------------*/ static int bond_init(struct net_device *bond_dev); @@ -371,9 +365,10 @@ int bond_set_carrier(struct bonding *bond) /* Get link speed and duplex from the slave's base driver * using ethtool. If for some reason the call fails or the * values are invalid, set speed and duplex to -1, - * and return. + * and return. Return 1 if speed or duplex settings are + * UNKNOWN; 0 otherwise. 
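+ * Callers treat a failed reading as link down: bond_enslave() now starts
+ * such a slave as BOND_LINK_DOWN and bond_miimon_commit() refuses to
+ * bring it up until speed/duplex read back as valid.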
*/ -static void bond_update_speed_duplex(struct slave *slave) +static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; struct ethtool_link_ksettings ecmd; @@ -384,23 +379,21 @@ static void bond_update_speed_duplex(struct slave *slave) res = __ethtool_get_link_ksettings(slave_dev, &ecmd); if (res < 0) - return; - + return 1; if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) - return; - + return 1; switch (ecmd.base.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; default: - return; + return 1; } slave->speed = ecmd.base.speed; slave->duplex = ecmd.base.duplex; - return; + return 0; } const char *bond_slave_link_status(s8 link) @@ -652,8 +645,8 @@ static void bond_do_fail_over_mac(struct bonding *bond, struct slave *new_active, struct slave *old_active) { - u8 tmp_mac[ETH_ALEN]; - struct sockaddr saddr; + u8 tmp_mac[MAX_ADDR_LEN]; + struct sockaddr_storage ss; int rv; switch (bond->params.fail_over_mac) { @@ -673,16 +666,20 @@ static void bond_do_fail_over_mac(struct bonding *bond, old_active = bond_get_old_active(bond, new_active); if (old_active) { - ether_addr_copy(tmp_mac, new_active->dev->dev_addr); - ether_addr_copy(saddr.sa_data, - old_active->dev->dev_addr); - saddr.sa_family = new_active->dev->type; + bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr, + new_active->dev->addr_len); + bond_hw_addr_copy(ss.__data, + old_active->dev->dev_addr, + old_active->dev->addr_len); + ss.ss_family = new_active->dev->type; } else { - ether_addr_copy(saddr.sa_data, bond->dev->dev_addr); - saddr.sa_family = bond->dev->type; + bond_hw_addr_copy(ss.__data, bond->dev->dev_addr, + bond->dev->addr_len); + ss.ss_family = bond->dev->type; } - rv = dev_set_mac_address(new_active->dev, &saddr); + rv = dev_set_mac_address(new_active->dev, + (struct sockaddr *)&ss); if (rv) { netdev_err(bond->dev, "Error %d setting MAC of slave %s\n", -rv, new_active->dev->name); @@ -692,10 +689,12 @@ static void bond_do_fail_over_mac(struct bonding *bond, if (!old_active) goto out; - ether_addr_copy(saddr.sa_data, tmp_mac); - saddr.sa_family = old_active->dev->type; + bond_hw_addr_copy(ss.__data, tmp_mac, + new_active->dev->addr_len); + ss.ss_family = old_active->dev->type; - rv = dev_set_mac_address(old_active->dev, &saddr); + rv = dev_set_mac_address(old_active->dev, + (struct sockaddr *)&ss); if (rv) netdev_err(bond->dev, "Error %d setting MAC of slave %s\n", -rv, new_active->dev->name); @@ -1177,6 +1176,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } + /* don't change skb->dev for link-local packets */ + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + return RX_HANDLER_PASS; if (bond_should_deliver_exact_match(skb, slave, bond)) return RX_HANDLER_EXACT; @@ -1191,7 +1193,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) kfree_skb(skb); return RX_HANDLER_CONSUMED; } - ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr); + bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, + bond->dev->addr_len); } return ret; @@ -1330,7 +1333,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) struct bonding *bond = netdev_priv(bond_dev); const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct slave *new_slave = NULL, *prev_slave; - struct sockaddr addr; + struct sockaddr_storage ss; int link_reporting; int res = 0, i; @@ -1481,16 +1484,17 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) * that need it, and for restoring it upon 
release, and then * set it to the master's address */ - ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr); + bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr, + slave_dev->addr_len); if (!bond->params.fail_over_mac || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* Set slave to master's mac address. The application already * set the master's mac address to that of the first slave */ - memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); - addr.sa_family = slave_dev->type; - res = dev_set_mac_address(slave_dev, &addr); + memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); + ss.ss_family = slave_dev->type; + res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss); if (res) { netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res); goto err_restore_mtu; @@ -1565,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->delay = 0; new_slave->link_failure_count = 0; - bond_update_speed_duplex(new_slave); + if (bond_update_speed_duplex(new_slave)) + new_slave->link = BOND_LINK_DOWN; new_slave->last_rx = jiffies - (msecs_to_jiffies(bond->params.arp_interval) + 1); @@ -1773,9 +1778,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) * MAC if this slave's MAC is in use by the bond, or at * least print a warning. */ - ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); + bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr, + new_slave->dev->addr_len); + ss.ss_family = slave_dev->type; + dev_set_mac_address(slave_dev, (struct sockaddr *)&ss); } err_restore_mtu: @@ -1818,7 +1824,7 @@ static int __bond_release_one(struct net_device *bond_dev, { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; - struct sockaddr addr; + struct sockaddr_storage ss; int old_flags = bond_dev->flags; netdev_features_t old_features = bond_dev->features; @@ -1953,9 +1959,10 @@ static int __bond_release_one(struct net_device *bond_dev, if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* restore original ("permanent") mac address */ - ether_addr_copy(addr.sa_data, slave->perm_hwaddr); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); + bond_hw_addr_copy(ss.__data, slave->perm_hwaddr, + slave->dev->addr_len); + ss.ss_family = slave_dev->type; + dev_set_mac_address(slave_dev, (struct sockaddr *)&ss); } dev_set_mtu(slave_dev, slave->original_mtu); @@ -2039,8 +2046,7 @@ static int bond_miimon_inspect(struct bonding *bond) if (link_state) continue; - bond_set_slave_link_state(slave, BOND_LINK_FAIL, - BOND_SLAVE_NOTIFY_LATER); + bond_propose_link_state(slave, BOND_LINK_FAIL); slave->delay = bond->params.downdelay; if (slave->delay) { netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", @@ -2055,13 +2061,13 @@ static int bond_miimon_inspect(struct bonding *bond) case BOND_LINK_FAIL: if (link_state) { /* recovered before downdelay expired */ - bond_set_slave_link_state(slave, BOND_LINK_UP, - BOND_SLAVE_NOTIFY_LATER); + bond_propose_link_state(slave, BOND_LINK_UP); slave->last_link_up = jiffies; netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", (bond->params.downdelay - slave->delay) * bond->params.miimon, slave->dev->name); + commit++; continue; } @@ -2078,8 +2084,7 @@ static int bond_miimon_inspect(struct bonding *bond) if (!link_state) continue; - 
bond_set_slave_link_state(slave, BOND_LINK_BACK, - BOND_SLAVE_NOTIFY_LATER); + bond_propose_link_state(slave, BOND_LINK_BACK); slave->delay = bond->params.updelay; if (slave->delay) { @@ -2092,14 +2097,12 @@ static int bond_miimon_inspect(struct bonding *bond) /*FALLTHRU*/ case BOND_LINK_BACK: if (!link_state) { - bond_set_slave_link_state(slave, - BOND_LINK_DOWN, - BOND_SLAVE_NOTIFY_LATER); + bond_propose_link_state(slave, BOND_LINK_DOWN); netdev_info(bond->dev, "link status down again after %d ms for interface %s\n", (bond->params.updelay - slave->delay) * bond->params.miimon, slave->dev->name); - + commit++; continue; } @@ -2132,7 +2135,13 @@ static void bond_miimon_commit(struct bonding *bond) continue; case BOND_LINK_UP: - bond_update_speed_duplex(slave); + if (bond_update_speed_duplex(slave)) { + slave->link = BOND_LINK_DOWN; + netdev_warn(bond->dev, + "failed to get link speed/duplex for %s\n", + slave->dev->name); + continue; + } bond_set_slave_link_state(slave, BOND_LINK_UP, BOND_SLAVE_NOTIFY_NOW); slave->last_link_up = jiffies; @@ -2231,6 +2240,8 @@ static void bond_mii_monitor(struct work_struct *work) mii_work.work); bool should_notify_peers = false; unsigned long delay; + struct slave *slave; + struct list_head *iter; delay = msecs_to_jiffies(bond->params.miimon); @@ -2251,6 +2262,9 @@ static void bond_mii_monitor(struct work_struct *work) goto re_arm; } + bond_for_each_slave(bond, slave, iter) { + bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); + } bond_miimon_commit(bond); rtnl_unlock(); /* might sleep, hold no other locks */ @@ -2575,10 +2589,8 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, * arp is transmitted to generate traffic. see activebackup_arp_monitor for * arp monitoring in active backup mode. 
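* Both monitors are now invoked from the common bond_arp_monitor() work
* item introduced below, which picks the right variant per mode, so the
* work struct no longer needs mode-specific initialization.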
*/ -static void bond_loadbalance_arp_mon(struct work_struct *work) +static void bond_loadbalance_arp_mon(struct bonding *bond) { - struct bonding *bond = container_of(work, struct bonding, - arp_work.work); struct slave *slave, *oldcurrent; struct list_head *iter; int do_failover = 0, slave_state_changed = 0; @@ -2916,10 +2928,8 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } -static void bond_activebackup_arp_mon(struct work_struct *work) +static void bond_activebackup_arp_mon(struct bonding *bond) { - struct bonding *bond = container_of(work, struct bonding, - arp_work.work); bool should_notify_peers = false; bool should_notify_rtnl = false; int delta_in_ticks; @@ -2972,6 +2982,17 @@ static void bond_activebackup_arp_mon(struct work_struct *work) } } +static void bond_arp_monitor(struct work_struct *work) +{ + struct bonding *bond = container_of(work, struct bonding, + arp_work.work); + + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) + bond_activebackup_arp_mon(bond); + else + bond_loadbalance_arp_mon(bond); +} + /*-------------------------- netdev event handling --------------------------*/ /* Change device name */ @@ -3222,16 +3243,13 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) /*-------------------------- Device entry points ----------------------------*/ -static void bond_work_init_all(struct bonding *bond) +void bond_work_init_all(struct bonding *bond) { INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); - if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) - INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); - else - INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); + INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); } @@ -3266,8 +3284,6 @@ static int bond_open(struct net_device *bond_dev) } } - bond_work_init_all(bond); - if (bond_is_lb(bond)) { /* bond_alb_initialize must be called before the timer * is started. @@ -3327,12 +3343,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { u64 nv = new[i]; u64 ov = old[i]; + s64 delta = nv - ov; /* detects if this particular field is 32bit only */ if (((nv | ov) >> 32) == 0) - res[i] += (u32)nv - (u32)ov; - else - res[i] += nv - ov; + delta = (s64)(s32)((u32)nv - (u32)ov); + + /* filter anomalies, some drivers reset their stats + * at down/up events. 
+ */ + if (delta > 0) + res[i] += delta; } } @@ -3619,7 +3640,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *rollback_slave; - struct sockaddr *sa = addr, tmp_sa; + struct sockaddr_storage *ss = addr, tmp_ss; struct list_head *iter; int res = 0; @@ -3636,7 +3657,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) return 0; - if (!is_valid_ether_addr(sa->sa_data)) + if (!is_valid_ether_addr(ss->__data)) return -EADDRNOTAVAIL; bond_for_each_slave(bond, slave, iter) { @@ -3655,12 +3676,12 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) } /* success */ - memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len); + memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len); return 0; unwind: - memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len); - tmp_sa.sa_family = bond_dev->type; + memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len); + tmp_ss.ss_family = bond_dev->type; /* unwind from head to the slave that failed */ bond_for_each_slave(bond, rollback_slave, iter) { @@ -3669,7 +3690,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) if (rollback_slave == slave) break; - tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa); + tmp_res = dev_set_mac_address(rollback_slave->dev, + (struct sockaddr *)&tmp_ss); if (tmp_res) { netdev_dbg(bond_dev, "unwind err %d dev %s\n", tmp_res, rollback_slave->dev->name); @@ -4252,6 +4274,12 @@ static int bond_check_params(struct bond_params *params) int arp_all_targets_value; u16 ad_actor_sys_prio = 0; u16 ad_user_port_key = 0; + __be32 arp_target[BOND_MAX_ARP_TARGETS]; + int arp_ip_count; + int bond_mode = BOND_MODE_ROUNDROBIN; + int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; + int lacp_fast = 0; + int tlb_dynamic_lb = 0; /* Convert string parameters. 
*/ if (mode) { @@ -4564,6 +4592,17 @@ static int bond_check_params(struct bond_params *params) } ad_user_port_key = valptr->value; + if (bond_mode == BOND_MODE_TLB) { + bond_opt_initstr(&newval, "default"); + valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), + &newval); + if (!valptr) { + pr_err("Error: No tlb_dynamic_lb default value"); + return -EINVAL; + } + tlb_dynamic_lb = valptr->value; + } + if (lp_interval == 0) { pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n", INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL); @@ -4591,7 +4630,7 @@ static int bond_check_params(struct bond_params *params) params->min_links = min_links; params->lp_interval = lp_interval; params->packets_per_slave = packets_per_slave; - params->tlb_dynamic_lb = 1; /* Default value */ + params->tlb_dynamic_lb = tlb_dynamic_lb; params->ad_actor_sys_prio = ad_actor_sys_prio; eth_zero_addr(params->ad_actor_system); params->ad_user_port_key = ad_user_port_key; @@ -4687,6 +4726,8 @@ int bond_create(struct net *net, const char *name) netif_carrier_off(bond_dev); + bond_work_init_all(bond); + rtnl_unlock(); if (res < 0) bond_destructor(bond_dev); diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b8df0f5e8c25ae35b1ce368b04536c40761a1c96..c502c139d3bc6b50eafddc257a60e43ae8100ed0 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -449,6 +449,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, err = register_netdevice(bond_dev); netif_carrier_off(bond_dev); + if (!err) { + struct bonding *bond = netdev_priv(bond_dev); + + bond_work_init_all(bond); + } return err; } diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index f514fe5e80a5369a6cc8f2bf3e293a3f09cb8b4f..d8d4ada034b7d6cbddbdf8f227cfe44fa15b4b27 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -183,7 +183,8 @@ static void bond_info_show_slave(struct seq_file *seq, seq_printf(seq, "Link Failure Count: %u\n", slave->link_failure_count); - seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); + seq_printf(seq, "Permanent HW addr: %*phC\n", + slave->dev->addr_len, slave->perm_hwaddr); seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); if (BOND_MODE(bond) == BOND_MODE_8023AD) { diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 22570ea3a8d217c13fde588a014b4c8010eb28da..ac4ff394bc569273403cd7219dd2d06c62f2f371 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -9,6 +9,24 @@ config CAN_VCAN This driver can also be built as a module. If so, the module will be called vcan. +config CAN_VXCAN + tristate "Virtual CAN Tunnel (vxcan)" + ---help--- + Similar to the virtual ethernet driver veth, vxcan implements a + local CAN traffic tunnel between two virtual CAN network devices. + When creating a vxcan, two vxcan devices are created as pair. + When one end receives the packet it appears on its pair and vice + versa. The vxcan can be used for cross namespace communication. + + In opposite to vcan loopback devices the vxcan only forwards CAN + frames to its pair and does *not* provide a local echo of sent + CAN frames. To disable a potential echo in af_can.c the vxcan driver + announces IFF_ECHO in the interface flags. To have a clean start + in each namespace the CAN GW hop counter is set to zero. + + This driver can also be built as a module. If so, the module + will be called vxcan. 
+ config CAN_SLCAN tristate "Serial / USB serial CAN Adaptors (slcan)" depends on TTY @@ -142,6 +160,7 @@ source "drivers/net/can/cc770/Kconfig" source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" +source "drivers/net/can/peak_canfd/Kconfig" source "drivers/net/can/rcar/Kconfig" source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/softing/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 0da4f2f5c7e3eaef08f9a3ee3e5a0ab8081a80a9..4aabbee133b898a6f30aec7626f6580c12e24054 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -3,6 +3,7 @@ # obj-$(CONFIG_CAN_VCAN) += vcan.o +obj-$(CONFIG_CAN_VXCAN) += vxcan.o obj-$(CONFIG_CAN_SLCAN) += slcan.o obj-$(CONFIG_CAN_DEV) += can-dev.o @@ -26,6 +27,7 @@ obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ +obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 7a6554efd42bfb10dbbfdf67d405507fa46488e8..bf8fdaeb955e620e169cc74995e6736bfeea807c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -23,7 +23,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> - +#include <linux/iopoll.h> #include <linux/can/dev.h> /* napi related */ @@ -37,17 +37,19 @@ enum m_can_reg { M_CAN_CREL = 0x0, M_CAN_ENDN = 0x4, M_CAN_CUST = 0x8, - M_CAN_FBTP = 0xc, + M_CAN_DBTP = 0xc, M_CAN_TEST = 0x10, M_CAN_RWD = 0x14, M_CAN_CCCR = 0x18, - M_CAN_BTP = 0x1c, + M_CAN_NBTP = 0x1c, M_CAN_TSCC = 0x20, M_CAN_TSCV = 0x24, M_CAN_TOCC = 0x28, M_CAN_TOCV = 0x2c, M_CAN_ECR = 0x40, M_CAN_PSR = 0x44, +/* TDCR Register only available for version >=3.1.x */ + M_CAN_TDCR = 0x48, M_CAN_IR = 0x50, M_CAN_IE = 0x54, M_CAN_ILS = 0x58, @@ -105,21 +107,29 @@ enum m_can_mram_cfg { MRAM_CFG_NUM, }; -/* Fast Bit Timing & Prescaler Register (FBTP) */ -#define FBTR_FBRP_MASK 0x1f -#define FBTR_FBRP_SHIFT 16 -#define FBTR_FTSEG1_SHIFT 8 -#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) -#define FBTR_FTSEG2_SHIFT 4 -#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) -#define FBTR_FSJW_SHIFT 0 -#define FBTR_FSJW_MASK 0x3 +/* Core Release Register (CREL) */ +#define CREL_REL_SHIFT 28 +#define CREL_REL_MASK (0xF << CREL_REL_SHIFT) +#define CREL_STEP_SHIFT 24 +#define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT) +#define CREL_SUBSTEP_SHIFT 20 +#define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT) + +/* Data Bit Timing & Prescaler Register (DBTP) */ +#define DBTP_TDC BIT(23) +#define DBTP_DBRP_SHIFT 16 +#define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT) +#define DBTP_DTSEG1_SHIFT 8 +#define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT) +#define DBTP_DTSEG2_SHIFT 4 +#define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT) +#define DBTP_DSJW_SHIFT 0 +#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT) /* Test Register (TEST) */ -#define TEST_LBCK BIT(4) +#define TEST_LBCK BIT(4) /* CC Control Register(CCCR) */ -#define CCCR_TEST BIT(7) #define CCCR_CMR_MASK 0x3 #define CCCR_CMR_SHIFT 10 #define CCCR_CMR_CANFD 0x1 @@ -130,21 +140,32 @@ enum m_can_mram_cfg { #define CCCR_CME_CAN 0 #define CCCR_CME_CANFD 0x1 #define CCCR_CME_CANFD_BRS 0x2 +#define CCCR_TXP BIT(14) #define CCCR_TEST BIT(7) #define CCCR_MON BIT(5) +#define CCCR_CSR BIT(4) +#define CCCR_CSA BIT(3) +#define CCCR_ASM BIT(2) #define CCCR_CCE BIT(1) #define CCCR_INIT BIT(0) #define
CCCR_CANFD 0x10 - -/* Bit Timing & Prescaler Register (BTP) */ -#define BTR_BRP_MASK 0x3ff -#define BTR_BRP_SHIFT 16 -#define BTR_TSEG1_SHIFT 8 -#define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT) -#define BTR_TSEG2_SHIFT 4 -#define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT) -#define BTR_SJW_SHIFT 0 -#define BTR_SJW_MASK 0xf +/* for version >=3.1.x */ +#define CCCR_EFBI BIT(13) +#define CCCR_PXHD BIT(12) +#define CCCR_BRSE BIT(9) +#define CCCR_FDOE BIT(8) +/* only for version >=3.2.x */ +#define CCCR_NISO BIT(15) + +/* Nominal Bit Timing & Prescaler Register (NBTP) */ +#define NBTP_NSJW_SHIFT 25 +#define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT) +#define NBTP_NBRP_SHIFT 16 +#define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT) +#define NBTP_NTSEG1_SHIFT 8 +#define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT) +#define NBTP_NTSEG2_SHIFT 0 +#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT) /* Error Counter Register(ECR) */ #define ECR_RP BIT(15) @@ -161,6 +182,13 @@ enum m_can_mram_cfg { /* Interrupt Register(IR) */ #define IR_ALL_INT 0xffffffff + +/* Renamed bits for versions > 3.1.x */ +#define IR_ARA BIT(29) +#define IR_PED BIT(28) +#define IR_PEA BIT(27) + +/* Bits for version 3.0.x */ #define IR_STE BIT(31) #define IR_FOE BIT(30) #define IR_ACKE BIT(29) @@ -194,33 +222,40 @@ enum m_can_mram_cfg { #define IR_RF0W BIT(1) #define IR_RF0N BIT(0) #define IR_ERR_STATE (IR_BO | IR_EW | IR_EP) -#define IR_ERR_LEC (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) -#define IR_ERR_BUS (IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \ IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ IR_RF1L | IR_RF0L) -#define IR_ERR_ALL (IR_ERR_STATE | IR_ERR_BUS) + +/* Interrupts for version 3.0.x */ +#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ IR_RF1L | IR_RF0L) +#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) +/* Interrupts for version >= 3.1.x */ +#define IR_ERR_LEC_31X (IR_PED | IR_PEA) +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ + IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ + IR_RF1L | IR_RF0L) +#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) /* Interrupt Line Select (ILS) */ #define ILS_ALL_INT0 0x0 #define ILS_ALL_INT1 0xFFFFFFFF /* Interrupt Line Enable (ILE) */ -#define ILE_EINT0 BIT(0) #define ILE_EINT1 BIT(1) +#define ILE_EINT1 BIT(1) +#define ILE_EINT0 BIT(0) /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ -#define RXFC_FWM_OFF 24 -#define RXFC_FWM_MASK 0x7f -#define RXFC_FWM_1 (1 << RXFC_FWM_OFF) -#define RXFC_FS_OFF 16 -#define RXFC_FS_MASK 0x7f +#define RXFC_FWM_SHIFT 24 +#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) +#define RXFC_FS_SHIFT 16 +#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) /* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ #define RXFS_RFL BIT(25) #define RXFS_FF BIT(24) -#define RXFS_FPI_OFF 16 +#define RXFS_FPI_SHIFT 16 #define RXFS_FPI_MASK 0x3f0000 -#define RXFS_FGI_OFF 8 +#define RXFS_FGI_SHIFT 8 #define RXFS_FGI_MASK 0x3f00 #define RXFS_FFL_MASK 0x7f @@ -229,23 +264,46 @@ enum m_can_mram_cfg { #define M_CAN_RXESC_64BYTES 0x777 /* Tx Buffer Configuration(TXBC) */ -#define TXBC_NDTB_OFF 16 -#define TXBC_NDTB_MASK 0x3f +#define TXBC_NDTB_SHIFT 16 +#define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT) +#define TXBC_TFQS_SHIFT 24 +#define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT) + +/* Tx FIFO/Queue Status (TXFQS) */ +#define TXFQS_TFQF BIT(21) +#define TXFQS_TFQPI_SHIFT 16 +#define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT) +#define TXFQS_TFGI_SHIFT 8 +#define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT) +#define
TXFQS_TFFL_SHIFT 0 +#define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT) /* Tx Buffer Element Size Configuration(TXESC) */ #define TXESC_TBDS_8BYTES 0x0 #define TXESC_TBDS_64BYTES 0x7 -/* Tx Event FIFO Con.guration (TXEFC) */ -#define TXEFC_EFS_OFF 16 -#define TXEFC_EFS_MASK 0x3f +/* Tx Event FIFO Configuration (TXEFC) */ +#define TXEFC_EFS_SHIFT 16 +#define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT) + +/* Tx Event FIFO Status (TXEFS) */ +#define TXEFS_TEFL BIT(25) +#define TXEFS_EFF BIT(24) +#define TXEFS_EFGI_SHIFT 8 +#define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT) +#define TXEFS_EFFL_SHIFT 0 +#define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT) + +/* Tx Event FIFO Acknowledge (TXEFA) */ +#define TXEFA_EFAI_SHIFT 0 +#define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT) /* Message RAM Configuration (in bytes) */ #define SIDF_ELEMENT_SIZE 4 #define XIDF_ELEMENT_SIZE 8 #define RXF0_ELEMENT_SIZE 72 #define RXF1_ELEMENT_SIZE 72 -#define RXB_ELEMENT_SIZE 16 +#define RXB_ELEMENT_SIZE 72 #define TXE_ELEMENT_SIZE 8 #define TXB_ELEMENT_SIZE 72 @@ -261,13 +319,25 @@ enum m_can_mram_cfg { #define RX_BUF_RTR BIT(29) /* R1 */ #define RX_BUF_ANMF BIT(31) -#define RX_BUF_EDL BIT(21) +#define RX_BUF_FDF BIT(21) #define RX_BUF_BRS BIT(20) /* Tx Buffer Element */ -/* R0 */ +/* T0 */ +#define TX_BUF_ESI BIT(31) #define TX_BUF_XTD BIT(30) #define TX_BUF_RTR BIT(29) +/* T1 */ +#define TX_BUF_EFC BIT(23) +#define TX_BUF_FDF BIT(21) +#define TX_BUF_BRS BIT(20) +#define TX_BUF_MM_SHIFT 24 +#define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT) + +/* Tx event FIFO Element */ +/* E1 */ +#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT +#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) /* address offset and element number for each FIFO/Buffer in the Message RAM */ struct mram_cfg { @@ -285,6 +355,7 @@ struct m_can_priv { struct clk *cclk; void __iomem *base; u32 irqstatus; + int version; /* message ram configuration */ void __iomem *mram_base; @@ -316,6 +387,18 @@ static inline void m_can_fifo_write(const struct m_can_priv *priv, fpi * TXB_ELEMENT_SIZE + offset); } +static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv, + u32 fgi, + u32 offset) { + return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off + + fgi * TXE_ELEMENT_SIZE + offset); +} + +static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv) +{ + return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF); +} + static inline void m_can_config_endisable(const struct m_can_priv *priv, bool enable) { @@ -349,7 +432,8 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) { - m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1); + /* Only interrupt line 0 is used in this driver */ + m_can_write(priv, M_CAN_ILE, ILE_EINT0); } static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) @@ -367,9 +451,9 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) int i; /* calculate the fifo get index for where to read data */ - fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT; dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); - if (dlc & RX_BUF_EDL) + if (dlc & RX_BUF_FDF) skb = alloc_canfd_skb(dev, &cf); else skb = alloc_can_skb(dev, (struct can_frame **)&cf); @@ -378,7 +462,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) return; } - if (dlc & RX_BUF_EDL) + if (dlc & RX_BUF_FDF) cf->len = can_dlc2len((dlc >> 16) & 0x0F); else cf->len = get_can_dlc((dlc >> 16) & 0x0F); @@ 
-394,7 +478,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) netdev_dbg(dev, "ESI Error\n"); } - if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { + if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { if (dlc & RX_BUF_BRS) @@ -532,7 +616,7 @@ static int __m_can_get_berr_counter(const struct net_device *dev, ecr = m_can_read(priv, M_CAN_ECR); bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; - bec->txerr = ecr & ECR_TEC_MASK; + bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT; return 0; } @@ -723,7 +807,7 @@ static int m_can_poll(struct napi_struct *napi, int quota) if (irqstatus & IR_ERR_STATE) work_done += m_can_handle_state_errors(dev, psr); - if (irqstatus & IR_ERR_BUS) + if (irqstatus & IR_ERR_BUS_30X) work_done += m_can_handle_bus_errors(dev, irqstatus, psr); if (irqstatus & IR_RF0N) @@ -738,6 +822,44 @@ static int m_can_poll(struct napi_struct *napi, int quota) return work_done; } +static void m_can_echo_tx_event(struct net_device *dev) +{ + u32 txe_count = 0; + u32 m_can_txefs; + u32 fgi = 0; + int i = 0; + unsigned int msg_mark; + + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + + /* read tx event fifo status */ + m_can_txefs = m_can_read(priv, M_CAN_TXEFS); + + /* Get Tx Event fifo element count */ + txe_count = (m_can_txefs & TXEFS_EFFL_MASK) + >> TXEFS_EFFL_SHIFT; + + /* Get and process all sent elements */ + for (i = 0; i < txe_count; i++) { + /* retrieve get index */ + fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK) + >> TXEFS_EFGI_SHIFT; + + /* get message marker */ + msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) & + TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; + + /* ack txe element */ + m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK & + (fgi << TXEFA_EFAI_SHIFT))); + + /* update stats */ + stats->tx_bytes += can_get_echo_skb(dev, msg_mark); + stats->tx_packets++; + } +} + static irqreturn_t m_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; @@ -758,24 +880,35 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) * - state change IRQ * - bus error IRQ and bus error reporting */ - if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) { + if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { priv->irqstatus = ir; m_can_disable_all_interrupts(priv); napi_schedule(&priv->napi); } - /* transmission complete interrupt */ - if (ir & IR_TC) { - stats->tx_bytes += can_get_echo_skb(dev, 0); - stats->tx_packets++; - can_led_event(dev, CAN_LED_EVENT_TX); - netif_wake_queue(dev); + if (priv->version == 30) { + if (ir & IR_TC) { + /* Transmission Complete Interrupt*/ + stats->tx_bytes += can_get_echo_skb(dev, 0); + stats->tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); + netif_wake_queue(dev); + } + } else { + if (ir & IR_TEFN) { + /* New TX FIFO Element arrived */ + m_can_echo_tx_event(dev); + can_led_event(dev, CAN_LED_EVENT_TX); + if (netif_queue_stopped(dev) && + !m_can_tx_fifo_full(priv)) + netif_wake_queue(dev); + } } return IRQ_HANDLED; } -static const struct can_bittiming_const m_can_bittiming_const = { +static const struct can_bittiming_const m_can_bittiming_const_30X = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 64, @@ -787,7 +920,7 @@ static const struct can_bittiming_const m_can_bittiming_const = { .brp_inc = 1, }; -static const struct can_bittiming_const m_can_data_bittiming_const = { +static const struct can_bittiming_const m_can_data_bittiming_const_30X = { .name = KBUILD_MODNAME, .tseg1_min 
= 2, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 16, @@ -799,6 +932,30 @@ static const struct can_bittiming_const m_can_data_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const m_can_bittiming_const_31X = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 256, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_data_bittiming_const_31X = { + .name = KBUILD_MODNAME, + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 32, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + static int m_can_set_bittiming(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); @@ -811,19 +968,19 @@ static int m_can_set_bittiming(struct net_device *dev) sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; tseg2 = bt->phase_seg2 - 1; - reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | - (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); - m_can_write(priv, M_CAN_BTP, reg_btp); + reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) | + (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT); + m_can_write(priv, M_CAN_NBTP, reg_btp); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { brp = dbt->brp - 1; sjw = dbt->sjw - 1; tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; tseg2 = dbt->phase_seg2 - 1; - reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | - (tseg1 << FBTR_FTSEG1_SHIFT) | - (tseg2 << FBTR_FTSEG2_SHIFT); - m_can_write(priv, M_CAN_FBTP, reg_btp); + reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) | + (tseg1 << DBTP_DTSEG1_SHIFT) | + (tseg2 << DBTP_DTSEG2_SHIFT); + m_can_write(priv, M_CAN_DBTP, reg_btp); } return 0; @@ -834,6 +991,7 @@ static int m_can_set_bittiming(struct net_device *dev) * - configure rx fifo * - accept non-matching frame into fifo 0 * - configure tx buffer + * - >= v3.1.x: TX FIFO is used * - configure mode * - setup bittiming */ @@ -850,49 +1008,89 @@ static void m_can_chip_config(struct net_device *dev) /* Accept Non-matching Frames Into FIFO 0 */ m_can_write(priv, M_CAN_GFC, 0x0); - /* only support one Tx Buffer currently */ - m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | - priv->mcfg[MRAM_TXB].off); + if (priv->version == 30) { + /* only support one Tx Buffer currently */ + m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | + priv->mcfg[MRAM_TXB].off); + } else { + /* TX FIFO is used for newer IP Core versions */ + m_can_write(priv, M_CAN_TXBC, + (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | + (priv->mcfg[MRAM_TXB].off)); + } /* support 64 bytes payload */ m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); - m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | - priv->mcfg[MRAM_TXE].off); + /* TX Event FIFO */ + if (priv->version == 30) { + m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | + priv->mcfg[MRAM_TXE].off); + } else { + /* Full TX Event FIFO is used */ + m_can_write(priv, M_CAN_TXEFC, + ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) + & TXEFC_EFS_MASK) | + priv->mcfg[MRAM_TXE].off); + } /* rx fifo configuration, blocking mode, fifo size 1 */ m_can_write(priv, M_CAN_RXF0C, - (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) | - RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off); + (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | + priv->mcfg[MRAM_RXF0].off); 
m_can_write(priv, M_CAN_RXF1C, - (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) | - RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); + (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | + priv->mcfg[MRAM_RXF1].off); cccr = m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | - (CCCR_CME_MASK << CCCR_CME_SHIFT)); test = m_can_read(priv, M_CAN_TEST); test &= ~TEST_LBCK; + if (priv->version == 30) { + /* Version 3.0.x */ - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - cccr |= CCCR_MON; + cccr &= ~(CCCR_TEST | CCCR_MON | + (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | + (CCCR_CME_MASK << CCCR_CME_SHIFT)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + + } else { + /* Version 3.1.x or 3.2.x */ + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); + /* Only 3.2.x has NISO Bit implemented */ + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + cccr |= CCCR_NISO; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= (CCCR_BRSE | CCCR_FDOE); + } + + /* Loopback Mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { - cccr |= CCCR_TEST; + cccr |= CCCR_TEST | CCCR_MON; test |= TEST_LBCK; } - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) - cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + /* Enable Monitoring (all versions) */ + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cccr |= CCCR_MON; + /* Write config */ m_can_write(priv, M_CAN_CCCR, cccr); m_can_write(priv, M_CAN_TEST, test); - /* enable interrupts */ + /* Enable interrupts */ m_can_write(priv, M_CAN_IR, IR_ALL_INT); if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) - m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC); + if (priv->version == 30) + m_can_write(priv, M_CAN_IE, IR_ALL_INT & + ~(IR_ERR_LEC_30X)); + else + m_can_write(priv, M_CAN_IE, IR_ALL_INT & + ~(IR_ERR_LEC_31X)); else m_can_write(priv, M_CAN_IE, IR_ALL_INT); @@ -936,33 +1134,140 @@ static void free_m_can_dev(struct net_device *dev) free_candev(dev); } -static struct net_device *alloc_m_can_dev(void) +/* Checks core release number of M_CAN + * returns 0 if an unsupported device is detected + * else it returns the release and step coded as: + * return value = 10 * <release> + 1 * <step> + */ +static int m_can_check_core_release(void __iomem *m_can_base) +{ + u32 crel_reg; + u8 rel; + u8 step; + int res; + struct m_can_priv temp_priv = { + .base = m_can_base + }; + + /* Read Core Release Version and split into version number + * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1; + */ + crel_reg = m_can_read(&temp_priv, M_CAN_CREL); + rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT); + step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT); + + if (rel == 3) { + /* M_CAN v3.x.y: create return value */ + res = 30 + step; + } else { + /* Unsupported M_CAN version */ + res = 0; + } + + return res; +} + +/* Selectable Non ISO support only in version 3.2.x + * This function checks if the bit is writable.
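+ * It sets CCCR.NISO and polls the register back: a 3.2.x core latches the
+ * bit so the poll succeeds, while older cores never reflect it and the
+ * poll times out, in which case CAN_CTRLMODE_FD_NON_ISO is not advertised.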
+ */ +static bool m_can_niso_supported(const struct m_can_priv *priv) +{ + u32 cccr_reg, cccr_poll; + int niso_timeout; + + m_can_config_endisable(priv, true); + cccr_reg = m_can_read(priv, M_CAN_CCCR); + cccr_reg |= CCCR_NISO; + m_can_write(priv, M_CAN_CCCR, cccr_reg); + + niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll, + (cccr_poll == cccr_reg), 0, 10); + + /* Clear NISO */ + cccr_reg &= ~(CCCR_NISO); + m_can_write(priv, M_CAN_CCCR, cccr_reg); + + m_can_config_endisable(priv, false); + + /* return false if time out (-ETIMEDOUT), else return true */ + return !niso_timeout; +} + +static struct net_device *alloc_m_can_dev(struct platform_device *pdev, + void __iomem *addr, u32 tx_fifo_size) { struct net_device *dev; struct m_can_priv *priv; + int m_can_version; + unsigned int echo_buffer_count; + + m_can_version = m_can_check_core_release(addr); + /* return if unsupported version */ + if (!m_can_version) { + dev = NULL; + goto return_dev; + } - dev = alloc_candev(sizeof(*priv), 1); - if (!dev) - return NULL; + /* If version < 3.1.x, then only one echo buffer is used */ + echo_buffer_count = ((m_can_version == 30) + ? 1U + : (unsigned int)tx_fifo_size); + dev = alloc_candev(sizeof(*priv), echo_buffer_count); + if (!dev) { + dev = NULL; + goto return_dev; + } priv = netdev_priv(dev); netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); + /* Shared properties of all M_CAN versions */ + priv->version = m_can_version; priv->dev = dev; - priv->can.bittiming_const = &m_can_bittiming_const; - priv->can.data_bittiming_const = &m_can_data_bittiming_const; + priv->base = addr; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; - /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ - can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - - /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ + /* Set M_CAN supported operations */ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD; + /* Set properties depending on M_CAN version */ + switch (priv->version) { + case 30: + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ + can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + priv->can.bittiming_const = &m_can_bittiming_const_30X; + priv->can.data_bittiming_const = + &m_can_data_bittiming_const_30X; + break; + case 31: + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ + can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + priv->can.bittiming_const = &m_can_bittiming_const_31X; + priv->can.data_bittiming_const = + &m_can_data_bittiming_const_31X; + break; + case 32: + priv->can.bittiming_const = &m_can_bittiming_const_31X; + priv->can.data_bittiming_const = + &m_can_data_bittiming_const_31X; + priv->can.ctrlmode_supported |= (m_can_niso_supported(priv) + ? CAN_CTRLMODE_FD_NON_ISO + : 0); + break; + default: + /* Unsupported device: free candev */ + free_m_can_dev(dev); + dev_err(&pdev->dev, "Unsupported version number: %2d", + priv->version); + dev = NULL; + break; + } + +return_dev: return dev; } @@ -1040,19 +1345,34 @@ static int m_can_close(struct net_device *dev) return 0; } +static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) +{ + struct m_can_priv *priv = netdev_priv(dev); + /*get wrap around for loopback skb index */ + unsigned int wrap = priv->can.echo_skb_max; + int next_idx; + + /* calculate next index */ + next_idx = (++putidx >= wrap ? 
0 : putidx); + + /* check if occupied */ + return !!priv->can.echo_skb[next_idx]; +} + static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u32 id, cccr; + u32 id, cccr, fdflags; int i; + int putidx; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; - netif_stop_queue(dev); - + /* Generate ID field for TX buffer Element */ + /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { id = cf->can_id & CAN_EFF_MASK; id |= TX_BUF_XTD; @@ -1063,33 +1383,93 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) id |= TX_BUF_RTR; - /* message ram configuration */ - m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); + if (priv->version == 30) { + netif_stop_queue(dev); - for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), - *(u32 *)(cf->data + i)); + /* message ram configuration */ + m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, + can_len2dlc(cf->len) << 16); - can_put_echo_skb(skb, dev, 0); + for (i = 0; i < cf->len; i += 4) + m_can_fifo_write(priv, 0, + M_CAN_FIFO_DATA(i / 4), + *(u32 *)(cf->data + i)); + + can_put_echo_skb(skb, dev, 0); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + cccr |= CCCR_CMR_CANFD_BRS << + CCCR_CMR_SHIFT; + else + cccr |= CCCR_CMR_CANFD << + CCCR_CMR_SHIFT; + } else { + cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + } + m_can_write(priv, M_CAN_CCCR, cccr); + } + m_can_write(priv, M_CAN_TXBTIE, 0x1); + m_can_write(priv, M_CAN_TXBAR, 0x1); + /* End of xmit function for version 3.0.x */ + } else { + /* Transmit routine for version >= v3.1.x */ + + /* Check if FIFO full */ + if (m_can_tx_fifo_full(priv)) { + /* This shouldn't happen */ + netif_stop_queue(dev); + netdev_warn(dev, + "TX queue active although FIFO is full."); + return NETDEV_TX_BUSY; + } - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { - cccr = m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + /* get put index for frame */ + putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) + >> TXFQS_TFQPI_SHIFT); + /* Write ID Field to FIFO Element */ + m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id); + + /* get CAN FD configuration of frame */ + fdflags = 0; if (can_is_canfd_skb(skb)) { + fdflags |= TX_BUF_FDF; if (cf->flags & CANFD_BRS) - cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; - else - cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; - } else { - cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + fdflags |= TX_BUF_BRS; } - m_can_write(priv, M_CAN_CCCR, cccr); - } - /* enable first TX buffer to start transfer */ - m_can_write(priv, M_CAN_TXBTIE, 0x1); - m_can_write(priv, M_CAN_TXBAR, 0x1); + /* Construct DLC Field. Also contains CAN-FD configuration + * use put index of fifo as message marker + * it is used in TX interrupt for + * sending the correct echo frame + */ + m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC, + ((putidx << TX_BUF_MM_SHIFT) & + TX_BUF_MM_MASK) | + (can_len2dlc(cf->len) << 16) | + fdflags | TX_BUF_EFC); + + for (i = 0; i < cf->len; i += 4) + m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4), + *(u32 *)(cf->data + i)); + + /* Push loopback echo. 
+ * Will be looped back on TX interrupt based on message marker + */ + can_put_echo_skb(skb, dev, putidx); + + /* Enable TX FIFO element to start transfer */ + m_can_write(priv, M_CAN_TXBAR, (1 << putidx)); + + /* stop network queue if fifo full */ + if (m_can_tx_fifo_full(priv) || + m_can_next_echo_skb_occupied(dev, putidx)) + netif_stop_queue(dev); + } return NETDEV_TX_OK; } @@ -1109,55 +1489,37 @@ static int register_m_can_dev(struct net_device *dev) return register_candev(dev); } -static int m_can_of_parse_mram(struct platform_device *pdev, - struct m_can_priv *priv) +static void m_can_of_parse_mram(struct m_can_priv *priv, + const u32 *mram_config_vals) { - struct device_node *np = pdev->dev.of_node; - struct resource *res; - void __iomem *addr; - u32 out_val[MRAM_CFG_LEN]; - int i, start, end, ret; + int i, start, end; - /* message ram could be shared */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); - if (!res) - return -ENODEV; - - addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!addr) - return -ENOMEM; - - /* get message ram configuration */ - ret = of_property_read_u32_array(np, "bosch,mram-cfg", - out_val, sizeof(out_val) / 4); - if (ret) { - dev_err(&pdev->dev, "can not get message ram configuration\n"); - return -ENODEV; - } - - priv->mram_base = addr; - priv->mcfg[MRAM_SIDF].off = out_val[0]; - priv->mcfg[MRAM_SIDF].num = out_val[1]; + priv->mcfg[MRAM_SIDF].off = mram_config_vals[0]; + priv->mcfg[MRAM_SIDF].num = mram_config_vals[1]; priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_XIDF].num = out_val[2]; + priv->mcfg[MRAM_XIDF].num = mram_config_vals[2]; priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK; + priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] & + (RXFC_FS_MASK >> RXFC_FS_SHIFT); priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; - priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK; + priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] & + (RXFC_FS_MASK >> RXFC_FS_SHIFT); priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; - priv->mcfg[MRAM_RXB].num = out_val[5]; + priv->mcfg[MRAM_RXB].num = mram_config_vals[5]; priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; - priv->mcfg[MRAM_TXE].num = out_val[6]; + priv->mcfg[MRAM_TXE].num = mram_config_vals[6]; priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; - priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK; + priv->mcfg[MRAM_TXB].num = mram_config_vals[7] & + (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); - dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + dev_dbg(priv->device, + "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", priv->mram_base, priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, @@ -1176,7 +1538,6 @@ static int m_can_of_parse_mram(struct platform_device *pdev, for (i = start; i < end; i += 4) writel(0x0, priv->mram_base + i); - return 0; } static int m_can_plat_probe(struct platform_device *pdev) @@ -1185,38 +1546,86 @@ static int m_can_plat_probe(struct platform_device *pdev) struct m_can_priv 
*priv; struct resource *res; void __iomem *addr; + void __iomem *mram_addr; struct clk *hclk, *cclk; int irq, ret; + struct device_node *np; + u32 mram_config_vals[MRAM_CFG_LEN]; + u32 tx_fifo_size; + + np = pdev->dev.of_node; hclk = devm_clk_get(&pdev->dev, "hclk"); cclk = devm_clk_get(&pdev->dev, "cclk"); + if (IS_ERR(hclk) || IS_ERR(cclk)) { - dev_err(&pdev->dev, "no clock find\n"); - return -ENODEV; + dev_err(&pdev->dev, "no clock found\n"); + ret = -ENODEV; + goto failed_ret; } + /* Enable clocks. Necessary to read Core Release in order to determine + * M_CAN version + */ + ret = clk_prepare_enable(hclk); + if (ret) + goto disable_hclk_ret; + + ret = clk_prepare_enable(cclk); + if (ret) + goto disable_cclk_ret; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); addr = devm_ioremap_resource(&pdev->dev, res); irq = platform_get_irq_byname(pdev, "int0"); - if (IS_ERR(addr) || irq < 0) - return -EINVAL; - /* allocate the m_can device */ - dev = alloc_m_can_dev(); - if (!dev) - return -ENOMEM; + if (IS_ERR(addr) || irq < 0) { + ret = -EINVAL; + goto disable_cclk_ret; + } + + /* message ram could be shared */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); + if (!res) { + ret = -ENODEV; + goto disable_cclk_ret; + } + + mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!mram_addr) { + ret = -ENOMEM; + goto disable_cclk_ret; + } + + /* get message ram configuration */ + ret = of_property_read_u32_array(np, "bosch,mram-cfg", + mram_config_vals, + sizeof(mram_config_vals) / 4); + if (ret) { + dev_err(&pdev->dev, "Could not get Message RAM configuration."); + goto disable_cclk_ret; + } + /* Get TX FIFO size + * Defines the total amount of echo buffers for loopback + */ + tx_fifo_size = mram_config_vals[7]; + + /* allocate the m_can device */ + dev = alloc_m_can_dev(pdev, addr, tx_fifo_size); + if (!dev) { + ret = -ENOMEM; + goto disable_cclk_ret; + } priv = netdev_priv(dev); dev->irq = irq; - priv->base = addr; priv->device = &pdev->dev; priv->hclk = hclk; priv->cclk = cclk; priv->can.clock.freq = clk_get_rate(cclk); + priv->mram_base = mram_addr; - ret = m_can_of_parse_mram(pdev, priv); - if (ret) - goto failed_free_dev; + m_can_of_parse_mram(priv, mram_config_vals); platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -1230,13 +1639,22 @@ static int m_can_plat_probe(struct platform_device *pdev) devm_can_led_init(dev); - dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", - KBUILD_MODNAME, priv->base, dev->irq); + dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", + KBUILD_MODNAME, dev->irq, priv->version); - return 0; + /* Probe finished + * Stop clocks. They will be reactivated once the M_CAN device is opened + */ + + goto disable_cclk_ret; failed_free_dev: free_m_can_dev(dev); +disable_cclk_ret: + clk_disable_unprepare(cclk); +disable_hclk_ret: + clk_disable_unprepare(hclk); +failed_ret: return ret; } diff --git a/drivers/net/can/peak_canfd/Kconfig b/drivers/net/can/peak_canfd/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..84b30978a19f60ee979cd55f83fc77818afe6e63 --- /dev/null +++ b/drivers/net/can/peak_canfd/Kconfig @@ -0,0 +1,13 @@ +config CAN_PEAK_PCIEFD + depends on PCI + tristate "PEAK-System PCAN-PCIe FD cards" + ---help--- + This driver adds support for the PEAK-System PCI Express FD + CAN-FD cards family. + These 1x or 2x CAN-FD channels cards offer CAN 2.0 a/b as well as + CAN-FD access to the CAN bus. 
Besides the nominal bitrate of up to + 1 Mbit/s, the data bytes of CAN-FD frames can be transmitted with + up to 12 Mbit/s. A galvanic isolation of the CAN ports protects the + electronics of the card and the respective computer against + disturbances of up to 500 Volts. The PCAN-PCI Express FD can be + operated with ambient temperatures in a range of -40 to +85 °C. diff --git a/drivers/net/can/peak_canfd/Makefile b/drivers/net/can/peak_canfd/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..3dc7a6a0ba59794a096da690d9bc6e91361e7a73 --- /dev/null +++ b/drivers/net/can/peak_canfd/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the PEAK-System CAN-FD IP module drivers +# +obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_pciefd.o +peak_pciefd-y := peak_pciefd_main.o peak_canfd.o diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c new file mode 100644 index 0000000000000000000000000000000000000000..0d57be5ea97bafb2e9a119d28429549a91b80012 --- /dev/null +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -0,0 +1,801 @@ +/* + * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> + * + * Copyright (C) 2016 PEAK System-Technik GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/can.h> +#include <linux/can/dev.h> + +#include "peak_canfd_user.h" + +/* internal IP core cache size (used as default echo skbs max number) */ +#define PCANFD_ECHO_SKB_MAX 24 + +/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */ +static const struct can_bittiming_const peak_canfd_nominal_const = { + .name = "peak_canfd", + .tseg1_min = 1, + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), + .tseg2_min = 1, + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), + .brp_min = 1, + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), + .brp_inc = 1, +}; + +static const struct can_bittiming_const peak_canfd_data_const = { + .name = "peak_canfd", + .tseg1_min = 1, + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), + .tseg2_min = 1, + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), + .brp_min = 1, + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), + .brp_inc = 1, +}; + +static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv) +{ + priv->cmd_len = 0; + return priv; +} + +static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op) +{ + struct pucan_command *cmd; + + if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen) + return NULL; + + cmd = priv->cmd_buffer + priv->cmd_len; + + /* reset all unused bit to default */ + memset(cmd, 0, sizeof(*cmd)); + + cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op); + priv->cmd_len += sizeof(*cmd); + + return cmd; +} + +static int pucan_write_cmd(struct peak_canfd_priv *priv) +{ + int err; + + if (priv->pre_cmd) { + err = priv->pre_cmd(priv); + if (err) + return err; + } + + err = priv->write_cmd(priv); + if (err) + return err; + + if (priv->post_cmd) + err = priv->post_cmd(priv); + + return err; +} + +/* uCAN commands interface functions */ +static int pucan_set_reset_mode(struct peak_canfd_priv *priv) +{ +
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
+ err = pucan_write_cmd(priv);
+ if (!err)
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return err;
+}
+
+static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
+ err = pucan_write_cmd(priv);
+ if (!err)
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return err;
+}
+
+static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
+ const struct can_bittiming *pbt)
+{
+ struct pucan_timing_slow *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
+
+ cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
+ priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);
+ cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
+ cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
+ cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
+
+ cmd->ewl = 96; /* default error warning limit */
+
+ netdev_dbg(priv->ndev,
+ "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+ le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
+ const struct can_bittiming *pbt)
+{
+ struct pucan_timing_fast *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);
+
+ cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
+ cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
+ cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
+ cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));
+
+ netdev_dbg(priv->ndev,
+ "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+ le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
+{
+ struct pucan_std_filter *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
+
+ /* all the 11-bit CAN ID values are represented by one bit in a
+ * 64-row array of 32-bit words: the upper 6 bits of the CAN ID select
+ * the row while the lowest 5 bits select the bit in that row.
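+ * For example, standard CAN ID 0x123 is represented by bit 3
+ * (0x123 & 0x1f) of row 9 (0x123 >> 5).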
+ *
+ * bit filter
+ * 1 passed
+ * 0 discarded
+ */
+
+ /* select the row */
+ cmd->idx = row;
+
+ /* set/unset bits in the row */
+ cmd->mask = cpu_to_le32(mask);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
+{
+ struct pucan_tx_abort *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
+
+ cmd->flags = cpu_to_le16(flags);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
+{
+ struct pucan_wr_err_cnt *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
+
+ cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
+ cmd->tx_counter = 0;
+ cmd->rx_counter = 0;
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
+{
+ struct pucan_options *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
+
+ cmd->options = cpu_to_le16(opt_mask);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
+{
+ struct pucan_options *cmd;
+
+ cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);
+
+ cmd->options = cpu_to_le16(opt_mask);
+
+ return pucan_write_cmd(priv);
+}
+
+static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
+{
+ pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);
+
+ return pucan_write_cmd(priv);
+}
+
+/* handle the reception of one CAN frame */
+static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ const u16 rx_msg_flags = le16_to_cpu(msg->flags);
+ u8 cf_len;
+
+ if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
+ cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg)));
+ else
+ cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
+
+ /* if this frame is the echo of one of our Tx frames, handle it as a
+ * Tx confirmation
+ */
+ if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
+ !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
+ int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->echo_lock, flags);
+ n = can_get_echo_skb(priv->ndev, msg->client);
+ spin_unlock_irqrestore(&priv->echo_lock, flags);
+
+ /* count bytes of the echo instead of skb */
+ stats->tx_bytes += cf_len;
+ stats->tx_packets++;
+
+ if (n) {
+ /* restart tx queue only if a slot is free */
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+ }
+
+ /* otherwise, it should be pushed into rx fifo */
+ if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+ /* CANFD frame case */
+ skb = alloc_canfd_skb(priv->ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
+ cf->flags |= CANFD_BRS;
+
+ if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
+ cf->flags |= CANFD_ESI;
+ } else {
+ /* CAN 2.0 frame case */
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ if (!skb)
+ return -ENOMEM;
+ }
+
+ cf->can_id = le32_to_cpu(msg->can_id);
+ cf->len = cf_len;
+
+ if (rx_msg_flags & PUCAN_MSG_EXT_ID)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (rx_msg_flags & PUCAN_MSG_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, msg->d, cf->len);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+
+ netif_rx(skb);
+
+ return 0;
+}
+
+/* handle rx/tx error counters notification */
+static int pucan_handle_error(struct peak_canfd_priv *priv,
+ struct pucan_error_msg *msg)
+{
+ priv->bec.txerr = msg->tx_err_cnt;
+ priv->bec.rxerr = msg->rx_err_cnt;
+
+ return 0;
+}
+
+/* handle status notification */
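+/* note: a STATUS message is also sent back by the firmware as the
+ * confirmation of the RX_BARRIER command: its reception means that the Tx
+ * path of the channel can now safely be set up.
+ */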
+static int pucan_handle_status(struct peak_canfd_priv *priv, + struct pucan_status_msg *msg) +{ + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ + if (pucan_status_is_rx_barrier(msg)) { + unsigned long flags; + + if (priv->enable_tx_path) { + int err = priv->enable_tx_path(priv); + + if (err) + return err; + } + + /* restart network queue only if echo skb array is free */ + spin_lock_irqsave(&priv->echo_lock, flags); + + if (!priv->can.echo_skb[priv->echo_idx]) { + spin_unlock_irqrestore(&priv->echo_lock, flags); + + netif_wake_queue(ndev); + } else { + spin_unlock_irqrestore(&priv->echo_lock, flags); + } + + return 0; + } + + skb = alloc_can_err_skb(ndev, &cf); + + /* test state error bits according to their priority */ + if (pucan_status_is_busoff(msg)) { + netdev_dbg(ndev, "Bus-off entry status\n"); + priv->can.state = CAN_STATE_BUS_OFF; + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + + } else if (pucan_status_is_passive(msg)) { + netdev_dbg(ndev, "Error passive status\n"); + priv->can.state = CAN_STATE_ERROR_PASSIVE; + priv->can.can_stats.error_passive++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? + CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + cf->data[6] = priv->bec.txerr; + cf->data[7] = priv->bec.rxerr; + } + + } else if (pucan_status_is_warning(msg)) { + netdev_dbg(ndev, "Error warning status\n"); + priv->can.state = CAN_STATE_ERROR_WARNING; + priv->can.can_stats.error_warning++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? 
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
+ }
+
+ } else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
+ /* back to ERROR_ACTIVE */
+ netdev_dbg(ndev, "Error active status\n");
+ can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
+ CAN_STATE_ERROR_ACTIVE);
+ } else {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+
+ return 0;
+}
+
+/* handle uCAN Rx overflow notification */
+static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_rx(skb);
+
+ return 0;
+}
+
+/* handle a single uCAN message */
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg)
+{
+ u16 msg_type = le16_to_cpu(msg->type);
+ int msg_size = le16_to_cpu(msg->size);
+ int err;
+
+ if (!msg_size || !msg_type) {
+ /* null packet found: end of list */
+ goto exit;
+ }
+
+ switch (msg_type) {
+ case PUCAN_MSG_CAN_RX:
+ err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
+ break;
+ case PUCAN_MSG_ERROR:
+ err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
+ break;
+ case PUCAN_MSG_STATUS:
+ err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
+ break;
+ case PUCAN_MSG_CACHE_CRITICAL:
+ err = pucan_handle_cache_critical(priv);
+ break;
+ default:
+ err = 0;
+ }
+
+ if (err < 0)
+ return err;
+
+exit:
+ return msg_size;
+}
+
+/* handle a list of rx_count messages from rx_msg memory address */
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+ struct pucan_rx_msg *msg_list, int msg_count)
+{
+ void *msg_ptr = msg_list;
+ int i, msg_size = 0;
+
+ for (i = 0; i < msg_count; i++) {
+ msg_size = peak_canfd_handle_msg(priv, msg_ptr);
+
+ /* a null packet can be found at the end of a list */
+ if (msg_size <= 0)
+ break;
+
+ msg_ptr += msg_size;
+ }
+
+ if (msg_size < 0)
+ return msg_size;
+
+ return i;
+}
+
+static int peak_canfd_start(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ err = pucan_clr_err_counters(priv);
+ if (err)
+ goto err_exit;
+
+ priv->echo_idx = 0;
+
+ priv->bec.txerr = 0;
+ priv->bec.rxerr = 0;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ err = pucan_set_listen_only_mode(priv);
+ else
+ err = pucan_set_normal_mode(priv);
+
+err_exit:
+ return err;
+}
+
+static void peak_canfd_stop(struct peak_canfd_priv *priv)
+{
+ int err;
+
+ /* go back to RESET mode */
+ err = pucan_set_reset_mode(priv);
+ if (err) {
+ netdev_err(priv->ndev, "channel %u reset failed\n",
+ priv->index);
+ } else {
+ /* abort last Tx (MUST be done in RESET mode only!)
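+ * The PUCAN_TX_ABORT_FLUSH flag asks the IP core to also discard
+ * any frame still pending for transmission.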
*/ + pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH); + } +} + +static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + + switch (mode) { + case CAN_MODE_START: + peak_canfd_start(priv); + netif_wake_queue(ndev); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int peak_canfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + + *bec = priv->bec; + return 0; +} + +static int peak_canfd_open(struct net_device *ndev) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + int i, err = 0; + + err = open_candev(ndev); + if (err) { + netdev_err(ndev, "open_candev() failed, error %d\n", err); + goto err_exit; + } + + err = pucan_set_reset_mode(priv); + if (err) + goto err_close; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO); + else + err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO); + + if (err) + goto err_close; + } + + /* set option: get rx/tx error counters */ + err = pucan_set_options(priv, PUCAN_OPTION_ERROR); + if (err) + goto err_close; + + /* accept all standard CAN ID */ + for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++) + pucan_set_std_filter(priv, i, 0xffffffff); + + err = peak_canfd_start(priv); + if (err) + goto err_close; + + /* receiving the RB status says when Tx path is ready */ + err = pucan_setup_rx_barrier(priv); + if (!err) + goto err_exit; + +err_close: + close_candev(ndev); +err_exit: + return err; +} + +static int peak_canfd_set_bittiming(struct net_device *ndev) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + + return pucan_set_timing_slow(priv, &priv->can.bittiming); +} + +static int peak_canfd_set_data_bittiming(struct net_device *ndev) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + + return pucan_set_timing_fast(priv, &priv->can.data_bittiming); +} + +static int peak_canfd_close(struct net_device *ndev) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + peak_canfd_stop(priv); + close_candev(ndev); + + return 0; +} + +static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct peak_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + struct pucan_tx_msg *msg; + u16 msg_size, msg_flags; + unsigned long flags; + bool should_stop_tx_queue; + int room_left; + u8 can_dlc; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + msg_size = ALIGN(sizeof(*msg) + cf->len, 4); + msg = priv->alloc_tx_msg(priv, msg_size, &room_left); + + /* should never happen except under bus-off condition and (auto-)restart + * mechanism + */ + if (!msg) { + stats->tx_dropped++; + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + msg->size = cpu_to_le16(msg_size); + msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); + msg_flags = 0; + + if (cf->can_id & CAN_EFF_FLAG) { + msg_flags |= PUCAN_MSG_EXT_ID; + msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK); + } else { + msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK); + } + + if (can_is_canfd_skb(skb)) { + /* CAN FD frame format */ + can_dlc = can_len2dlc(cf->len); + + msg_flags |= PUCAN_MSG_EXT_DATA_LEN; + + if (cf->flags & CANFD_BRS) + msg_flags |= PUCAN_MSG_BITRATE_SWITCH; + + if (cf->flags & CANFD_ESI) + msg_flags |= PUCAN_MSG_ERROR_STATE_IND; + } 
else { + /* CAN 2.0 frame format */ + can_dlc = cf->len; + + if (cf->can_id & CAN_RTR_FLAG) + msg_flags |= PUCAN_MSG_RTR; + } + + /* always ask loopback for echo management */ + msg_flags |= PUCAN_MSG_LOOPED_BACK; + + /* set driver specific bit to differentiate with application loopback */ + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + msg_flags |= PUCAN_MSG_SELF_RECEIVE; + + msg->flags = cpu_to_le16(msg_flags); + msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc); + memcpy(msg->d, cf->data, cf->len); + + /* struct msg client field is used as an index in the echo skbs ring */ + msg->client = priv->echo_idx; + + spin_lock_irqsave(&priv->echo_lock, flags); + + /* prepare and save echo skb in internal slot */ + can_put_echo_skb(skb, ndev, priv->echo_idx); + + /* move echo index to the next slot */ + priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max; + + /* if next slot is not free, stop network queue (no slot free in echo + * skb ring means that the controller did not write these frames on + * the bus: no need to continue). + */ + should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); + + spin_unlock_irqrestore(&priv->echo_lock, flags); + + /* write the skb on the interface */ + priv->write_tx_msg(priv, msg); + + /* stop network tx queue if not enough room to save one more msg too */ + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + should_stop_tx_queue |= (room_left < + (sizeof(*msg) + CANFD_MAX_DLEN)); + else + should_stop_tx_queue |= (room_left < + (sizeof(*msg) + CAN_MAX_DLEN)); + + if (should_stop_tx_queue) + netif_stop_queue(ndev); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops peak_canfd_netdev_ops = { + .ndo_open = peak_canfd_open, + .ndo_stop = peak_canfd_close, + .ndo_start_xmit = peak_canfd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, + int echo_skb_max) +{ + struct net_device *ndev; + struct peak_canfd_priv *priv; + + /* we DO support local echo */ + if (echo_skb_max < 0) + echo_skb_max = PCANFD_ECHO_SKB_MAX; + + /* allocate the candev object */ + ndev = alloc_candev(sizeof_priv, echo_skb_max); + if (!ndev) + return NULL; + + priv = netdev_priv(ndev); + + /* complete now socket-can initialization side */ + priv->can.state = CAN_STATE_STOPPED; + priv->can.bittiming_const = &peak_canfd_nominal_const; + priv->can.data_bittiming_const = &peak_canfd_data_const; + + priv->can.do_set_mode = peak_canfd_set_mode; + priv->can.do_get_berr_counter = peak_canfd_get_berr_counter; + priv->can.do_set_bittiming = peak_canfd_set_bittiming; + priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO | + CAN_CTRLMODE_BERR_REPORTING; + + priv->ndev = ndev; + priv->index = index; + priv->cmd_len = 0; + spin_lock_init(&priv->echo_lock); + + ndev->flags |= IFF_ECHO; + ndev->netdev_ops = &peak_canfd_netdev_ops; + ndev->dev_id = index; + + return ndev; +} diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h new file mode 100644 index 0000000000000000000000000000000000000000..bf6de47f69c2acb7754142a842b71204baea9306 --- /dev/null +++ b/drivers/net/can/peak_canfd/peak_canfd_user.h @@ -0,0 +1,55 @@ +/* + * CAN driver for PEAK System micro-CAN based adapters + * + * Copyright (C) 2003-2011 PEAK System-Technik GmbH + * Copyright (C) 2011-2013 Stephane Grosjean + * + * 
This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef PEAK_CANFD_USER_H +#define PEAK_CANFD_USER_H + +#include + +#define PCANFD_ECHO_SKB_DEF -1 + +/* data structure private to each uCAN interface */ +struct peak_canfd_priv { + struct can_priv can; /* socket-can private data */ + struct net_device *ndev; /* network device */ + int index; /* channel index */ + + struct can_berr_counter bec; /* rx/tx err counters */ + + int echo_idx; /* echo skb free slot index */ + spinlock_t echo_lock; + + int cmd_len; + void *cmd_buffer; + int cmd_maxlen; + + int (*pre_cmd)(struct peak_canfd_priv *priv); + int (*write_cmd)(struct peak_canfd_priv *priv); + int (*post_cmd)(struct peak_canfd_priv *priv); + + int (*enable_tx_path)(struct peak_canfd_priv *priv); + void *(*alloc_tx_msg)(struct peak_canfd_priv *priv, u16 msg_size, + int *room_left); + int (*write_tx_msg)(struct peak_canfd_priv *priv, + struct pucan_tx_msg *msg); +}; + +struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, + int echo_skb_max); +int peak_canfd_handle_msg(struct peak_canfd_priv *priv, + struct pucan_rx_msg *msg); +int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, + struct pucan_rx_msg *rx_msg, int rx_count); +#endif diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c new file mode 100644 index 0000000000000000000000000000000000000000..51c2d182a33ae5b2ae96fa7df772ac3ab5f6a7cc --- /dev/null +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -0,0 +1,842 @@ +/* + * Copyright (C) 2007, 2011 Wolfgang Grandegger + * Copyright (C) 2012 Stephane Grosjean + * + * Derived from the PCAN project file driver/src/pcan_pci.c: + * + * Copyright (C) 2001-2006 PEAK System-Technik GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "peak_canfd_user.h" + +MODULE_AUTHOR("Stephane Grosjean "); +MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards"); +MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards"); +MODULE_LICENSE("GPL v2"); + +#define PCIEFD_DRV_NAME "peak_pciefd" + +#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */ +#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */ + +/* PEAK PCIe board access description */ +#define PCIEFD_BAR0_SIZE (64 * 1024) +#define PCIEFD_RX_DMA_SIZE (4 * 1024) +#define PCIEFD_TX_DMA_SIZE (4 * 1024) + +#define PCIEFD_TX_PAGE_SIZE (2 * 1024) + +/* System Control Registers */ +#define PCIEFD_REG_SYS_CTL_SET 0x0000 /* set bits */ +#define PCIEFD_REG_SYS_CTL_CLR 0x0004 /* clear bits */ + +/* Version info registers */ +#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ +#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ + +/* System Control Registers Bits */ +#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ +#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ + +/* CAN-FD channel addresses */ +#define PCIEFD_CANX_OFF(c) (((c) + 1) * 0x1000) + +#define PCIEFD_ECHO_SKB_MAX PCANFD_ECHO_SKB_DEF + +/* CAN-FD channel registers */ +#define PCIEFD_REG_CAN_MISC 0x0000 /* Misc. control */ +#define PCIEFD_REG_CAN_CLK_SEL 0x0008 /* Clock selector */ +#define PCIEFD_REG_CAN_CMD_PORT_L 0x0010 /* 64-bits command port */ +#define PCIEFD_REG_CAN_CMD_PORT_H 0x0014 +#define PCIEFD_REG_CAN_TX_REQ_ACC 0x0020 /* Tx request accumulator */ +#define PCIEFD_REG_CAN_TX_CTL_SET 0x0030 /* Tx control set register */ +#define PCIEFD_REG_CAN_TX_CTL_CLR 0x0038 /* Tx control clear register */ +#define PCIEFD_REG_CAN_TX_DMA_ADDR_L 0x0040 /* 64-bits addr for Tx DMA */ +#define PCIEFD_REG_CAN_TX_DMA_ADDR_H 0x0044 +#define PCIEFD_REG_CAN_RX_CTL_SET 0x0050 /* Rx control set register */ +#define PCIEFD_REG_CAN_RX_CTL_CLR 0x0058 /* Rx control clear register */ +#define PCIEFD_REG_CAN_RX_CTL_WRT 0x0060 /* Rx control write register */ +#define PCIEFD_REG_CAN_RX_CTL_ACK 0x0068 /* Rx control ACK register */ +#define PCIEFD_REG_CAN_RX_DMA_ADDR_L 0x0070 /* 64-bits addr for Rx DMA */ +#define PCIEFD_REG_CAN_RX_DMA_ADDR_H 0x0074 + +/* CAN-FD channel misc register bits */ +#define CANFD_MISC_TS_RST 0x00000001 /* timestamp cnt rst */ + +/* CAN-FD channel Clock SELector Source & DIVider */ +#define CANFD_CLK_SEL_DIV_MASK 0x00000007 +#define CANFD_CLK_SEL_DIV_60MHZ 0x00000000 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_40MHZ 0x00000001 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_30MHZ 0x00000002 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_24MHZ 0x00000003 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_20MHZ 0x00000004 /* SRC=240MHz only */ + +#define CANFD_CLK_SEL_SRC_MASK 0x00000008 /* 0=80MHz, 1=240MHz */ +#define CANFD_CLK_SEL_SRC_240MHZ 0x00000008 +#define CANFD_CLK_SEL_SRC_80MHZ (~CANFD_CLK_SEL_SRC_240MHZ & \ + CANFD_CLK_SEL_SRC_MASK) + +#define CANFD_CLK_SEL_20MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_20MHZ) +#define CANFD_CLK_SEL_24MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_24MHZ) +#define CANFD_CLK_SEL_30MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_30MHZ) +#define CANFD_CLK_SEL_40MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_40MHZ) +#define CANFD_CLK_SEL_60MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_60MHZ) +#define CANFD_CLK_SEL_80MHZ (CANFD_CLK_SEL_SRC_80MHZ) + +/* CAN-FD channel Rx/Tx 
control register bits */ +#define CANFD_CTL_UNC_BIT 0x00010000 /* Uncached DMA mem */ +#define CANFD_CTL_RST_BIT 0x00020000 /* reset DMA action */ +#define CANFD_CTL_IEN_BIT 0x00040000 /* IRQ enable */ + +/* Rx IRQ Count and Time Limits */ +#define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */ +#define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */ + +#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD) + +/* Tx anticipation window (link logical address should be aligned on 2K + * boundary) + */ +#define PCIEFD_TX_PAGE_COUNT (PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE) + +#define CANFD_MSG_LNK_TX 0x1001 /* Tx msgs link */ + +/* 32-bits IRQ status fields, heading Rx DMA area */ +static inline int pciefd_irq_tag(u32 irq_status) +{ + return irq_status & 0x0000000f; +} + +static inline int pciefd_irq_rx_cnt(u32 irq_status) +{ + return (irq_status & 0x000007f0) >> 4; +} + +static inline int pciefd_irq_is_lnk(u32 irq_status) +{ + return irq_status & 0x00010000; +} + +/* Rx record */ +struct pciefd_rx_dma { + __le32 irq_status; + __le32 sys_time_low; + __le32 sys_time_high; + struct pucan_rx_msg msg[0]; +} __packed __aligned(4); + +/* Tx Link record */ +struct pciefd_tx_link { + __le16 size; + __le16 type; + __le32 laddr_lo; + __le32 laddr_hi; +} __packed __aligned(4); + +/* Tx page descriptor */ +struct pciefd_page { + void *vbase; /* page virtual address */ + dma_addr_t lbase; /* page logical address */ + u32 offset; + u32 size; +}; + +#define CANFD_IRQ_SET 0x00000001 +#define CANFD_TX_PATH_SET 0x00000002 + +/* CAN-FD channel object */ +struct pciefd_board; +struct pciefd_can { + struct peak_canfd_priv ucan; /* must be the first member */ + void __iomem *reg_base; /* channel config base addr */ + struct pciefd_board *board; /* reverse link */ + + struct pucan_command pucan_cmd; /* command buffer */ + + dma_addr_t rx_dma_laddr; /* DMA virtual and logical addr */ + void *rx_dma_vaddr; /* for Rx and Tx areas */ + dma_addr_t tx_dma_laddr; + void *tx_dma_vaddr; + + struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT]; + u16 tx_pages_free; /* free Tx pages counter */ + u16 tx_page_index; /* current page used for Tx */ + spinlock_t tx_lock; + + u32 irq_status; + u32 irq_tag; /* next irq tag */ +}; + +/* PEAK-PCIe FD board object */ +struct pciefd_board { + void __iomem *reg_base; + struct pci_dev *pci_dev; + int can_count; + spinlock_t cmd_lock; /* 64-bits cmds must be atomic */ + struct pciefd_can *can[0]; /* array of network devices */ +}; + +/* supported device ids. 
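+ * a single entry matches every PCAN-PCIe FD variant: the number of CAN-FD
+ * channels is derived from the PCI subsystem id in peak_pciefd_probe().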
*/ +static const struct pci_device_id peak_pciefd_tbl[] = { + {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl); + +/* read a 32 bits value from a SYS block register */ +static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg) +{ + return readl(priv->reg_base + reg); +} + +/* write a 32 bits value into a SYS block register */ +static inline void pciefd_sys_writereg(const struct pciefd_board *priv, + u32 val, u16 reg) +{ + writel(val, priv->reg_base + reg); +} + +/* read a 32 bits value from CAN-FD block register */ +static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg) +{ + return readl(priv->reg_base + reg); +} + +/* write a 32 bits value into a CAN-FD block register */ +static inline void pciefd_can_writereg(const struct pciefd_can *priv, + u32 val, u16 reg) +{ + writel(val, priv->reg_base + reg); +} + +/* give a channel logical Rx DMA address to the board */ +static void pciefd_can_setup_rx_dma(struct pciefd_can *priv) +{ +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32); +#else + const u32 dma_addr_h = 0; +#endif + + /* (DMA must be reset for Rx) */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET); + + /* write the logical address of the Rx DMA area for this channel */ + pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr, + PCIEFD_REG_CAN_RX_DMA_ADDR_L); + pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H); + + /* also indicates that Rx DMA is cacheable */ + pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_RX_CTL_CLR); +} + +/* clear channel logical Rx DMA address from the board */ +static void pciefd_can_clear_rx_dma(struct pciefd_can *priv) +{ + /* DMA must be reset for Rx */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET); + + /* clear the logical address of the Rx DMA area for this channel */ + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L); + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H); +} + +/* give a channel logical Tx DMA address to the board */ +static void pciefd_can_setup_tx_dma(struct pciefd_can *priv) +{ +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32); +#else + const u32 dma_addr_h = 0; +#endif + + /* (DMA must be reset for Tx) */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET); + + /* write the logical address of the Tx DMA area for this channel */ + pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr, + PCIEFD_REG_CAN_TX_DMA_ADDR_L); + pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H); + + /* also indicates that Tx DMA is cacheable */ + pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_TX_CTL_CLR); +} + +/* clear channel logical Tx DMA address from the board */ +static void pciefd_can_clear_tx_dma(struct pciefd_can *priv) +{ + /* DMA must be reset for Tx */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET); + + /* clear the logical address of the Tx DMA area for this channel */ + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L); + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H); +} + +static void pciefd_can_ack_rx_dma(struct pciefd_can *priv) +{ + /* read value of current IRQ tag and inc it for next one */ + priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr); + priv->irq_tag++; + priv->irq_tag &= 0xf; + + /* write the next IRQ tag for this CAN */ + 
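/* the IRQ tag is only 4 bits wide and wraps from 15 back to 0: the
+ * interrupt handler accepts an interrupt as ours only when the tag it
+ * reads from the Rx DMA area matches this value.
+ */
+ 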
pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK); +} + +/* IRQ handler */ +static irqreturn_t pciefd_irq_handler(int irq, void *arg) +{ + struct pciefd_can *priv = arg; + struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr; + + /* INTA mode only to sync with PCIe transaction */ + if (!pci_dev_msi_enabled(priv->board->pci_dev)) + (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); + + /* read IRQ status from the first 32-bits of the Rx DMA area */ + priv->irq_status = le32_to_cpu(rx_dma->irq_status); + + /* check if this (shared) IRQ is for this CAN */ + if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag) + return IRQ_NONE; + + /* handle rx messages (if any) */ + peak_canfd_handle_msgs_list(&priv->ucan, + rx_dma->msg, + pciefd_irq_rx_cnt(priv->irq_status)); + + /* handle tx link interrupt (if any) */ + if (pciefd_irq_is_lnk(priv->irq_status)) { + unsigned long flags; + + spin_lock_irqsave(&priv->tx_lock, flags); + priv->tx_pages_free++; + spin_unlock_irqrestore(&priv->tx_lock, flags); + + /* wake producer up */ + netif_wake_queue(priv->ucan.ndev); + } + + /* re-enable Rx DMA transfer for this CAN */ + pciefd_can_ack_rx_dma(priv); + + return IRQ_HANDLED; +} + +static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan) +{ + struct pciefd_can *priv = (struct pciefd_can *)ucan; + int i; + + /* initialize the Tx pages descriptors */ + priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1; + priv->tx_page_index = 0; + + priv->tx_pages[0].vbase = priv->tx_dma_vaddr; + priv->tx_pages[0].lbase = priv->tx_dma_laddr; + + for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) { + priv->tx_pages[i].offset = 0; + priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE - + sizeof(struct pciefd_tx_link); + if (i) { + priv->tx_pages[i].vbase = + priv->tx_pages[i - 1].vbase + + PCIEFD_TX_PAGE_SIZE; + priv->tx_pages[i].lbase = + priv->tx_pages[i - 1].lbase + + PCIEFD_TX_PAGE_SIZE; + } + } + + /* setup Tx DMA addresses into IP core */ + pciefd_can_setup_tx_dma(priv); + + /* start (TX_RST=0) Tx Path */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_CLR); + + return 0; +} + +/* board specific CANFD command pre-processing */ +static int pciefd_pre_cmd(struct peak_canfd_priv *ucan) +{ + struct pciefd_can *priv = (struct pciefd_can *)ucan; + u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd); + int err; + + /* pre-process command */ + switch (cmd) { + case PUCAN_CMD_NORMAL_MODE: + case PUCAN_CMD_LISTEN_ONLY_MODE: + + if (ucan->can.state == CAN_STATE_BUS_OFF) + break; + + /* going into operational mode: setup IRQ handler */ + err = request_irq(priv->board->pci_dev->irq, + pciefd_irq_handler, + IRQF_SHARED, + PCIEFD_DRV_NAME, + priv); + if (err) + return err; + + /* setup Rx DMA address */ + pciefd_can_setup_rx_dma(priv); + + /* setup max count of msgs per IRQ */ + pciefd_can_writereg(priv, (CANFD_CTL_IRQ_TL_DEF) << 8 | + CANFD_CTL_IRQ_CL_DEF, + PCIEFD_REG_CAN_RX_CTL_WRT); + + /* clear DMA RST for Rx (Rx start) */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_RX_CTL_CLR); + + /* reset timestamps */ + pciefd_can_writereg(priv, !CANFD_MISC_TS_RST, + PCIEFD_REG_CAN_MISC); + + /* do an initial ACK */ + pciefd_can_ack_rx_dma(priv); + + /* enable IRQ for this CAN after having set next irq_tag */ + pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, + PCIEFD_REG_CAN_RX_CTL_SET); + + /* Tx path will be setup as soon as RX_BARRIER is received */ + break; + default: + break; + } + + return 0; +} + +/* write a command */ +static int pciefd_write_cmd(struct peak_canfd_priv *ucan) +{ + struct 
pciefd_can *priv = (struct pciefd_can *)ucan;
+ unsigned long flags;
+
+ /* 64-bits command is atomic */
+ spin_lock_irqsave(&priv->board->cmd_lock, flags);
+
+ pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer,
+ PCIEFD_REG_CAN_CMD_PORT_L);
+ pciefd_can_writereg(priv, *(u32 *)(ucan->cmd_buffer + 4),
+ PCIEFD_REG_CAN_CMD_PORT_H);
+
+ spin_unlock_irqrestore(&priv->board->cmd_lock, flags);
+
+ return 0;
+}
+
+/* board specific CANFD command post-processing */
+static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+ switch (cmd) {
+ case PUCAN_CMD_RESET_MODE:
+
+ if (ucan->can.state == CAN_STATE_STOPPED)
+ break;
+
+ /* controller now in reset mode: */
+
+ /* stop and reset DMA addresses in Tx/Rx engines */
+ pciefd_can_clear_tx_dma(priv);
+ pciefd_can_clear_rx_dma(priv);
+
+ /* disable IRQ for this CAN */
+ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+ PCIEFD_REG_CAN_RX_CTL_CLR);
+
+ free_irq(priv->board->pci_dev->irq, priv);
+
+ ucan->can.state = CAN_STATE_STOPPED;
+
+ break;
+ }
+
+ return 0;
+}
+
+static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size,
+ int *room_left)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+ unsigned long flags;
+ void *msg;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ if (page->offset + msg_size > page->size) {
+ struct pciefd_tx_link *lk;
+
+ /* not enough space in this page: try another one */
+ if (!priv->tx_pages_free) {
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* Tx overflow */
+ return NULL;
+ }
+
+ priv->tx_pages_free--;
+
+ /* keep address of the very last free slot of current page */
+ lk = page->vbase + page->offset;
+
+ /* next, move on to a new free page */
+ priv->tx_page_index = (priv->tx_page_index + 1) %
+ PCIEFD_TX_PAGE_COUNT;
+ page = priv->tx_pages + priv->tx_page_index;
+
+ /* put the link record to this new page at the end of the previous one */
+ lk->size = cpu_to_le16(sizeof(*lk));
+ lk->type = cpu_to_le16(CANFD_MSG_LNK_TX);
+ lk->laddr_lo = cpu_to_le32(page->lbase);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ lk->laddr_hi = cpu_to_le32(page->lbase >> 32);
+#else
+ lk->laddr_hi = 0;
+#endif
+ /* next msgs will be put from the beginning of this new page */
+ page->offset = 0;
+ }
+
+ *room_left = priv->tx_pages_free * page->size;
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ msg = page->vbase + page->offset;
+
+ /* give back room left in the tx ring */
+ *room_left += page->size - (page->offset + msg_size);
+
+ return msg;
+}
+
+static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan,
+ struct pucan_tx_msg *msg)
+{
+ struct pciefd_can *priv = (struct pciefd_can *)ucan;
+ struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+
+ /* this slot is now reserved for writing the frame */
+ page->offset += le16_to_cpu(msg->size);
+
+ /* tell the board a frame has been written in Tx DMA area */
+ pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC);
+
+ return 0;
+}
+
+/* probe for CAN-FD channel #pciefd_board->can_count */
+static int pciefd_can_probe(struct pciefd_board *pciefd)
+{
+ struct net_device *ndev;
+ struct pciefd_can *priv;
+ u32 clk;
+ int err;
+
+ /* allocate the candev object with default size of echo skbs ring */
+ ndev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count,
+ PCIEFD_ECHO_SKB_MAX);
+ if (!ndev) {
+ dev_err(&pciefd->pci_dev->dev,
+ "failed to alloc candev object\n");
+ goto failure;
+ }
+
+ priv = netdev_priv(ndev);
+
+ /* fill-in candev private object: */
+
+ /* setup PCIe-FD own callbacks */
+ priv->ucan.pre_cmd = pciefd_pre_cmd;
+ priv->ucan.write_cmd = pciefd_write_cmd;
+ priv->ucan.post_cmd = pciefd_post_cmd;
+ priv->ucan.enable_tx_path = pciefd_enable_tx_path;
+ priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg;
+ priv->ucan.write_tx_msg = pciefd_write_tx_msg;
+
+ /* setup PCIe-FD own command buffer */
+ priv->ucan.cmd_buffer = &priv->pucan_cmd;
+ priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd);
+
+ priv->board = pciefd;
+
+ /* CAN config regs block address */
+ priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index);
+
+ /* allocate non-cacheable DMA'able 4KB memory area for Rx */
+ priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+ PCIEFD_RX_DMA_SIZE,
+ &priv->rx_dma_laddr,
+ GFP_KERNEL);
+ if (!priv->rx_dma_vaddr) {
+ dev_err(&pciefd->pci_dev->dev,
+ "Rx dmam_alloc_coherent(%u) failure\n",
+ PCIEFD_RX_DMA_SIZE);
+ goto err_free_candev;
+ }
+
+ /* allocate non-cacheable DMA'able 4KB memory area for Tx */
+ priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+ PCIEFD_TX_DMA_SIZE,
+ &priv->tx_dma_laddr,
+ GFP_KERNEL);
+ if (!priv->tx_dma_vaddr) {
+ dev_err(&pciefd->pci_dev->dev,
+ "Tx dmam_alloc_coherent(%u) failure\n",
+ PCIEFD_TX_DMA_SIZE);
+ goto err_free_candev;
+ }
+
+ /* CAN clock in RST mode */
+ pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC);
+
+ /* read current clock value */
+ clk = pciefd_can_readreg(priv, PCIEFD_REG_CAN_CLK_SEL);
+ switch (clk) {
+ case CANFD_CLK_SEL_20MHZ:
+ priv->ucan.can.clock.freq = 20 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_24MHZ:
+ priv->ucan.can.clock.freq = 24 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_30MHZ:
+ priv->ucan.can.clock.freq = 30 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_40MHZ:
+ priv->ucan.can.clock.freq = 40 * 1000 * 1000;
+ break;
+ case CANFD_CLK_SEL_60MHZ:
+ priv->ucan.can.clock.freq = 60 * 1000 * 1000;
+ break;
+ default:
+ pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
+ PCIEFD_REG_CAN_CLK_SEL);
+
+ /* fall through */
+ case CANFD_CLK_SEL_80MHZ:
+ priv->ucan.can.clock.freq = 80 * 1000 * 1000;
+ break;
+ }
+
+ ndev->irq = pciefd->pci_dev->irq;
+
+ SET_NETDEV_DEV(ndev, &pciefd->pci_dev->dev);
+
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pciefd->pci_dev->dev,
+ "couldn't register CAN device: %d\n", err);
+ goto err_free_candev;
+ }
+
+ spin_lock_init(&priv->tx_lock);
+
+ /* save the object address in the board structure */
+ pciefd->can[pciefd->can_count] = priv;
+
+ dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
+ ndev->name, priv->reg_base, pciefd->pci_dev->irq);
+
+ return 0;
+
+err_free_candev:
+ free_candev(ndev);
+
+failure:
+ return -ENOMEM;
+}
+
+/* remove a CAN-FD channel by releasing all of its resources */
+static void pciefd_can_remove(struct pciefd_can *priv)
+{
+ /* unregister (close) the can device to go back to RST mode first */
+ unregister_candev(priv->ucan.ndev);
+
+ /* finally, free the candev object */
+ free_candev(priv->ucan.ndev);
+}
+
+/* remove all CAN-FD channels by releasing their own resources */
+static void pciefd_can_remove_all(struct pciefd_board *pciefd)
+{
+ while (pciefd->can_count > 0)
+ pciefd_can_remove(pciefd->can[--pciefd->can_count]);
+}
+
+/* probe for the entire device */
+static int peak_pciefd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct pciefd_board *pciefd;
+ int err, can_count;
+ u16 sub_sys_id;
+ u8 hw_ver_major;
+ u8
hw_ver_minor; + u8 hw_ver_sub; + u32 v2; + + err = pci_enable_device(pdev); + if (err) + return err; + err = pci_request_regions(pdev, PCIEFD_DRV_NAME); + if (err) + goto err_disable_pci; + + /* the number of channels depends on sub-system id */ + err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id); + if (err) + goto err_release_regions; + + dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", + pdev->vendor, pdev->device, sub_sys_id); + + if (sub_sys_id >= 0x0012) + can_count = 4; + else if (sub_sys_id >= 0x0010) + can_count = 3; + else if (sub_sys_id >= 0x0004) + can_count = 2; + else + can_count = 1; + + /* allocate board structure object */ + pciefd = devm_kzalloc(&pdev->dev, sizeof(*pciefd) + + can_count * sizeof(*pciefd->can), + GFP_KERNEL); + if (!pciefd) { + err = -ENOMEM; + goto err_release_regions; + } + + /* initialize the board structure */ + pciefd->pci_dev = pdev; + spin_lock_init(&pciefd->cmd_lock); + + /* save the PCI BAR0 virtual address for further system regs access */ + pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE); + if (!pciefd->reg_base) { + dev_err(&pdev->dev, "failed to map PCI resource #0\n"); + err = -ENOMEM; + goto err_release_regions; + } + + /* read the firmware version number */ + v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2); + + hw_ver_major = (v2 & 0x0000f000) >> 12; + hw_ver_minor = (v2 & 0x00000f00) >> 8; + hw_ver_sub = (v2 & 0x000000f0) >> 4; + + dev_info(&pdev->dev, + "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, + hw_ver_major, hw_ver_minor, hw_ver_sub); + + /* stop system clock */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, + PCIEFD_REG_SYS_CTL_CLR); + + pci_set_master(pdev); + + /* create now the corresponding channels objects */ + while (pciefd->can_count < can_count) { + err = pciefd_can_probe(pciefd); + if (err) + goto err_free_canfd; + + pciefd->can_count++; + } + + /* set system timestamps counter in RST mode */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, + PCIEFD_REG_SYS_CTL_SET); + + /* wait a bit (read cycle) */ + (void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1); + + /* free all clocks */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, + PCIEFD_REG_SYS_CTL_CLR); + + /* start system clock */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, + PCIEFD_REG_SYS_CTL_SET); + + /* remember the board structure address in the device user data */ + pci_set_drvdata(pdev, pciefd); + + return 0; + +err_free_canfd: + pciefd_can_remove_all(pciefd); + + pci_iounmap(pdev, pciefd->reg_base); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pci: + pci_disable_device(pdev); + + return err; +} + +/* free the board structure object, as well as its resources: */ +static void peak_pciefd_remove(struct pci_dev *pdev) +{ + struct pciefd_board *pciefd = pci_get_drvdata(pdev); + + /* release CAN-FD channels resources */ + pciefd_can_remove_all(pciefd); + + pci_iounmap(pdev, pciefd->reg_base); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver peak_pciefd_driver = { + .name = PCIEFD_DRV_NAME, + .id_table = peak_pciefd_tbl, + .probe = peak_pciefd_probe, + .remove = peak_pciefd_remove, +}; + +module_pci_driver(peak_pciefd_driver); diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig index 148cae5871a6baffa17b43f9e6104a6422ad3f09..8f2e0dd7b7565769c768543e1097caff53d555ff 100644 --- a/drivers/net/can/spi/Kconfig +++ b/drivers/net/can/spi/Kconfig @@ -1,6 +1,12 @@ menu "CAN SPI interfaces" depends on SPI +config CAN_HI311X + tristate 
"Holt HI311x SPI CAN controllers" + depends on CAN_DEV && SPI && HAS_DMA + ---help--- + Driver for the Holt HI311x SPI CAN controllers. + config CAN_MCP251X tristate "Microchip MCP251x SPI CAN controllers" depends on HAS_DMA diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile index 0e86040cdd8ce9c2ca037ed27c234dd362812a5f..f59fa37310736531f6927ff3b75e8503f481de68 100644 --- a/drivers/net/can/spi/Makefile +++ b/drivers/net/can/spi/Makefile @@ -3,4 +3,5 @@ # +obj-$(CONFIG_CAN_HI311X) += hi311x.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c new file mode 100644 index 0000000000000000000000000000000000000000..5590c559a8ca5305d4eb2ecac18fc8d406afebad --- /dev/null +++ b/drivers/net/can/spi/hi311x.c @@ -0,0 +1,1076 @@ +/* CAN bus driver for Holt HI3110 CAN Controller with SPI Interface + * + * Copyright(C) Timesys Corporation 2016 + * + * Based on Microchip 251x CAN Controller (mcp251x) Linux kernel driver + * Copyright 2009 Christian Pellegrin EVOL S.r.l. + * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved. + * Copyright 2006 Arcom Control Systems Ltd. + * + * Based on CAN bus driver for the CCAN controller written by + * - Sascha Hauer, Marc Kleine-Budde, Pengutronix + * - Simon Kallweit, intefo AG + * Copyright 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HI3110_MASTER_RESET 0x56 +#define HI3110_READ_CTRL0 0xD2 +#define HI3110_READ_CTRL1 0xD4 +#define HI3110_READ_STATF 0xE2 +#define HI3110_WRITE_CTRL0 0x14 +#define HI3110_WRITE_CTRL1 0x16 +#define HI3110_WRITE_INTE 0x1C +#define HI3110_WRITE_BTR0 0x18 +#define HI3110_WRITE_BTR1 0x1A +#define HI3110_READ_BTR0 0xD6 +#define HI3110_READ_BTR1 0xD8 +#define HI3110_READ_INTF 0xDE +#define HI3110_READ_ERR 0xDC +#define HI3110_READ_FIFO_WOTIME 0x48 +#define HI3110_WRITE_FIFO 0x12 +#define HI3110_READ_MESSTAT 0xDA +#define HI3110_READ_REC 0xEA +#define HI3110_READ_TEC 0xEC + +#define HI3110_CTRL0_MODE_MASK (7 << 5) +#define HI3110_CTRL0_NORMAL_MODE (0 << 5) +#define HI3110_CTRL0_LOOPBACK_MODE (1 << 5) +#define HI3110_CTRL0_MONITOR_MODE (2 << 5) +#define HI3110_CTRL0_SLEEP_MODE (3 << 5) +#define HI3110_CTRL0_INIT_MODE (4 << 5) + +#define HI3110_CTRL1_TXEN BIT(7) + +#define HI3110_INT_RXTMP BIT(7) +#define HI3110_INT_RXFIFO BIT(6) +#define HI3110_INT_TXCPLT BIT(5) +#define HI3110_INT_BUSERR BIT(4) +#define HI3110_INT_MCHG BIT(3) +#define HI3110_INT_WAKEUP BIT(2) +#define HI3110_INT_F1MESS BIT(1) +#define HI3110_INT_F0MESS BIT(0) + +#define HI3110_ERR_BUSOFF BIT(7) +#define HI3110_ERR_TXERRP BIT(6) +#define HI3110_ERR_RXERRP BIT(5) +#define HI3110_ERR_BITERR BIT(4) +#define HI3110_ERR_FRMERR BIT(3) +#define HI3110_ERR_CRCERR BIT(2) +#define HI3110_ERR_ACKERR BIT(1) +#define HI3110_ERR_STUFERR BIT(0) +#define HI3110_ERR_PROTOCOL_MASK (0x1F) +#define HI3110_ERR_PASSIVE_MASK (0x60) + +#define HI3110_STAT_RXFMTY BIT(1) +#define HI3110_STAT_BUSOFF BIT(2) +#define HI3110_STAT_ERRP BIT(3) +#define HI3110_STAT_ERRW BIT(4) + +#define HI3110_BTR0_SJW_SHIFT 6 +#define HI3110_BTR0_BRP_SHIFT 0 + +#define HI3110_BTR1_SAMP_3PERBIT (1 << 7) +#define HI3110_BTR1_SAMP_1PERBIT (0 << 7) +#define 
HI3110_BTR1_TSEG2_SHIFT 4
+#define HI3110_BTR1_TSEG1_SHIFT 0
+
+#define HI3110_FIFO_WOTIME_TAG_OFF 0
+#define HI3110_FIFO_WOTIME_ID_OFF 1
+#define HI3110_FIFO_WOTIME_DLC_OFF 5
+#define HI3110_FIFO_WOTIME_DAT_OFF 6
+
+#define HI3110_FIFO_WOTIME_TAG_IDE BIT(7)
+#define HI3110_FIFO_WOTIME_ID_RTR BIT(0)
+
+#define HI3110_FIFO_TAG_OFF 0
+#define HI3110_FIFO_ID_OFF 1
+#define HI3110_FIFO_STD_DLC_OFF 3
+#define HI3110_FIFO_STD_DATA_OFF 4
+#define HI3110_FIFO_EXT_DLC_OFF 5
+#define HI3110_FIFO_EXT_DATA_OFF 6
+
+#define HI3110_CAN_MAX_DATA_LEN 8
+#define HI3110_RX_BUF_LEN 15
+#define HI3110_TX_STD_BUF_LEN 12
+#define HI3110_TX_EXT_BUF_LEN 14
+#define HI3110_CAN_FRAME_MAX_BITS 128
+#define HI3110_EFF_FLAGS 0x18 /* IDE + SRR */
+
+#define HI3110_TX_ECHO_SKB_MAX 1
+
+#define HI3110_OST_DELAY_MS (10)
+
+#define DEVICE_NAME "hi3110"
+
+static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */
+module_param(hi3110_enable_dma, int, 0444);
+MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)");
+
+static const struct can_bittiming_const hi3110_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+enum hi3110_model {
+ CAN_HI3110_HI3110 = 0x3110,
+};
+
+struct hi3110_priv {
+ struct can_priv can;
+ struct net_device *net;
+ struct spi_device *spi;
+ enum hi3110_model model;
+
+ struct mutex hi3110_lock; /* SPI device lock */
+
+ u8 *spi_tx_buf;
+ u8 *spi_rx_buf;
+ dma_addr_t spi_tx_dma;
+ dma_addr_t spi_rx_dma;
+
+ struct sk_buff *tx_skb;
+ int tx_len;
+
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct work_struct restart_work;
+
+ int force_quit;
+ int after_suspend;
+#define HI3110_AFTER_SUSPEND_UP 1
+#define HI3110_AFTER_SUSPEND_DOWN 2
+#define HI3110_AFTER_SUSPEND_POWER 4
+#define HI3110_AFTER_SUSPEND_RESTART 8
+ int restart_tx;
+ struct regulator *power;
+ struct regulator *transceiver;
+ struct clk *clk;
+};
+
+static void hi3110_clean(struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
+ if (priv->tx_skb)
+ dev_kfree_skb(priv->tx_skb);
+ if (priv->tx_len)
+ can_free_echo_skb(priv->net, 0);
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+}
+
+/* Note about handling of error return of hi3110_spi_trans: accessing
+ * registers via SPI is conceptually no different from using normal I/O
+ * assembler instructions, although it is much more complicated from a
+ * practical point of view. So it is not advisable to check the return
+ * value of this function everywhere. Imagine if every read{b,l},
+ * write{b,l} and friends were bracketed in "if ( < 0) error();": it would
+ * be a great mess (though there are some situations where C++-like
+ * exception handling could be useful after all). So we just check that
+ * transfers are OK at the beginning of our conversation with the chip and
+ * avoid doing really nasty things (like injecting bogus packets into the
+ * network stack).
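+ * In this driver, the transfers are checked once at probe time, in
+ * hi3110_hw_probe(); past that point most register accesses simply ignore
+ * the return value.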
+ */ +static int hi3110_spi_trans(struct spi_device *spi, int len) +{ + struct hi3110_priv *priv = spi_get_drvdata(spi); + struct spi_transfer t = { + .tx_buf = priv->spi_tx_buf, + .rx_buf = priv->spi_rx_buf, + .len = len, + .cs_change = 0, + }; + struct spi_message m; + int ret; + + spi_message_init(&m); + + if (hi3110_enable_dma) { + t.tx_dma = priv->spi_tx_dma; + t.rx_dma = priv->spi_rx_dma; + m.is_dma_mapped = 1; + } + + spi_message_add_tail(&t, &m); + + ret = spi_sync(spi, &m); + + if (ret) + dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret); + return ret; +} + +static u8 hi3110_cmd(struct spi_device *spi, u8 command) +{ + struct hi3110_priv *priv = spi_get_drvdata(spi); + + priv->spi_tx_buf[0] = command; + dev_dbg(&spi->dev, "hi3110_cmd: %02X\n", command); + + return hi3110_spi_trans(spi, 1); +} + +static u8 hi3110_read(struct spi_device *spi, u8 command) +{ + struct hi3110_priv *priv = spi_get_drvdata(spi); + u8 val = 0; + + priv->spi_tx_buf[0] = command; + hi3110_spi_trans(spi, 2); + val = priv->spi_rx_buf[1]; + + return val; +} + +static void hi3110_write(struct spi_device *spi, u8 reg, u8 val) +{ + struct hi3110_priv *priv = spi_get_drvdata(spi); + + priv->spi_tx_buf[0] = reg; + priv->spi_tx_buf[1] = val; + hi3110_spi_trans(spi, 2); +} + +static void hi3110_hw_tx_frame(struct spi_device *spi, u8 *buf, int len) +{ + struct hi3110_priv *priv = spi_get_drvdata(spi); + + priv->spi_tx_buf[0] = HI3110_WRITE_FIFO; + memcpy(priv->spi_tx_buf + 1, buf, len); + hi3110_spi_trans(spi, len + 1); +} + +static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame) +{ + u8 buf[HI3110_TX_EXT_BUF_LEN]; + + buf[HI3110_FIFO_TAG_OFF] = 0; + + if (frame->can_id & CAN_EFF_FLAG) { + /* Extended frame */ + buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_EFF_MASK) >> 21; + buf[HI3110_FIFO_ID_OFF + 1] = + (((frame->can_id & CAN_EFF_MASK) >> 13) & 0xe0) | + HI3110_EFF_FLAGS | + (((frame->can_id & CAN_EFF_MASK) >> 15) & 0x07); + buf[HI3110_FIFO_ID_OFF + 2] = + (frame->can_id & CAN_EFF_MASK) >> 7; + buf[HI3110_FIFO_ID_OFF + 3] = + ((frame->can_id & CAN_EFF_MASK) << 1) | + ((frame->can_id & CAN_RTR_FLAG) ? 1 : 0); + + buf[HI3110_FIFO_EXT_DLC_OFF] = frame->can_dlc; + + memcpy(buf + HI3110_FIFO_EXT_DATA_OFF, + frame->data, frame->can_dlc); + + hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN - + (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc)); + } else { + /* Standard frame */ + buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_SFF_MASK) >> 3; + buf[HI3110_FIFO_ID_OFF + 1] = + ((frame->can_id & CAN_SFF_MASK) << 5) | + ((frame->can_id & CAN_RTR_FLAG) ? 
(1 << 4) : 0);
+
+ buf[HI3110_FIFO_STD_DLC_OFF] = frame->can_dlc;
+
+ memcpy(buf + HI3110_FIFO_STD_DATA_OFF,
+ frame->data, frame->can_dlc);
+
+ hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN -
+ (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+ }
+}
+
+static void hi3110_hw_rx_frame(struct spi_device *spi, u8 *buf)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+
+ priv->spi_tx_buf[0] = HI3110_READ_FIFO_WOTIME;
+ hi3110_spi_trans(spi, HI3110_RX_BUF_LEN);
+ memcpy(buf, priv->spi_rx_buf + 1, HI3110_RX_BUF_LEN - 1);
+}
+
+static void hi3110_hw_rx(struct spi_device *spi)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ struct sk_buff *skb;
+ struct can_frame *frame;
+ u8 buf[HI3110_RX_BUF_LEN - 1];
+
+ skb = alloc_can_skb(priv->net, &frame);
+ if (!skb) {
+ priv->net->stats.rx_dropped++;
+ return;
+ }
+
+ hi3110_hw_rx_frame(spi, buf);
+ if (buf[HI3110_FIFO_WOTIME_TAG_OFF] & HI3110_FIFO_WOTIME_TAG_IDE) {
+ /* IDE is recessive (1), indicating an extended 29-bit frame */
+ frame->can_id = CAN_EFF_FLAG;
+ frame->can_id |=
+ (buf[HI3110_FIFO_WOTIME_ID_OFF] << 21) |
+ (((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5) << 18) |
+ ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0x07) << 15) |
+ (buf[HI3110_FIFO_WOTIME_ID_OFF + 2] << 7) |
+ (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] >> 1);
+ } else {
+ /* IDE is dominant (0), indicating a standard 11-bit frame */
+ frame->can_id =
+ (buf[HI3110_FIFO_WOTIME_ID_OFF] << 3) |
+ ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5);
+ }
+
+ /* Data length */
+ frame->can_dlc = get_can_dlc(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
+
+ if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
+ frame->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
+ frame->can_dlc);
+
+ priv->net->stats.rx_packets++;
+ priv->net->stats.rx_bytes += frame->can_dlc;
+
+ can_led_event(priv->net, CAN_LED_EVENT_RX);
+
+ netif_rx_ni(skb);
+}
+
+static void hi3110_hw_sleep(struct spi_device *spi)
+{
+ hi3110_write(spi, HI3110_WRITE_CTRL0, HI3110_CTRL0_SLEEP_MODE);
+}
+
+static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+
+ if (priv->tx_skb || priv->tx_len) {
+ dev_err(&spi->dev, "hard_xmit called while tx busy\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (can_dropped_invalid_skb(net, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(net);
+ priv->tx_skb = skb;
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static int hi3110_do_set_mode(struct net_device *net, enum can_mode mode)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ hi3110_clean(net);
+ /* We have to delay work since SPI I/O may sleep */
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ priv->restart_tx = 1;
+ if (priv->can.restart_ms == 0)
+ priv->after_suspend = HI3110_AFTER_SUSPEND_RESTART;
+ queue_work(priv->wq, &priv->restart_work);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hi3110_get_berr_counter(const struct net_device *net,
+ struct can_berr_counter *bec)
+{
+ struct hi3110_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+
+ bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+ bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
+
+ return 0;
+}
+
+static int hi3110_set_normal_mode(struct spi_device *spi)
+{
+ struct hi3110_priv *priv = spi_get_drvdata(spi);
+ u8 reg = 0;
+
+ hi3110_write(spi, HI3110_WRITE_INTE, HI3110_INT_BUSERR |
HI3110_INT_RXFIFO | HI3110_INT_TXCPLT); + + /* Enable TX */ + hi3110_write(spi, HI3110_WRITE_CTRL1, HI3110_CTRL1_TXEN); + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + reg = HI3110_CTRL0_LOOPBACK_MODE; + else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + reg = HI3110_CTRL0_MONITOR_MODE; + else + reg = HI3110_CTRL0_NORMAL_MODE; + + hi3110_write(spi, HI3110_WRITE_CTRL0, reg); + + /* Wait for the device to enter the mode */ + mdelay(HI3110_OST_DELAY_MS); + reg = hi3110_read(spi, HI3110_READ_CTRL0); + if ((reg & HI3110_CTRL0_MODE_MASK) != reg) + return -EBUSY; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + return 0; +} + +static int hi3110_do_set_bittiming(struct net_device *net) +{ + struct hi3110_priv *priv = netdev_priv(net); + struct can_bittiming *bt = &priv->can.bittiming; + struct spi_device *spi = priv->spi; + + hi3110_write(spi, HI3110_WRITE_BTR0, + ((bt->sjw - 1) << HI3110_BTR0_SJW_SHIFT) | + ((bt->brp - 1) << HI3110_BTR0_BRP_SHIFT)); + + hi3110_write(spi, HI3110_WRITE_BTR1, + (priv->can.ctrlmode & + CAN_CTRLMODE_3_SAMPLES ? + HI3110_BTR1_SAMP_3PERBIT : HI3110_BTR1_SAMP_1PERBIT) | + ((bt->phase_seg1 + bt->prop_seg - 1) + << HI3110_BTR1_TSEG1_SHIFT) | + ((bt->phase_seg2 - 1) << HI3110_BTR1_TSEG2_SHIFT)); + + dev_dbg(&spi->dev, "BT: 0x%02x 0x%02x\n", + hi3110_read(spi, HI3110_READ_BTR0), + hi3110_read(spi, HI3110_READ_BTR1)); + + return 0; +} + +static int hi3110_setup(struct net_device *net) +{ + hi3110_do_set_bittiming(net); + return 0; +} + +static int hi3110_hw_reset(struct spi_device *spi) +{ + u8 reg; + int ret; + + /* Wait for oscillator startup timer after power up */ + mdelay(HI3110_OST_DELAY_MS); + + ret = hi3110_cmd(spi, HI3110_MASTER_RESET); + if (ret) + return ret; + + /* Wait for oscillator startup timer after reset */ + mdelay(HI3110_OST_DELAY_MS); + + reg = hi3110_read(spi, HI3110_READ_CTRL0); + if ((reg & HI3110_CTRL0_MODE_MASK) != HI3110_CTRL0_INIT_MODE) + return -ENODEV; + + /* As per the datasheet it appears the error flags are + * not cleared on reset. 
Explicitly clear them by performing a read + */ + hi3110_read(spi, HI3110_READ_ERR); + + return 0; +} + +static int hi3110_hw_probe(struct spi_device *spi) +{ + u8 statf; + + hi3110_hw_reset(spi); + + /* Confirm correct operation by checking against reset values + * in datasheet + */ + statf = hi3110_read(spi, HI3110_READ_STATF); + + dev_dbg(&spi->dev, "statf: %02X\n", statf); + + if (statf != 0x82) + return -ENODEV; + + return 0; +} + +static int hi3110_power_enable(struct regulator *reg, int enable) +{ + if (IS_ERR_OR_NULL(reg)) + return 0; + + if (enable) + return regulator_enable(reg); + else + return regulator_disable(reg); +} + +static int hi3110_stop(struct net_device *net) +{ + struct hi3110_priv *priv = netdev_priv(net); + struct spi_device *spi = priv->spi; + + close_candev(net); + + priv->force_quit = 1; + free_irq(spi->irq, priv); + destroy_workqueue(priv->wq); + priv->wq = NULL; + + mutex_lock(&priv->hi3110_lock); + + /* Disable transmit, interrupts and clear flags */ + hi3110_write(spi, HI3110_WRITE_CTRL1, 0x0); + hi3110_write(spi, HI3110_WRITE_INTE, 0x0); + hi3110_read(spi, HI3110_READ_INTF); + + hi3110_clean(net); + + hi3110_hw_sleep(spi); + + hi3110_power_enable(priv->transceiver, 0); + + priv->can.state = CAN_STATE_STOPPED; + + mutex_unlock(&priv->hi3110_lock); + + can_led_event(net, CAN_LED_EVENT_STOP); + + return 0; +} + +static void hi3110_tx_work_handler(struct work_struct *ws) +{ + struct hi3110_priv *priv = container_of(ws, struct hi3110_priv, + tx_work); + struct spi_device *spi = priv->spi; + struct net_device *net = priv->net; + struct can_frame *frame; + + mutex_lock(&priv->hi3110_lock); + if (priv->tx_skb) { + if (priv->can.state == CAN_STATE_BUS_OFF) { + hi3110_clean(net); + } else { + frame = (struct can_frame *)priv->tx_skb->data; + hi3110_hw_tx(spi, frame); + priv->tx_len = 1 + frame->can_dlc; + can_put_echo_skb(priv->tx_skb, net, 0); + priv->tx_skb = NULL; + } + } + mutex_unlock(&priv->hi3110_lock); +} + +static void hi3110_restart_work_handler(struct work_struct *ws) +{ + struct hi3110_priv *priv = container_of(ws, struct hi3110_priv, + restart_work); + struct spi_device *spi = priv->spi; + struct net_device *net = priv->net; + + mutex_lock(&priv->hi3110_lock); + if (priv->after_suspend) { + hi3110_hw_reset(spi); + hi3110_setup(net); + if (priv->after_suspend & HI3110_AFTER_SUSPEND_RESTART) { + hi3110_set_normal_mode(spi); + } else if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) { + netif_device_attach(net); + hi3110_clean(net); + hi3110_set_normal_mode(spi); + netif_wake_queue(net); + } else { + hi3110_hw_sleep(spi); + } + priv->after_suspend = 0; + priv->force_quit = 0; + } + + if (priv->restart_tx) { + priv->restart_tx = 0; + hi3110_hw_reset(spi); + hi3110_setup(net); + hi3110_clean(net); + hi3110_set_normal_mode(spi); + netif_wake_queue(net); + } + mutex_unlock(&priv->hi3110_lock); +} + +static irqreturn_t hi3110_can_ist(int irq, void *dev_id) +{ + struct hi3110_priv *priv = dev_id; + struct spi_device *spi = priv->spi; + struct net_device *net = priv->net; + + mutex_lock(&priv->hi3110_lock); + + while (!priv->force_quit) { + enum can_state new_state; + u8 intf, eflag, statf; + + while (!(HI3110_STAT_RXFMTY & + (statf = hi3110_read(spi, HI3110_READ_STATF)))) { + hi3110_hw_rx(spi); + } + + intf = hi3110_read(spi, HI3110_READ_INTF); + eflag = hi3110_read(spi, HI3110_READ_ERR); + /* Update can state */ + if (eflag & HI3110_ERR_BUSOFF) + new_state = CAN_STATE_BUS_OFF; + else if (eflag & HI3110_ERR_PASSIVE_MASK) + new_state = CAN_STATE_ERROR_PASSIVE; + 
else if (statf & HI3110_STAT_ERRW) + new_state = CAN_STATE_ERROR_WARNING; + else + new_state = CAN_STATE_ERROR_ACTIVE; + + if (new_state != priv->can.state) { + struct can_frame *cf; + struct sk_buff *skb; + enum can_state rx_state, tx_state; + u8 rxerr, txerr; + + skb = alloc_can_err_skb(net, &cf); + if (!skb) + break; + + txerr = hi3110_read(spi, HI3110_READ_TEC); + rxerr = hi3110_read(spi, HI3110_READ_REC); + cf->data[6] = txerr; + cf->data[7] = rxerr; + tx_state = txerr >= rxerr ? new_state : 0; + rx_state = txerr <= rxerr ? new_state : 0; + can_change_state(net, cf, tx_state, rx_state); + netif_rx_ni(skb); + + if (new_state == CAN_STATE_BUS_OFF) { + can_bus_off(net); + if (priv->can.restart_ms == 0) { + priv->force_quit = 1; + hi3110_hw_sleep(spi); + break; + } + } + } + + /* Update bus errors */ + if ((intf & HI3110_INT_BUSERR) && + (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { + struct can_frame *cf; + struct sk_buff *skb; + + /* Check for protocol errors */ + if (eflag & HI3110_ERR_PROTOCOL_MASK) { + skb = alloc_can_err_skb(net, &cf); + if (!skb) + break; + + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + priv->can.can_stats.bus_error++; + priv->net->stats.rx_errors++; + if (eflag & HI3110_ERR_BITERR) + cf->data[2] |= CAN_ERR_PROT_BIT; + else if (eflag & HI3110_ERR_FRMERR) + cf->data[2] |= CAN_ERR_PROT_FORM; + else if (eflag & HI3110_ERR_STUFERR) + cf->data[2] |= CAN_ERR_PROT_STUFF; + else if (eflag & HI3110_ERR_CRCERR) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + else if (eflag & HI3110_ERR_ACKERR) + cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + + cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); + cf->data[7] = hi3110_read(spi, HI3110_READ_REC); + netdev_dbg(priv->net, "Bus Error\n"); + netif_rx_ni(skb); + } + } + + if (intf == 0) + break; + + if (intf & HI3110_INT_TXCPLT) { + net->stats.tx_packets++; + net->stats.tx_bytes += priv->tx_len - 1; + can_led_event(net, CAN_LED_EVENT_TX); + if (priv->tx_len) { + can_get_echo_skb(net, 0); + priv->tx_len = 0; + } + netif_wake_queue(net); + } + } + mutex_unlock(&priv->hi3110_lock); + return IRQ_HANDLED; +} + +static int hi3110_open(struct net_device *net) +{ + struct hi3110_priv *priv = netdev_priv(net); + struct spi_device *spi = priv->spi; + unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; + int ret; + + ret = open_candev(net); + if (ret) + return ret; + + mutex_lock(&priv->hi3110_lock); + hi3110_power_enable(priv->transceiver, 1); + + priv->force_quit = 0; + priv->tx_skb = NULL; + priv->tx_len = 0; + + ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist, + flags, DEVICE_NAME, priv); + if (ret) { + dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); + goto out_close; + } + + priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, + 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_free_irq; + } + INIT_WORK(&priv->tx_work, hi3110_tx_work_handler); + INIT_WORK(&priv->restart_work, hi3110_restart_work_handler); + + ret = hi3110_hw_reset(spi); + if (ret) + goto out_free_wq; + + ret = hi3110_setup(net); + if (ret) + goto out_free_wq; + + ret = hi3110_set_normal_mode(spi); + if (ret) + goto out_free_wq; + + can_led_event(net, CAN_LED_EVENT_OPEN); + netif_wake_queue(net); + mutex_unlock(&priv->hi3110_lock); + + return 0; + + out_free_wq: + destroy_workqueue(priv->wq); + out_free_irq: + free_irq(spi->irq, priv); + hi3110_hw_sleep(spi); + out_close: + hi3110_power_enable(priv->transceiver, 0); + close_candev(net); + mutex_unlock(&priv->hi3110_lock); + return ret; +} + +static const struct net_device_ops 
hi3110_netdev_ops = { + .ndo_open = hi3110_open, + .ndo_stop = hi3110_stop, + .ndo_start_xmit = hi3110_hard_start_xmit, +}; + +static const struct of_device_id hi3110_of_match[] = { + { + .compatible = "holt,hi3110", + .data = (void *)CAN_HI3110_HI3110, + }, + { } +}; +MODULE_DEVICE_TABLE(of, hi3110_of_match); + +static const struct spi_device_id hi3110_id_table[] = { + { + .name = "hi3110", + .driver_data = (kernel_ulong_t)CAN_HI3110_HI3110, + }, + { } +}; +MODULE_DEVICE_TABLE(spi, hi3110_id_table); + +static int hi3110_can_probe(struct spi_device *spi) +{ + const struct of_device_id *of_id = of_match_device(hi3110_of_match, + &spi->dev); + struct net_device *net; + struct hi3110_priv *priv; + struct clk *clk; + int freq, ret; + + clk = devm_clk_get(&spi->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&spi->dev, "no CAN clock source defined\n"); + return PTR_ERR(clk); + } + freq = clk_get_rate(clk); + + /* Sanity check */ + if (freq > 40000000) + return -ERANGE; + + /* Allocate can/net device */ + net = alloc_candev(sizeof(struct hi3110_priv), HI3110_TX_ECHO_SKB_MAX); + if (!net) + return -ENOMEM; + + if (!IS_ERR(clk)) { + ret = clk_prepare_enable(clk); + if (ret) + goto out_free; + } + + net->netdev_ops = &hi3110_netdev_ops; + net->flags |= IFF_ECHO; + + priv = netdev_priv(net); + priv->can.bittiming_const = &hi3110_bittiming_const; + priv->can.do_set_mode = hi3110_do_set_mode; + priv->can.do_get_berr_counter = hi3110_get_berr_counter; + priv->can.clock.freq = freq / 2; + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + + if (of_id) + priv->model = (enum hi3110_model)of_id->data; + else + priv->model = spi_get_device_id(spi)->driver_data; + priv->net = net; + priv->clk = clk; + + spi_set_drvdata(spi, priv); + + /* Configure the SPI bus */ + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret) + goto out_clk; + + priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); + priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); + if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || + (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { + ret = -EPROBE_DEFER; + goto out_clk; + } + + ret = hi3110_power_enable(priv->power, 1); + if (ret) + goto out_clk; + + priv->spi = spi; + mutex_init(&priv->hi3110_lock); + + /* If requested, allocate DMA buffers */ + if (hi3110_enable_dma) { + spi->dev.coherent_dma_mask = ~0; + + /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate + * that much and share it between Tx and Rx DMA buffers. + */ + priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, + PAGE_SIZE, + &priv->spi_tx_dma, + GFP_DMA); + + if (priv->spi_tx_buf) { + priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); + priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma + + (PAGE_SIZE / 2)); + } else { + /* Fall back to non-DMA */ + hi3110_enable_dma = 0; + } + } + + /* Allocate non-DMA buffers */ + if (!hi3110_enable_dma) { + priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, + GFP_KERNEL); + if (!priv->spi_tx_buf) { + ret = -ENOMEM; + goto error_probe; + } + priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, + GFP_KERNEL); + + if (!priv->spi_rx_buf) { + ret = -ENOMEM; + goto error_probe; + } + } + + SET_NETDEV_DEV(net, &spi->dev); + + ret = hi3110_hw_probe(spi); + if (ret) { + if (ret == -ENODEV) + dev_err(&spi->dev, "Cannot initialize %x. 
Wrong wiring?\n", + priv->model); + goto error_probe; + } + hi3110_hw_sleep(spi); + + ret = register_candev(net); + if (ret) + goto error_probe; + + devm_can_led_init(net); + netdev_info(net, "%x successfully initialized.\n", priv->model); + + return 0; + + error_probe: + hi3110_power_enable(priv->power, 0); + + out_clk: + if (!IS_ERR(clk)) + clk_disable_unprepare(clk); + + out_free: + free_candev(net); + + dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); + return ret; +} + +static int hi3110_can_remove(struct spi_device *spi) +{ + struct hi3110_priv *priv = spi_get_drvdata(spi); + struct net_device *net = priv->net; + + unregister_candev(net); + + hi3110_power_enable(priv->power, 0); + + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + + free_candev(net); + + return 0; +} + +static int __maybe_unused hi3110_can_suspend(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct hi3110_priv *priv = spi_get_drvdata(spi); + struct net_device *net = priv->net; + + priv->force_quit = 1; + disable_irq(spi->irq); + + /* Note: at this point neither IST nor workqueues are running. + * open/stop cannot be called anyway so locking is not needed + */ + if (netif_running(net)) { + netif_device_detach(net); + + hi3110_hw_sleep(spi); + hi3110_power_enable(priv->transceiver, 0); + priv->after_suspend = HI3110_AFTER_SUSPEND_UP; + } else { + priv->after_suspend = HI3110_AFTER_SUSPEND_DOWN; + } + + if (!IS_ERR_OR_NULL(priv->power)) { + regulator_disable(priv->power); + priv->after_suspend |= HI3110_AFTER_SUSPEND_POWER; + } + + return 0; +} + +static int __maybe_unused hi3110_can_resume(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct hi3110_priv *priv = spi_get_drvdata(spi); + + if (priv->after_suspend & HI3110_AFTER_SUSPEND_POWER) + hi3110_power_enable(priv->power, 1); + + if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) { + hi3110_power_enable(priv->transceiver, 1); + queue_work(priv->wq, &priv->restart_work); + } else { + priv->after_suspend = 0; + } + + priv->force_quit = 0; + enable_irq(spi->irq); + return 0; +} + +static SIMPLE_DEV_PM_OPS(hi3110_can_pm_ops, hi3110_can_suspend, hi3110_can_resume); + +static struct spi_driver hi3110_can_driver = { + .driver = { + .name = DEVICE_NAME, + .of_match_table = hi3110_of_match, + .pm = &hi3110_can_pm_ops, + }, + .id_table = hi3110_id_table, + .probe = hi3110_can_probe, + .remove = hi3110_can_remove, +}; + +module_spi_driver(hi3110_can_driver); + +MODULE_AUTHOR("Akshay Bhat "); +MODULE_AUTHOR("Casey Fitzpatrick "); +MODULE_DESCRIPTION("Holt HI-3110 CAN driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 6749b1829469411315dedac1634b7be974cf21d8..4d4941469cfc06bfff3aeafaa0c3562b63702730 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -17,25 +17,6 @@ * */ -/* - * Your platform definitions should specify module ram offsets and interrupt - * number to use as follows: - * - * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = { - * .scc_hecc_offset = 0, - * .scc_ram_offset = 0x3000, - * .hecc_ram_offset = 0x3000, - * .mbx_offset = 0x2000, - * .int_line = 0, - * .revision = 1, - * .transceiver_switch = hecc_phy_control, - * }; - * - * Please see include/linux/can/platform/ti_hecc.h for description of - * above fields. 
- * - */ - #include #include #include @@ -46,11 +27,13 @@ #include #include #include +#include +#include +#include #include #include #include -#include #define DRV_NAME "ti_hecc" #define HECC_MODULE_VERSION "0.7" @@ -214,15 +197,14 @@ struct ti_hecc_priv { struct net_device *ndev; struct clk *clk; void __iomem *base; - u32 scc_ram_offset; - u32 hecc_ram_offset; - u32 mbx_offset; - u32 int_line; + void __iomem *hecc_ram; + void __iomem *mbx; + bool use_hecc1int; spinlock_t mbx_lock; /* CANME register needs protection */ u32 tx_head; u32 tx_tail; u32 rx_next; - void (*transceiver_switch)(int); + struct regulator *reg_xceiver; }; static inline int get_tx_head_mb(struct ti_hecc_priv *priv) @@ -242,20 +224,18 @@ static inline int get_tx_head_prio(struct ti_hecc_priv *priv) static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val) { - __raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4); + __raw_writel(val, priv->hecc_ram + mbxno * 4); } static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg, u32 val) { - __raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 + - reg); + __raw_writel(val, priv->mbx + mbxno * 0x10 + reg); } static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg) { - return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 + - reg); + return __raw_readl(priv->mbx + mbxno * 0x10 + reg); } static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val) @@ -311,11 +291,16 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv) return 0; } -static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv, - int on) +static int ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv, + int on) { - if (priv->transceiver_switch) - priv->transceiver_switch(on); + if (!priv->reg_xceiver) + return 0; + + if (on) + return regulator_enable(priv->reg_xceiver); + else + return regulator_disable(priv->reg_xceiver); } static void ti_hecc_reset(struct net_device *ndev) @@ -409,7 +394,7 @@ static void ti_hecc_start(struct net_device *ndev) /* Prevent message over-write & Enable interrupts */ hecc_write(priv, HECC_CANOPC, HECC_SET_REG); - if (priv->int_line) { + if (priv->use_hecc1int) { hecc_write(priv, HECC_CANMIL, HECC_SET_REG); hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | HECC_CANGIM_I1EN | HECC_CANGIM_SIL); @@ -760,7 +745,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) unsigned long ack, flags; int_status = hecc_read(priv, - (priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0); + (priv->use_hecc1int) ? 
HECC_CANGIF1 : HECC_CANGIF0); if (!int_status) return IRQ_NONE; @@ -806,7 +791,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) } /* clear all interrupt conditions - read back to avoid spurious ints */ - if (priv->int_line) { + if (priv->use_hecc1int) { hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); int_status = hecc_read(priv, HECC_CANGIF1); } else { @@ -872,58 +857,87 @@ static const struct net_device_ops ti_hecc_netdev_ops = { .ndo_change_mtu = can_change_mtu, }; +static const struct of_device_id ti_hecc_dt_ids[] = { + { + .compatible = "ti,am3517-hecc", + }, + { } +}; +MODULE_DEVICE_TABLE(of, ti_hecc_dt_ids); + static int ti_hecc_probe(struct platform_device *pdev) { struct net_device *ndev = (struct net_device *)0; struct ti_hecc_priv *priv; - struct ti_hecc_platform_data *pdata; - struct resource *mem, *irq; - void __iomem *addr; + struct device_node *np = pdev->dev.of_node; + struct resource *res, *irq; + struct regulator *reg_xceiver; int err = -ENODEV; - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "No platform data\n"); - goto probe_exit; + if (!IS_ENABLED(CONFIG_OF) || !np) + return -EINVAL; + + reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); + if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(reg_xceiver)) + reg_xceiver = NULL; + + ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX); + if (!ndev) { + dev_err(&pdev->dev, "alloc_candev failed\n"); + return -ENOMEM; } + priv = netdev_priv(ndev); - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(&pdev->dev, "No mem resources\n"); - goto probe_exit; + /* handle hecc memory */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc"); + if (!res) { + dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n"); + return -EINVAL; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq) { - dev_err(&pdev->dev, "No irq resource\n"); - goto probe_exit; + + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) { + dev_err(&pdev->dev, "hecc ioremap failed\n"); + return PTR_ERR(priv->base); } - if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) { - dev_err(&pdev->dev, "HECC region already claimed\n"); - err = -EBUSY; - goto probe_exit; + + /* handle hecc-ram memory */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram"); + if (!res) { + dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n"); + return -EINVAL; } - addr = ioremap(mem->start, resource_size(mem)); - if (!addr) { - dev_err(&pdev->dev, "ioremap failed\n"); - err = -ENOMEM; - goto probe_exit_free_region; + + priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->hecc_ram)) { + dev_err(&pdev->dev, "hecc-ram ioremap failed\n"); + return PTR_ERR(priv->hecc_ram); } - ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX); - if (!ndev) { - dev_err(&pdev->dev, "alloc_candev failed\n"); - err = -ENOMEM; - goto probe_exit_iounmap; + /* handle mbx memory */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx"); + if (!res) { + dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n"); + return -EINVAL; + } + + priv->mbx = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->mbx)) { + dev_err(&pdev->dev, "mbx ioremap failed\n"); + return PTR_ERR(priv->mbx); + } + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) { + dev_err(&pdev->dev, "No irq resource\n"); + goto probe_exit; } - priv = netdev_priv(ndev); 
priv->ndev = ndev; - priv->base = addr; - priv->scc_ram_offset = pdata->scc_ram_offset; - priv->hecc_ram_offset = pdata->hecc_ram_offset; - priv->mbx_offset = pdata->mbx_offset; - priv->int_line = pdata->int_line; - priv->transceiver_switch = pdata->transceiver_switch; + priv->reg_xceiver = reg_xceiver; + priv->use_hecc1int = of_property_read_bool(np, "ti,use-hecc1int"); priv->can.bittiming_const = &ti_hecc_bittiming_const; priv->can.do_set_mode = ti_hecc_do_set_mode; @@ -971,32 +985,23 @@ static int ti_hecc_probe(struct platform_device *pdev) clk_put(priv->clk); probe_exit_candev: free_candev(ndev); -probe_exit_iounmap: - iounmap(addr); -probe_exit_free_region: - release_mem_region(mem->start, resource_size(mem)); probe_exit: return err; } static int ti_hecc_remove(struct platform_device *pdev) { - struct resource *res; struct net_device *ndev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(ndev); unregister_candev(ndev); clk_disable_unprepare(priv->clk); clk_put(priv->clk); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - iounmap(priv->base); - release_mem_region(res->start, resource_size(res)); free_candev(ndev); return 0; } - #ifdef CONFIG_PM static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state) { @@ -1045,6 +1050,7 @@ static int ti_hecc_resume(struct platform_device *pdev) static struct platform_driver ti_hecc_driver = { .driver = { .name = DRV_NAME, + .of_match_table = ti_hecc_dt_ids, }, .probe = ti_hecc_probe, .remove = ti_hecc_remove, diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 5f9e0e6301d06ecbe466e2f4eb4e8b7fee8ece27..c36f4bdcbf4fba8a22e581b3617269a4e2d2ef61 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -83,4 +83,10 @@ config CAN_8DEV_USB This driver supports the USB2CAN interface from 8 devices (http://www.8devices.com). +config CAN_MCBA_USB + tristate "Microchip CAN BUS Analyzer interface" + ---help--- + This driver supports the CAN BUS Analyzer interface + from Microchip (http://www.microchip.com/development-tools/). + endmenu diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index a64cf983fb87af3111c0b8b8ed276943a20067e4..164453fd55d0acba62c334856baa7fa016f13145 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o +obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c new file mode 100644 index 0000000000000000000000000000000000000000..7f0272558befe9ecdeaf110d2b95f8754cea8939 --- /dev/null +++ b/drivers/net/can/usb/mcba_usb.c @@ -0,0 +1,904 @@ +/* SocketCAN driver for Microchip CAN BUS Analyzer Tool + * + * Copyright (C) 2017 Mobica Limited + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. 
+ * + * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* vendor and product id */ +#define MCBA_MODULE_NAME "mcba_usb" +#define MCBA_VENDOR_ID 0x04d8 +#define MCBA_PRODUCT_ID 0x0a30 + +/* driver constants */ +#define MCBA_MAX_RX_URBS 20 +#define MCBA_MAX_TX_URBS 20 +#define MCBA_CTX_FREE MCBA_MAX_TX_URBS + +/* RX buffer must be bigger than msg size since at the + * beginning USB messages are stacked. + */ +#define MCBA_USB_RX_BUFF_SIZE 64 +#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) + +/* MCBA endpoint numbers */ +#define MCBA_USB_EP_IN 1 +#define MCBA_USB_EP_OUT 1 + +/* Microchip command id */ +#define MBCA_CMD_RECEIVE_MESSAGE 0xE3 +#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 +#define MBCA_CMD_I_AM_ALIVE_FROM_USB 0xF7 +#define MBCA_CMD_CHANGE_BIT_RATE 0xA1 +#define MBCA_CMD_TRANSMIT_MESSAGE_EV 0xA3 +#define MBCA_CMD_SETUP_TERMINATION_RESISTANCE 0xA8 +#define MBCA_CMD_READ_FW_VERSION 0xA9 +#define MBCA_CMD_NOTHING_TO_SEND 0xFF +#define MBCA_CMD_TRANSMIT_MESSAGE_RSP 0xE2 + +#define MCBA_VER_REQ_USB 1 +#define MCBA_VER_REQ_CAN 2 + +#define MCBA_SIDL_EXID_MASK 0x8 +#define MCBA_DLC_MASK 0xf +#define MCBA_DLC_RTR_MASK 0x40 + +#define MCBA_CAN_STATE_WRN_TH 95 +#define MCBA_CAN_STATE_ERR_PSV_TH 127 + +#define MCBA_TERMINATION_DISABLED CAN_TERMINATION_DISABLED +#define MCBA_TERMINATION_ENABLED 120 + +struct mcba_usb_ctx { + struct mcba_priv *priv; + u32 ndx; + u8 dlc; + bool can; +}; + +/* Structure to hold all of our device specific stuff */ +struct mcba_priv { + struct can_priv can; /* must be the first member */ + struct sk_buff *echo_skb[MCBA_MAX_TX_URBS]; + struct mcba_usb_ctx tx_context[MCBA_MAX_TX_URBS]; + struct usb_device *udev; + struct net_device *netdev; + struct usb_anchor tx_submitted; + struct usb_anchor rx_submitted; + struct can_berr_counter bec; + bool usb_ka_first_pass; + bool can_ka_first_pass; + bool can_speed_check; + atomic_t free_ctx_cnt; +}; + +/* CAN frame */ +struct __packed mcba_usb_msg_can { + u8 cmd_id; + __be16 eid; + __be16 sid; + u8 dlc; + u8 data[8]; + u8 timestamp[4]; + u8 checksum; +}; + +/* command frame */ +struct __packed mcba_usb_msg { + u8 cmd_id; + u8 unused[18]; +}; + +struct __packed mcba_usb_msg_ka_usb { + u8 cmd_id; + u8 termination_state; + u8 soft_ver_major; + u8 soft_ver_minor; + u8 unused[15]; +}; + +struct __packed mcba_usb_msg_ka_can { + u8 cmd_id; + u8 tx_err_cnt; + u8 rx_err_cnt; + u8 rx_buff_ovfl; + u8 tx_bus_off; + __be16 can_bitrate; + __le16 rx_lost; + u8 can_stat; + u8 soft_ver_major; + u8 soft_ver_minor; + u8 debug_mode; + u8 test_complete; + u8 test_result; + u8 unused[4]; +}; + +struct __packed mcba_usb_msg_change_bitrate { + u8 cmd_id; + __be16 bitrate; + u8 unused[16]; +}; + +struct __packed mcba_usb_msg_termination { + u8 cmd_id; + u8 termination; + u8 unused[17]; +}; + +struct __packed mcba_usb_msg_fw_ver { + u8 cmd_id; + u8 pic; + u8 unused[17]; +}; + +static const struct usb_device_id mcba_usb_table[] = { + { USB_DEVICE(MCBA_VENDOR_ID, MCBA_PRODUCT_ID) }, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, mcba_usb_table); + +static const u16 mcba_termination[] = { MCBA_TERMINATION_DISABLED, + MCBA_TERMINATION_ENABLED }; + +static const u32 mcba_bitrate[] = { 20000, 33333, 50000, 80000, 83333, + 100000, 125000, 150000, 175000, 200000, + 225000, 250000, 275000, 300000, 500000, + 625000, 800000, 1000000 }; + +static inline void mcba_init_ctx(struct mcba_priv *priv) +{ + int i
= 0; + + for (i = 0; i < MCBA_MAX_TX_URBS; i++) { + priv->tx_context[i].ndx = MCBA_CTX_FREE; + priv->tx_context[i].priv = priv; + } + + atomic_set(&priv->free_ctx_cnt, ARRAY_SIZE(priv->tx_context)); +} + +static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv, + struct can_frame *cf) +{ + int i = 0; + struct mcba_usb_ctx *ctx = NULL; + + for (i = 0; i < MCBA_MAX_TX_URBS; i++) { + if (priv->tx_context[i].ndx == MCBA_CTX_FREE) { + ctx = &priv->tx_context[i]; + ctx->ndx = i; + + if (cf) { + ctx->can = true; + ctx->dlc = cf->can_dlc; + } else { + ctx->can = false; + ctx->dlc = 0; + } + + atomic_dec(&priv->free_ctx_cnt); + break; + } + } + + if (!atomic_read(&priv->free_ctx_cnt)) + /* That was the last free ctx. Slow down tx path */ + netif_stop_queue(priv->netdev); + + return ctx; +} + +/* mcba_usb_free_ctx and mcba_usb_get_free_ctx are executed by different + * threads. The order of execution in below function is important. + */ +static inline void mcba_usb_free_ctx(struct mcba_usb_ctx *ctx) +{ + /* Increase number of free ctxs before freeing ctx */ + atomic_inc(&ctx->priv->free_ctx_cnt); + + ctx->ndx = MCBA_CTX_FREE; + + /* Wake up the queue once ctx is marked free */ + netif_wake_queue(ctx->priv->netdev); +} + +static void mcba_usb_write_bulk_callback(struct urb *urb) +{ + struct mcba_usb_ctx *ctx = urb->context; + struct net_device *netdev; + + WARN_ON(!ctx); + + netdev = ctx->priv->netdev; + + /* free up our allocated buffer */ + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + + if (ctx->can) { + if (!netif_device_present(netdev)) + return; + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += ctx->dlc; + + can_led_event(netdev, CAN_LED_EVENT_TX); + can_get_echo_skb(netdev, ctx->ndx); + } + + if (urb->status) + netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); + + /* Release the context */ + mcba_usb_free_ctx(ctx); +} + +/* Send data to device */ +static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, + struct mcba_usb_msg *usb_msg, + struct mcba_usb_ctx *ctx) +{ + struct urb *urb; + u8 *buf; + int err; + + /* create a URB, and a buffer for it, and copy the data to the URB */ + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + buf = usb_alloc_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, GFP_ATOMIC, + &urb->transfer_dma); + if (!buf) { + err = -ENOMEM; + goto nomembuf; + } + + memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); + + usb_fill_bulk_urb(urb, priv->udev, + usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf, + MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, + ctx); + + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &priv->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) + goto failed; + + /* Release our reference to this URB, the USB core will eventually free + * it entirely. 
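+ * The coherent transfer buffer, however, is freed in the completion + * handler, mcba_usb_write_bulk_callback(), once the transfer finishes.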
+ */ + usb_free_urb(urb); + + return 0; + +failed: + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, buf, + urb->transfer_dma); + + if (err == -ENODEV) + netif_device_detach(priv->netdev); + else + netdev_warn(priv->netdev, "failed tx_urb %d\n", err); + +nomembuf: + usb_free_urb(urb); + + return err; +} + +/* Send data to device */ +static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct mcba_priv *priv = netdev_priv(netdev); + struct can_frame *cf = (struct can_frame *)skb->data; + struct mcba_usb_ctx *ctx = NULL; + struct net_device_stats *stats = &priv->netdev->stats; + u16 sid; + int err; + struct mcba_usb_msg_can usb_msg = { + .cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV + }; + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + ctx = mcba_usb_get_free_ctx(priv, cf); + if (!ctx) + return NETDEV_TX_BUSY; + + can_put_echo_skb(skb, priv->netdev, ctx->ndx); + + if (cf->can_id & CAN_EFF_FLAG) { + /* SIDH | SIDL | EIDH | EIDL + * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 + */ + sid = MCBA_SIDL_EXID_MASK; + /* store 28-18 bits */ + sid |= (cf->can_id & 0x1ffc0000) >> 13; + /* store 17-16 bits */ + sid |= (cf->can_id & 0x30000) >> 16; + put_unaligned_be16(sid, &usb_msg.sid); + + /* store 15-0 bits */ + put_unaligned_be16(cf->can_id & 0xffff, &usb_msg.eid); + } else { + /* SIDH | SIDL + * 10 - 3 | 2 1 0 x x x x x + */ + put_unaligned_be16((cf->can_id & CAN_SFF_MASK) << 5, + &usb_msg.sid); + usb_msg.eid = 0; + } + + usb_msg.dlc = cf->can_dlc; + + memcpy(usb_msg.data, cf->data, usb_msg.dlc); + + if (cf->can_id & CAN_RTR_FLAG) + usb_msg.dlc |= MCBA_DLC_RTR_MASK; + + err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx); + if (err) + goto xmit_failed; + + return NETDEV_TX_OK; + +xmit_failed: + can_free_echo_skb(priv->netdev, ctx->ndx); + mcba_usb_free_ctx(ctx); + dev_kfree_skb(skb); + stats->tx_dropped++; + + return NETDEV_TX_OK; +} + +/* Send cmd to device */ +static void mcba_usb_xmit_cmd(struct mcba_priv *priv, + struct mcba_usb_msg *usb_msg) +{ + struct mcba_usb_ctx *ctx = NULL; + int err; + + ctx = mcba_usb_get_free_ctx(priv, NULL); + if (!ctx) { + netdev_err(priv->netdev, + "Lack of free ctx. 
Sending (%d) cmd aborted", + usb_msg->cmd_id); + + return; + } + + err = mcba_usb_xmit(priv, usb_msg, ctx); + if (err) + netdev_err(priv->netdev, "Failed to send cmd (%d)", + usb_msg->cmd_id); +} + +static void mcba_usb_xmit_change_bitrate(struct mcba_priv *priv, u16 bitrate) +{ + struct mcba_usb_msg_change_bitrate usb_msg = { + .cmd_id = MBCA_CMD_CHANGE_BIT_RATE + }; + + put_unaligned_be16(bitrate, &usb_msg.bitrate); + + mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); +} + +static void mcba_usb_xmit_read_fw_ver(struct mcba_priv *priv, u8 pic) +{ + struct mcba_usb_msg_fw_ver usb_msg = { + .cmd_id = MBCA_CMD_READ_FW_VERSION, + .pic = pic + }; + + mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); +} + +static void mcba_usb_process_can(struct mcba_priv *priv, + struct mcba_usb_msg_can *msg) +{ + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats = &priv->netdev->stats; + u16 sid; + + skb = alloc_can_skb(priv->netdev, &cf); + if (!skb) + return; + + sid = get_unaligned_be16(&msg->sid); + + if (sid & MCBA_SIDL_EXID_MASK) { + /* SIDH | SIDL | EIDH | EIDL + * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 + */ + cf->can_id = CAN_EFF_FLAG; + + /* store 28-18 bits */ + cf->can_id |= (sid & 0xffe0) << 13; + /* store 17-16 bits */ + cf->can_id |= (sid & 3) << 16; + /* store 15-0 bits */ + cf->can_id |= get_unaligned_be16(&msg->eid); + } else { + /* SIDH | SIDL + * 10 - 3 | 2 1 0 x x x x x + */ + cf->can_id = (sid & 0xffe0) >> 5; + } + + if (msg->dlc & MCBA_DLC_RTR_MASK) + cf->can_id |= CAN_RTR_FLAG; + + cf->can_dlc = get_can_dlc(msg->dlc & MCBA_DLC_MASK); + + memcpy(cf->data, msg->data, cf->can_dlc); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + can_led_event(priv->netdev, CAN_LED_EVENT_RX); + netif_rx(skb); +} + +static void mcba_usb_process_ka_usb(struct mcba_priv *priv, + struct mcba_usb_msg_ka_usb *msg) +{ + if (unlikely(priv->usb_ka_first_pass)) { + netdev_info(priv->netdev, "PIC USB version %hhu.%hhu\n", + msg->soft_ver_major, msg->soft_ver_minor); + + priv->usb_ka_first_pass = false; + } + + if (msg->termination_state) + priv->can.termination = MCBA_TERMINATION_ENABLED; + else + priv->can.termination = MCBA_TERMINATION_DISABLED; +} + +static u32 convert_can2host_bitrate(struct mcba_usb_msg_ka_can *msg) +{ + const u32 bitrate = get_unaligned_be16(&msg->can_bitrate); + + if ((bitrate == 33) || (bitrate == 83)) + return bitrate * 1000 + 333; + else + return bitrate * 1000; +} + +static void mcba_usb_process_ka_can(struct mcba_priv *priv, + struct mcba_usb_msg_ka_can *msg) +{ + if (unlikely(priv->can_ka_first_pass)) { + netdev_info(priv->netdev, "PIC CAN version %hhu.%hhu\n", + msg->soft_ver_major, msg->soft_ver_minor); + + priv->can_ka_first_pass = false; + } + + if (unlikely(priv->can_speed_check)) { + const u32 bitrate = convert_can2host_bitrate(msg); + + priv->can_speed_check = false; + + if (bitrate != priv->can.bittiming.bitrate) + netdev_err( + priv->netdev, + "Wrong bitrate reported by the device (%u). 
Expected %u", + bitrate, priv->can.bittiming.bitrate); + } + + priv->bec.txerr = msg->tx_err_cnt; + priv->bec.rxerr = msg->rx_err_cnt; + + if (msg->tx_bus_off) + priv->can.state = CAN_STATE_BUS_OFF; + + else if ((priv->bec.txerr > MCBA_CAN_STATE_ERR_PSV_TH) || + (priv->bec.rxerr > MCBA_CAN_STATE_ERR_PSV_TH)) + priv->can.state = CAN_STATE_ERROR_PASSIVE; + + else if ((priv->bec.txerr > MCBA_CAN_STATE_WRN_TH) || + (priv->bec.rxerr > MCBA_CAN_STATE_WRN_TH)) + priv->can.state = CAN_STATE_ERROR_WARNING; +} + +static void mcba_usb_process_rx(struct mcba_priv *priv, + struct mcba_usb_msg *msg) +{ + switch (msg->cmd_id) { + case MBCA_CMD_I_AM_ALIVE_FROM_CAN: + mcba_usb_process_ka_can(priv, + (struct mcba_usb_msg_ka_can *)msg); + break; + + case MBCA_CMD_I_AM_ALIVE_FROM_USB: + mcba_usb_process_ka_usb(priv, + (struct mcba_usb_msg_ka_usb *)msg); + break; + + case MBCA_CMD_RECEIVE_MESSAGE: + mcba_usb_process_can(priv, (struct mcba_usb_msg_can *)msg); + break; + + case MBCA_CMD_NOTHING_TO_SEND: + /* Side effect of communication between PIC_USB and PIC_CAN. + * PIC_CAN is telling us that it has nothing to send + */ + break; + + case MBCA_CMD_TRANSMIT_MESSAGE_RSP: + /* Transmission response from the device containing timestamp */ + break; + + default: + netdev_warn(priv->netdev, "Unsupported msg (0x%hhX)", + msg->cmd_id); + break; + } +} + +/* Callback for reading data from device + * + * Check urb status, call read function and resubmit urb read operation. + */ +static void mcba_usb_read_bulk_callback(struct urb *urb) +{ + struct mcba_priv *priv = urb->context; + struct net_device *netdev; + int retval; + int pos = 0; + + netdev = priv->netdev; + + if (!netif_device_present(netdev)) + return; + + switch (urb->status) { + case 0: /* success */ + break; + + case -ENOENT: + case -ESHUTDOWN: + return; + + default: + netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); + + goto resubmit_urb; + } + + while (pos < urb->actual_length) { + struct mcba_usb_msg *msg; + + if (pos + sizeof(struct mcba_usb_msg) > urb->actual_length) { + netdev_err(priv->netdev, "format error\n"); + break; + } + + msg = (struct mcba_usb_msg *)(urb->transfer_buffer + pos); + mcba_usb_process_rx(priv, msg); + + pos += sizeof(struct mcba_usb_msg); + } + +resubmit_urb: + + usb_fill_bulk_urb(urb, priv->udev, + usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT), + urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, + mcba_usb_read_bulk_callback, priv); + + retval = usb_submit_urb(urb, GFP_ATOMIC); + + if (retval == -ENODEV) + netif_device_detach(netdev); + else if (retval) + netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", + retval); +} + +/* Start USB device */ +static int mcba_usb_start(struct mcba_priv *priv) +{ + struct net_device *netdev = priv->netdev; + int err, i; + + mcba_init_ctx(priv); + + for (i = 0; i < MCBA_MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + err = -ENOMEM; + break; + } + + buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, + GFP_KERNEL, &urb->transfer_dma); + if (!buf) { + netdev_err(netdev, "No memory left for USB buffer\n"); + usb_free_urb(urb); + err = -ENOMEM; + break; + } + + usb_fill_bulk_urb(urb, priv->udev, + usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), + buf, MCBA_USB_RX_BUFF_SIZE, + mcba_usb_read_bulk_callback, priv); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + usb_anchor_urb(urb, &priv->rx_submitted); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + 
usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, + buf, urb->transfer_dma); + usb_free_urb(urb); + break; + } + + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); + } + + /* Did we submit any URBs? */ + if (i == 0) { + netdev_warn(netdev, "couldn't setup read URBs\n"); + return err; + } + + /* Warn if we couldn't submit all the URBs */ + if (i < MCBA_MAX_RX_URBS) + netdev_warn(netdev, "rx performance may be slow\n"); + + mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_USB); + mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_CAN); + + return err; +} + +/* Open USB device */ +static int mcba_usb_open(struct net_device *netdev) +{ + struct mcba_priv *priv = netdev_priv(netdev); + int err; + + /* common open */ + err = open_candev(netdev); + if (err) + return err; + + priv->can_speed_check = true; + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + can_led_event(netdev, CAN_LED_EVENT_OPEN); + netif_start_queue(netdev); + + return 0; +} + +static void mcba_urb_unlink(struct mcba_priv *priv) +{ + usb_kill_anchored_urbs(&priv->rx_submitted); + usb_kill_anchored_urbs(&priv->tx_submitted); +} + +/* Close USB device */ +static int mcba_usb_close(struct net_device *netdev) +{ + struct mcba_priv *priv = netdev_priv(netdev); + + priv->can.state = CAN_STATE_STOPPED; + + netif_stop_queue(netdev); + + /* Stop polling */ + mcba_urb_unlink(priv); + + close_candev(netdev); + can_led_event(netdev, CAN_LED_EVENT_STOP); + + return 0; +} + +/* Set network device mode + * + * Maybe we should leave this function empty, because the device + * sets its mode as part of the open command. + */ +static int mcba_net_set_mode(struct net_device *netdev, enum can_mode mode) +{ + return 0; +} + +static int mcba_net_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct mcba_priv *priv = netdev_priv(netdev); + + bec->txerr = priv->bec.txerr; + bec->rxerr = priv->bec.rxerr; + + return 0; +} + +static const struct net_device_ops mcba_netdev_ops = { + .ndo_open = mcba_usb_open, + .ndo_stop = mcba_usb_close, + .ndo_start_xmit = mcba_usb_start_xmit, +}; + +/* Microchip CANBUS has hardcoded bittiming values by default.
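+ * Only the discrete bit rates listed in mcba_bitrate[] are accepted; + * the CAN core enforces this via the bitrate_const table registered + * in mcba_usb_probe().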
+ * This function sends a request via USB to change the speed and aligns the + * bittiming values for presentation purposes only + */ +static int mcba_net_set_bittiming(struct net_device *netdev) +{ + struct mcba_priv *priv = netdev_priv(netdev); + const u16 bitrate_kbps = priv->can.bittiming.bitrate / 1000; + + mcba_usb_xmit_change_bitrate(priv, bitrate_kbps); + + return 0; +} + +static int mcba_set_termination(struct net_device *netdev, u16 term) +{ + struct mcba_priv *priv = netdev_priv(netdev); + struct mcba_usb_msg_termination usb_msg = { + .cmd_id = MBCA_CMD_SETUP_TERMINATION_RESISTANCE + }; + + if (term == MCBA_TERMINATION_ENABLED) + usb_msg.termination = 1; + else + usb_msg.termination = 0; + + mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); + + return 0; +} + +static int mcba_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct net_device *netdev; + struct mcba_priv *priv; + int err = -ENOMEM; + struct usb_device *usbdev = interface_to_usbdev(intf); + + netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); + if (!netdev) { + dev_err(&intf->dev, "Couldn't alloc candev\n"); + return -ENOMEM; + } + + priv = netdev_priv(netdev); + + priv->udev = usbdev; + priv->netdev = netdev; + priv->usb_ka_first_pass = true; + priv->can_ka_first_pass = true; + priv->can_speed_check = false; + + init_usb_anchor(&priv->rx_submitted); + init_usb_anchor(&priv->tx_submitted); + + usb_set_intfdata(intf, priv); + + /* Init CAN device */ + priv->can.state = CAN_STATE_STOPPED; + priv->can.termination_const = mcba_termination; + priv->can.termination_const_cnt = ARRAY_SIZE(mcba_termination); + priv->can.bitrate_const = mcba_bitrate; + priv->can.bitrate_const_cnt = ARRAY_SIZE(mcba_bitrate); + + priv->can.do_set_termination = mcba_set_termination; + priv->can.do_set_mode = mcba_net_set_mode; + priv->can.do_get_berr_counter = mcba_net_get_berr_counter; + priv->can.do_set_bittiming = mcba_net_set_bittiming; + + netdev->netdev_ops = &mcba_netdev_ops; + + netdev->flags |= IFF_ECHO; /* we support local echo */ + + SET_NETDEV_DEV(netdev, &intf->dev); + + err = register_candev(netdev); + if (err) { + netdev_err(netdev, "couldn't register CAN device: %d\n", err); + + goto cleanup_free_candev; + } + + devm_can_led_init(netdev); + + /* Start USB dev only if we have successfully registered CAN device */ + err = mcba_usb_start(priv); + if (err) { + if (err == -ENODEV) + netif_device_detach(priv->netdev); + + netdev_warn(netdev, "couldn't start device: %d\n", err); + + goto cleanup_unregister_candev; + } + + dev_info(&intf->dev, "Microchip CAN BUS analyzer connected\n"); + + return 0; + +cleanup_unregister_candev: + unregister_candev(priv->netdev); + +cleanup_free_candev: + free_candev(netdev); + + return err; +} + +/* Called by the usb core when driver is unloaded or device is removed */ +static void mcba_usb_disconnect(struct usb_interface *intf) +{ + struct mcba_priv *priv = usb_get_intfdata(intf); + + usb_set_intfdata(intf, NULL); + + netdev_info(priv->netdev, "device disconnected\n"); + + unregister_candev(priv->netdev); + free_candev(priv->netdev); + + mcba_urb_unlink(priv); +} + +static struct usb_driver mcba_usb_driver = { + .name = MCBA_MODULE_NAME, + .probe = mcba_usb_probe, + .disconnect = mcba_usb_disconnect, + .id_table = mcba_usb_table, +}; + +module_usb_driver(mcba_usb_driver); + +MODULE_AUTHOR("Remigiusz Kołłątaj "); +MODULE_DESCRIPTION("SocketCAN driver for Microchip CAN BUS Analyzer Tool"); +MODULE_LICENSE("GPL v2"); diff --git
a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 528d3bb4917f1ecb5caf2ef67a71ae8adad51c44..7ccdc3e30c98c25d238d0a124ffb854675b1732c 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -19,10 +19,10 @@ #include #include #include +#include #include "pcan_usb_core.h" #include "pcan_usb_pro.h" -#include "pcan_ucan.h" MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter"); MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); @@ -238,7 +238,7 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) /* 1st, reset error counters: */ prc = (struct pucan_wr_err_cnt *)pc; - prc->opcode_channel = pucan_cmd_opcode_channel(dev, + prc->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_WR_ERR_CNT); /* select both counters */ @@ -257,9 +257,10 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) puo->opcode_channel = (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ? - pucan_cmd_opcode_channel(dev, + pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_CLR_DIS_OPTION) : - pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION); + pucan_cmd_opcode_channel(dev->ctrl_idx, + PUCAN_CMD_SET_EN_OPTION); puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO); @@ -274,7 +275,7 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) /* next, go back to operational mode */ cmd = (struct pucan_command *)pc; - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, (dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ? PUCAN_CMD_LISTEN_ONLY_MODE : PUCAN_CMD_NORMAL_MODE); @@ -296,7 +297,7 @@ static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff) struct pucan_command *cmd = (struct pucan_command *)pc; /* build cmd to go back to reset mode */ - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_RESET_MODE); l = sizeof(struct pucan_command); } @@ -332,7 +333,7 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx, } for (i = idx; i < n; i++, cmd++) { - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_FILTER_STD); cmd->idx = cpu_to_le16(i); cmd->mask = cpu_to_le32(mask); @@ -352,7 +353,7 @@ static int pcan_usb_fd_set_options(struct peak_usb_device *dev, { struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev); - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, (onoff) ? 
PUCAN_CMD_SET_EN_OPTION : PUCAN_CMD_CLR_DIS_OPTION); @@ -368,7 +369,7 @@ static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode) { struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev); - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_LED_SET); cmd->mode = led_mode; @@ -382,7 +383,7 @@ static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev, { struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev); - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_CLK_SET); cmd->mode = clk_mode; @@ -396,7 +397,7 @@ static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev, { struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev); - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_TIMING_SLOW); cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1, dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); @@ -417,7 +418,7 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev, { struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev); - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_TIMING_FAST); cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1); cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1); diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 674f367087c54ac168d2c0283b1361a1c98d3696..facca33d53e9fd24b996dc6dfd38c925b8128fdb 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -1,7 +1,7 @@ /* * vcan.c - Virtual CAN interface * - * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -50,9 +50,12 @@ #include #include +#define DRV_NAME "vcan" + MODULE_DESCRIPTION("virtual CAN interface"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Urs Thuermann "); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); /* @@ -164,7 +167,7 @@ static void vcan_setup(struct net_device *dev) } static struct rtnl_link_ops vcan_link_ops __read_mostly = { - .kind = "vcan", + .kind = DRV_NAME, .setup = vcan_setup, }; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c new file mode 100644 index 0000000000000000000000000000000000000000..7fbb2479568160b3b57fedb187429618937d2248 --- /dev/null +++ b/drivers/net/can/vxcan.c @@ -0,0 +1,316 @@ +/* + * vxcan.c - Virtual CAN Tunnel for cross namespace communication + * + * This code is derived from drivers/net/can/vcan.c for the virtual CAN + * specific parts and from drivers/net/veth.c to implement the netlink API + * for network interface pairs in a common and established way. + * + * Copyright (c) 2017 Oliver Hartkopp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "vxcan" + +MODULE_DESCRIPTION("Virtual CAN Tunnel"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Oliver Hartkopp "); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); + +struct vxcan_priv { + struct net_device __rcu *peer; +}; + +static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vxcan_priv *priv = netdev_priv(dev); + struct net_device *peer; + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + struct net_device_stats *peerstats, *srcstats = &dev->stats; + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + rcu_read_lock(); + peer = rcu_dereference(priv->peer); + if (unlikely(!peer)) { + kfree_skb(skb); + dev->stats.tx_dropped++; + goto out_unlock; + } + + skb = can_create_echo_skb(skb); + if (!skb) + goto out_unlock; + + /* reset CAN GW hop counter */ + skb->csum_start = 0; + skb->pkt_type = PACKET_BROADCAST; + skb->dev = peer; + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (netif_rx_ni(skb) == NET_RX_SUCCESS) { + srcstats->tx_packets++; + srcstats->tx_bytes += cfd->len; + peerstats = &peer->stats; + peerstats->rx_packets++; + peerstats->rx_bytes += cfd->len; + } + +out_unlock: + rcu_read_unlock(); + return NETDEV_TX_OK; +} + + +static int vxcan_open(struct net_device *dev) +{ + struct vxcan_priv *priv = netdev_priv(dev); + struct net_device *peer = rtnl_dereference(priv->peer); + + if (!peer) + return -ENOTCONN; + + if (peer->flags & IFF_UP) { + netif_carrier_on(dev); + netif_carrier_on(peer); + } + return 0; +} + +static int vxcan_close(struct net_device *dev) +{ + struct vxcan_priv *priv = netdev_priv(dev); + struct net_device *peer = rtnl_dereference(priv->peer); + + netif_carrier_off(dev); + if (peer) + netif_carrier_off(peer); + + return 0; +} + +static int vxcan_get_iflink(const struct net_device *dev) +{ + struct vxcan_priv *priv = netdev_priv(dev); + struct net_device *peer; + int iflink; + + rcu_read_lock(); + peer = rcu_dereference(priv->peer); + iflink = peer ? 
peer->ifindex : 0; + rcu_read_unlock(); + + return iflink; +} + +static int vxcan_change_mtu(struct net_device *dev, int new_mtu) +{ + /* Do not allow changing the MTU while running */ + if (dev->flags & IFF_UP) + return -EBUSY; + + if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops vxcan_netdev_ops = { + .ndo_open = vxcan_open, + .ndo_stop = vxcan_close, + .ndo_start_xmit = vxcan_xmit, + .ndo_get_iflink = vxcan_get_iflink, + .ndo_change_mtu = vxcan_change_mtu, +}; + +static void vxcan_setup(struct net_device *dev) +{ + dev->type = ARPHRD_CAN; + dev->mtu = CAN_MTU; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->tx_queue_len = 0; + dev->flags = (IFF_NOARP|IFF_ECHO); + dev->netdev_ops = &vxcan_netdev_ops; + dev->destructor = free_netdev; +} + +/* forward declaration for rtnl_create_link() */ +static struct rtnl_link_ops vxcan_link_ops; + +static int vxcan_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct vxcan_priv *priv; + struct net_device *peer; + struct net *peer_net; + + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb; + char ifname[IFNAMSIZ]; + unsigned char name_assign_type; + struct ifinfomsg *ifmp = NULL; + int err; + + /* register peer device */ + if (data && data[VXCAN_INFO_PEER]) { + struct nlattr *nla_peer; + + nla_peer = data[VXCAN_INFO_PEER]; + ifmp = nla_data(nla_peer); + err = rtnl_nla_parse_ifla(peer_tb, + nla_data(nla_peer) + + sizeof(struct ifinfomsg), + nla_len(nla_peer) - + sizeof(struct ifinfomsg), + NULL); + if (err < 0) + return err; + + tbp = peer_tb; + } + + if (tbp[IFLA_IFNAME]) { + nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + name_assign_type = NET_NAME_USER; + } else { + snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); + name_assign_type = NET_NAME_ENUM; + } + + peer_net = rtnl_link_get_net(net, tbp); + if (IS_ERR(peer_net)) + return PTR_ERR(peer_net); + + peer = rtnl_create_link(peer_net, ifname, name_assign_type, + &vxcan_link_ops, tbp); + if (IS_ERR(peer)) { + put_net(peer_net); + return PTR_ERR(peer); + } + + if (ifmp && dev->ifindex) + peer->ifindex = ifmp->ifi_index; + + err = register_netdevice(peer); + put_net(peer_net); + peer_net = NULL; + if (err < 0) { + free_netdev(peer); + return err; + } + + netif_carrier_off(peer); + + err = rtnl_configure_link(peer, ifmp); + if (err < 0) { + unregister_netdevice(peer); + return err; + } + + /* register first device */ + if (tb[IFLA_IFNAME]) + nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); + + err = register_netdevice(dev); + if (err < 0) { + unregister_netdevice(peer); + return err; + } + + netif_carrier_off(dev); + + /* cross link the device pair */ + priv = netdev_priv(dev); + rcu_assign_pointer(priv->peer, peer); + + priv = netdev_priv(peer); + rcu_assign_pointer(priv->peer, dev); + + return 0; +} + +static void vxcan_dellink(struct net_device *dev, struct list_head *head) +{ + struct vxcan_priv *priv; + struct net_device *peer; + + priv = netdev_priv(dev); + peer = rtnl_dereference(priv->peer); + + /* Note : dellink() is called from default_device_exit_batch(), + * before a rcu_synchronize() point. The devices are guaranteed + * not being freed before one RCU grace period. 
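+ * Clearing the peer pointers with RCU_INIT_POINTER() is therefore + * sufficient; readers in vxcan_xmit() access the peer under + * rcu_read_lock() and cope with it becoming NULL.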
+ */ + RCU_INIT_POINTER(priv->peer, NULL); + unregister_netdevice_queue(dev, head); + + if (peer) { + priv = netdev_priv(peer); + RCU_INIT_POINTER(priv->peer, NULL); + unregister_netdevice_queue(peer, head); + } +} + +static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = { + [VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, +}; + +static struct net *vxcan_get_link_net(const struct net_device *dev) +{ + struct vxcan_priv *priv = netdev_priv(dev); + struct net_device *peer = rtnl_dereference(priv->peer); + + return peer ? dev_net(peer) : dev_net(dev); +} + +static struct rtnl_link_ops vxcan_link_ops = { + .kind = DRV_NAME, + .priv_size = sizeof(struct vxcan_priv), + .setup = vxcan_setup, + .newlink = vxcan_newlink, + .dellink = vxcan_dellink, + .policy = vxcan_policy, + .maxtype = VXCAN_INFO_MAX, + .get_link_net = vxcan_get_link_net, +}; + +static __init int vxcan_init(void) +{ + pr_info("vxcan: Virtual CAN Tunnel driver\n"); + + return rtnl_link_register(&vxcan_link_ops); +} + +static __exit void vxcan_exit(void) +{ + rtnl_link_unregister(&vxcan_link_ops); +} + +module_init(vxcan_init); +module_exit(vxcan_exit); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 91c876a0a647ba1b17bdc734db168c9ee456409e..da020418a6526bed0465eeaa67a3fd61d83afb34 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1412,31 +1412,39 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return rc; } -static int e100_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int e100_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct net_local *np = netdev_priv(dev); + u32 supported; int err; spin_lock_irq(&np->lock); - err = mii_ethtool_gset(&np->mii_if, cmd); + err = mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); /* The PHY may support 1000baseT, but the Etrax100 does not. */ - cmd->supported &= ~(SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full); + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + + supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + return err; } -static int e100_set_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int e100_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->base.autoneg == AUTONEG_ENABLE) { e100_set_duplex(dev, autoneg); e100_set_speed(dev, 0); } else { - e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full); - e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100); + e100_set_duplex(dev, ecmd->base.duplex == DUPLEX_HALF ? + half : full); + e100_set_speed(dev, ecmd->base.speed == SPEED_10 ? 
10 : 100);
 	}
 
 	return 0;
@@ -1459,11 +1467,11 @@ static int e100_nway_reset(struct net_device *dev)
 }
 
 static const struct ethtool_ops e100_ethtool_ops = {
-	.get_settings	= e100_get_settings,
-	.set_settings	= e100_set_settings,
 	.get_drvinfo	= e100_get_drvinfo,
 	.nway_reset	= e100_nway_reset,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings	= e100_get_link_ksettings,
+	.set_link_ksettings	= e100_set_link_ksettings,
 };
 
 static int
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 065984670ff19a0a03c4818097f574de17907e87..862ee22303c22674504be1d1e4622268c583f252 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -11,7 +11,7 @@ config NET_DSA_MV88E6060
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
-	depends on HAS_IOMEM && NET_DSA
+	depends on HAS_IOMEM && NET_DSA && OF_MDIO
 	select NET_DSA_TAG_BRCM
 	select FIXED_PHY
 	select BCM7XXX_PHY
@@ -34,4 +34,44 @@ config NET_DSA_QCA8K
 	  This enables support for the Qualcomm Atheros QCA8K Ethernet
 	  switch chips.
 
+config NET_DSA_LOOP
+	tristate "DSA mock-up Ethernet switch chip support"
+	depends on NET_DSA
+	select FIXED_PHY
+	---help---
+	  This enables support for a fake mock-up switch chip which
+	  exercises the DSA APIs.
+
+config NET_DSA_MT7530
+	tristate "Mediatek MT7530 Ethernet switch support"
+	depends on NET_DSA
+	select NET_DSA_TAG_MTK
+	---help---
+	  This enables support for the Mediatek MT7530 Ethernet switch
+	  chip.
+
+config NET_DSA_SMSC_LAN9303
+	tristate
+	select NET_DSA_TAG_LAN9303
+	---help---
+	  This enables support for the SMSC/Microchip LAN9303 3-port
+	  Ethernet switch chips.
+
+config NET_DSA_SMSC_LAN9303_I2C
+	tristate "SMSC/Microchip LAN9303 3-port 10/100 Ethernet switch in I2C managed mode"
+	depends on NET_DSA && I2C
+	select NET_DSA_SMSC_LAN9303
+	select REGMAP_I2C
+	---help---
+	  Enable access functions if the SMSC/Microchip LAN9303 is configured
+	  for I2C managed mode.
+
+config NET_DSA_SMSC_LAN9303_MDIO
+	tristate "SMSC/Microchip LAN9303 3-port 10/100 Ethernet switch in MDIO managed mode"
+	depends on NET_DSA
+	select NET_DSA_SMSC_LAN9303
+	---help---
+	  Enable access functions if the SMSC/Microchip LAN9303 is configured
+	  for MDIO managed mode.
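A quick way to exercise the vxcan driver added above: create a pair with an iproute2 recent enough to know the vxcan link type (ip link add vxcan0 type vxcan peer name vxcan1, then bring both ends up; the interface names here are illustrative, not fixed by the driver) and write a frame on one end. A minimal SocketCAN sender, sketched under those assumptions:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct ifreq ifr;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	strcpy(ifr.ifr_name, "vxcan0");	/* assumed interface name */
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;

	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* delivered on the peer device (vxcan1), not looped back here */
	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
		return 1;

	close(s);
	return 0;
}

Because vxcan_xmit() sets skb->dev to the peer before netif_rx_ni(), the frame shows up on vxcan1 (possibly in another network namespace), not on the sending vxcan0 itself; this is the difference from vcan, which loops frames back locally.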
+ endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index a3c94163221723c610791bd0e0a98df05c7dc221..edd63036173650912ba5e6a058c98d9f31e43393 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -2,6 +2,10 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o - +obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o +obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o +obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o +obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o obj-y += b53/ obj-y += mv88e6xxx/ +obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 346dd9a1232dff12e24fef05b6e6352f106ed2be..2fb32d67065f8aa164b3ea03d5cb914120c4e0c6 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -10,10 +10,11 @@ */ #include -#include #include #include #include +#include +#include #include #include "bcm_sf2.h" diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c new file mode 100644 index 0000000000000000000000000000000000000000..f0fc4de4fc9a4854ac6b2d6926ed549fed71ce26 --- /dev/null +++ b/drivers/net/dsa/dsa_loop.c @@ -0,0 +1,328 @@ +/* + * Distributed Switch Architecture loopback driver + * + * Copyright (C) 2016, Florian Fainelli + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dsa_loop.h" + +struct dsa_loop_vlan { + u16 members; + u16 untagged; +}; + +#define DSA_LOOP_VLANS 5 + +struct dsa_loop_priv { + struct mii_bus *bus; + unsigned int port_base; + struct dsa_loop_vlan vlans[DSA_LOOP_VLANS]; + struct net_device *netdev; + u16 pvid; +}; + +static struct phy_device *phydevs[PHY_MAX_ADDR]; + +static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds) +{ + dev_dbg(ds->dev, "%s\n", __func__); + + return DSA_TAG_PROTO_NONE; +} + +static int dsa_loop_setup(struct dsa_switch *ds) +{ + dev_dbg(ds->dev, "%s\n", __func__); + + return 0; +} + +static int dsa_loop_set_addr(struct dsa_switch *ds, u8 *addr) +{ + dev_dbg(ds->dev, "%s\n", __func__); + + return 0; +} + +static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct dsa_loop_priv *ps = ds->priv; + struct mii_bus *bus = ps->bus; + + dev_dbg(ds->dev, "%s\n", __func__); + + return mdiobus_read_nested(bus, ps->port_base + port, regnum); +} + +static int dsa_loop_phy_write(struct dsa_switch *ds, int port, + int regnum, u16 value) +{ + struct dsa_loop_priv *ps = ds->priv; + struct mii_bus *bus = ps->bus; + + dev_dbg(ds->dev, "%s\n", __func__); + + return mdiobus_write_nested(bus, ps->port_base + port, regnum, value); +} + +static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + dev_dbg(ds->dev, "%s\n", __func__); + + return 0; +} + +static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + dev_dbg(ds->dev, "%s\n", __func__); +} + +static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + dev_dbg(ds->dev, "%s\n", __func__); +} + +static int dsa_loop_port_vlan_filtering(struct 
dsa_switch *ds, int port,
+				     bool vlan_filtering)
+{
+	dev_dbg(ds->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+				      const struct switchdev_obj_port_vlan *vlan,
+				      struct switchdev_trans *trans)
+{
+	struct dsa_loop_priv *ps = ds->priv;
+	struct mii_bus *bus = ps->bus;
+
+	dev_dbg(ds->dev, "%s\n", __func__);
+
+	/* Just do a sleeping operation to make lockdep checks effective */
+	mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+	/* vlans[] has DSA_LOOP_VLANS entries, so valid VIDs are
+	 * 0 .. DSA_LOOP_VLANS - 1
+	 */
+	if (vlan->vid_end >= DSA_LOOP_VLANS)
+		return -ERANGE;
+
+	return 0;
+}
+
+static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+				   const struct switchdev_obj_port_vlan *vlan,
+				   struct switchdev_trans *trans)
+{
+	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+	struct dsa_loop_priv *ps = ds->priv;
+	struct mii_bus *bus = ps->bus;
+	struct dsa_loop_vlan *vl;
+	u16 vid;
+
+	dev_dbg(ds->dev, "%s\n", __func__);
+
+	/* Just do a sleeping operation to make lockdep checks effective */
+	mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		vl = &ps->vlans[vid];
+
+		vl->members |= BIT(port);
+		if (untagged)
+			vl->untagged |= BIT(port);
+		else
+			vl->untagged &= ~BIT(port);
+	}
+
+	if (pvid)
+		ps->pvid = vlan->vid_end;
+}
+
+static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
+				  const struct switchdev_obj_port_vlan *vlan)
+{
+	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	struct dsa_loop_priv *ps = ds->priv;
+	struct mii_bus *bus = ps->bus;
+	struct dsa_loop_vlan *vl;
+	u16 vid, pvid = ps->pvid;
+
+	dev_dbg(ds->dev, "%s\n", __func__);
+
+	/* Just do a sleeping operation to make lockdep checks effective */
+	mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		vl = &ps->vlans[vid];
+
+		vl->members &= ~BIT(port);
+		if (untagged)
+			vl->untagged &= ~BIT(port);
+
+		if (pvid == vid)
+			pvid = 1;
+	}
+	ps->pvid = pvid;
+
+	return 0;
+}
+
+static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port,
+				   struct switchdev_obj_port_vlan *vlan,
+				   int (*cb)(struct switchdev_obj *obj))
+{
+	struct dsa_loop_priv *ps = ds->priv;
+	struct mii_bus *bus = ps->bus;
+	struct dsa_loop_vlan *vl;
+	u16 vid, vid_start = 0;
+	int err = 0;
+
+	dev_dbg(ds->dev, "%s\n", __func__);
+
+	/* Just do a sleeping operation to make lockdep checks effective */
+	mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+	for (vid = vid_start; vid < DSA_LOOP_VLANS; vid++) {
+		vl = &ps->vlans[vid];
+
+		if (!(vl->members & BIT(port)))
+			continue;
+
+		vlan->vid_begin = vlan->vid_end = vid;
+		vlan->flags = 0;
+
+		if (vl->untagged & BIT(port))
+			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+		if (ps->pvid == vid)
+			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+		err = cb(&vlan->obj);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+static struct dsa_switch_ops dsa_loop_driver = {
+	.get_tag_protocol	= dsa_loop_get_protocol,
+	.setup			= dsa_loop_setup,
+	.set_addr		= dsa_loop_set_addr,
+	.phy_read		= dsa_loop_phy_read,
+	.phy_write		= dsa_loop_phy_write,
+	.port_bridge_join	= dsa_loop_port_bridge_join,
+	.port_bridge_leave	= dsa_loop_port_bridge_leave,
+	.port_stp_state_set	= dsa_loop_port_stp_state_set,
+	.port_vlan_filtering	= dsa_loop_port_vlan_filtering,
+	.port_vlan_prepare	= dsa_loop_port_vlan_prepare,
+	.port_vlan_add		= dsa_loop_port_vlan_add,
+	.port_vlan_del		= dsa_loop_port_vlan_del,
+	.port_vlan_dump		= dsa_loop_port_vlan_dump,
+};
+
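The VLAN callbacks above keep all state in the vlans[] array: one members bitmap and one untagged bitmap per VID, with one bit per port; the range check and the PVID handling operate on that same array. The standalone model below mirrors the same bookkeeping outside the kernel; NPORTS/NVLANS stand in for DSA_LOOP_NUM_PORTS/DSA_LOOP_VLANS, and vlan_add() is a hypothetical helper, not a driver function:

#include <stdint.h>
#include <stdio.h>

#define NPORTS	6
#define NVLANS	5
#define BIT(n)	(1u << (n))

struct vlan_entry {
	uint16_t members;	/* bit N set: port N is a member */
	uint16_t untagged;	/* bit N set: port N egresses untagged */
};

static struct vlan_entry vlans[NVLANS];

static int vlan_add(unsigned int vid, unsigned int port, int untagged)
{
	if (vid >= NVLANS || port >= NPORTS)
		return -1;	/* same bounds check as the -ERANGE test */

	vlans[vid].members |= BIT(port);
	if (untagged)
		vlans[vid].untagged |= BIT(port);
	else
		vlans[vid].untagged &= ~BIT(port);

	return 0;
}

int main(void)
{
	vlan_add(2, 0, 0);	/* port 0: tagged member of VID 2 */
	vlan_add(2, 1, 1);	/* port 1: untagged member of VID 2 */

	/* prints: vid 2: members=0x03 untagged=0x02 */
	printf("vid 2: members=0x%02x untagged=0x%02x\n",
	       vlans[2].members, vlans[2].untagged);
	return 0;
}

+
+static int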
dsa_loop_drv_probe(struct mdio_device *mdiodev) +{ + struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data; + struct dsa_loop_priv *ps; + struct dsa_switch *ds; + + if (!pdata) + return -ENODEV; + + dev_info(&mdiodev->dev, "%s: 0x%0x\n", + pdata->name, pdata->enabled_ports); + + ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); + if (!ds) + return -ENOMEM; + + ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL); + ps->netdev = dev_get_by_name(&init_net, pdata->netdev); + if (!ps->netdev) + return -EPROBE_DEFER; + + pdata->cd.netdev[DSA_LOOP_CPU_PORT] = &ps->netdev->dev; + + ds->dev = &mdiodev->dev; + ds->ops = &dsa_loop_driver; + ds->priv = ps; + ps->bus = mdiodev->bus; + + dev_set_drvdata(&mdiodev->dev, ds); + + return dsa_register_switch(ds, ds->dev); +} + +static void dsa_loop_drv_remove(struct mdio_device *mdiodev) +{ + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); + struct dsa_loop_priv *ps = ds->priv; + + dsa_unregister_switch(ds); + dev_put(ps->netdev); +} + +static struct mdio_driver dsa_loop_drv = { + .mdiodrv.driver = { + .name = "dsa-loop", + }, + .probe = dsa_loop_drv_probe, + .remove = dsa_loop_drv_remove, +}; + +#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2) + +static void unregister_fixed_phys(void) +{ + unsigned int i; + + for (i = 0; i < NUM_FIXED_PHYS; i++) + if (phydevs[i]) + fixed_phy_unregister(phydevs[i]); +} + +static int __init dsa_loop_init(void) +{ + struct fixed_phy_status status = { + .link = 1, + .speed = SPEED_100, + .duplex = DUPLEX_FULL, + }; + unsigned int i; + + for (i = 0; i < NUM_FIXED_PHYS; i++) + phydevs[i] = fixed_phy_register(PHY_POLL, &status, -1, NULL); + + return mdio_driver_register(&dsa_loop_drv); +} +module_init(dsa_loop_init); + +static void __exit dsa_loop_exit(void) +{ + mdio_driver_unregister(&dsa_loop_drv); + unregister_fixed_phys(); +} +module_exit(dsa_loop_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Florian Fainelli"); +MODULE_DESCRIPTION("DSA loopback driver"); diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h new file mode 100644 index 0000000000000000000000000000000000000000..dc396877fc954e9850cef21faa0a70f80565e206 --- /dev/null +++ b/drivers/net/dsa/dsa_loop.h @@ -0,0 +1,19 @@ +#ifndef __DSA_LOOP_H +#define __DSA_LOOP_H + +struct dsa_chip_data; + +struct dsa_loop_pdata { + /* Must be first, such that dsa_register_switch() can access this + * without gory pointer manipulations + */ + struct dsa_chip_data cd; + const char *name; + unsigned int enabled_ports; + const char *netdev; +}; + +#define DSA_LOOP_NUM_PORTS 6 +#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1) + +#endif /* __DSA_LOOP_H */ diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c new file mode 100644 index 0000000000000000000000000000000000000000..fb8d5dc71013c40ad9e0341c9c2d7998c14ae17e --- /dev/null +++ b/drivers/net/dsa/dsa_loop_bdinfo.c @@ -0,0 +1,34 @@ +#include +#include +#include +#include + +#include "dsa_loop.h" + +static struct dsa_loop_pdata dsa_loop_pdata = { + .cd = { + .port_names[0] = "lan1", + .port_names[1] = "lan2", + .port_names[2] = "lan3", + .port_names[3] = "lan4", + .port_names[DSA_LOOP_CPU_PORT] = "cpu", + }, + .name = "DSA mockup driver", + .enabled_ports = 0x1f, + .netdev = "eth0", +}; + +static const struct mdio_board_info bdinfo = { + .bus_id = "fixed-0", + .modalias = "dsa-loop", + .mdio_addr = 31, + .platform_data = &dsa_loop_pdata, +}; + +static int __init dsa_loop_bdinfo_init(void) +{ + return mdiobus_register_board_info(&bdinfo, 1); +} 
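dsa_loop_bdinfo.c exists purely to provide this platform glue: the .modalias string is what binds the entry to the "dsa-loop" mdio_driver registered by the core module, while .bus_id and .mdio_addr choose the device slot on the fixed MDIO bus. A second mock-up switch could be declared alongside it; the port names, backing netdev and MDIO address in this sketch are hypothetical, and both entries could then be handed to mdiobus_register_board_info() as an array with its element count:

static struct dsa_loop_pdata dsa_loop_pdata2 = {
	.cd = {
		.port_names[0] = "lan5",
		.port_names[1] = "lan6",
		.port_names[2] = "lan7",
		.port_names[3] = "lan8",
		.port_names[DSA_LOOP_CPU_PORT] = "cpu",
	},
	.name = "DSA mockup driver #2",
	.enabled_ports = 0x1f,
	.netdev = "eth1",
};

static const struct mdio_board_info bdinfo2 = {
	.bus_id = "fixed-0",
	.modalias = "dsa-loop",
	.mdio_addr = 30,	/* must not collide with the first instance */
	.platform_data = &dsa_loop_pdata2,
};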
+arch_initcall(dsa_loop_bdinfo_init) + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c new file mode 100644 index 0000000000000000000000000000000000000000..c8b2423c8ef74dda07ec5a61b5ad202189adc523 --- /dev/null +++ b/drivers/net/dsa/lan9303-core.c @@ -0,0 +1,879 @@ +/* + * Copyright (C) 2017 Pengutronix, Juergen Borleis + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include + +#include "lan9303.h" + +#define LAN9303_CHIP_REV 0x14 +# define LAN9303_CHIP_ID 0x9303 +#define LAN9303_IRQ_CFG 0x15 +# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8) +# define LAN9303_IRQ_CFG_IRQ_POL BIT(4) +# define LAN9303_IRQ_CFG_IRQ_TYPE BIT(0) +#define LAN9303_INT_STS 0x16 +# define LAN9303_INT_STS_PHY_INT2 BIT(27) +# define LAN9303_INT_STS_PHY_INT1 BIT(26) +#define LAN9303_INT_EN 0x17 +# define LAN9303_INT_EN_PHY_INT2_EN BIT(27) +# define LAN9303_INT_EN_PHY_INT1_EN BIT(26) +#define LAN9303_HW_CFG 0x1D +# define LAN9303_HW_CFG_READY BIT(27) +# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26) +# define LAN9303_HW_CFG_AMDX_EN_PORT1 BIT(25) +#define LAN9303_PMI_DATA 0x29 +#define LAN9303_PMI_ACCESS 0x2A +# define LAN9303_PMI_ACCESS_PHY_ADDR(x) (((x) & 0x1f) << 11) +# define LAN9303_PMI_ACCESS_MIIRINDA(x) (((x) & 0x1f) << 6) +# define LAN9303_PMI_ACCESS_MII_BUSY BIT(0) +# define LAN9303_PMI_ACCESS_MII_WRITE BIT(1) +#define LAN9303_MANUAL_FC_1 0x68 +#define LAN9303_MANUAL_FC_2 0x69 +#define LAN9303_MANUAL_FC_0 0x6a +#define LAN9303_SWITCH_CSR_DATA 0x6b +#define LAN9303_SWITCH_CSR_CMD 0x6c +#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31) +#define LAN9303_SWITCH_CSR_CMD_RW BIT(30) +#define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16)) +#define LAN9303_VIRT_PHY_BASE 0x70 +#define LAN9303_VIRT_SPECIAL_CTRL 0x77 + +#define LAN9303_SW_DEV_ID 0x0000 +#define LAN9303_SW_RESET 0x0001 +#define LAN9303_SW_RESET_RESET BIT(0) +#define LAN9303_SW_IMR 0x0004 +#define LAN9303_SW_IPR 0x0005 +#define LAN9303_MAC_VER_ID_0 0x0400 +#define LAN9303_MAC_RX_CFG_0 0x0401 +# define LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES BIT(1) +# define LAN9303_MAC_RX_CFG_X_RX_ENABLE BIT(0) +#define LAN9303_MAC_RX_UNDSZE_CNT_0 0x0410 +#define LAN9303_MAC_RX_64_CNT_0 0x0411 +#define LAN9303_MAC_RX_127_CNT_0 0x0412 +#define LAN9303_MAC_RX_255_CNT_0 0x413 +#define LAN9303_MAC_RX_511_CNT_0 0x0414 +#define LAN9303_MAC_RX_1023_CNT_0 0x0415 +#define LAN9303_MAC_RX_MAX_CNT_0 0x0416 +#define LAN9303_MAC_RX_OVRSZE_CNT_0 0x0417 +#define LAN9303_MAC_RX_PKTOK_CNT_0 0x0418 +#define LAN9303_MAC_RX_CRCERR_CNT_0 0x0419 +#define LAN9303_MAC_RX_MULCST_CNT_0 0x041a +#define LAN9303_MAC_RX_BRDCST_CNT_0 0x041b +#define LAN9303_MAC_RX_PAUSE_CNT_0 0x041c +#define LAN9303_MAC_RX_FRAG_CNT_0 0x041d +#define LAN9303_MAC_RX_JABB_CNT_0 0x041e +#define LAN9303_MAC_RX_ALIGN_CNT_0 0x041f +#define LAN9303_MAC_RX_PKTLEN_CNT_0 0x0420 +#define LAN9303_MAC_RX_GOODPKTLEN_CNT_0 0x0421 +#define LAN9303_MAC_RX_SYMBL_CNT_0 0x0422 +#define LAN9303_MAC_RX_CTLFRM_CNT_0 0x0423 + +#define LAN9303_MAC_TX_CFG_0 0x0440 +# define LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT (21 << 2) +# define 
LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE BIT(1) +# define LAN9303_MAC_TX_CFG_X_TX_ENABLE BIT(0) +#define LAN9303_MAC_TX_DEFER_CNT_0 0x0451 +#define LAN9303_MAC_TX_PAUSE_CNT_0 0x0452 +#define LAN9303_MAC_TX_PKTOK_CNT_0 0x0453 +#define LAN9303_MAC_TX_64_CNT_0 0x0454 +#define LAN9303_MAC_TX_127_CNT_0 0x0455 +#define LAN9303_MAC_TX_255_CNT_0 0x0456 +#define LAN9303_MAC_TX_511_CNT_0 0x0457 +#define LAN9303_MAC_TX_1023_CNT_0 0x0458 +#define LAN9303_MAC_TX_MAX_CNT_0 0x0459 +#define LAN9303_MAC_TX_UNDSZE_CNT_0 0x045a +#define LAN9303_MAC_TX_PKTLEN_CNT_0 0x045c +#define LAN9303_MAC_TX_BRDCST_CNT_0 0x045d +#define LAN9303_MAC_TX_MULCST_CNT_0 0x045e +#define LAN9303_MAC_TX_LATECOL_0 0x045f +#define LAN9303_MAC_TX_EXCOL_CNT_0 0x0460 +#define LAN9303_MAC_TX_SNGLECOL_CNT_0 0x0461 +#define LAN9303_MAC_TX_MULTICOL_CNT_0 0x0462 +#define LAN9303_MAC_TX_TOTALCOL_CNT_0 0x0463 + +#define LAN9303_MAC_VER_ID_1 0x0800 +#define LAN9303_MAC_RX_CFG_1 0x0801 +#define LAN9303_MAC_TX_CFG_1 0x0840 +#define LAN9303_MAC_VER_ID_2 0x0c00 +#define LAN9303_MAC_RX_CFG_2 0x0c01 +#define LAN9303_MAC_TX_CFG_2 0x0c40 +#define LAN9303_SWE_ALR_CMD 0x1800 +#define LAN9303_SWE_VLAN_CMD 0x180b +# define LAN9303_SWE_VLAN_CMD_RNW BIT(5) +# define LAN9303_SWE_VLAN_CMD_PVIDNVLAN BIT(4) +#define LAN9303_SWE_VLAN_WR_DATA 0x180c +#define LAN9303_SWE_VLAN_RD_DATA 0x180e +# define LAN9303_SWE_VLAN_MEMBER_PORT2 BIT(17) +# define LAN9303_SWE_VLAN_UNTAG_PORT2 BIT(16) +# define LAN9303_SWE_VLAN_MEMBER_PORT1 BIT(15) +# define LAN9303_SWE_VLAN_UNTAG_PORT1 BIT(14) +# define LAN9303_SWE_VLAN_MEMBER_PORT0 BIT(13) +# define LAN9303_SWE_VLAN_UNTAG_PORT0 BIT(12) +#define LAN9303_SWE_VLAN_CMD_STS 0x1810 +#define LAN9303_SWE_GLB_INGRESS_CFG 0x1840 +#define LAN9303_SWE_PORT_STATE 0x1843 +# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT2 (0) +# define LAN9303_SWE_PORT_STATE_LEARNING_PORT2 BIT(5) +# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT2 BIT(4) +# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT1 (0) +# define LAN9303_SWE_PORT_STATE_LEARNING_PORT1 BIT(3) +# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 BIT(2) +# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 (0) +# define LAN9303_SWE_PORT_STATE_LEARNING_PORT0 BIT(1) +# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT0 BIT(0) +#define LAN9303_SWE_PORT_MIRROR 0x1846 +# define LAN9303_SWE_PORT_MIRROR_SNIFF_ALL BIT(8) +# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT2 BIT(7) +# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT1 BIT(6) +# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 BIT(5) +# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 BIT(4) +# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 BIT(3) +# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT0 BIT(2) +# define LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING BIT(1) +# define LAN9303_SWE_PORT_MIRROR_ENABLE_TX_MIRRORING BIT(0) +#define LAN9303_SWE_INGRESS_PORT_TYPE 0x1847 +#define LAN9303_BM_CFG 0x1c00 +#define LAN9303_BM_EGRSS_PORT_TYPE 0x1c0c +# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT2 (BIT(17) | BIT(16)) +# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8)) +# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0)) + +#define LAN9303_PORT_0_OFFSET 0x400 +#define LAN9303_PORT_1_OFFSET 0x800 +#define LAN9303_PORT_2_OFFSET 0xc00 + +/* the built-in PHYs are of type LAN911X */ +#define MII_LAN911X_SPECIAL_MODES 0x12 +#define MII_LAN911X_SPECIAL_CONTROL_STATUS 0x1f + +static const struct regmap_range lan9303_valid_regs[] = { + regmap_reg_range(0x14, 0x17), /* misc, interrupt */ + regmap_reg_range(0x19, 0x19), /* endian test */ + 
regmap_reg_range(0x1d, 0x1d), /* hardware config */ + regmap_reg_range(0x23, 0x24), /* general purpose timer */ + regmap_reg_range(0x27, 0x27), /* counter */ + regmap_reg_range(0x29, 0x2a), /* PMI index regs */ + regmap_reg_range(0x68, 0x6a), /* flow control */ + regmap_reg_range(0x6b, 0x6c), /* switch fabric indirect regs */ + regmap_reg_range(0x6d, 0x6f), /* misc */ + regmap_reg_range(0x70, 0x77), /* virtual phy */ + regmap_reg_range(0x78, 0x7a), /* GPIO */ + regmap_reg_range(0x7c, 0x7e), /* MAC & reset */ + regmap_reg_range(0x80, 0xb7), /* switch fabric direct regs (wr only) */ +}; + +static const struct regmap_range lan9303_reserved_ranges[] = { + regmap_reg_range(0x00, 0x13), + regmap_reg_range(0x18, 0x18), + regmap_reg_range(0x1a, 0x1c), + regmap_reg_range(0x1e, 0x22), + regmap_reg_range(0x25, 0x26), + regmap_reg_range(0x28, 0x28), + regmap_reg_range(0x2b, 0x67), + regmap_reg_range(0x7b, 0x7b), + regmap_reg_range(0x7f, 0x7f), + regmap_reg_range(0xb8, 0xff), +}; + +const struct regmap_access_table lan9303_register_set = { + .yes_ranges = lan9303_valid_regs, + .n_yes_ranges = ARRAY_SIZE(lan9303_valid_regs), + .no_ranges = lan9303_reserved_ranges, + .n_no_ranges = ARRAY_SIZE(lan9303_reserved_ranges), +}; +EXPORT_SYMBOL(lan9303_register_set); + +static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg) +{ + int ret, i; + + /* we can lose arbitration for the I2C case, because the device + * tries to detect and read an external EEPROM after reset and acts as + * a master on the shared I2C bus itself. This conflicts with our + * attempts to access the device as a slave at the same moment. + */ + for (i = 0; i < 5; i++) { + ret = regmap_read(regmap, offset, reg); + if (!ret) + return 0; + if (ret != -EAGAIN) + break; + msleep(500); + } + + return -EIO; +} + +static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum) +{ + int ret; + u32 val; + + if (regnum > MII_EXPANSION) + return -EINVAL; + + ret = lan9303_read(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, &val); + if (ret) + return ret; + + return val & 0xffff; +} + +static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val) +{ + if (regnum > MII_EXPANSION) + return -EINVAL; + + return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val); +} + +static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) +{ + int ret, i; + u32 reg; + + for (i = 0; i < 25; i++) { + ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, ®); + if (ret) { + dev_err(chip->dev, + "Failed to read pmi access status: %d\n", ret); + return ret; + } + if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY)) + return 0; + msleep(1); + } + + return -EIO; +} + +static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) +{ + int ret; + u32 val; + + val = LAN9303_PMI_ACCESS_PHY_ADDR(addr); + val |= LAN9303_PMI_ACCESS_MIIRINDA(regnum); + + mutex_lock(&chip->indirect_mutex); + + ret = lan9303_port_phy_reg_wait_for_completion(chip); + if (ret) + goto on_error; + + /* start the MII read cycle */ + ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, val); + if (ret) + goto on_error; + + ret = lan9303_port_phy_reg_wait_for_completion(chip); + if (ret) + goto on_error; + + /* read the result of this operation */ + ret = lan9303_read(chip->regmap, LAN9303_PMI_DATA, &val); + if (ret) + goto on_error; + + mutex_unlock(&chip->indirect_mutex); + + return val & 0xffff; + +on_error: + mutex_unlock(&chip->indirect_mutex); + return ret; +} + +static int lan9303_phy_reg_write(struct lan9303 *chip, int 
addr, int regnum, + unsigned int val) +{ + int ret; + u32 reg; + + reg = LAN9303_PMI_ACCESS_PHY_ADDR(addr); + reg |= LAN9303_PMI_ACCESS_MIIRINDA(regnum); + reg |= LAN9303_PMI_ACCESS_MII_WRITE; + + mutex_lock(&chip->indirect_mutex); + + ret = lan9303_port_phy_reg_wait_for_completion(chip); + if (ret) + goto on_error; + + /* write the data first... */ + ret = regmap_write(chip->regmap, LAN9303_PMI_DATA, val); + if (ret) + goto on_error; + + /* ...then start the MII write cycle */ + ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, reg); + +on_error: + mutex_unlock(&chip->indirect_mutex); + return ret; +} + +static int lan9303_switch_wait_for_completion(struct lan9303 *chip) +{ + int ret, i; + u32 reg; + + for (i = 0; i < 25; i++) { + ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, ®); + if (ret) { + dev_err(chip->dev, + "Failed to read csr command status: %d\n", ret); + return ret; + } + if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY)) + return 0; + msleep(1); + } + + return -EIO; +} + +static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val) +{ + u32 reg; + int ret; + + reg = regnum; + reg |= LAN9303_SWITCH_CSR_CMD_LANES; + reg |= LAN9303_SWITCH_CSR_CMD_BUSY; + + mutex_lock(&chip->indirect_mutex); + + ret = lan9303_switch_wait_for_completion(chip); + if (ret) + goto on_error; + + ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_DATA, val); + if (ret) { + dev_err(chip->dev, "Failed to write csr data reg: %d\n", ret); + goto on_error; + } + + /* trigger write */ + ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg); + if (ret) + dev_err(chip->dev, "Failed to write csr command reg: %d\n", + ret); + +on_error: + mutex_unlock(&chip->indirect_mutex); + return ret; +} + +static int lan9303_read_switch_reg(struct lan9303 *chip, u16 regnum, u32 *val) +{ + u32 reg; + int ret; + + reg = regnum; + reg |= LAN9303_SWITCH_CSR_CMD_LANES; + reg |= LAN9303_SWITCH_CSR_CMD_RW; + reg |= LAN9303_SWITCH_CSR_CMD_BUSY; + + mutex_lock(&chip->indirect_mutex); + + ret = lan9303_switch_wait_for_completion(chip); + if (ret) + goto on_error; + + /* trigger read */ + ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg); + if (ret) { + dev_err(chip->dev, "Failed to write csr command reg: %d\n", + ret); + goto on_error; + } + + ret = lan9303_switch_wait_for_completion(chip); + if (ret) + goto on_error; + + ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_DATA, val); + if (ret) + dev_err(chip->dev, "Failed to read csr data reg: %d\n", ret); +on_error: + mutex_unlock(&chip->indirect_mutex); + return ret; +} + +static int lan9303_detect_phy_setup(struct lan9303 *chip) +{ + int reg; + + /* depending on the 'phy_addr_sel_strap' setting, the three phys are + * using IDs 0-1-2 or IDs 1-2-3. We cannot read back the + * 'phy_addr_sel_strap' setting directly, so we need a test, which + * configuration is active: + * Special reg 18 of phy 3 reads as 0x0000, if 'phy_addr_sel_strap' is 0 + * and the IDs are 0-1-2, else it contains something different from + * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3. + */ + reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES); + if (reg < 0) { + dev_err(chip->dev, "Failed to detect phy config: %d\n", reg); + return reg; + } + + if (reg != 0) + chip->phy_addr_sel_strap = 1; + else + chip->phy_addr_sel_strap = 0; + + dev_dbg(chip->dev, "Phy setup '%s' detected\n", + chip->phy_addr_sel_strap ? 
"1-2-3" : "0-1-2"); + + return 0; +} + +#define LAN9303_MAC_RX_CFG_OFFS (LAN9303_MAC_RX_CFG_0 - LAN9303_PORT_0_OFFSET) +#define LAN9303_MAC_TX_CFG_OFFS (LAN9303_MAC_TX_CFG_0 - LAN9303_PORT_0_OFFSET) + +static int lan9303_disable_packet_processing(struct lan9303 *chip, + unsigned int port) +{ + int ret; + + /* disable RX, but keep register reset default values else */ + ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); + if (ret) + return ret; + + /* disable TX, but keep register reset default values else */ + return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | + LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE); +} + +static int lan9303_enable_packet_processing(struct lan9303 *chip, + unsigned int port) +{ + int ret; + + /* enable RX and keep register reset default values else */ + ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | + LAN9303_MAC_RX_CFG_X_RX_ENABLE); + if (ret) + return ret; + + /* enable TX and keep register reset default values else */ + return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | + LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE | + LAN9303_MAC_TX_CFG_X_TX_ENABLE); +} + +/* We want a special working switch: + * - do not forward packets between port 1 and 2 + * - forward everything from port 1 to port 0 + * - forward everything from port 2 to port 0 + * - forward special tagged packets from port 0 to port 1 *or* port 2 + */ +static int lan9303_separate_ports(struct lan9303 *chip) +{ + int ret; + + ret = lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR, + LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 | + LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 | + LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 | + LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING | + LAN9303_SWE_PORT_MIRROR_SNIFF_ALL); + if (ret) + return ret; + + /* enable defining the destination port via special VLAN tagging + * for port 0 + */ + ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE, + 0x03); + if (ret) + return ret; + + /* tag incoming packets at port 1 and 2 on their way to port 0 to be + * able to discover their source port + */ + ret = lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE, + LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0); + if (ret) + return ret; + + /* prevent port 1 and 2 from forwarding packets by their own */ + return lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE, + LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 | + LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 | + LAN9303_SWE_PORT_STATE_BLOCKING_PORT2); +} + +static int lan9303_handle_reset(struct lan9303 *chip) +{ + if (!chip->reset_gpio) + return 0; + + if (chip->reset_duration != 0) + msleep(chip->reset_duration); + + /* release (deassert) reset and activate the device */ + gpiod_set_value_cansleep(chip->reset_gpio, 0); + + return 0; +} + +/* stop processing packets for all ports */ +static int lan9303_disable_processing(struct lan9303 *chip) +{ + int ret; + + ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_0_OFFSET); + if (ret) + return ret; + ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); + if (ret) + return ret; + return lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); +} + +static int lan9303_check_device(struct lan9303 *chip) +{ + int ret; + u32 reg; + + ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, ®); + if (ret) { + dev_err(chip->dev, "failed to 
read chip revision register: %d\n",
+			ret);
+		if (!chip->reset_gpio) {
+			dev_dbg(chip->dev,
+				"hint: maybe failed due to missing reset GPIO\n");
+		}
+		return ret;
+	}
+
+	if ((reg >> 16) != LAN9303_CHIP_ID) {
+		dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+			reg >> 16);
+		return -ENODEV;
+	}
+
+	/* The default state of the LAN9303 device is to forward packets between
+	 * all ports (if not configured differently by an external EEPROM).
+	 * The initial state of a DSA device must be forwarding packets only
+	 * between the external and the internal ports and no forwarding
+	 * between the external ports. In preparation, we stop all packet
+	 * handling for now, until the LAN9303 device has been re-programmed
+	 * accordingly.
+	 */
+	ret = lan9303_disable_processing(chip);
+	if (ret)
+		dev_warn(chip->dev, "failed to disable switching: %d\n", ret);
+
+	dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+
+	ret = lan9303_detect_phy_setup(chip);
+	if (ret) {
+		dev_err(chip->dev,
+			"failed to discover phy bootstrap setup: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* ---------------------------- DSA -----------------------------------*/
+
+static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds)
+{
+	return DSA_TAG_PROTO_LAN9303;
+}
+
+static int lan9303_setup(struct dsa_switch *ds)
+{
+	struct lan9303 *chip = ds->priv;
+	int ret;
+
+	/* Make sure that port 0 is the cpu port */
+	if (!dsa_is_cpu_port(ds, 0)) {
+		dev_err(chip->dev, "port 0 is not the CPU port\n");
+		return -EINVAL;
+	}
+
+	ret = lan9303_separate_ports(chip);
+	if (ret)
+		dev_err(chip->dev, "failed to separate ports: %d\n", ret);
+
+	ret = lan9303_enable_packet_processing(chip, LAN9303_PORT_0_OFFSET);
+	if (ret)
+		dev_err(chip->dev, "failed to re-enable switching: %d\n", ret);
+
+	return 0;
+}
+
+struct lan9303_mib_desc {
+	unsigned int offset; /* offset of first MAC */
+	const char *name;
+};
+
+static const struct lan9303_mib_desc lan9303_mib[] = {
+	{ .offset = LAN9303_MAC_RX_BRDCST_CNT_0, .name = "RxBroad", },
+	{ .offset = LAN9303_MAC_RX_PAUSE_CNT_0, .name = "RxPause", },
+	{ .offset = LAN9303_MAC_RX_MULCST_CNT_0, .name = "RxMulti", },
+	{ .offset = LAN9303_MAC_RX_PKTOK_CNT_0, .name = "RxOk", },
+	{ .offset = LAN9303_MAC_RX_CRCERR_CNT_0, .name = "RxCrcErr", },
+	{ .offset = LAN9303_MAC_RX_ALIGN_CNT_0, .name = "RxAlignErr", },
+	{ .offset = LAN9303_MAC_RX_JABB_CNT_0, .name = "RxJabber", },
+	{ .offset = LAN9303_MAC_RX_FRAG_CNT_0, .name = "RxFragment", },
+	{ .offset = LAN9303_MAC_RX_64_CNT_0, .name = "Rx64Byte", },
+	{ .offset = LAN9303_MAC_RX_127_CNT_0, .name = "Rx128Byte", },
+	{ .offset = LAN9303_MAC_RX_255_CNT_0, .name = "Rx256Byte", },
+	{ .offset = LAN9303_MAC_RX_511_CNT_0, .name = "Rx512Byte", },
+	{ .offset = LAN9303_MAC_RX_1023_CNT_0, .name = "Rx1024Byte", },
+	{ .offset = LAN9303_MAC_RX_MAX_CNT_0, .name = "RxMaxByte", },
+	{ .offset = LAN9303_MAC_RX_PKTLEN_CNT_0, .name = "RxByteCnt", },
+	{ .offset = LAN9303_MAC_RX_SYMBL_CNT_0, .name = "RxSymbolCnt", },
+	{ .offset = LAN9303_MAC_RX_CTLFRM_CNT_0, .name = "RxCfs", },
+	{ .offset = LAN9303_MAC_RX_OVRSZE_CNT_0, .name = "RxOverFlow", },
+	{ .offset = LAN9303_MAC_TX_UNDSZE_CNT_0, .name = "TxShort", },
+	{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
+	{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
+	{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
+	{ .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+	{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
+	{ .offset =
LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", }, + { .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", }, + { .offset = LAN9303_MAC_TX_511_CNT_0, .name = "Tx512Byte", }, + { .offset = LAN9303_MAC_TX_1023_CNT_0, .name = "Tx1024Byte", }, + { .offset = LAN9303_MAC_TX_MAX_CNT_0, .name = "TxMaxByte", }, + { .offset = LAN9303_MAC_TX_PKTLEN_CNT_0, .name = "TxByteCnt", }, + { .offset = LAN9303_MAC_TX_PKTOK_CNT_0, .name = "TxOk", }, + { .offset = LAN9303_MAC_TX_TOTALCOL_CNT_0, .name = "TxCollision", }, + { .offset = LAN9303_MAC_TX_MULTICOL_CNT_0, .name = "TxMultiCol", }, + { .offset = LAN9303_MAC_TX_SNGLECOL_CNT_0, .name = "TxSingleCol", }, + { .offset = LAN9303_MAC_TX_EXCOL_CNT_0, .name = "TxExcCol", }, + { .offset = LAN9303_MAC_TX_DEFER_CNT_0, .name = "TxDefer", }, + { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", }, +}; + +static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + unsigned int u; + + for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { + strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name, + ETH_GSTRING_LEN); + } +} + +static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct lan9303 *chip = ds->priv; + u32 reg; + unsigned int u, poff; + int ret; + + poff = port * 0x400; + + for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { + ret = lan9303_read_switch_reg(chip, + lan9303_mib[u].offset + poff, + ®); + if (ret) + dev_warn(chip->dev, "Reading status reg %u failed\n", + lan9303_mib[u].offset + poff); + data[u] = reg; + } +} + +static int lan9303_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(lan9303_mib); +} + +static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) +{ + struct lan9303 *chip = ds->priv; + int phy_base = chip->phy_addr_sel_strap; + + if (phy == phy_base) + return lan9303_virt_phy_reg_read(chip, regnum); + if (phy > phy_base + 2) + return -ENODEV; + + return lan9303_port_phy_reg_read(chip, phy, regnum); +} + +static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, + u16 val) +{ + struct lan9303 *chip = ds->priv; + int phy_base = chip->phy_addr_sel_strap; + + if (phy == phy_base) + return lan9303_virt_phy_reg_write(chip, regnum, val); + if (phy > phy_base + 2) + return -ENODEV; + + return lan9303_phy_reg_write(chip, phy, regnum, val); +} + +static int lan9303_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct lan9303 *chip = ds->priv; + + /* enable internal packet processing */ + switch (port) { + case 1: + return lan9303_enable_packet_processing(chip, + LAN9303_PORT_1_OFFSET); + case 2: + return lan9303_enable_packet_processing(chip, + LAN9303_PORT_2_OFFSET); + default: + dev_dbg(chip->dev, + "Error: request to power up invalid port %d\n", port); + } + + return -ENODEV; +} + +static void lan9303_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct lan9303 *chip = ds->priv; + + /* disable internal packet processing */ + switch (port) { + case 1: + lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); + lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 1, + MII_BMCR, BMCR_PDOWN); + break; + case 2: + lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); + lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 2, + MII_BMCR, BMCR_PDOWN); + break; + default: + dev_dbg(chip->dev, + "Error: request to power down invalid port %d\n", port); + } +} + +static struct dsa_switch_ops lan9303_switch_ops = { + .get_tag_protocol = lan9303_get_tag_protocol, + .setup = 
lan9303_setup, + .get_strings = lan9303_get_strings, + .phy_read = lan9303_phy_read, + .phy_write = lan9303_phy_write, + .get_ethtool_stats = lan9303_get_ethtool_stats, + .get_sset_count = lan9303_get_sset_count, + .port_enable = lan9303_port_enable, + .port_disable = lan9303_port_disable, +}; + +static int lan9303_register_switch(struct lan9303 *chip) +{ + chip->ds = dsa_switch_alloc(chip->dev, DSA_MAX_PORTS); + if (!chip->ds) + return -ENOMEM; + + chip->ds->priv = chip; + chip->ds->ops = &lan9303_switch_ops; + chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7; + + return dsa_register_switch(chip->ds, chip->dev); +} + +static void lan9303_probe_reset_gpio(struct lan9303 *chip, + struct device_node *np) +{ + chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", + GPIOD_OUT_LOW); + + if (!chip->reset_gpio) { + dev_dbg(chip->dev, "No reset GPIO defined\n"); + return; + } + + chip->reset_duration = 200; + + if (np) { + of_property_read_u32(np, "reset-duration", + &chip->reset_duration); + } else { + dev_dbg(chip->dev, "reset duration defaults to 200 ms\n"); + } + + /* A sane reset duration should not be longer than 1s */ + if (chip->reset_duration > 1000) + chip->reset_duration = 1000; +} + +int lan9303_probe(struct lan9303 *chip, struct device_node *np) +{ + int ret; + + mutex_init(&chip->indirect_mutex); + + lan9303_probe_reset_gpio(chip, np); + + ret = lan9303_handle_reset(chip); + if (ret) + return ret; + + ret = lan9303_check_device(chip); + if (ret) + return ret; + + ret = lan9303_register_switch(chip); + if (ret) { + dev_dbg(chip->dev, "Failed to register switch: %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(lan9303_probe); + +int lan9303_remove(struct lan9303 *chip) +{ + int rc; + + rc = lan9303_disable_processing(chip); + if (rc != 0) + dev_warn(chip->dev, "shutting down failed\n"); + + dsa_unregister_switch(chip->ds); + + /* assert reset to the whole device to prevent it from doing anything */ + gpiod_set_value_cansleep(chip->reset_gpio, 1); + gpiod_unexport(chip->reset_gpio); + + return 0; +} +EXPORT_SYMBOL(lan9303_remove); + +MODULE_AUTHOR("Juergen Borleis "); +MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h new file mode 100644 index 0000000000000000000000000000000000000000..d1512dad2d9088911a808b3b2a46733e286f7103 --- /dev/null +++ b/drivers/net/dsa/lan9303.h @@ -0,0 +1,19 @@ +#include +#include +#include + +struct lan9303 { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + struct gpio_desc *reset_gpio; + u32 reset_duration; /* in [ms] */ + bool phy_addr_sel_strap; + struct dsa_switch *ds; + struct mutex indirect_mutex; /* protect indexed register access */ +}; + +extern const struct regmap_access_table lan9303_register_set; + +int lan9303_probe(struct lan9303 *chip, struct device_node *np); +int lan9303_remove(struct lan9303 *chip); diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c new file mode 100644 index 0000000000000000000000000000000000000000..ab3ce0da5071a1fe15d4943d84e1bed7bbb836a5 --- /dev/null +++ b/drivers/net/dsa/lan9303_i2c.c @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2017 Pengutronix, Juergen Borleis + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include + +#include "lan9303.h" + +struct lan9303_i2c { + struct i2c_client *device; + struct lan9303 chip; +}; + +static const struct regmap_config lan9303_i2c_regmap_config = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 1, + .can_multi_write = true, + .max_register = 0x0ff, /* address bits 0..1 are not used */ + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + + .volatile_table = &lan9303_register_set, + .wr_table = &lan9303_register_set, + .rd_table = &lan9303_register_set, + + .cache_type = REGCACHE_NONE, +}; + +static int lan9303_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lan9303_i2c *sw_dev; + int ret; + + sw_dev = devm_kzalloc(&client->dev, sizeof(struct lan9303_i2c), + GFP_KERNEL); + if (!sw_dev) + return -ENOMEM; + + sw_dev->chip.regmap = devm_regmap_init_i2c(client, + &lan9303_i2c_regmap_config); + if (IS_ERR(sw_dev->chip.regmap)) { + ret = PTR_ERR(sw_dev->chip.regmap); + dev_err(&client->dev, "Failed to allocate register map: %d\n", + ret); + return ret; + } + + /* link forward and backward */ + sw_dev->device = client; + i2c_set_clientdata(client, sw_dev); + sw_dev->chip.dev = &client->dev; + + ret = lan9303_probe(&sw_dev->chip, client->dev.of_node); + if (ret != 0) + return ret; + + dev_info(&client->dev, "LAN9303 I2C driver loaded successfully\n"); + + return 0; +} + +static int lan9303_i2c_remove(struct i2c_client *client) +{ + struct lan9303_i2c *sw_dev; + + sw_dev = i2c_get_clientdata(client); + if (!sw_dev) + return -ENODEV; + + return lan9303_remove(&sw_dev->chip); +} + +/*-------------------------------------------------------------------------*/ + +static const struct i2c_device_id lan9303_i2c_id[] = { + { "lan9303", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id); + +static const struct of_device_id lan9303_i2c_of_match[] = { + { .compatible = "smsc,lan9303-i2c", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match); + +static struct i2c_driver lan9303_i2c_driver = { + .driver = { + .name = "LAN9303_I2C", + .of_match_table = of_match_ptr(lan9303_i2c_of_match), + }, + .probe = lan9303_i2c_probe, + .remove = lan9303_i2c_remove, + .id_table = lan9303_i2c_id, +}; +module_i2c_driver(lan9303_i2c_driver); + +MODULE_AUTHOR("Juergen Borleis "); +MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in I2C managed mode"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c new file mode 100644 index 0000000000000000000000000000000000000000..93c36c0541cf95fc9f6a73c6442a2e5cc3136736 --- /dev/null +++ b/drivers/net/dsa/lan9303_mdio.c @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2017 Pengutronix, Juergen Borleis + * + * Partially based on a patch from + * Copyright (c) 2014 Stefan Roese + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include + +#include "lan9303.h" + +/* Generate phy-addr and -reg from the input address */ +#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f) +#define PHY_REG(x) (((x) >> 1) & 0x1f) + +struct lan9303_mdio { + struct mdio_device *device; + struct lan9303 chip; +}; + +static void lan9303_mdio_real_write(struct mdio_device *mdio, int reg, u16 val) +{ + mdio->bus->write(mdio->bus, PHY_ADDR(reg), PHY_REG(reg), val); +} + +static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val) +{ + struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + + mutex_lock(&sw_dev->device->bus->mdio_lock); + lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); + lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); + mutex_unlock(&sw_dev->device->bus->mdio_lock); + + return 0; +} + +static u16 lan9303_mdio_real_read(struct mdio_device *mdio, int reg) +{ + return mdio->bus->read(mdio->bus, PHY_ADDR(reg), PHY_REG(reg)); +} + +static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) +{ + struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + + mutex_lock(&sw_dev->device->bus->mdio_lock); + *val = lan9303_mdio_real_read(sw_dev->device, reg); + *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); + mutex_unlock(&sw_dev->device->bus->mdio_lock); + + return 0; +} + +static const struct regmap_config lan9303_mdio_regmap_config = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 1, + .can_multi_write = true, + .max_register = 0x0ff, /* address bits 0..1 are not used */ + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + + .volatile_table = &lan9303_register_set, + .wr_table = &lan9303_register_set, + .rd_table = &lan9303_register_set, + + .reg_read = lan9303_mdio_read, + .reg_write = lan9303_mdio_write, + + .cache_type = REGCACHE_NONE, +}; + +static int lan9303_mdio_probe(struct mdio_device *mdiodev) +{ + struct lan9303_mdio *sw_dev; + int ret; + + sw_dev = devm_kzalloc(&mdiodev->dev, sizeof(struct lan9303_mdio), + GFP_KERNEL); + if (!sw_dev) + return -ENOMEM; + + sw_dev->chip.regmap = devm_regmap_init(&mdiodev->dev, NULL, sw_dev, + &lan9303_mdio_regmap_config); + if (IS_ERR(sw_dev->chip.regmap)) { + ret = PTR_ERR(sw_dev->chip.regmap); + dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret); + return ret; + } + + /* link forward and backward */ + sw_dev->device = mdiodev; + dev_set_drvdata(&mdiodev->dev, sw_dev); + sw_dev->chip.dev = &mdiodev->dev; + + ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node); + if (ret != 0) + return ret; + + dev_info(&mdiodev->dev, "LAN9303 MDIO driver loaded successfully\n"); + + return 0; +} + +static void lan9303_mdio_remove(struct mdio_device *mdiodev) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev); + + if (!sw_dev) + return; + + lan9303_remove(&sw_dev->chip); +} + +/*-------------------------------------------------------------------------*/ + +static const struct of_device_id lan9303_mdio_of_match[] = { + { .compatible = "smsc,lan9303-mdio" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match); + +static struct mdio_driver lan9303_mdio_driver = { + .mdiodrv.driver = { + .name = "LAN9303_MDIO", + .of_match_table = of_match_ptr(lan9303_mdio_of_match), + }, + .probe = lan9303_mdio_probe, + .remove = lan9303_mdio_remove, +}; +mdio_module_driver(lan9303_mdio_driver); + +MODULE_AUTHOR("Stefan Roese , Juergen Borleis "); +MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in MDIO managed mode"); 
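The PHY_ADDR()/PHY_REG() macros in lan9303_mdio.c fold a chip register offset into the (PHY address, PHY register) pair used on the MDIO bus, and each 32-bit register is transferred as two 16-bit transactions, the low half at reg and the high half at reg + 2. A small standalone sketch of that arithmetic; it assumes byte offsets as the unit of reg, as the reg/reg + 2 split implies, and 0x50 is an arbitrary illustration, not a documented LAN9303 register:

#include <stdio.h>

#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f)
#define PHY_REG(x)  (((x) >> 1) & 0x1f)

int main(void)
{
	unsigned int reg = 0x50;	/* hypothetical 32-bit register offset */

	/* prints: low  half: phy addr 0x11, phy reg 0x08 */
	printf("low  half: phy addr 0x%02x, phy reg 0x%02x\n",
	       PHY_ADDR(reg), PHY_REG(reg));
	/* prints: high half: phy addr 0x11, phy reg 0x09 */
	printf("high half: phy addr 0x%02x, phy reg 0x%02x\n",
	       PHY_ADDR(reg + 2), PHY_REG(reg + 2));
	return 0;
}

The + 0x10 offset places all indirect accesses in the PHY address range 0x10-0x1f, keeping them clear of the three real PHYs at addresses 0-3; holding mdio_lock across the two half-word transfers keeps the 32-bit access atomic with respect to other bus users.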
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c new file mode 100644 index 0000000000000000000000000000000000000000..b070c167e70fc37d301050e5cb00265053322ea4 --- /dev/null +++ b/drivers/net/dsa/mt7530.c @@ -0,0 +1,1126 @@ +/* + * Mediatek MT7530 DSA Switch driver + * Copyright (C) 2017 Sean Wang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mt7530.h" + +/* String, offset, and register size in bytes if different from 4 bytes */ +static const struct mt7530_mib_desc mt7530_mib[] = { + MIB_DESC(1, 0x00, "TxDrop"), + MIB_DESC(1, 0x04, "TxCrcErr"), + MIB_DESC(1, 0x08, "TxUnicast"), + MIB_DESC(1, 0x0c, "TxMulticast"), + MIB_DESC(1, 0x10, "TxBroadcast"), + MIB_DESC(1, 0x14, "TxCollision"), + MIB_DESC(1, 0x18, "TxSingleCollision"), + MIB_DESC(1, 0x1c, "TxMultipleCollision"), + MIB_DESC(1, 0x20, "TxDeferred"), + MIB_DESC(1, 0x24, "TxLateCollision"), + MIB_DESC(1, 0x28, "TxExcessiveCollistion"), + MIB_DESC(1, 0x2c, "TxPause"), + MIB_DESC(1, 0x30, "TxPktSz64"), + MIB_DESC(1, 0x34, "TxPktSz65To127"), + MIB_DESC(1, 0x38, "TxPktSz128To255"), + MIB_DESC(1, 0x3c, "TxPktSz256To511"), + MIB_DESC(1, 0x40, "TxPktSz512To1023"), + MIB_DESC(1, 0x44, "Tx1024ToMax"), + MIB_DESC(2, 0x48, "TxBytes"), + MIB_DESC(1, 0x60, "RxDrop"), + MIB_DESC(1, 0x64, "RxFiltering"), + MIB_DESC(1, 0x6c, "RxMulticast"), + MIB_DESC(1, 0x70, "RxBroadcast"), + MIB_DESC(1, 0x74, "RxAlignErr"), + MIB_DESC(1, 0x78, "RxCrcErr"), + MIB_DESC(1, 0x7c, "RxUnderSizeErr"), + MIB_DESC(1, 0x80, "RxFragErr"), + MIB_DESC(1, 0x84, "RxOverSzErr"), + MIB_DESC(1, 0x88, "RxJabberErr"), + MIB_DESC(1, 0x8c, "RxPause"), + MIB_DESC(1, 0x90, "RxPktSz64"), + MIB_DESC(1, 0x94, "RxPktSz65To127"), + MIB_DESC(1, 0x98, "RxPktSz128To255"), + MIB_DESC(1, 0x9c, "RxPktSz256To511"), + MIB_DESC(1, 0xa0, "RxPktSz512To1023"), + MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"), + MIB_DESC(2, 0xa8, "RxBytes"), + MIB_DESC(1, 0xb0, "RxCtrlDrop"), + MIB_DESC(1, 0xb4, "RxIngressDrop"), + MIB_DESC(1, 0xb8, "RxArlDrop"), +}; + +static int +mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val) +{ + int ret; + + ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val); + if (ret < 0) + dev_err(priv->dev, + "failed to priv write register\n"); + return ret; +} + +static u32 +mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg) +{ + int ret; + u32 val; + + ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val); + if (ret < 0) { + dev_err(priv->dev, + "failed to priv read register\n"); + return ret; + } + + return val; +} + +static void +mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg, + u32 mask, u32 set) +{ + u32 val; + + val = mt7623_trgmii_read(priv, reg); + val &= ~mask; + val |= set; + mt7623_trgmii_write(priv, reg, val); +} + +static void +mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val) +{ + mt7623_trgmii_rmw(priv, reg, 0, val); +} + +static void +mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val) +{ + 
mt7623_trgmii_rmw(priv, reg, val, 0); +} + +static int +core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad) +{ + struct mii_bus *bus = priv->bus; + int value, ret; + + /* Write the desired MMD Devad */ + ret = bus->write(bus, 0, MII_MMD_CTRL, devad); + if (ret < 0) + goto err; + + /* Write the desired MMD register address */ + ret = bus->write(bus, 0, MII_MMD_DATA, prtad); + if (ret < 0) + goto err; + + /* Select the Function : DATA with no post increment */ + ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); + if (ret < 0) + goto err; + + /* Read the content of the MMD's selected register */ + value = bus->read(bus, 0, MII_MMD_DATA); + + return value; +err: + dev_err(&bus->dev, "failed to read mmd register\n"); + + return ret; +} + +static int +core_write_mmd_indirect(struct mt7530_priv *priv, int prtad, + int devad, u32 data) +{ + struct mii_bus *bus = priv->bus; + int ret; + + /* Write the desired MMD Devad */ + ret = bus->write(bus, 0, MII_MMD_CTRL, devad); + if (ret < 0) + goto err; + + /* Write the desired MMD register address */ + ret = bus->write(bus, 0, MII_MMD_DATA, prtad); + if (ret < 0) + goto err; + + /* Select the Function : DATA with no post increment */ + ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); + if (ret < 0) + goto err; + + /* Write the data into MMD's selected register */ + ret = bus->write(bus, 0, MII_MMD_DATA, data); +err: + if (ret < 0) + dev_err(&bus->dev, + "failed to write mmd register\n"); + return ret; +} + +static void +core_write(struct mt7530_priv *priv, u32 reg, u32 val) +{ + struct mii_bus *bus = priv->bus; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); + + mutex_unlock(&bus->mdio_lock); +} + +static void +core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set) +{ + struct mii_bus *bus = priv->bus; + u32 val; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2); + val &= ~mask; + val |= set; + core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); + + mutex_unlock(&bus->mdio_lock); +} + +static void +core_set(struct mt7530_priv *priv, u32 reg, u32 val) +{ + core_rmw(priv, reg, 0, val); +} + +static void +core_clear(struct mt7530_priv *priv, u32 reg, u32 val) +{ + core_rmw(priv, reg, val, 0); +} + +static int +mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val) +{ + struct mii_bus *bus = priv->bus; + u16 page, r, lo, hi; + int ret; + + page = (reg >> 6) & 0x3ff; + r = (reg >> 2) & 0xf; + lo = val & 0xffff; + hi = val >> 16; + + /* MT7530 uses 31 as the pseudo port */ + ret = bus->write(bus, 0x1f, 0x1f, page); + if (ret < 0) + goto err; + + ret = bus->write(bus, 0x1f, r, lo); + if (ret < 0) + goto err; + + ret = bus->write(bus, 0x1f, 0x10, hi); +err: + if (ret < 0) + dev_err(&bus->dev, + "failed to write mt7530 register\n"); + return ret; +} + +static u32 +mt7530_mii_read(struct mt7530_priv *priv, u32 reg) +{ + struct mii_bus *bus = priv->bus; + u16 page, r, lo, hi; + int ret; + + page = (reg >> 6) & 0x3ff; + r = (reg >> 2) & 0xf; + + /* MT7530 uses 31 as the pseudo port */ + ret = bus->write(bus, 0x1f, 0x1f, page); + if (ret < 0) { + dev_err(&bus->dev, + "failed to read mt7530 register\n"); + return ret; + } + + lo = bus->read(bus, 0x1f, r); + hi = bus->read(bus, 0x1f, 0x10); + + return (hi << 16) | (lo & 0xffff); +} + +static void +mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val) +{ + struct mii_bus *bus = priv->bus; + + 
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + mt7530_mii_write(priv, reg, val); + + mutex_unlock(&bus->mdio_lock); +} + +static u32 +_mt7530_read(struct mt7530_dummy_poll *p) +{ + struct mii_bus *bus = p->priv->bus; + u32 val; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + val = mt7530_mii_read(p->priv, p->reg); + + mutex_unlock(&bus->mdio_lock); + + return val; +} + +static u32 +mt7530_read(struct mt7530_priv *priv, u32 reg) +{ + struct mt7530_dummy_poll p; + + INIT_MT7530_DUMMY_POLL(&p, priv, reg); + return _mt7530_read(&p); +} + +static void +mt7530_rmw(struct mt7530_priv *priv, u32 reg, + u32 mask, u32 set) +{ + struct mii_bus *bus = priv->bus; + u32 val; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + val = mt7530_mii_read(priv, reg); + val &= ~mask; + val |= set; + mt7530_mii_write(priv, reg, val); + + mutex_unlock(&bus->mdio_lock); +} + +static void +mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val) +{ + mt7530_rmw(priv, reg, 0, val); +} + +static void +mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val) +{ + mt7530_rmw(priv, reg, val, 0); +} + +static int +mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp) +{ + u32 val; + int ret; + struct mt7530_dummy_poll p; + + /* Set the command operating upon the MAC address entries */ + val = ATC_BUSY | ATC_MAT(0) | cmd; + mt7530_write(priv, MT7530_ATC, val); + + INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC); + ret = readx_poll_timeout(_mt7530_read, &p, val, + !(val & ATC_BUSY), 20, 20000); + if (ret < 0) { + dev_err(priv->dev, "reset timeout\n"); + return ret; + } + + /* Additional sanity for read command if the specified + * entry is invalid + */ + val = mt7530_read(priv, MT7530_ATC); + if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID)) + return -EINVAL; + + if (rsp) + *rsp = val; + + return 0; +} + +static void +mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb) +{ + u32 reg[3]; + int i; + + /* Read from ARL table into an array */ + for (i = 0; i < 3; i++) { + reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4)); + + dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n", + __func__, __LINE__, i, reg[i]); + } + + fdb->vid = (reg[1] >> CVID) & CVID_MASK; + fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK; + fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK; + fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK; + fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK; + fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK; + fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK; + fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK; + fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK; + fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT; +} + +static void +mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, + u8 port_mask, const u8 *mac, + u8 aging, u8 type) +{ + u32 reg[3] = { 0 }; + int i; + + reg[1] |= vid & CVID_MASK; + reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; + reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; + /* STATIC_ENT indicate that entry is static wouldn't + * be aged out and STATIC_EMP specified as erasing an + * entry + */ + reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS; + reg[1] |= mac[5] << MAC_BYTE_5; + reg[1] |= mac[4] << MAC_BYTE_4; + reg[0] |= mac[3] << MAC_BYTE_3; + reg[0] |= mac[2] << MAC_BYTE_2; + reg[0] |= mac[1] << MAC_BYTE_1; + reg[0] |= mac[0] << MAC_BYTE_0; + + /* Write array into the ARL table */ + for (i = 0; i < 3; i++) + mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]); +} + +static int 
+mt7530_pad_clk_setup(struct dsa_switch *ds, int mode) +{ + struct mt7530_priv *priv = ds->priv; + u32 ncpo1, ssc_delta, trgint, i; + + switch (mode) { + case PHY_INTERFACE_MODE_RGMII: + trgint = 0; + ncpo1 = 0x0c80; + ssc_delta = 0x87; + break; + case PHY_INTERFACE_MODE_TRGMII: + trgint = 1; + ncpo1 = 0x1400; + ssc_delta = 0x57; + break; + default: + dev_err(priv->dev, "xMII mode %d not supported\n", mode); + return -EINVAL; + } + + mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, + P6_INTF_MODE(trgint)); + + /* Lower Tx driving for the TRGMII path */ + for (i = 0; i < NUM_TRGMII_CTRL; i++) + mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), + TD_DM_DRVP(8) | TD_DM_DRVN(8)); + + /* Setup core clock for MT7530 */ + if (!trgint) { + /* Disable MT7530 core clock */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); + + /* Disable the PLL. Since no phy_device has been created yet + * for phy_[read,write]_mmd_indirect() to operate on, use the + * local core_write_mmd_indirect() helper to complete this + * step. + */ + core_write_mmd_indirect(priv, + CORE_GSWPLL_GRP1, + MDIO_MMD_VEND2, + 0); + + /* Set core clock to 500MHz */ + core_write(priv, CORE_GSWPLL_GRP2, + RG_GSWPLL_POSDIV_500M(1) | + RG_GSWPLL_FBKDIV_500M(25)); + + /* Enable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, + RG_GSWPLL_EN_PRE | + RG_GSWPLL_POSDIV_200M(2) | + RG_GSWPLL_FBKDIV_200M(32)); + + /* Enable MT7530 core clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); + } + + /* Setup the MT7530 TRGMII Tx Clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); + core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); + core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); + core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); + core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); + core_write(priv, CORE_PLL_GROUP4, + RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN | + RG_SYSPLL_BIAS_LPF_EN); + core_write(priv, CORE_PLL_GROUP2, + RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN | + RG_SYSPLL_POSDIV(1)); + core_write(priv, CORE_PLL_GROUP7, + RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) | + RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); + core_set(priv, CORE_TRGMII_GSW_CLK_CG, + REG_GSWCK_EN | REG_TRGMIICK_EN); + + if (!trgint) + for (i = 0; i < NUM_TRGMII_CTRL; i++) + mt7530_rmw(priv, MT7530_TRGMII_RD(i), + RD_TAP_MASK, RD_TAP(16)); + else + mt7623_trgmii_set(priv, GSW_INTF_MODE, INTF_MODE_TRGMII); + + return 0; +} + +static int +mt7623_pad_clk_setup(struct dsa_switch *ds) +{ + struct mt7530_priv *priv = ds->priv; + int i; + + for (i = 0; i < NUM_TRGMII_CTRL; i++) + mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i), + TD_DM_DRVP(8) | TD_DM_DRVN(8)); + + mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL); + mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST); + + return 0; +} + +static void +mt7530_mib_reset(struct dsa_switch *ds) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH); + mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE); +} + +static void +mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable) +{ + u32 mask = PMCR_TX_EN | PMCR_RX_EN; + + if (enable) + mt7530_set(priv, MT7530_PMCR_P(port), mask); + else + mt7530_clear(priv, MT7530_PMCR_P(port), mask); +} + +static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + struct mt7530_priv *priv = ds->priv; + + return mdiobus_read_nested(priv->bus, port, regnum); +} + +static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +{ + struct
mt7530_priv *priv = ds->priv; + + return mdiobus_write_nested(priv->bus, port, regnum, val); +} + +static void +mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) + strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, + ETH_GSTRING_LEN); +} + +static void +mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct mt7530_priv *priv = ds->priv; + const struct mt7530_mib_desc *mib; + u32 reg, i; + u64 hi; + + for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) { + mib = &mt7530_mib[i]; + reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset; + + data[i] = mt7530_read(priv, reg); + if (mib->size == 2) { + hi = mt7530_read(priv, reg + 4); + data[i] |= hi << 32; + } + } +} + +static int +mt7530_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mt7530_mib); +} + +static void mt7530_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct mt7530_priv *priv = ds->priv; + + if (phy_is_pseudo_fixed_link(phydev)) { + dev_dbg(priv->dev, "phy-mode for master device = %x\n", + phydev->interface); + + /* Setup TX circuit including relevant PAD and driving */ + mt7530_pad_clk_setup(ds, phydev->interface); + + /* Setup RX circuit including relevant PAD and driving on the + * host side; this must be done after the setup on the device + * side is all finished. + */ + mt7623_pad_clk_setup(ds); + } +} + +static int +mt7530_cpu_port_enable(struct mt7530_priv *priv, + int port) +{ + /* Enable Mediatek header mode on the cpu port */ + mt7530_write(priv, MT7530_PVC_P(port), + PORT_SPEC_TAG); + + /* Setup the MAC by default for the cpu port */ + mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK); + + /* Disable auto learning on the cpu port */ + mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); + + /* Forward unknown unicast frames to the cpu port */ + mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); + + /* The CPU port gets connected to all user ports of + * the switch + */ + mt7530_write(priv, MT7530_PCR_P(port), + PCR_MATRIX(priv->ds->enabled_port_mask)); + + return 0; +} + +static int +mt7530_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct mt7530_priv *priv = ds->priv; + + mutex_lock(&priv->reg_mutex); + + /* Setup the MAC for the user port */ + mt7530_write(priv, MT7530_PMCR_P(port), PMCR_USERP_LINK); + + /* Allow the user port to be connected to the cpu port, and also + * restore the port matrix if the port is a member of a certain + * bridge. + */ + priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT)); + priv->ports[port].enable = true; + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, + priv->ports[port].pm); + mt7530_port_set_status(priv, port, 1); + + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static void +mt7530_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct mt7530_priv *priv = ds->priv; + + mutex_lock(&priv->reg_mutex); + + /* Clear the port matrix now; it will be restored the next time + * the port is enabled.
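+ * The saved member bits in priv->ports[port].pm are reapplied by + * mt7530_port_enable() above.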
+ */ + priv->ports[port].enable = false; + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, + PCR_MATRIX_CLR); + mt7530_port_set_status(priv, port, 0); + + mutex_unlock(&priv->reg_mutex); +} + +static void +mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct mt7530_priv *priv = ds->priv; + u32 stp_state; + + switch (state) { + case BR_STATE_DISABLED: + stp_state = MT7530_STP_DISABLED; + break; + case BR_STATE_BLOCKING: + stp_state = MT7530_STP_BLOCKING; + break; + case BR_STATE_LISTENING: + stp_state = MT7530_STP_LISTENING; + break; + case BR_STATE_LEARNING: + stp_state = MT7530_STP_LEARNING; + break; + case BR_STATE_FORWARDING: + default: + stp_state = MT7530_STP_FORWARDING; + break; + } + + mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK, stp_state); +} + +static int +mt7530_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct mt7530_priv *priv = ds->priv; + u32 port_bitmap = BIT(MT7530_CPU_PORT); + int i; + + mutex_lock(&priv->reg_mutex); + + for (i = 0; i < MT7530_NUM_PORTS; i++) { + /* Add this port to the port matrix of the other ports in the + * same bridge. If the port is disabled, the port matrix is + * kept and not set up until the port becomes enabled. + */ + if (ds->enabled_port_mask & BIT(i) && i != port) { + if (ds->ports[i].bridge_dev != bridge) + continue; + if (priv->ports[i].enable) + mt7530_set(priv, MT7530_PCR_P(i), + PCR_MATRIX(BIT(port))); + priv->ports[i].pm |= PCR_MATRIX(BIT(port)); + + port_bitmap |= BIT(i); + } + } + + /* Add all the other ports to this port's matrix. */ + if (priv->ports[port].enable) + mt7530_rmw(priv, MT7530_PCR_P(port), + PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); + priv->ports[port].pm |= PCR_MATRIX(port_bitmap); + + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static void +mt7530_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct mt7530_priv *priv = ds->priv; + int i; + + mutex_lock(&priv->reg_mutex); + + for (i = 0; i < MT7530_NUM_PORTS; i++) { + /* Remove this port from the port matrix of the other ports + * in the same bridge. If the port is disabled, the port + * matrix is kept and not set up until the port becomes + * enabled. + */ + if (ds->enabled_port_mask & BIT(i) && i != port) { + if (ds->ports[i].bridge_dev != bridge) + continue; + if (priv->ports[i].enable) + mt7530_clear(priv, MT7530_PCR_P(i), + PCR_MATRIX(BIT(port))); + priv->ports[i].pm &= ~PCR_MATRIX(BIT(port)); + } + } + + /* Set the cpu port to be the only one in the port matrix of + * this port. + */ + if (priv->ports[port].enable) + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, + PCR_MATRIX(BIT(MT7530_CPU_PORT))); + priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); + + mutex_unlock(&priv->reg_mutex); +} + +static int +mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct mt7530_priv *priv = ds->priv; + int ret; + + /* Because auto-learned entries share the same FDB table, + * an entry is reserved with no port_mask to make sure fdb_add + * is called while the entry is still available.
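+ * In the switchdev prepare/commit model this lets the prepare phase + * detect a full table before mt7530_port_fdb_add() makes the entry + * visible with the real port_mask.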
+ */ + mutex_lock(&priv->reg_mutex); + mt7530_fdb_write(priv, fdb->vid, 0, fdb->addr, -1, STATIC_ENT); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static void +mt7530_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct mt7530_priv *priv = ds->priv; + u8 port_mask = BIT(port); + + mutex_lock(&priv->reg_mutex); + mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_ENT); + mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + mutex_unlock(&priv->reg_mutex); +} + +static int +mt7530_port_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct mt7530_priv *priv = ds->priv; + int ret; + u8 port_mask = BIT(port); + + mutex_lock(&priv->reg_mutex); + mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_EMP); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int +mt7530_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mt7530_priv *priv = ds->priv; + struct mt7530_fdb _fdb = { 0 }; + int cnt = MT7530_NUM_FDB_RECORDS; + int ret = 0; + u32 rsp = 0; + + mutex_lock(&priv->reg_mutex); + + ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp); + if (ret < 0) + goto err; + + do { + if (rsp & ATC_SRCH_HIT) { + mt7530_fdb_read(priv, &_fdb); + if (_fdb.port_mask & BIT(port)) { + ether_addr_copy(fdb->addr, _fdb.mac); + fdb->vid = _fdb.vid; + fdb->ndm_state = _fdb.noarp ? + NUD_NOARP : NUD_REACHABLE; + ret = cb(&fdb->obj); + if (ret < 0) + break; + } + } + } while (--cnt && + !(rsp & ATC_SRCH_END) && + !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp)); +err: + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static enum dsa_tag_protocol +mtk_get_tag_protocol(struct dsa_switch *ds) +{ + struct mt7530_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, MT7530_CPU_PORT)) { + dev_warn(priv->dev, + "port not matched with tagging CPU port\n"); + return DSA_TAG_PROTO_NONE; + } else { + return DSA_TAG_PROTO_MTK; + } +} + +static int +mt7530_setup(struct dsa_switch *ds) +{ + struct mt7530_priv *priv = ds->priv; + int ret, i; + u32 id, val; + struct device_node *dn; + struct mt7530_dummy_poll p; + + /* The parent node of master_netdev, which holds the common system + * controller, is also the container for the two GMAC nodes + * represented as two netdev instances.
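+ * syscon_node_to_regmap() below turns that parent node into the + * regmap used for the host-side TRGMII registers.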
+ */ + dn = ds->master_netdev->dev.of_node->parent; + priv->ethernet = syscon_node_to_regmap(dn); + if (IS_ERR(priv->ethernet)) + return PTR_ERR(priv->ethernet); + + regulator_set_voltage(priv->core_pwr, 1000000, 1000000); + ret = regulator_enable(priv->core_pwr); + if (ret < 0) { + dev_err(priv->dev, + "Failed to enable core power: %d\n", ret); + return ret; + } + + regulator_set_voltage(priv->io_pwr, 3300000, 3300000); + ret = regulator_enable(priv->io_pwr); + if (ret < 0) { + dev_err(priv->dev, "Failed to enable io power: %d\n", + ret); + return ret; + } + + /* Reset the whole chip through the gpio pin or through the + * memory-mapped registers, depending on the type of hardware + */ + if (priv->mcm) { + reset_control_assert(priv->rstc); + usleep_range(1000, 1100); + reset_control_deassert(priv->rstc); + } else { + gpiod_set_value_cansleep(priv->reset, 0); + usleep_range(1000, 1100); + gpiod_set_value_cansleep(priv->reset, 1); + } + + /* Wait for the MT7530 to come out of reset and become stable */ + INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); + ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, + 20, 1000000); + if (ret < 0) { + dev_err(priv->dev, "reset timeout\n"); + return ret; + } + + id = mt7530_read(priv, MT7530_CREV); + id >>= CHIP_NAME_SHIFT; + if (id != MT7530_ID) { + dev_err(priv->dev, "chip %x is not supported\n", id); + return -ENODEV; + } + + /* Reset the switch through internal reset */ + mt7530_write(priv, MT7530_SYS_CTRL, + SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | + SYS_CTRL_REG_RST); + + /* Enable port 6 only; P5 is reserved for GMAC5, which currently is + * not supported + */ + val = mt7530_read(priv, MT7530_MHWTRAP); + val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; + val |= MHWTRAP_MANUAL; + mt7530_write(priv, MT7530_MHWTRAP, val); + + /* Enable and reset MIB counters */ + mt7530_mib_reset(ds); + + mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK); + + for (i = 0; i < MT7530_NUM_PORTS; i++) { + /* Disable forwarding by default on all ports */ + mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, + PCR_MATRIX_CLR); + + if (dsa_is_cpu_port(ds, i)) + mt7530_cpu_port_enable(priv, i); + else + mt7530_port_disable(ds, i, NULL); + } + + /* Flush the FDB table */ + ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); + if (ret < 0) + return ret; + + return 0; +} + +static struct dsa_switch_ops mt7530_switch_ops = { + .get_tag_protocol = mtk_get_tag_protocol, + .setup = mt7530_setup, + .get_strings = mt7530_get_strings, + .phy_read = mt7530_phy_read, + .phy_write = mt7530_phy_write, + .get_ethtool_stats = mt7530_get_ethtool_stats, + .get_sset_count = mt7530_get_sset_count, + .adjust_link = mt7530_adjust_link, + .port_enable = mt7530_port_enable, + .port_disable = mt7530_port_disable, + .port_stp_state_set = mt7530_stp_state_set, + .port_bridge_join = mt7530_port_bridge_join, + .port_bridge_leave = mt7530_port_bridge_leave, + .port_fdb_prepare = mt7530_port_fdb_prepare, + .port_fdb_add = mt7530_port_fdb_add, + .port_fdb_del = mt7530_port_fdb_del, + .port_fdb_dump = mt7530_port_fdb_dump, +}; + +static int +mt7530_probe(struct mdio_device *mdiodev) +{ + struct mt7530_priv *priv; + struct device_node *dn; + + dn = mdiodev->dev.of_node; + + priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); + if (!priv->ds) + return -ENOMEM; + + /* Use the mediatek,mcm property to distinguish the hardware type; + * it causes slight differences in the power-on sequence.
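+ * In the MCM case, mt7530_setup() above drives the reset through the + * "mcm" reset controller rather than through the reset GPIO.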
+ */ + priv->mcm = of_property_read_bool(dn, "mediatek,mcm"); + if (priv->mcm) { + dev_info(&mdiodev->dev, "MT7530 adapted as a multi-chip module\n"); + + priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm"); + if (IS_ERR(priv->rstc)) { + dev_err(&mdiodev->dev, "Couldn't get our reset line\n"); + return PTR_ERR(priv->rstc); + } + } + + priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core"); + if (IS_ERR(priv->core_pwr)) + return PTR_ERR(priv->core_pwr); + + priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io"); + if (IS_ERR(priv->io_pwr)) + return PTR_ERR(priv->io_pwr); + + /* If the switch is not an MCM, it works as a standalone integrated + * circuit and the GPIO pin is used to complete its reset; otherwise, + * in the MCM case, memory-mapped register access provided through + * syscon is used instead. + */ + if (!priv->mcm) { + priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(priv->reset)) { + dev_err(&mdiodev->dev, "Couldn't get our reset line\n"); + return PTR_ERR(priv->reset); + } + } + + priv->bus = mdiodev->bus; + priv->dev = &mdiodev->dev; + priv->ds->priv = priv; + priv->ds->ops = &mt7530_switch_ops; + mutex_init(&priv->reg_mutex); + dev_set_drvdata(&mdiodev->dev, priv); + + return dsa_register_switch(priv->ds, &mdiodev->dev); +} + +static void +mt7530_remove(struct mdio_device *mdiodev) +{ + struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev); + int ret = 0; + + ret = regulator_disable(priv->core_pwr); + if (ret < 0) + dev_err(priv->dev, + "Failed to disable core power: %d\n", ret); + + ret = regulator_disable(priv->io_pwr); + if (ret < 0) + dev_err(priv->dev, "Failed to disable io power: %d\n", + ret); + + dsa_unregister_switch(priv->ds); + mutex_destroy(&priv->reg_mutex); +} + +static const struct of_device_id mt7530_of_match[] = { + { .compatible = "mediatek,mt7530" }, + { /* sentinel */ }, +}; + +static struct mdio_driver mt7530_mdio_driver = { + .probe = mt7530_probe, + .remove = mt7530_remove, + .mdiodrv.driver = { + .name = "mt7530", + .of_match_table = mt7530_of_match, + }, +}; + +mdio_module_driver(mt7530_mdio_driver); + +MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); +MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mediatek-mt7530"); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h new file mode 100644 index 0000000000000000000000000000000000000000..b83d76b998023c38c9e67b90ca73e393f7d29fc4 --- /dev/null +++ b/drivers/net/dsa/mt7530.h @@ -0,0 +1,402 @@ +/* + * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#ifndef __MT7530_H +#define __MT7530_H + +#define MT7530_NUM_PORTS 7 +#define MT7530_CPU_PORT 6 +#define MT7530_NUM_FDB_RECORDS 2048 + +#define NUM_TRGMII_CTRL 5 + +#define TRGMII_BASE(x) (0x10000 + (x)) + +/* Registers for ethsys access */ +#define ETHSYS_CLKCFG0 0x2c +#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11) + +#define SYSC_REG_RSTCTRL 0x34 +#define RESET_MCM BIT(2) + +/* Registers for MAC forward control of unknown frames */ +#define MT7530_MFC 0x10 +#define BC_FFP(x) (((x) & 0xff) << 24) +#define UNM_FFP(x) (((x) & 0xff) << 16) +#define UNU_FFP(x) (((x) & 0xff) << 8) +#define UNU_FFP_MASK UNU_FFP(~0) + +/* Registers for address table access */ +#define MT7530_ATA1 0x74 +#define STATIC_EMP 0 +#define STATIC_ENT 3 +#define MT7530_ATA2 0x78 + +/* Register for address table write data */ +#define MT7530_ATWD 0x7c + +/* Register for address table control */ +#define MT7530_ATC 0x80 +#define ATC_HASH(x) (((x) & 0xfff) << 16) +#define ATC_BUSY BIT(15) +#define ATC_SRCH_END BIT(14) +#define ATC_SRCH_HIT BIT(13) +#define ATC_INVALID BIT(12) +#define ATC_MAT(x) (((x) & 0xf) << 8) +#define ATC_MAT_MACTAB ATC_MAT(0) + +enum mt7530_fdb_cmd { + MT7530_FDB_READ = 0, + MT7530_FDB_WRITE = 1, + MT7530_FDB_FLUSH = 2, + MT7530_FDB_START = 4, + MT7530_FDB_NEXT = 5, +}; + +/* Registers for table search read address */ +#define MT7530_TSRA1 0x84 +#define MAC_BYTE_0 24 +#define MAC_BYTE_1 16 +#define MAC_BYTE_2 8 +#define MAC_BYTE_3 0 +#define MAC_BYTE_MASK 0xff + +#define MT7530_TSRA2 0x88 +#define MAC_BYTE_4 24 +#define MAC_BYTE_5 16 +#define CVID 0 +#define CVID_MASK 0xfff + +#define MT7530_ATRD 0x8C +#define AGE_TIMER 24 +#define AGE_TIMER_MASK 0xff +#define PORT_MAP 4 +#define PORT_MAP_MASK 0xff +#define ENT_STATUS 2 +#define ENT_STATUS_MASK 0x3 + +/* Register for vlan table control */ +#define MT7530_VTCR 0x90 +#define VTCR_BUSY BIT(31) +#define VTCR_FUNC(x) (((x) & 0xf) << 12) +#define VTCR_FUNC_RD_VID 0x1 +#define VTCR_FUNC_WR_VID 0x2 +#define VTCR_FUNC_INV_VID 0x3 +#define VTCR_FUNC_VAL_VID 0x4 +#define VTCR_VID(x) ((x) & 0xfff) + +/* Register for setting up VLAN and ACL write data */ +#define MT7530_VAWD1 0x94 +#define PORT_STAG BIT(31) +#define IVL_MAC BIT(30) +#define PORT_MEM(x) (((x) & 0xff) << 16) +#define VALID BIT(1) + +#define MT7530_VAWD2 0x98 + +/* Register for port STP state control */ +#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) +#define FID_PST(x) ((x) & 0x3) +#define FID_PST_MASK FID_PST(0x3) + +enum mt7530_stp_state { + MT7530_STP_DISABLED = 0, + MT7530_STP_BLOCKING = 1, + MT7530_STP_LISTENING = 1, + MT7530_STP_LEARNING = 2, + MT7530_STP_FORWARDING = 3 +}; + +/* Register for port control */ +#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100)) +#define PORT_VLAN(x) ((x) & 0x3) +#define PCR_MATRIX(x) (((x) & 0xff) << 16) +#define PORT_PRI(x) (((x) & 0x7) << 24) +#define EG_TAG(x) (((x) & 0x3) << 28) +#define PCR_MATRIX_MASK PCR_MATRIX(0xff) +#define PCR_MATRIX_CLR PCR_MATRIX(0) + +/* Register for port security control */ +#define MT7530_PSC_P(x) (0x200c + ((x) * 0x100)) +#define SA_DIS BIT(4) + +/* Register for port vlan control */ +#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100)) +#define PORT_SPEC_TAG BIT(5) +#define VLAN_ATTR(x) (((x) & 0x3) << 6) +#define STAG_VPID(x) (((x) & 0xffff) << 16) + +/* Register for port port-and-protocol based vlan 1 control */ +#define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100)) + +/* Register for port MAC control */ +#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) +#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18) +#define PMCR_MAC_MODE BIT(16) +#define
PMCR_FORCE_MODE BIT(15) +#define PMCR_TX_EN BIT(14) +#define PMCR_RX_EN BIT(13) +#define PMCR_BACKOFF_EN BIT(9) +#define PMCR_BACKPR_EN BIT(8) +#define PMCR_TX_FC_EN BIT(5) +#define PMCR_RX_FC_EN BIT(4) +#define PMCR_FORCE_SPEED_1000 BIT(3) +#define PMCR_FORCE_FDX BIT(1) +#define PMCR_FORCE_LNK BIT(0) +#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ + PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \ + PMCR_TX_EN | PMCR_RX_EN | \ + PMCR_TX_FC_EN | PMCR_RX_FC_EN) +#define PMCR_CPUP_LINK (PMCR_COMMON_LINK | PMCR_FORCE_MODE | \ + PMCR_FORCE_SPEED_1000 | \ + PMCR_FORCE_FDX | \ + PMCR_FORCE_LNK) +#define PMCR_USERP_LINK PMCR_COMMON_LINK +#define PMCR_FIXED_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ + PMCR_FORCE_MODE | PMCR_TX_EN | \ + PMCR_RX_EN | PMCR_BACKPR_EN | \ + PMCR_BACKOFF_EN | \ + PMCR_FORCE_SPEED_1000 | \ + PMCR_FORCE_FDX | \ + PMCR_FORCE_LNK) +#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \ + PMCR_TX_FC_EN | PMCR_RX_FC_EN) + +#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) + +/* Register for MIB */ +#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100) +#define MT7530_MIB_CCR 0x4fe0 +#define CCR_MIB_ENABLE BIT(31) +#define CCR_RX_OCT_CNT_GOOD BIT(7) +#define CCR_RX_OCT_CNT_BAD BIT(6) +#define CCR_TX_OCT_CNT_GOOD BIT(5) +#define CCR_TX_OCT_CNT_BAD BIT(4) +#define CCR_MIB_FLUSH (CCR_RX_OCT_CNT_GOOD | \ + CCR_RX_OCT_CNT_BAD | \ + CCR_TX_OCT_CNT_GOOD | \ + CCR_TX_OCT_CNT_BAD) +#define CCR_MIB_ACTIVATE (CCR_MIB_ENABLE | \ + CCR_RX_OCT_CNT_GOOD | \ + CCR_RX_OCT_CNT_BAD | \ + CCR_TX_OCT_CNT_GOOD | \ + CCR_TX_OCT_CNT_BAD) +/* Register for system reset */ +#define MT7530_SYS_CTRL 0x7000 +#define SYS_CTRL_PHY_RST BIT(2) +#define SYS_CTRL_SW_RST BIT(1) +#define SYS_CTRL_REG_RST BIT(0) + +/* Register for hw trap status */ +#define MT7530_HWTRAP 0x7800 + +/* Register for hw trap modification */ +#define MT7530_MHWTRAP 0x7804 +#define MHWTRAP_MANUAL BIT(16) +#define MHWTRAP_P5_MAC_SEL BIT(13) +#define MHWTRAP_P6_DIS BIT(8) +#define MHWTRAP_P5_RGMII_MODE BIT(7) +#define MHWTRAP_P5_DIS BIT(6) +#define MHWTRAP_PHY_ACCESS BIT(5) + +/* Register for TOP signal control */ +#define MT7530_TOP_SIG_CTRL 0x7808 +#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16)) + +#define MT7530_IO_DRV_CR 0x7810 +#define P5_IO_CLK_DRV(x) ((x) & 0x3) +#define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4) + +#define MT7530_P6ECR 0x7830 +#define P6_INTF_MODE_MASK 0x3 +#define P6_INTF_MODE(x) ((x) & 0x3) + +/* Registers for TRGMII on both sides */ +#define MT7530_TRGMII_RCK_CTRL 0x7a00 +#define GSW_TRGMII_RCK_CTRL 0x300 +#define RX_RST BIT(31) +#define RXC_DQSISEL BIT(30) +#define DQSI1_TAP_MASK (0x7f << 8) +#define DQSI0_TAP_MASK 0x7f +#define DQSI1_TAP(x) (((x) & 0x7f) << 8) +#define DQSI0_TAP(x) ((x) & 0x7f) + +#define MT7530_TRGMII_RCK_RTT 0x7a04 +#define GSW_TRGMII_RCK_RTT 0x304 +#define DQS1_GATE BIT(31) +#define DQS0_GATE BIT(30) + +#define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8) +#define GSW_TRGMII_RD(x) (0x310 + (x) * 8) +#define BSLIP_EN BIT(31) +#define EDGE_CHK BIT(30) +#define RD_TAP_MASK 0x7f +#define RD_TAP(x) ((x) & 0x7f) + +#define GSW_TRGMII_TXCTRL 0x340 +#define MT7530_TRGMII_TXCTRL 0x7a40 +#define TRAIN_TXEN BIT(31) +#define TXC_INV BIT(30) +#define TX_RST BIT(28) + +#define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i)) +#define GSW_TRGMII_TD_ODT(i) (0x354 + 8 * (i)) +#define TD_DM_DRVP(x) ((x) & 0xf) +#define TD_DM_DRVN(x) (((x) & 0xf) << 4) + +#define GSW_INTF_MODE 0x390 +#define INTF_MODE_TRGMII BIT(1) + +#define MT7530_TRGMII_TCK_CTRL 0x7a78 +#define TCK_TAP(x) (((x) & 0xf) << 8) + +#define MT7530_P5RGMIIRXCR
0x7b00 +#define CSR_RGMII_EDGE_ALIGN BIT(8) +#define CSR_RGMII_RXC_0DEG_CFG(x) ((x) & 0xf) + +#define MT7530_P5RGMIITXCR 0x7b04 +#define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f) + +#define MT7530_CREV 0x7ffc +#define CHIP_NAME_SHIFT 16 +#define MT7530_ID 0x7530 + +/* Registers for core PLL access through mmd indirect */ +#define CORE_PLL_GROUP2 0x401 +#define RG_SYSPLL_EN_NORMAL BIT(15) +#define RG_SYSPLL_VODEN BIT(14) +#define RG_SYSPLL_LF BIT(13) +#define RG_SYSPLL_RST_DLY(x) (((x) & 0x3) << 12) +#define RG_SYSPLL_LVROD_EN BIT(10) +#define RG_SYSPLL_PREDIV(x) (((x) & 0x3) << 8) +#define RG_SYSPLL_POSDIV(x) (((x) & 0x3) << 5) +#define RG_SYSPLL_FBKSEL BIT(4) +#define RT_SYSPLL_EN_AFE_OLT BIT(0) + +#define CORE_PLL_GROUP4 0x403 +#define RG_SYSPLL_DDSFBK_EN BIT(12) +#define RG_SYSPLL_BIAS_EN BIT(11) +#define RG_SYSPLL_BIAS_LPF_EN BIT(10) + +#define CORE_PLL_GROUP5 0x404 +#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff) + +#define CORE_PLL_GROUP6 0x405 +#define RG_LCDDS_PCW_NCPO0(x) ((x) & 0xffff) + +#define CORE_PLL_GROUP7 0x406 +#define RG_LCDDS_PWDB BIT(15) +#define RG_LCDDS_ISO_EN BIT(13) +#define RG_LCCDS_C(x) (((x) & 0x7) << 4) +#define RG_LCDDS_PCW_NCPO_CHG BIT(3) + +#define CORE_PLL_GROUP10 0x409 +#define RG_LCDDS_SSC_DELTA(x) ((x) & 0xfff) + +#define CORE_PLL_GROUP11 0x40a +#define RG_LCDDS_SSC_DELTA1(x) ((x) & 0xfff) + +#define CORE_GSWPLL_GRP1 0x40d +#define RG_GSWPLL_PREDIV(x) (((x) & 0x3) << 14) +#define RG_GSWPLL_POSDIV_200M(x) (((x) & 0x3) << 12) +#define RG_GSWPLL_EN_PRE BIT(11) +#define RG_GSWPLL_FBKSEL BIT(10) +#define RG_GSWPLL_BP BIT(9) +#define RG_GSWPLL_BR BIT(8) +#define RG_GSWPLL_FBKDIV_200M(x) ((x) & 0xff) + +#define CORE_GSWPLL_GRP2 0x40e +#define RG_GSWPLL_POSDIV_500M(x) (((x) & 0x3) << 8) +#define RG_GSWPLL_FBKDIV_500M(x) ((x) & 0xff) + +#define CORE_TRGMII_GSW_CLK_CG 0x410 +#define REG_GSWCK_EN BIT(0) +#define REG_TRGMIICK_EN BIT(1) + +#define MIB_DESC(_s, _o, _n) \ + { \ + .size = (_s), \ + .offset = (_o), \ + .name = (_n), \ + } + +struct mt7530_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +struct mt7530_fdb { + u16 vid; + u8 port_mask; + u8 aging; + u8 mac[6]; + bool noarp; +}; + +struct mt7530_port { + bool enable; + u32 pm; +}; + +/* struct mt7530_priv - This is the main data structure for holding the state + * of the driver + * @dev: The device pointer + * @ds: The pointer to the dsa core structure + * @bus: The bus used for the device and built-in PHY + * @rstc: The pointer to reset control used by MCM + * @ethernet: The regmap used for accessing TRGMII-based registers + * @core_pwr: The power supplied to the core + * @io_pwr: The power supplied to the I/O + * @reset: The descriptor for the GPIO line tied to its reset pin + * @mcm: Flag distinguishing a standalone IC from a multi-chip + * module + * @ports: Holding the state among ports + * @reg_mutex: The lock protecting register access across processes + */ +struct mt7530_priv { + struct device *dev; + struct dsa_switch *ds; + struct mii_bus *bus; + struct reset_control *rstc; + struct regmap *ethernet; + struct regulator *core_pwr; + struct regulator *io_pwr; + struct gpio_desc *reset; + bool mcm; + + struct mt7530_port ports[MT7530_NUM_PORTS]; + /* protects register access across processes */ + struct mutex reg_mutex; +}; + +struct mt7530_hw_stats { + const char *string; + u16 reg; + u8 sizeof_stat; +}; + +struct mt7530_dummy_poll { + struct mt7530_priv *priv; + u32 reg; +}; + +static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p, + struct mt7530_priv
*priv, u32 reg) +{ + p->priv = priv; + p->reg = reg; +} + +#endif /* __MT7530_H */ diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile index c36be318de1aaf1d6d936ad97194b1524112bcbd..6edd869c8d6f545b30654da8c5f17d9ea72587a9 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -1,5 +1,7 @@ obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o mv88e6xxx-objs := chip.o mv88e6xxx-objs += global1.o +mv88e6xxx-objs += global1_atu.o +mv88e6xxx-objs += global1_vtu.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o mv88e6xxx-objs += port.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 03dc886ed3d6be1747d5cef7616f2eb3074a5492..19581d783d8ec7dc9ed9e5fd5a2891e8f2d22135 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3,11 +3,11 @@ * * Copyright (c) 2008 Marvell Semiconductor * - * Copyright (c) 2015 CMC Electronics, Inc. - * Added support for VLAN Table Unit operations - * * Copyright (c) 2016 Andrew Lunn * + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -677,36 +677,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, return err; } -static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6097; -} - -static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6165; -} - -static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6320; -} - -static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6341; -} - -static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6351; -} - -static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip) -{ - return chip->info->family == MV88E6XXX_FAMILY_6352; -} - static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, int speed, int duplex, phy_interface_t mode) @@ -1066,11 +1036,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip) -{ - return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY); -} - static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { @@ -1130,143 +1095,42 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, return err; } -static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd) +static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) { - u16 val; - int err; - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) { - err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid); - if (err) - return err; - } else if (mv88e6xxx_num_databases(chip) == 256) { - /* ATU DBNum[7:4] are located in ATU Control 15:12 */ - err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val); - if (err) - return err; - - err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, - (val & 0xfff) | ((fid << 8) & 0xf000)); - if (err) - return err; - - /* ATU DBNum[3:0] are located in ATU Operation 3:0 */ - cmd |= fid & 0xf; - } - - err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd); 
- if (err) - return err; - - return _mv88e6xxx_atu_wait(chip); -} - -static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_atu_entry *entry) -{ - u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK; - - if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { - unsigned int mask, shift; - - if (entry->trunk) { - data |= GLOBAL_ATU_DATA_TRUNK; - mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; - shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; - } else { - mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; - shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; - } - - data |= (entry->portv_trunkid << shift) & mask; - } - - return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data); -} - -static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_atu_entry *entry, - bool static_too) -{ - int op; - int err; - - err = _mv88e6xxx_atu_wait(chip); - if (err) - return err; - - err = _mv88e6xxx_atu_data_write(chip, entry); - if (err) - return err; - - if (entry->fid) { - op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB : - GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB; - } else { - op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL : - GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; - } - - return _mv88e6xxx_atu_cmd(chip, entry->fid, op); -} + struct dsa_switch *ds = NULL; + struct net_device *br; + u16 pvlan; + int i; -static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip, - u16 fid, bool static_too) -{ - struct mv88e6xxx_atu_entry entry = { - .fid = fid, - .state = 0, /* EntryState bits must be 0 */ - }; + if (dev < DSA_MAX_SWITCHES) + ds = chip->ds->dst->ds[dev]; - return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); -} - -static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid, - int from_port, int to_port, bool static_too) -{ - struct mv88e6xxx_atu_entry entry = { - .trunk = false, - .fid = fid, - }; + /* Prevent frames from unknown switch or port */ + if (!ds || port >= ds->num_ports) + return 0; - /* EntryState bits must be 0xF */ - entry.state = GLOBAL_ATU_DATA_STATE_MASK; + /* Frames from DSA links and CPU ports can egress any local port */ + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) + return mv88e6xxx_port_mask(chip); - /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */ - entry.portv_trunkid = (to_port & 0x0f) << 4; - entry.portv_trunkid |= from_port & 0x0f; + br = ds->ports[port].bridge_dev; + pvlan = 0; - return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); -} + /* Frames from user ports can egress any local DSA links and CPU ports, + * as well as any local member of their bridge group. 
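+ * For example, a user port bridged with port 2 on a chip whose CPU + * port is 6 computes BIT(2) | BIT(6) plus its own bit here; for the + * in-chip case, mv88e6xxx_port_vlan_map() below strips the port's own + * bit again.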
+ */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) + if (dsa_is_cpu_port(chip->ds, i) || + dsa_is_dsa_port(chip->ds, i) || + (br && chip->ds->ports[i].bridge_dev == br)) + pvlan |= BIT(i); -static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, - int port, bool static_too) -{ - /* Destination port 0xF means remove the entries */ - return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too); + return pvlan; } -static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) +static int mv88e6xxx_port_vlan_map(struct mv88e6xxx_chip *chip, int port) { - struct dsa_switch *ds = chip->ds; - struct net_device *bridge = ds->ports[port].bridge_dev; - u16 output_ports = 0; - int i; - - /* allow CPU port or DSA link(s) to send frames to every port */ - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { - output_ports = ~0; - } else { - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - /* allow sending frames to every group member */ - if (bridge && ds->ports[i].bridge_dev == bridge) - output_ports |= BIT(i); - - /* allow sending frames to CPU port and DSA link(s) */ - if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) - output_ports |= BIT(i); - } - } + u16 output_ports = mv88e6xxx_port_vlan(chip, chip->ds->index, port); /* prevent frames from going back out of the port they came in on */ output_ports &= ~BIT(port); @@ -1306,182 +1170,98 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, netdev_err(ds->ports[port].netdev, "failed to update state\n"); } -static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) +static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip) { - struct mv88e6xxx_chip *chip = ds->priv; int err; - mutex_lock(&chip->reg_lock); - err = _mv88e6xxx_atu_remove(chip, 0, port, false); - mutex_unlock(&chip->reg_lock); - + err = mv88e6xxx_g1_atu_flush(chip, 0, true); if (err) - netdev_err(ds->ports[port].netdev, "failed to flush ATU\n"); -} - -static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip) -{ - return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY); -} - -static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op) -{ - int err; + return err; - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op); + err = mv88e6xxx_g1_atu_set_learn2all(chip, true); if (err) return err; - return _mv88e6xxx_vtu_wait(chip); + return mv88e6xxx_g1_atu_set_age_time(chip, 300000); } -static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port) { - int ret; + u16 pvlan = 0; - ret = _mv88e6xxx_vtu_wait(chip); - if (ret < 0) - return ret; + if (!mv88e6xxx_has_pvt(chip)) + return -EOPNOTSUPP; - return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL); + /* Skip the local source device, which uses in-chip port VLAN */ + if (dev != chip->ds->index) + pvlan = mv88e6xxx_port_vlan(chip, dev, port); + + return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan); } -static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry, - unsigned int nibble_offset) +static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip) { - u16 regs[3]; - int i, err; - - for (i = 0; i < 3; ++i) { - u16 *reg = ®s[i]; + int dev, port; + int err; - err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg); - if (err) - return err; - } + if (!mv88e6xxx_has_pvt(chip)) + return 0; - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - unsigned int shift = (i % 4) * 4 + nibble_offset; - u16 reg = regs[i / 
4]; + /* Clear 5 Bit Port for usage with Marvell Link Street devices: + * use 4 bits for the Src_Port/Src_Trunk and 5 bits for the Src_Dev. + */ + err = mv88e6xxx_g2_misc_4_bit_port(chip); + if (err) + return err; - entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK; + for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; ++dev) { + for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; ++port) { + err = mv88e6xxx_pvt_map(chip, dev, port); + if (err) + return err; + } } return 0; } -static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) -{ - return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0); -} - -static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) -{ - return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2); -} - -static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry, - unsigned int nibble_offset) +static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) { - u16 regs[3] = { 0 }; - int i, err; - - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - unsigned int shift = (i % 4) * 4 + nibble_offset; - u8 data = entry->data[i]; - - regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift; - } - - for (i = 0; i < 3; ++i) { - u16 reg = regs[i]; + struct mv88e6xxx_chip *chip = ds->priv; + int err; - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg); - if (err) - return err; - } + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_atu_remove(chip, 0, port, false); + mutex_unlock(&chip->reg_lock); - return 0; + if (err) + netdev_err(ds->ports[port].netdev, "failed to flush ATU\n"); } -static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0); -} + if (!chip->info->max_vid) + return 0; -static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) -{ - return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2); + return mv88e6xxx_g1_vtu_flush(chip); } -static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid) +static int mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) { - return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, - vid & GLOBAL_VTU_VID_MASK); + if (!chip->info->ops->vtu_getnext) + return -EOPNOTSUPP; + + return chip->info->ops->vtu_getnext(chip, entry); } -static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) { - struct mv88e6xxx_vtu_entry next = { 0 }; - u16 val; - int err; - - err = _mv88e6xxx_vtu_wait(chip); - if (err) - return err; - - err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT); - if (err) - return err; - - err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val); - if (err) - return err; - - next.vid = val & GLOBAL_VTU_VID_MASK; - next.valid = !!(val & GLOBAL_VTU_VID_VALID); - - if (next.valid) { - err = mv88e6xxx_vtu_data_read(chip, &next); - if (err) - return err; - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) { - err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val); - if (err) - return err; - - next.fid = val & GLOBAL_VTU_FID_MASK; - } else if (mv88e6xxx_num_databases(chip) == 256) { - /* VTU DBNum[7:4] are located in VTU Operation 11:8, and - * VTU DBNum[3:0] are located in VTU Operation 3:0 - */ - err = 
mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val); - if (err) - return err; - - next.fid = (val & 0xf00) >> 4; - next.fid |= val & 0xf; - } - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) { - err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val); - if (err) - return err; - - next.sid = val & GLOBAL_VTU_SID_MASK; - } - } + if (!chip->info->ops->vtu_loadpurge) + return -EOPNOTSUPP; - *entry = next; - return 0; + return chip->info->ops->vtu_loadpurge(chip, entry); } static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, @@ -1489,11 +1269,13 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, int (*cb)(struct switchdev_obj *obj)) { struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry next; + struct mv88e6xxx_vtu_entry next = { + .vid = chip->info->max_vid, + }; u16 pvid; int err; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + if (!chip->info->max_vid) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); @@ -1502,19 +1284,15 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, if (err) goto unlock; - err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); - if (err) - goto unlock; - do { - err = _mv88e6xxx_vtu_getnext(chip, &next); + err = mv88e6xxx_vtu_getnext(chip, &next); if (err) break; if (!next.valid) break; - if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + if (next.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; /* reinit and dump this VLAN obj */ @@ -1522,7 +1300,7 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, vlan->vid_end = next.vid; vlan->flags = 0; - if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + if (next.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (next.vid == pvid) @@ -1531,7 +1309,7 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, err = cb(&vlan->obj); if (err) break; - } while (next.vid < GLOBAL_VTU_VID_MASK); + } while (next.vid < chip->info->max_vid); unlock: mutex_unlock(&chip->reg_lock); @@ -1539,133 +1317,12 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, return err; } -static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) -{ - u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; - u16 reg = 0; - int err; - - err = _mv88e6xxx_vtu_wait(chip); - if (err) - return err; - - if (!entry->valid) - goto loadpurge; - - /* Write port member tags */ - err = mv88e6xxx_vtu_data_write(chip, entry); - if (err) - return err; - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) { - reg = entry->sid & GLOBAL_VTU_SID_MASK; - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg); - if (err) - return err; - } - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) { - reg = entry->fid & GLOBAL_VTU_FID_MASK; - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, reg); - if (err) - return err; - } else if (mv88e6xxx_num_databases(chip) == 256) { - /* VTU DBNum[7:4] are located in VTU Operation 11:8, and - * VTU DBNum[3:0] are located in VTU Operation 3:0 - */ - op |= (entry->fid & 0xf0) << 8; - op |= entry->fid & 0xf; - } - - reg = GLOBAL_VTU_VID_VALID; -loadpurge: - reg |= entry->vid & GLOBAL_VTU_VID_MASK; - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg); - if (err) - return err; - - return _mv88e6xxx_vtu_cmd(chip, op); -} - -static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid, - struct mv88e6xxx_vtu_entry *entry) -{ - struct mv88e6xxx_vtu_entry next = { 0 }; - u16 val; - int err; - - err = 
_mv88e6xxx_vtu_wait(chip); - if (err) - return err; - - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, - sid & GLOBAL_VTU_SID_MASK); - if (err) - return err; - - err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT); - if (err) - return err; - - err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val); - if (err) - return err; - - next.sid = val & GLOBAL_VTU_SID_MASK; - - err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val); - if (err) - return err; - - next.valid = !!(val & GLOBAL_VTU_VID_VALID); - - if (next.valid) { - err = mv88e6xxx_stu_data_read(chip, &next); - if (err) - return err; - } - - *entry = next; - return 0; -} - -static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) -{ - u16 reg = 0; - int err; - - err = _mv88e6xxx_vtu_wait(chip); - if (err) - return err; - - if (!entry->valid) - goto loadpurge; - - /* Write port states */ - err = mv88e6xxx_stu_data_write(chip, entry); - if (err) - return err; - - reg = GLOBAL_VTU_VID_VALID; -loadpurge: - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg); - if (err) - return err; - - reg = entry->sid & GLOBAL_VTU_SID_MASK; - err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg); - if (err) - return err; - - return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); -} - -static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) +static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); - struct mv88e6xxx_vtu_entry vlan; + struct mv88e6xxx_vtu_entry vlan = { + .vid = chip->info->max_vid, + }; int i, err; bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); @@ -1680,12 +1337,8 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) } /* Set every FID bit used by the VLAN entries */ - err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); - if (err) - return err; - do { - err = _mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; @@ -1693,7 +1346,7 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) break; set_bit(vlan.fid, fid_bitmap); - } while (vlan.vid < GLOBAL_VTU_VID_MASK); + } while (vlan.vid < chip->info->max_vid); /* The reset value 0x000 is used to indicate that multiple address * databases are not needed. Return the next positive available. @@ -1703,92 +1356,55 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) return -ENOSPC; /* Clear the database */ - return _mv88e6xxx_atu_flush(chip, *fid, true); + return mv88e6xxx_g1_atu_flush(chip, *fid, true); } -static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, - struct mv88e6xxx_vtu_entry *entry) -{ - struct dsa_switch *ds = chip->ds; - struct mv88e6xxx_vtu_entry vlan = { - .valid = true, - .vid = vid, - }; - int i, err; - - err = _mv88e6xxx_fid_new(chip, &vlan.fid); - if (err) - return err; - - /* exclude all ports except the CPU and DSA ports */ - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i) - ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED - : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; - - if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || - mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) || - mv88e6xxx_6341_family(chip)) { - struct mv88e6xxx_vtu_entry vstp; - - /* Adding a VTU entry requires a valid STU entry. As VSTP is not - * implemented, only one STU entry is needed to cover all VTU - * entries. Thus, validate the SID 0. 
- */ - vlan.sid = 0; - err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp); - if (err) - return err; - - if (vstp.sid != vlan.sid || !vstp.valid) { - memset(&vstp, 0, sizeof(vstp)); - vstp.valid = true; - vstp.sid = vlan.sid; - - err = _mv88e6xxx_stu_loadpurge(chip, &vstp); - if (err) - return err; - } - } - - *entry = vlan; - return 0; -} - -static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, - struct mv88e6xxx_vtu_entry *entry, bool creat) +static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_entry *entry, bool new) { int err; if (!vid) return -EINVAL; - err = _mv88e6xxx_vtu_vid_write(chip, vid - 1); - if (err) - return err; + entry->vid = vid - 1; + entry->valid = false; - err = _mv88e6xxx_vtu_getnext(chip, entry); + err = mv88e6xxx_vtu_getnext(chip, entry); if (err) return err; - if (entry->vid != vid || !entry->valid) { - if (!creat) - return -EOPNOTSUPP; - /* -ENOENT would've been more appropriate, but switchdev expects - * -EOPNOTSUPP to inform bridge about an eventual software VLAN. - */ + if (entry->vid == vid && entry->valid) + return 0; - err = _mv88e6xxx_vtu_new(chip, vid, entry); + if (new) { + int i; + + /* Initialize a fresh VLAN entry */ + memset(entry, 0, sizeof(*entry)); + entry->valid = true; + entry->vid = vid; + + /* Include only CPU and DSA ports */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) + entry->member[i] = dsa_is_normal_port(chip->ds, i) ? + GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER : + GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED; + + return mv88e6xxx_atu_new(chip, &entry->fid); } - return err; + /* switchdev expects -EOPNOTSUPP to honor software VLANs */ + return -EOPNOTSUPP; } static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid_begin, u16 vid_end) { struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry vlan; + struct mv88e6xxx_vtu_entry vlan = { + .vid = vid_begin - 1, + }; int i, err; if (!vid_begin) @@ -1796,12 +1412,8 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, mutex_lock(&chip->reg_lock); - err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1); - if (err) - goto unlock; - do { - err = _mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) goto unlock; @@ -1818,7 +1430,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (!ds->ports[port].netdev) continue; - if (vlan.data[i] == + if (vlan.member[i] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; @@ -1852,7 +1464,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, PORT_CONTROL_2_8021Q_DISABLED; int err; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + if (!chip->info->max_vid) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); @@ -1870,7 +1482,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int err; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + if (!chip->info->max_vid) return -EOPNOTSUPP; /* If the requested port doesn't belong to the same bridge as the VLAN @@ -1893,15 +1505,15 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, struct mv88e6xxx_vtu_entry vlan; int err; - err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true); + err = mv88e6xxx_vtu_get(chip, vid, &vlan, true); if (err) return err; - vlan.data[port] = untagged ? + vlan.member[port] = untagged ? 
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; - return _mv88e6xxx_vtu_loadpurge(chip, &vlan); + return mv88e6xxx_vtu_loadpurge(chip, &vlan); } static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, @@ -1913,7 +1525,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; u16 vid; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + if (!chip->info->max_vid) return; mutex_lock(&chip->reg_lock); @@ -1938,15 +1550,15 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry vlan; int i, err; - err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); + err = mv88e6xxx_vtu_get(chip, vid, &vlan, false); if (err) return err; /* Tell switchdev if this VLAN is handled in software */ - if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + if (vlan.member[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) return -EOPNOTSUPP; - vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + vlan.member[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; /* keep the VLAN unless all ports are excluded */ vlan.valid = false; @@ -1954,17 +1566,17 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) continue; - if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + if (vlan.member[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { vlan.valid = true; break; } } - err = _mv88e6xxx_vtu_loadpurge(chip, &vlan); + err = mv88e6xxx_vtu_loadpurge(chip, &vlan); if (err) return err; - return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false); + return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false); } static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, @@ -1974,7 +1586,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 pvid, vid; int err = 0; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + if (!chip->info->max_vid) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); @@ -2001,96 +1613,6 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, return err; } -static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip, - const unsigned char *addr) -{ - int i, err; - - for (i = 0; i < 3; i++) { - err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, - (addr[i * 2] << 8) | addr[i * 2 + 1]); - if (err) - return err; - } - - return 0; -} - -static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip, - unsigned char *addr) -{ - u16 val; - int i, err; - - for (i = 0; i < 3; i++) { - err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val); - if (err) - return err; - - addr[i * 2] = val >> 8; - addr[i * 2 + 1] = val & 0xff; - } - - return 0; -} - -static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_atu_entry *entry) -{ - int ret; - - ret = _mv88e6xxx_atu_wait(chip); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_atu_mac_write(chip, entry->mac); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_atu_data_write(chip, entry); - if (ret < 0) - return ret; - - return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB); -} - -static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, - struct mv88e6xxx_atu_entry *entry); - -static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid, - const u8 *addr, struct mv88e6xxx_atu_entry *entry) -{ - struct mv88e6xxx_atu_entry next; - int err; - - memcpy(next.mac, addr, ETH_ALEN); - eth_addr_dec(next.mac); - - err = _mv88e6xxx_atu_mac_write(chip, next.mac); - if (err) 
- return err; - - do { - err = _mv88e6xxx_atu_getnext(chip, fid, &next); - if (err) - return err; - - if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED) - break; - - if (ether_addr_equal(next.mac, addr)) { - *entry = next; - return 0; - } - } while (ether_addr_greater(addr, next.mac)); - - memset(entry, 0, sizeof(*entry)); - entry->fid = fid; - ether_addr_copy(entry->mac, addr); - - return 0; -} - static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, const unsigned char *addr, u16 vid, u8 state) @@ -2103,25 +1625,36 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (vid == 0) err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid); else - err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); + err = mv88e6xxx_vtu_get(chip, vid, &vlan, false); if (err) return err; - err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry); + entry.state = GLOBAL_ATU_DATA_STATE_UNUSED; + ether_addr_copy(entry.mac, addr); + eth_addr_dec(entry.mac); + + err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry); if (err) return err; + /* Initialize a fresh ATU entry if it isn't found */ + if (entry.state == GLOBAL_ATU_DATA_STATE_UNUSED || + !ether_addr_equal(entry.mac, addr)) { + memset(&entry, 0, sizeof(entry)); + ether_addr_copy(entry.mac, addr); + } + /* Purge the ATU entry only if no port is using it anymore */ if (state == GLOBAL_ATU_DATA_STATE_UNUSED) { - entry.portv_trunkid &= ~BIT(port); - if (!entry.portv_trunkid) + entry.portvec &= ~BIT(port); + if (!entry.portvec) entry.state = GLOBAL_ATU_DATA_STATE_UNUSED; } else { - entry.portv_trunkid |= BIT(port); + entry.portvec |= BIT(port); entry.state = state; } - return _mv88e6xxx_atu_load(chip, &entry); + return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry); } static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, @@ -2161,75 +1694,26 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, return err; } -static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, - struct mv88e6xxx_atu_entry *entry) -{ - struct mv88e6xxx_atu_entry next = { 0 }; - u16 val; - int err; - - next.fid = fid; - - err = _mv88e6xxx_atu_wait(chip); - if (err) - return err; - - err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB); - if (err) - return err; - - err = _mv88e6xxx_atu_mac_read(chip, next.mac); - if (err) - return err; - - err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val); - if (err) - return err; - - next.state = val & GLOBAL_ATU_DATA_STATE_MASK; - if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) { - unsigned int mask, shift; - - if (val & GLOBAL_ATU_DATA_TRUNK) { - next.trunk = true; - mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; - shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; - } else { - next.trunk = false; - mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; - shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; - } - - next.portv_trunkid = (val & mask) >> shift; - } - - *entry = next; - return 0; -} - static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, u16 fid, u16 vid, int port, struct switchdev_obj *obj, int (*cb)(struct switchdev_obj *obj)) { - struct mv88e6xxx_atu_entry addr = { - .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - }; + struct mv88e6xxx_atu_entry addr; int err; - err = _mv88e6xxx_atu_mac_write(chip, addr.mac); - if (err) - return err; + addr.state = GLOBAL_ATU_DATA_STATE_UNUSED; + eth_broadcast_addr(addr.mac); do { - err = _mv88e6xxx_atu_getnext(chip, fid, &addr); + err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr); if (err) return err; if (addr.state == 
GLOBAL_ATU_DATA_STATE_UNUSED) break; - if (addr.trunk || (addr.portv_trunkid & BIT(port)) == 0) + if (addr.trunk || (addr.portvec & BIT(port)) == 0) continue; if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) { @@ -2271,7 +1755,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, int (*cb)(struct switchdev_obj *obj)) { struct mv88e6xxx_vtu_entry vlan = { - .vid = GLOBAL_VTU_VID_MASK, /* all ones */ + .vid = chip->info->max_vid, }; u16 fid; int err; @@ -2286,12 +1770,8 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, return err; /* Dump VLANs' Filtering Information Databases */ - err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid); - if (err) - return err; - do { - err = _mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; @@ -2302,7 +1782,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, obj, cb); if (err) return err; - } while (vlan.vid < GLOBAL_VTU_VID_MASK); + } while (vlan.vid < chip->info->max_vid); return err; } @@ -2321,23 +1801,52 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) +static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, + struct net_device *br) { - struct mv88e6xxx_chip *chip = ds->priv; - int i, err = 0; - - mutex_lock(&chip->reg_lock); + struct dsa_switch *ds; + int port; + int dev; + int err; - /* Remap each port's VLANTable */ - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - if (ds->ports[i].bridge_dev == br) { - err = _mv88e6xxx_port_based_vlan_map(chip, i); + /* Remap the Port VLAN of each local bridge group member */ + for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) { + if (chip->ds->ports[port].bridge_dev == br) { + err = mv88e6xxx_port_vlan_map(chip, port); if (err) - break; + return err; + } + } + + if (!mv88e6xxx_has_pvt(chip)) + return 0; + + /* Remap the Port VLAN of each cross-chip bridge group member */ + for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) { + ds = chip->ds->dst->ds[dev]; + if (!ds) + break; + + for (port = 0; port < ds->num_ports; ++port) { + if (ds->ports[port].bridge_dev == br) { + err = mv88e6xxx_pvt_map(chip, dev, port); + if (err) + return err; + } } } + return 0; +} + +static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *br) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_bridge_map(chip, br); mutex_unlock(&chip->reg_lock); return err; @@ -2347,17 +1856,41 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct mv88e6xxx_chip *chip = ds->priv; - int i; mutex_lock(&chip->reg_lock); + if (mv88e6xxx_bridge_map(chip, br) || + mv88e6xxx_port_vlan_map(chip, port)) + dev_err(ds->dev, "failed to remap in-chip Port VLAN\n"); + mutex_unlock(&chip->reg_lock); +} - /* Remap each port's VLANTable */ - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - if (i == port || ds->ports[i].bridge_dev == br) - if (_mv88e6xxx_port_based_vlan_map(chip, i)) - netdev_warn(ds->ports[i].netdev, - "failed to remap\n"); +static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev, + int port, struct net_device *br) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + if (!mv88e6xxx_has_pvt(chip)) + return 0; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_pvt_map(chip, dev, port); + mutex_unlock(&chip->reg_lock); + + return err; +} + 
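A note on the cross-chip machinery introduced above: mv88e6xxx_bridge_map() walks every switch in the DSA tree and, for remote bridge members, programs the chip's Port VLAN Table (PVT), which maps an (external device, external port) pair to the set of local ports that traffic from that pair may egress. The sketch below is a minimal standalone model of that table, not the driver code; the 5-bit device / 4-bit port indexing reflects the usual Global 2 PVT layout and should be treated as an assumption here.

#include <stdint.h>
#include <stdio.h>

#define PVT_DEVS  32    /* assumed: 5-bit external device ID */
#define PVT_PORTS 16    /* assumed: 4-bit external port number */

/* One egress mask of local ports per (remote device, remote port) pair */
static uint16_t pvt[PVT_DEVS][PVT_PORTS];

/* Model of mv88e6xxx_pvt_map(): let frames that entered the fabric at
 * (dev, port) leave this chip only through the ports set in egress_mask.
 */
static void pvt_map(int dev, int port, uint16_t egress_mask)
{
        pvt[dev][port] = egress_mask;
}

int main(void)
{
        /* Remote switch 1, port 3 joins a bridge spanning local ports 0-2 */
        pvt_map(1, 3, 0x0007);
        printf("pvt[1][3] = 0x%04x\n", pvt[1][3]);
        return 0;
}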
+static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev, + int port, struct net_device *br) +{ + struct mv88e6xxx_chip *chip = ds->priv; + + if (!mv88e6xxx_has_pvt(chip)) + return; + mutex_lock(&chip->reg_lock); + if (mv88e6xxx_pvt_map(chip, dev, port)) + dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); mutex_unlock(&chip->reg_lock); } @@ -2430,73 +1963,88 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip) err = mv88e6xxx_serdes_write(chip, MII_BMCR, val); } - return err; + return err; +} + +static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_frame_mode frame, u16 egress, + u16 etype) +{ + int err; + + if (!chip->info->ops->port_set_frame_mode) + return -EOPNOTSUPP; + + err = mv88e6xxx_port_set_egress_mode(chip, port, egress); + if (err) + return err; + + err = chip->info->ops->port_set_frame_mode(chip, port, frame); + if (err) + return err; + + if (chip->info->ops->port_set_ether_type) + return chip->info->ops->port_set_ether_type(chip, port, etype); + + return 0; } -static int mv88e6xxx_setup_port_dsa(struct mv88e6xxx_chip *chip, int port, - int upstream_port) +static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port) { - int err; - - err = chip->info->ops->port_set_frame_mode( - chip, port, MV88E6XXX_FRAME_MODE_DSA); - if (err) - return err; + return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL, + PORT_CONTROL_EGRESS_UNMODIFIED, + PORT_ETH_TYPE_DEFAULT); +} - return chip->info->ops->port_set_egress_unknowns( - chip, port, port == upstream_port); +static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA, + PORT_CONTROL_EGRESS_UNMODIFIED, + PORT_ETH_TYPE_DEFAULT); } -static int mv88e6xxx_setup_port_cpu(struct mv88e6xxx_chip *chip, int port) +static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port) { - int err; + return mv88e6xxx_set_port_mode(chip, port, + MV88E6XXX_FRAME_MODE_ETHERTYPE, + PORT_CONTROL_EGRESS_ADD_TAG, ETH_P_EDSA); +} - switch (chip->info->tag_protocol) { - case DSA_TAG_PROTO_EDSA: - err = chip->info->ops->port_set_frame_mode( - chip, port, MV88E6XXX_FRAME_MODE_ETHERTYPE); - if (err) - return err; +static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port) +{ + if (dsa_is_dsa_port(chip->ds, port)) + return mv88e6xxx_set_port_mode_dsa(chip, port); - err = mv88e6xxx_port_set_egress_mode( - chip, port, PORT_CONTROL_EGRESS_ADD_TAG); - if (err) - return err; + if (dsa_is_normal_port(chip->ds, port)) + return mv88e6xxx_set_port_mode_normal(chip, port); - if (chip->info->ops->port_set_ether_type) - err = chip->info->ops->port_set_ether_type( - chip, port, ETH_P_EDSA); - break; + /* Setup CPU port mode depending on its supported tag format */ + if (chip->info->tag_protocol == DSA_TAG_PROTO_DSA) + return mv88e6xxx_set_port_mode_dsa(chip, port); - case DSA_TAG_PROTO_DSA: - err = chip->info->ops->port_set_frame_mode( - chip, port, MV88E6XXX_FRAME_MODE_DSA); - if (err) - return err; + if (chip->info->tag_protocol == DSA_TAG_PROTO_EDSA) + return mv88e6xxx_set_port_mode_edsa(chip, port); - err = mv88e6xxx_port_set_egress_mode( - chip, port, PORT_CONTROL_EGRESS_UNMODIFIED); - break; - default: - err = -EINVAL; - } + return -EINVAL; +} - if (err) - return err; +static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port) +{ + bool message = dsa_is_dsa_port(chip->ds, port); - return 
chip->info->ops->port_set_egress_unknowns(chip, port, true); + return mv88e6xxx_port_set_message_port(chip, port, message); } -static int mv88e6xxx_setup_port_normal(struct mv88e6xxx_chip *chip, int port) +static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port) { - int err; + bool flood = port == dsa_upstream_port(chip->ds); - err = chip->info->ops->port_set_frame_mode( - chip, port, MV88E6XXX_FRAME_MODE_NORMAL); - if (err) - return err; + /* Upstream ports flood frames with unknown unicast or multicast DA */ + if (chip->info->ops->port_set_egress_floods) + return chip->info->ops->port_set_egress_floods(chip, port, + flood, flood); - return chip->info->ops->port_set_egress_unknowns(chip, port, false); + return 0; } static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) @@ -2541,14 +2089,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (err) return err; - if (dsa_is_cpu_port(ds, port)) { - err = mv88e6xxx_setup_port_cpu(chip, port); - } else if (dsa_is_dsa_port(ds, port)) { - err = mv88e6xxx_setup_port_dsa(chip, port, - dsa_upstream_port(ds)); - } else { - err = mv88e6xxx_setup_port_normal(chip, port); - } + err = mv88e6xxx_setup_port_mode(chip, port); + if (err) + return err; + + err = mv88e6xxx_setup_egress_floods(chip, port); if (err) return err; @@ -2623,20 +2168,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return err; } - if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || - mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || - mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) { - /* Port ATU control: disable limiting the number of - * address database entries that this port is allowed - * to use. - */ - err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, - 0x0000); - /* Priority Override: disable DA, SA and VTU priority - * override. - */ - err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, - 0x0000); + if (chip->info->ops->port_disable_learn_limit) { + err = chip->info->ops->port_disable_learn_limit(chip, port); + if (err) + return err; + } + + if (chip->info->ops->port_disable_pri_override) { + err = chip->info->ops->port_disable_pri_override(chip, port); if (err) return err; } @@ -2653,10 +2192,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return err; } - /* Port Control 1: disable trunking, disable sending - * learning messages to this port. 
- */ - err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000); + err = mv88e6xxx_setup_message_port(chip, port); if (err) return err; @@ -2668,7 +2204,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if (err) return err; - err = _mv88e6xxx_port_based_vlan_map(chip, port); + err = mv88e6xxx_port_vlan_map(chip, port); if (err) return err; @@ -2697,33 +2233,6 @@ static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) return 0; } -static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip, - unsigned int msecs) -{ - const unsigned int coeff = chip->info->age_time_coeff; - const unsigned int min = 0x01 * coeff; - const unsigned int max = 0xff * coeff; - u8 age_time; - u16 val; - int err; - - if (msecs < min || msecs > max) - return -ERANGE; - - /* Round to nearest multiple of coeff */ - age_time = (msecs + coeff / 2) / coeff; - - err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val); - if (err) - return err; - - /* AgeTime is 11:4 bits */ - val &= ~0xff0; - val |= age_time << 4; - - return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val); -} - static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { @@ -2731,7 +2240,7 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_g1_set_age_time(chip, ageing_time); + err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time); mutex_unlock(&chip->reg_lock); return err; @@ -2769,29 +2278,6 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) if (err) return err; - /* Clear all the VTU and STU entries */ - err = _mv88e6xxx_vtu_stu_flush(chip); - if (err < 0) - return err; - - /* Set the default address aging time to 5 minutes, and - * enable address learn messages to be sent to all message - * ports. - */ - err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, - GLOBAL_ATU_CONTROL_LEARN2ALL); - if (err) - return err; - - err = mv88e6xxx_g1_set_age_time(chip, 300000); - if (err) - return err; - - /* Clear all ATU entries */ - err = _mv88e6xxx_atu_flush(chip, 0, true); - if (err) - return err; - /* Configure the IP ToS mapping registers. */ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000); if (err) @@ -2872,6 +2358,18 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; } + err = mv88e6xxx_vtu_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_pvt_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_atu_setup(chip); + if (err) + goto unlock; + /* Some generations have the configuration of sending reserved * management frames to the CPU in global2, others in * global1. 
Hence it does not fit the two setup functions @@ -3101,10 +2599,12 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3116,6 +2616,8 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6095_ops = { @@ -3127,7 +2629,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, .port_set_frame_mode = mv88e6085_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6185_port_set_egress_floods, .port_set_upstream_port = mv88e6095_port_set_upstream_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, @@ -3137,6 +2639,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, + .vtu_getnext = mv88e6185_g1_vtu_getnext, + .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6097_ops = { @@ -3149,11 +2653,13 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3163,6 +2669,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6123_ops = { @@ -3174,7 +2682,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, .port_set_frame_mode = mv88e6085_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, + 
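The hooks being added throughout these tables (.port_disable_learn_limit, .port_disable_pri_override, .vtu_getnext, .vtu_loadpurge) follow the driver's NULL-checked ops convention: the core calls a hook only if the chip provides it, so per-family if/else chains become per-chip function pointers. Below is a minimal standalone model of that convention, with hypothetical names rather than the driver's real types; the ops table resumes after the sketch.

#include <stdio.h>

struct chip;

struct chip_ops {
        /* Optional hook: simply skipped when NULL */
        int (*port_disable_learn_limit)(struct chip *chip, int port);
};

struct chip {
        const struct chip_ops *ops;
};

static int setup_port(struct chip *chip, int port)
{
        int err;

        /* Core-side pattern: probe the pointer, then trust the chip code */
        if (chip->ops->port_disable_learn_limit) {
                err = chip->ops->port_disable_learn_limit(chip, port);
                if (err)
                        return err;
        }
        return 0;
}

static int stub_learn_limit(struct chip *chip, int port)
{
        printf("learn limit disabled on port %d\n", port);
        return 0;
}

int main(void)
{
        static const struct chip_ops ops = {
                .port_disable_learn_limit = stub_learn_limit,
        };
        struct chip chip = { .ops = &ops };

        return setup_port(&chip, 1);
}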
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3184,6 +2694,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6131_ops = { @@ -3196,7 +2708,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6185_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_set_upstream_port = mv88e6095_port_set_upstream_port, .port_jumbo_config = mv88e6165_port_jumbo_config, @@ -3213,6 +2725,41 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, + .vtu_getnext = mv88e6185_g1_vtu_getnext, + .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, +}; + +static const struct mv88e6xxx_ops mv88e6141_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6161_ops = { @@ -3225,11 +2772,13 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = 
mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3239,6 +2788,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6165_ops = { @@ -3249,6 +2800,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3258,6 +2811,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6171_ops = { @@ -3271,11 +2826,13 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3285,6 +2842,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6172_ops = { @@ -3300,11 +2859,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3314,6 +2875,8 @@ static const 
struct mv88e6xxx_ops mv88e6172_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6175_ops = { @@ -3327,11 +2890,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3341,6 +2906,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6176_ops = { @@ -3356,11 +2923,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3370,6 +2939,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6185_ops = { @@ -3381,7 +2952,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, .port_set_frame_mode = mv88e6085_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6185_port_set_egress_floods, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_set_upstream_port = mv88e6095_port_set_upstream_port, .stats_snapshot = mv88e6xxx_g1_stats_snapshot, @@ -3395,6 +2966,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, + .vtu_getnext = mv88e6185_g1_vtu_getnext, + .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6190_ops = { @@ -3410,9 +2983,11 @@ static 
const struct mv88e6xxx_ops mv88e6190_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_config = mv88e6390_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3423,6 +2998,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6190x_ops = { @@ -3438,9 +3015,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_set_speed = mv88e6390x_port_set_speed, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_config = mv88e6390_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3451,6 +3030,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6191_ops = { @@ -3466,9 +3047,11 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_config = mv88e6390_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3479,6 +3062,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6240_ops = { @@ -3494,11 +3079,13 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, 
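Every .port_set_egress_unknowns line in these tables becomes .port_set_egress_floods, widening the hook from one boolean to independent unknown-unicast and unknown-multicast flood controls; the core (mv88e6xxx_setup_egress_floods() earlier in this patch) enables both only on the upstream port. A standalone model of that policy decision follows; the ops table resumes below.

#include <stdbool.h>
#include <stdio.h>

/* Core-side policy: only the upstream-facing port floods frames whose
 * destination address is not yet in the address database.
 */
static void egress_flood_policy(int port, int upstream_port,
                                bool *uc_flood, bool *mc_flood)
{
        bool flood = (port == upstream_port);

        *uc_flood = flood;      /* unknown unicast DA */
        *mc_flood = flood;      /* unknown multicast DA */
}

int main(void)
{
        bool uc, mc;

        egress_flood_policy(0, 5, &uc, &mc);
        printf("port 0: uc=%d mc=%d\n", uc, mc);        /* 0 0 */
        egress_flood_policy(5, 5, &uc, &mc);
        printf("port 5: uc=%d mc=%d\n", uc, mc);        /* 1 1 */
        return 0;
}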
.port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3508,6 +3095,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6290_ops = { @@ -3523,10 +3112,12 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_config = mv88e6390_port_pause_config, .port_set_cmode = mv88e6390x_port_set_cmode, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3537,6 +3128,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -3551,11 +3144,13 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, @@ -3564,6 +3159,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .g1_set_egress_port = mv88e6095_g1_set_egress_port, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6185_g1_vtu_getnext, + .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -3578,11 +3175,13 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = 
mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, @@ -3590,6 +3189,41 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .g1_set_cpu_port = mv88e6095_g1_set_cpu_port, .g1_set_egress_port = mv88e6095_g1_set_egress_port, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6185_g1_vtu_getnext, + .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, +}; + +static const struct mv88e6xxx_ops mv88e6341_ops = { + /* MV88E6XXX_FAMILY_6341 */ + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed = mv88e6390_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_jumbo_config = mv88e6165_port_jumbo_config, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, + .g1_set_egress_port = mv88e6390_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, + .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3603,11 +3237,13 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3617,6 +3253,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops 
mv88e6351_ops = { @@ -3630,11 +3268,13 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .port_set_speed = mv88e6185_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3644,6 +3284,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -3659,11 +3301,13 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .port_set_speed = mv88e6352_port_set_speed, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6097_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6320_g1_stats_snapshot, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3673,64 +3317,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, -}; - -static const struct mv88e6xxx_ops mv88e6141_ops = { - /* MV88E6XXX_FAMILY_6341 */ - .get_eeprom = mv88e6xxx_g2_get_eeprom8, - .set_eeprom = mv88e6xxx_g2_set_eeprom8, - .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, - .port_set_link = mv88e6xxx_port_set_link, - .port_set_duplex = mv88e6xxx_port_set_duplex, - .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, - .port_set_speed = mv88e6390_port_set_speed, - .port_tag_remap = mv88e6095_port_tag_remap, - .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, - .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_jumbo_config = mv88e6165_port_jumbo_config, - .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, - .port_pause_config = mv88e6097_port_pause_config, - .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_get_sset_count = mv88e6320_stats_get_sset_count, - .stats_get_strings = mv88e6320_stats_get_strings, - .stats_get_stats = mv88e6390_stats_get_stats, - .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, - .g1_set_egress_port = mv88e6390_g1_set_egress_port, - .watchdog_ops = &mv88e6390_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, - .reset 
= mv88e6352_g1_reset, -}; - -static const struct mv88e6xxx_ops mv88e6341_ops = { - /* MV88E6XXX_FAMILY_6341 */ - .get_eeprom = mv88e6xxx_g2_get_eeprom8, - .set_eeprom = mv88e6xxx_g2_set_eeprom8, - .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, - .port_set_link = mv88e6xxx_port_set_link, - .port_set_duplex = mv88e6xxx_port_set_duplex, - .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, - .port_set_speed = mv88e6390_port_set_speed, - .port_tag_remap = mv88e6095_port_tag_remap, - .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, - .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_jumbo_config = mv88e6165_port_jumbo_config, - .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, - .port_pause_config = mv88e6097_port_pause_config, - .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_get_sset_count = mv88e6320_stats_get_sset_count, - .stats_get_strings = mv88e6320_stats_get_strings, - .stats_get_stats = mv88e6390_stats_get_stats, - .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, - .g1_set_egress_port = mv88e6390_g1_set_egress_port, - .watchdog_ops = &mv88e6390_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, - .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6390_ops = { @@ -3746,12 +3334,14 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_set_speed = mv88e6390_port_set_speed, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6390_port_pause_config, .port_set_cmode = mv88e6390x_port_set_cmode, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3762,6 +3352,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -3777,11 +3369,13 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_speed = mv88e6390x_port_set_speed, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6390_port_pause_config, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, 
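The .vtu_getnext/.vtu_loadpurge pairs these tables gain are the accessors behind the rewritten dump loop earlier in the patch: seed the VID with chip->info->max_vid so the first GetNext wraps around to the lowest configured VLAN, then iterate until the VID climbs back to max_vid. A simplified standalone model of that traversal over a sorted table (fake getnext helper, not the driver API); the ops table resumes below.

#include <stdint.h>
#include <stdio.h>

static const uint16_t vtu[] = { 1, 100, 200 };  /* sorted valid VIDs */

/* Fake GetNext: from max_vid, wrap to the lowest entry; otherwise
 * return the next higher VID, or max_vid when none is left.
 */
static void fake_vtu_getnext(uint16_t *vid, uint16_t max_vid)
{
        unsigned int i;

        if (*vid == max_vid) {
                *vid = vtu[0];
                return;
        }
        for (i = 0; i < sizeof(vtu) / sizeof(vtu[0]); i++)
                if (vtu[i] > *vid) {
                        *vid = vtu[i];
                        return;
                }
        *vid = max_vid;
}

int main(void)
{
        uint16_t max_vid = 4095;
        uint16_t vid = max_vid; /* seed: first step wraps to VID 1 */

        do {
                fake_vtu_getnext(&vid, max_vid);
                if (vid < max_vid)
                        printf("dump VID %u\n", vid);
        } while (vid < max_vid);
        return 0;
}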
.stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3792,52 +3386,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, }; -static const struct mv88e6xxx_ops mv88e6391_ops = { - /* MV88E6XXX_FAMILY_6390 */ - .get_eeprom = mv88e6xxx_g2_get_eeprom8, - .set_eeprom = mv88e6xxx_g2_set_eeprom8, - .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, - .port_set_link = mv88e6xxx_port_set_link, - .port_set_duplex = mv88e6xxx_port_set_duplex, - .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, - .port_set_speed = mv88e6390_port_set_speed, - .port_tag_remap = mv88e6390_port_tag_remap, - .port_set_frame_mode = mv88e6351_port_set_frame_mode, - .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns, - .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_pause_config = mv88e6390_port_pause_config, - .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6390_g1_stats_set_histogram, - .stats_get_sset_count = mv88e6320_stats_get_sset_count, - .stats_get_strings = mv88e6320_stats_get_strings, - .stats_get_stats = mv88e6390_stats_get_stats, - .g1_set_cpu_port = mv88e6390_g1_set_cpu_port, - .g1_set_egress_port = mv88e6390_g1_set_egress_port, - .watchdog_ops = &mv88e6390_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, - .reset = mv88e6352_g1_reset, -}; - -static int mv88e6xxx_verify_madatory_ops(struct mv88e6xxx_chip *chip, - const struct mv88e6xxx_ops *ops) -{ - if (!ops->port_set_frame_mode) { - dev_err(chip->dev, "Missing port_set_frame_mode"); - return -EINVAL; - } - - if (!ops->port_set_egress_unknowns) { - dev_err(chip->dev, "Missing port_set_egress_mode"); - return -EINVAL; - } - - return 0; -} - static const struct mv88e6xxx_info mv88e6xxx_table[] = { [MV88E6085] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, @@ -3845,10 +3397,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6085", .num_databases = 4096, .num_ports = 10, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6085_ops, @@ -3860,10 +3415,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6095/88E6095F", .num_databases = 256, .num_ports = 11, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .atu_move_port_mask = 0xf, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6095, .ops = &mv88e6095_ops, @@ -3875,10 +3432,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6097/88E6097F", .num_databases = 4096, .num_ports = 11, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6097_ops, @@ -3890,10 +3450,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6123", .num_databases = 4096, .num_ports = 3, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, 
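The three fields added to every entry in this info table carry capabilities the core used to infer from family flags: a zero .max_vid means the chip has no VTU (hence the early returns in the VLAN entry points at the top of this section), .atu_move_port_mask gives the port-field width for ATU move operations, and .pvt marks chips with a cross-chip Port VLAN Table. A sketch of the gating style follows, using a hypothetical struct; the range check is illustrative rather than the driver's exact error path. The info table resumes below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chip_info {
        uint16_t max_vid;               /* 0 means: no VTU on this chip */
        uint8_t  atu_move_port_mask;    /* 0xf or 0x1f */
        bool     pvt;                   /* has a cross-chip Port VLAN Table */
};

/* Capability gate in the style of the VLAN entry points above */
static int vlan_prepare(const struct chip_info *info, uint16_t vid)
{
        if (!info->max_vid)
                return -1;              /* -EOPNOTSUPP in the driver */
        if (vid > info->max_vid)
                return -2;              /* out of range; illustrative */
        return 0;
}

int main(void)
{
        struct chip_info mv88e6390 = {
                .max_vid = 8191, .atu_move_port_mask = 0x1f, .pvt = true,
        };

        printf("vid 4096 on 6390: %d\n", vlan_prepare(&mv88e6390, 4096));
        return 0;
}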
.tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6123_ops, @@ -3905,25 +3468,47 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6131", .num_databases = 256, .num_ports = 8, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6131_ops, }, + [MV88E6141] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6141, + .family = MV88E6XXX_FAMILY_6341, + .name = "Marvell 88E6341", + .num_databases = 4096, + .num_ports = 6, + .max_vid = 4095, + .port_base_addr = 0x10, + .global1_addr = 0x1b, + .age_time_coeff = 3750, + .atu_move_port_mask = 0x1f, + .pvt = true, + .tag_protocol = DSA_TAG_PROTO_EDSA, + .flags = MV88E6XXX_FLAGS_FAMILY_6341, + .ops = &mv88e6141_ops, + }, + [MV88E6161] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6161", .num_databases = 4096, .num_ports = 6, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6161_ops, @@ -3935,10 +3520,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6165", .num_databases = 4096, .num_ports = 6, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6165_ops, @@ -3950,10 +3538,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6171", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6171_ops, @@ -3965,10 +3556,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6172", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6172_ops, @@ -3980,10 +3574,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6175", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6175_ops, @@ -3995,10 +3592,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6176", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6176_ops, @@ -4010,10 +3610,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6185", .num_databases = 256, .num_ports = 10, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .atu_move_port_mask = 0xf, .tag_protocol = 
DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6185_ops, @@ -4025,11 +3627,14 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, .tag_protocol = DSA_TAG_PROTO_DSA, .age_time_coeff = 3750, .g1_irqs = 9, + .pvt = true, + .atu_move_port_mask = 0x1f, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190_ops, }, @@ -4040,10 +3645,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .atu_move_port_mask = 0x1f, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190x_ops, @@ -4055,13 +3663,16 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6191", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .atu_move_port_mask = 0x1f, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, - .ops = &mv88e6391_ops, + .ops = &mv88e6191_ops, }, [MV88E6240] = { @@ -4070,10 +3681,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6240", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6240_ops, @@ -4085,10 +3699,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6290", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .atu_move_port_mask = 0x1f, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6290_ops, @@ -4100,10 +3717,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6320", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6320_ops, @@ -4115,38 +3735,29 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6321", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .atu_move_port_mask = 0xf, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6321_ops, }, - [MV88E6141] = { - .prod_num = PORT_SWITCH_ID_PROD_NUM_6141, - .family = MV88E6XXX_FAMILY_6341, - .name = "Marvell 88E6341", - .num_databases = 4096, - .num_ports = 6, - .port_base_addr = 0x10, - .global1_addr = 0x1b, - .age_time_coeff = 3750, - .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, - .ops = &mv88e6141_ops, - }, - [MV88E6341] = { .prod_num = PORT_SWITCH_ID_PROD_NUM_6341, .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6341", .num_databases = 4096, .num_ports = 6, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 3750, + .atu_move_port_mask = 0x1f, + 
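On .atu_move_port_mask (0xf on the 4-bit-port generations here, 0x1f on the 6390/6341 families): the ATU move operation packs a source and a destination port side by side in one register field, so the mask width sets the shift between the two. The packing below is assumed from that semantics, not quoted from a datasheet; the info table resumes after the sketch.

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: destination port above source port, each field as
 * wide as the chip's ATU move port mask (4 or 5 bits).
 */
static uint16_t atu_move_encode(uint16_t mask, unsigned int from,
                                unsigned int to)
{
        unsigned int width = 0;
        uint16_t m;

        for (m = mask; m; m >>= 1)      /* field width from the mask */
                width++;

        return ((to & mask) << width) | (from & mask);
}

int main(void)
{
        printf("0x%03x\n", atu_move_encode(0x1f, 9, 10));      /* 0x149 */
        return 0;
}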
.pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6341_ops, @@ -4158,10 +3769,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6350", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6350_ops, @@ -4173,10 +3787,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6351", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6351_ops, @@ -4188,10 +3805,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6352", .num_databases = 4096, .num_ports = 7, + .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .atu_move_port_mask = 0xf, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6352_ops, @@ -4202,10 +3822,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .atu_move_port_mask = 0x1f, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390_ops, @@ -4216,10 +3839,13 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ + .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .atu_move_port_mask = 0x1f, + .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390x_ops, @@ -4455,6 +4081,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_mdb_add = mv88e6xxx_port_mdb_add, .port_mdb_del = mv88e6xxx_port_mdb_del, .port_mdb_dump = mv88e6xxx_port_mdb_dump, + .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, + .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave, }; static struct dsa_switch_driver mv88e6xxx_switch_drv = { @@ -4466,12 +4094,14 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) struct device *dev = chip->dev; struct dsa_switch *ds; - ds = dsa_switch_alloc(dev, DSA_MAX_PORTS); + ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip)); if (!ds) return -ENOMEM; ds->priv = chip; ds->ops = &mv88e6xxx_switch_ops; + ds->ageing_time_min = chip->info->age_time_coeff; + ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; dev_set_drvdata(dev, ds); @@ -4502,10 +4132,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) chip->info = compat_info; - err = mv88e6xxx_verify_madatory_ops(chip, chip->info->ops); - if (err) - return err; - err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 75af86a7fad80feb606fa6bab4cf78720d228eb0..39825837a1c9c062accd80af5a8dc3d6ccd4b771 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -3,7 +3,8 @@ * * Copyright (c) 2008 Marvell Semiconductor * - 
* Copyright (c) 2016 Vivien Didelot + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 1aec7382c02dff90864995d9910ab6f94676a792..46a4ea0f8c473a9dc9d63cc6224c236c3cb0e1e7 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -3,7 +3,8 @@ * * Copyright (c) 2008 Marvell Semiconductor * - * Copyright (c) 2016 Vivien Didelot + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,4 +39,29 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all); +int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, + unsigned int msecs); +int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry); +int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry); +int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all); +int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port, + bool all); + +int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); +int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); +int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); +int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); +int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); +int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); +int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip); + #endif /* _MV88E6XXX_GLOBAL1_H */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c new file mode 100644 index 0000000000000000000000000000000000000000..fa7e7db5171bb5c3f70305458bd942adc74ec668 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -0,0 +1,305 @@ +/* + * Marvell 88E6xxx Address Translation Unit (ATU) support + * + * Copyright (c) 2008 Marvell Semiconductor + * Copyright (c) 2017 Savoir-faire Linux, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include "mv88e6xxx.h" +#include "global1.h" + +/* Offset 0x01: ATU FID Register */ + +static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid) +{ + return mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid & 0xfff); +} + +/* Offset 0x0A: ATU Control Register */ + +int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val); + if (err) + return err; + + if (learn2all) + val |= GLOBAL_ATU_CONTROL_LEARN2ALL; + else + val &= ~GLOBAL_ATU_CONTROL_LEARN2ALL; + + return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val); +} + +int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, + unsigned int msecs) +{ + const unsigned int coeff = chip->info->age_time_coeff; + const unsigned int min = 0x01 * coeff; + const unsigned int max = 0xff * coeff; + u8 age_time; + u16 val; + int err; + + if (msecs < min || msecs > max) + return -ERANGE; + + /* Round to nearest multiple of coeff */ + age_time = (msecs + coeff / 2) / coeff; + + err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val); + if (err) + return err; + + /* AgeTime is 11:4 bits */ + val &= ~0xff0; + val |= age_time << 4; + + err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val); + if (err) + return err; + + dev_dbg(chip->dev, "AgeTime set to 0x%02x (%d ms)\n", age_time, + age_time * coeff); + + return 0; +} + +/* Offset 0x0B: ATU Operation Register */ + +static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY); +} + +static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op) +{ + u16 val; + int err; + + /* FID bits are dispatched all around gradually as more are supported */ + if (mv88e6xxx_num_databases(chip) > 256) { + err = mv88e6xxx_g1_atu_fid_write(chip, fid); + if (err) + return err; + } else { + if (mv88e6xxx_num_databases(chip) > 16) { + /* ATU DBNum[7:4] are located in ATU Control 15:12 */ + err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val); + if (err) + return err; + + val = (val & 0x0fff) | ((fid << 8) & 0xf000); + err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val); + if (err) + return err; + } + + /* ATU DBNum[3:0] are located in ATU Operation 3:0 */ + op |= fid & 0xf; + } + + err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, op); + if (err) + return err; + + return mv88e6xxx_g1_atu_op_wait(chip); +} + +/* Offset 0x0C: ATU Data Register */ + +static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val); + if (err) + return err; + + entry->state = val & 0xf; + if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { + entry->trunk = !!(val & GLOBAL_ATU_DATA_TRUNK); + entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip); + } + + return 0; +} + +static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + u16 data = entry->state & 0xf; + + if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { + if (entry->trunk) + data |= GLOBAL_ATU_DATA_TRUNK; + + data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4; + } + + return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data); +} + +/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1 + * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3 + * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5 + */ + +static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry 
*entry) +{ + u16 val; + int i, err; + + for (i = 0; i < 3; i++) { + err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val); + if (err) + return err; + + entry->mac[i * 2] = val >> 8; + entry->mac[i * 2 + 1] = val & 0xff; + } + + return 0; +} + +static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + u16 val; + int i, err; + + for (i = 0; i < 3; i++) { + val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1]; + err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i, val); + if (err) + return err; + } + + return 0; +} + +/* Address Translation Unit operations */ + +int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry) +{ + int err; + + err = mv88e6xxx_g1_atu_op_wait(chip); + if (err) + return err; + + /* Write the MAC address to iterate from only once */ + if (entry->state == GLOBAL_ATU_DATA_STATE_UNUSED) { + err = mv88e6xxx_g1_atu_mac_write(chip, entry); + if (err) + return err; + } + + err = mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + if (err) + return err; + + err = mv88e6xxx_g1_atu_data_read(chip, entry); + if (err) + return err; + + return mv88e6xxx_g1_atu_mac_read(chip, entry); +} + +int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry) +{ + int err; + + err = mv88e6xxx_g1_atu_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g1_atu_mac_write(chip, entry); + if (err) + return err; + + err = mv88e6xxx_g1_atu_data_write(chip, entry); + if (err) + return err; + + return mv88e6xxx_g1_atu_op(chip, fid, GLOBAL_ATU_OP_LOAD_DB); +} + +static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry, + bool all) +{ + u16 op; + int err; + + err = mv88e6xxx_g1_atu_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g1_atu_data_write(chip, entry); + if (err) + return err; + + /* Flush/Move all or non-static entries from all or a given database */ + if (all && fid) + op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB; + else if (fid) + op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB; + else if (all) + op = GLOBAL_ATU_OP_FLUSH_MOVE_ALL; + else + op = GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; + + return mv88e6xxx_g1_atu_op(chip, fid, op); +} + +int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all) +{ + struct mv88e6xxx_atu_entry entry = { + .state = 0, /* Null EntryState means Flush */ + }; + + return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all); +} + +static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid, + int from_port, int to_port, bool all) +{ + struct mv88e6xxx_atu_entry entry = { 0 }; + unsigned long mask; + int shift; + + if (!chip->info->atu_move_port_mask) + return -EOPNOTSUPP; + + mask = chip->info->atu_move_port_mask; + shift = bitmap_weight(&mask, 16); + + entry.state = 0xf, /* Full EntryState means Move */ + entry.portvec = from_port & mask; + entry.portvec |= (to_port & mask) << shift; + + return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all); +} + +int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port, + bool all) +{ + int from_port = port; + int to_port = chip->info->atu_move_port_mask; + + return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all); +} diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c new file mode 100644 index 0000000000000000000000000000000000000000..9aea22d4c9e2e6f27cfc94dc84d78bb2812138ed --- /dev/null +++ 
b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -0,0 +1,511 @@ +/* + * Marvell 88E6xxx VLAN [Spanning Tree] Translation Unit (VTU [STU]) support + * + * Copyright (c) 2008 Marvell Semiconductor + * Copyright (c) 2015 CMC Electronics, Inc. + * Copyright (c) 2017 Savoir-faire Linux, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "mv88e6xxx.h" +#include "global1.h" + +/* Offset 0x02: VTU FID Register */ + +static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val); + if (err) + return err; + + entry->fid = val & GLOBAL_VTU_FID_MASK; + + return 0; +} + +static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val = entry->fid & GLOBAL_VTU_FID_MASK; + + return mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, val); +} + +/* Offset 0x03: VTU SID Register */ + +static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val); + if (err) + return err; + + entry->sid = val & GLOBAL_VTU_SID_MASK; + + return 0; +} + +static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val = entry->sid & GLOBAL_VTU_SID_MASK; + + return mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, val); +} + +/* Offset 0x05: VTU Operation Register */ + +static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY); +} + +static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op) +{ + int err; + + err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op); + if (err) + return err; + + return mv88e6xxx_g1_vtu_op_wait(chip); +} + +/* Offset 0x06: VTU VID Register */ + +static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val); + if (err) + return err; + + entry->vid = val & 0xfff; + + if (val & GLOBAL_VTU_VID_PAGE) + entry->vid |= 0x1000; + + entry->valid = !!(val & GLOBAL_VTU_VID_VALID); + + return 0; +} + +static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val = entry->vid & 0xfff; + + if (entry->vid & 0x1000) + val |= GLOBAL_VTU_VID_PAGE; + + if (entry->valid) + val |= GLOBAL_VTU_VID_VALID; + + return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, val); +} + +/* Offset 0x07: VTU/STU Data Register 1 + * Offset 0x08: VTU/STU Data Register 2 + * Offset 0x09: VTU/STU Data Register 3 + */ + +static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 regs[3]; + int i; + + /* Read all 3 VTU/STU Data registers */ + for (i = 0; i < 3; ++i) { + u16 *reg = ®s[i]; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg); + if (err) + return err; + } + + /* Extract MemberTag and PortState data */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { + unsigned int member_offset = (i % 4) * 4; + unsigned int state_offset = member_offset + 2; + + entry->member[i] = (regs[i / 4] >> member_offset) & 0x3; + entry->state[i] = (regs[i / 4] >> state_offset) & 0x3; + } 
+ + return 0; +} + +static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 regs[3] = { 0 }; + int i; + + /* Insert MemberTag and PortState data */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { + unsigned int member_offset = (i % 4) * 4; + unsigned int state_offset = member_offset + 2; + + regs[i / 4] |= (entry->member[i] & 0x3) << member_offset; + regs[i / 4] |= (entry->state[i] & 0x3) << state_offset; + } + + /* Write all 3 VTU/STU Data registers */ + for (i = 0; i < 3; ++i) { + u16 reg = regs[i]; + int err; + + err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg); + if (err) + return err; + } + + return 0; +} + +static int mv88e6390_g1_vtu_data_read(struct mv88e6xxx_chip *chip, u8 *data) +{ + u16 regs[2]; + int i; + + /* Read the 2 VTU/STU Data registers */ + for (i = 0; i < 2; ++i) { + u16 *reg = ®s[i]; + int err; + + err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg); + if (err) + return err; + } + + /* Extract data */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { + unsigned int offset = (i % 8) * 2; + + data[i] = (regs[i / 8] >> offset) & 0x3; + } + + return 0; +} + +static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data) +{ + u16 regs[2] = { 0 }; + int i; + + /* Insert data */ + for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { + unsigned int offset = (i % 8) * 2; + + regs[i / 8] |= (data[i] & 0x3) << offset; + } + + /* Write the 2 VTU/STU Data registers */ + for (i = 0; i < 2; ++i) { + u16 reg = regs[i]; + int err; + + err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg); + if (err) + return err; + } + + return 0; +} + +/* VLAN Translation Unit Operations */ + +static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + err = mv88e6xxx_g1_vtu_sid_write(chip, entry); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_GET_NEXT); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_sid_read(chip, entry); + if (err) + return err; + + return mv88e6xxx_g1_vtu_vid_read(chip, entry); +} + +static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *vtu) +{ + struct mv88e6xxx_vtu_entry stu; + int err; + + err = mv88e6xxx_g1_vtu_sid_read(chip, vtu); + if (err) + return err; + + stu.sid = vtu->sid - 1; + + err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu); + if (err) + return err; + + if (stu.sid != vtu->sid || !stu.valid) + return -EINVAL; + + return 0; +} + +static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + err = mv88e6xxx_g1_vtu_op_wait(chip); + if (err) + return err; + + /* To get the next higher active VID, the VTU GetNext operation can be + * started again without setting the VID registers since it already + * contains the last VID. + * + * To save a few hardware accesses and abstract this to the caller, + * write the VID only once, when the entry is given as invalid. 
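
The GetNext convention just described is what lets callers walk the whole table cheaply. A minimal sketch of such a walk (illustrative only, not part of the patch; the driver proper reaches this helper through the chip's vtu_getnext operation): starting from an invalid entry at the highest VID makes the hardware wrap around to the lowest programmed VID, and iteration ends when an invalid entry comes back.

	/* Illustrative sketch only -- not part of the patch.  Walk every
	 * valid VTU entry using the convention documented above: the VID
	 * is written once, for the initial invalid entry, and GetNext
	 * wraps from the highest VID back to the lowest programmed one.
	 */
	static int example_vtu_walk(struct mv88e6xxx_chip *chip)
	{
		struct mv88e6xxx_vtu_entry next = {
			.vid = chip->info->max_vid, /* wrap to first entry */
			.valid = false,		/* force the initial VID write */
		};
		int err;

		do {
			err = mv88e6xxx_g1_vtu_getnext(chip, &next);
			if (err)
				return err;

			if (next.valid)
				dev_dbg(chip->dev, "VID %d in FID %d\n",
					next.vid, next.fid);
		} while (next.valid);

		return 0;
	}
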
+ */ + if (!entry->valid) { + err = mv88e6xxx_g1_vtu_vid_write(chip, entry); + if (err) + return err; + } + + err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_GET_NEXT); + if (err) + return err; + + return mv88e6xxx_g1_vtu_vid_read(chip, entry); +} + +int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_vtu_getnext(chip, entry); + if (err) + return err; + + if (entry->valid) { + err = mv88e6185_g1_vtu_data_read(chip, entry); + if (err) + return err; + + /* VTU DBNum[3:0] are located in VTU Operation 3:0 + * VTU DBNum[7:4] are located in VTU Operation 11:8 + */ + err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val); + if (err) + return err; + + entry->fid = val & 0x000f; + entry->fid |= (val & 0x0f00) >> 4; + } + + return 0; +} + +int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + /* Fetch VLAN MemberTag data from the VTU */ + err = mv88e6xxx_g1_vtu_getnext(chip, entry); + if (err) + return err; + + if (entry->valid) { + /* Fetch (and mask) VLAN PortState data from the STU */ + err = mv88e6xxx_g1_vtu_stu_get(chip, entry); + if (err) + return err; + + err = mv88e6185_g1_vtu_data_read(chip, entry); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_fid_read(chip, entry); + if (err) + return err; + } + + return 0; +} + +int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + /* Fetch VLAN MemberTag data from the VTU */ + err = mv88e6xxx_g1_vtu_getnext(chip, entry); + if (err) + return err; + + if (entry->valid) { + err = mv88e6390_g1_vtu_data_read(chip, entry->member); + if (err) + return err; + + /* Fetch VLAN PortState data from the STU */ + err = mv88e6xxx_g1_vtu_stu_get(chip, entry); + if (err) + return err; + + err = mv88e6390_g1_vtu_data_read(chip, entry->state); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_fid_read(chip, entry); + if (err) + return err; + } + + return 0; +} + +int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; + int err; + + err = mv88e6xxx_g1_vtu_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_vid_write(chip, entry); + if (err) + return err; + + if (entry->valid) { + err = mv88e6185_g1_vtu_data_write(chip, entry); + if (err) + return err; + + /* VTU DBNum[3:0] are located in VTU Operation 3:0 + * VTU DBNum[7:4] are located in VTU Operation 11:8 + */ + op |= entry->fid & 0x000f; + op |= (entry->fid & 0x00f0) << 8; + } + + return mv88e6xxx_g1_vtu_op(chip, op); +} + +int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + err = mv88e6xxx_g1_vtu_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_vid_write(chip, entry); + if (err) + return err; + + if (entry->valid) { + /* Write MemberTag and PortState data */ + err = mv88e6185_g1_vtu_data_write(chip, entry); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_sid_write(chip, entry); + if (err) + return err; + + /* Load STU entry */ + err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_fid_write(chip, entry); + if (err) + return err; + } + + /* Load/Purge VTU entry */ + return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_LOAD_PURGE); +} + +int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) +{ + int err; + + err = 
mv88e6xxx_g1_vtu_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_vid_write(chip, entry); + if (err) + return err; + + if (entry->valid) { + /* Write PortState data */ + err = mv88e6390_g1_vtu_data_write(chip, entry->state); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_sid_write(chip, entry); + if (err) + return err; + + /* Load STU entry */ + err = mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); + if (err) + return err; + + /* Write MemberTag data */ + err = mv88e6390_g1_vtu_data_write(chip, entry->member); + if (err) + return err; + + err = mv88e6xxx_g1_vtu_fid_write(chip, entry); + if (err) + return err; + } + + /* Load/Purge VTU entry */ + return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_VTU_LOAD_PURGE); +} + +int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6xxx_g1_vtu_op_wait(chip); + if (err) + return err; + + return mv88e6xxx_g1_vtu_op(chip, GLOBAL_VTU_OP_FLUSH_ALL); +} diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 8f15bc7b1f5f88d2e8150f78cdf79b9be28cb07c..b3fea55071e3799063b04b485790e4bf73bc9f94 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -4,7 +4,8 @@ * * Copyright (c) 2008 Marvell Semiconductor * - * Copyright (c) 2016 Vivien Didelot + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,6 +13,7 @@ * (at your option) any later version. */ +#include #include #include "mv88e6xxx.h" #include "global2.h" @@ -170,6 +172,50 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip) return err; } +/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register + * Offset 0x0C: Cross-chip Port VLAN Data Register + */ + +static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g2_wait(chip, GLOBAL2_PVT_ADDR, GLOBAL2_PVT_ADDR_BUSY); +} + +static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev, + int src_port, u16 op) +{ + int err; + + /* 9-bit Cross-chip PVT pointer: with GLOBAL2_MISC_5_BIT_PORT cleared, + * source device is 5-bit, source port is 4-bit. 
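
In other words, the pointer is (src_dev << 4) | src_port and the data is a vector of local ports the source is allowed to egress on. A minimal sketch, with hypothetical device and port numbers, of what one PVT update amounts to:

	/* Illustrative sketch only -- not part of the patch.  Allowing
	 * frames sourced from external switch 2, port 3 (hypothetical
	 * numbers) to egress local ports 0 and 5 writes port vector
	 * 0b100001 at PVT pointer (2 << 4) | 3 = 0x23.
	 */
	static int example_pvt_allow(struct mv88e6xxx_chip *chip)
	{
		int src_dev = 2;		/* 5-bit source device */
		int src_port = 3;		/* 4-bit source port */
		u16 pvlan = BIT(0) | BIT(5);	/* allowed local egress ports */

		return mv88e6xxx_g2_pvt_write(chip, src_dev, src_port, pvlan);
	}
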
+ */ + op |= (src_dev & 0x1f) << 4; + op |= (src_port & 0xf); + + err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR, op); + if (err) + return err; + + return mv88e6xxx_g2_pvt_op_wait(chip); +} + +int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, + int src_port, u16 data) +{ + int err; + + err = mv88e6xxx_g2_pvt_op_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_DATA, data); + if (err) + return err; + + return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port, + GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN); +} + /* Offset 0x0D: Switch MAC/WoL/WoF register */ static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip, @@ -522,8 +568,9 @@ static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_smi_phy_cmd(chip, cmd); } -int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr, - int reg_c45, u16 *val, bool external) +static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, + int addr, int reg_c45, u16 *val, + bool external) { int device = (reg_c45 >> 16) & 0x1f; int reg = reg_c45 & 0xffff; @@ -553,8 +600,9 @@ int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr, return 0; } -int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 *val, bool external) +static int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val, + bool external) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; int err; @@ -586,8 +634,9 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external); } -int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr, - int reg_c45, u16 val, bool external) +static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, + int addr, int reg_c45, u16 val, + bool external) { int device = (reg_c45 >> 16) & 0x1f; int reg = reg_c45 & 0xffff; @@ -615,8 +664,9 @@ int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr, return 0; } -int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, int addr, - int reg, u16 val, bool external) +static int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val, + bool external) { u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; int err; @@ -782,6 +832,31 @@ static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip) return err; } +/* Offset 0x1D: Misc Register */ + +static int mv88e6xxx_g2_misc_5_bit_port(struct mv88e6xxx_chip *chip, + bool port_5_bit) +{ + u16 val; + int err; + + err = mv88e6xxx_g2_read(chip, GLOBAL2_MISC, &val); + if (err) + return err; + + if (port_5_bit) + val |= GLOBAL2_MISC_5_BIT_PORT; + else + val &= ~GLOBAL2_MISC_5_BIT_PORT; + + return mv88e6xxx_g2_write(chip, GLOBAL2_MISC, val); +} + +int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g2_misc_5_bit_port(chip, false); +} + static void mv88e6xxx_g2_irq_mask(struct irq_data *d) { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); @@ -964,14 +1039,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) return err; } - if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) { - /* Initialize Cross-chip Port VLAN Table to reset defaults */ - err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR, - GLOBAL2_PVT_ADDR_OP_INIT_ONES); - if (err) - return err; - } - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) { /* Clear the priority override table. 
*/ err = mv88e6xxx_g2_clear_pot(chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index a8b2f9486a4abad1227030f57ce00b6d8cbb1fbb..96046bb12ca17333530f237fddb46438c3298dea 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -3,7 +3,8 @@ * * Copyright (c) 2008 Marvell Semiconductor * - * Copyright (c) 2016 Vivien Didelot + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -41,6 +42,10 @@ int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip, int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, struct ethtool_eeprom *eeprom, u8 *data); +int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, + int src_port, u16 data); +int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); + int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); @@ -109,6 +114,17 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, + int src_port, u16 data) +{ + return -EOPNOTSUPP; +} + +int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 6033f2f6260a464418fcc981e5430e155e836bc3..77236cd72df25e31cd2a25a0aea02718c73d77a2 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -16,6 +16,7 @@ #include #include #include +#include #ifndef UINT64_MAX #define UINT64_MAX (u64)(~((u64)0)) @@ -132,18 +133,19 @@ #define PORT_CONTROL_TAG_IF_BOTH BIT(6) #define PORT_CONTROL_USE_IP BIT(5) #define PORT_CONTROL_USE_TAG BIT(4) -#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3) #define PORT_CONTROL_FORWARD_UNKNOWN BIT(2) -#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_DA (0x0 << 2) -#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_MULTICAST_DA (0x1 << 2) -#define PORT_CONTROL_NOT_EGRESS_UNKNOWN_UNITCAST_DA (0x2 << 2) -#define PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA (0x3 << 2) +#define PORT_CONTROL_EGRESS_FLOODS_MASK (0x3 << 2) +#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA (0x0 << 2) +#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA (0x1 << 2) +#define PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA (0x2 << 2) +#define PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA (0x3 << 2) #define PORT_CONTROL_STATE_MASK 0x03 #define PORT_CONTROL_STATE_DISABLED 0x00 #define PORT_CONTROL_STATE_BLOCKING 0x01 #define PORT_CONTROL_STATE_LEARNING 0x02 #define PORT_CONTROL_STATE_FORWARDING 0x03 #define PORT_CONTROL_1 0x05 +#define PORT_CONTROL_1_MESSAGE_PORT BIT(15) #define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0) #define PORT_BASE_VLAN 0x06 #define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12) @@ -166,7 +168,6 @@ #define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8) #define PORT_CONTROL_2_MAP_DA BIT(7) #define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6) -#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6) #define PORT_CONTROL_2_EGRESS_MONITOR BIT(5) #define PORT_CONTROL_2_INGRESS_MONITOR BIT(4) #define PORT_CONTROL_2_UPSTREAM_MASK 0x0f @@ -181,6 +182,7 @@ #define PORT_ATU_CONTROL 0x0c #define PORT_PRI_OVERRIDE 0x0d #define PORT_ETH_TYPE 0x0f +#define 
PORT_ETH_TYPE_DEFAULT 0x9100 #define PORT_IN_DISCARD_LO 0x10 #define PORT_IN_DISCARD_HI 0x11 #define PORT_IN_FILTERED 0x12 @@ -247,6 +249,7 @@ #define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY) #define GLOBAL_VTU_VID 0x06 #define GLOBAL_VTU_VID_MASK 0xfff +#define GLOBAL_VTU_VID_PAGE BIT(13) #define GLOBAL_VTU_VID_VALID BIT(12) #define GLOBAL_VTU_DATA_0_3 0x07 #define GLOBAL_VTU_DATA_4_7 0x08 @@ -437,9 +440,14 @@ #define GLOBAL2_WDOG_FORCE_IRQ BIT(0) #define GLOBAL2_QOS_WEIGHT 0x1c #define GLOBAL2_MISC 0x1d +#define GLOBAL2_MISC_5_BIT_PORT BIT(14) #define MV88E6XXX_N_FID 4096 +/* PVT limits for 4-bit port and 5-bit switch */ +#define MV88E6XXX_MAX_PVT_SWITCHES 32 +#define MV88E6XXX_MAX_PVT_PORTS 16 + enum mv88e6xxx_frame_mode { MV88E6XXX_FRAME_MODE_NORMAL, MV88E6XXX_FRAME_MODE_DSA, @@ -525,8 +533,6 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */ MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */ - MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */ - MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */ MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ /* Per VLAN Spanning Tree Unit (STU). @@ -551,7 +557,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES) -#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID) #define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID) #define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) @@ -560,13 +565,8 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X) #define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD) #define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA) -#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR) -#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA) #define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) -#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU) -#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU) - /* Ingress Rate Limit unit */ #define MV88E6XXX_FLAGS_IRL \ (MV88E6XXX_FLAG_G2_IRL_CMD | \ @@ -577,11 +577,6 @@ enum mv88e6xxx_cap { (MV88E6XXX_FLAG_SMI_CMD | \ MV88E6XXX_FLAG_SMI_DATA) -/* Cross-chip Port VLAN Table */ -#define MV88E6XXX_FLAGS_PVT \ - (MV88E6XXX_FLAG_G2_PVT_ADDR | \ - MV88E6XXX_FLAG_G2_PVT_DATA) - /* Fiber/SERDES Registers at SMI address F, page 1 */ #define MV88E6XXX_FLAGS_SERDES \ (MV88E6XXX_FLAG_PHY_PAGE | \ @@ -590,43 +585,33 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6095 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAG_G1_ATU_FID | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ + (MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAG_G1_ATU_FID | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ + (MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_VTU | \ 
MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAG_VTU) + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEE | \ @@ -634,64 +619,47 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6341 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT | \ MV88E6XXX_FLAGS_SERDES) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_G1_ATU_FID | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ + (MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_ATU_FID | \ MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT | \ MV88E6XXX_FLAGS_SERDES) #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ - MV88E6XXX_FLAG_STU | \ - MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_MULTI_CHIP | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP) struct mv88e6xxx_ops; @@ -701,20 +669,26 @@ struct mv88e6xxx_info { const char *name; unsigned int num_databases; unsigned int num_ports; + unsigned int max_vid; unsigned int port_base_addr; unsigned int global1_addr; unsigned int age_time_coeff; unsigned int g1_irqs; + bool pvt; enum dsa_tag_protocol tag_protocol; unsigned long long flags; + + /* Mask for FromPort and ToPort value of PortVec used in ATU Move + * operation. 0 means that the ATU Move operation is not supported. 
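
As an illustration of that packing (a sketch, not part of the patch): with a 4-bit mask (0xf) ToPort sits four bits above FromPort, with a 5-bit mask (0x1f) five bits above, and a ToPort of all ones turns the Move into a Remove, which is exactly how mv88e6xxx_g1_atu_remove() uses it.

	/* Illustrative sketch only -- not part of the patch.  How FromPort
	 * and ToPort are packed into the ATU Data PortVec for a Move
	 * operation: the shift equals the number of bits set in the mask,
	 * and a ToPort of all ones means Remove.
	 */
	static u16 example_atu_move_portvec(u8 mask, int from_port, int to_port)
	{
		unsigned long bits = mask;
		int shift = bitmap_weight(&bits, 16); /* 4 for 0xf, 5 for 0x1f */

		return (from_port & mask) | ((to_port & mask) << shift);
	}
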
+ */ + u8 atu_move_port_mask; const struct mv88e6xxx_ops *ops; }; struct mv88e6xxx_atu_entry { - u16 fid; u8 state; bool trunk; - u16 portv_trunkid; + u16 portvec; u8 mac[ETH_ALEN]; }; @@ -723,7 +697,8 @@ struct mv88e6xxx_vtu_entry { u16 fid; u8 sid; bool valid; - u8 data[DSA_MAX_PORTS]; + u8 member[DSA_MAX_PORTS]; + u8 state[DSA_MAX_PORTS]; }; struct mv88e6xxx_bus_ops; @@ -864,14 +839,16 @@ struct mv88e6xxx_ops { int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_frame_mode mode); - int (*port_set_egress_unknowns)(struct mv88e6xxx_chip *chip, int port, - bool on); + int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port, + bool unicast, bool multicast); int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port, u16 etype); int (*port_jumbo_config)(struct mv88e6xxx_chip *chip, int port); int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port); int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port); + int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port); + int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port); /* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc. * Some chips allow this to be configured on specific ports. @@ -906,6 +883,12 @@ struct mv88e6xxx_ops { /* Can be either in g1 or g2, so don't use a prefix */ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); + + /* VLAN Translation Unit operations */ + int (*vtu_getnext)(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); + int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); }; struct mv88e6xxx_irq_ops { @@ -934,6 +917,11 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, return (chip->info->flags & flags) == flags; } +static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip) +{ + return chip->info->pvt; +} + static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip) { return chip->info->num_databases; @@ -944,6 +932,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip) return chip->info->num_ports; } +static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip) +{ + return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0); +} + int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 8875784c4718feee699355f91c2092622417cce1..548a956637eec56b4aac9d7e4b2890e078f51285 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -3,7 +3,8 @@ * * Copyright (c) 2008 Marvell Semiconductor * - * Copyright (c) 2016 Vivien Didelot + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. 
+ * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -497,8 +498,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg); } -int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, - bool on) +static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip, + int port, bool unicast) { int err; u16 reg; @@ -507,7 +508,7 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, if (err) return err; - if (on) + if (unicast) reg |= PORT_CONTROL_FORWARD_UNKNOWN; else reg &= ~PORT_CONTROL_FORWARD_UNKNOWN; @@ -515,8 +516,8 @@ int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg); } -int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, - bool on) +int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port, + bool unicast, bool multicast) { int err; u16 reg; @@ -525,21 +526,45 @@ int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, if (err) return err; - if (on) - reg |= PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA; + reg &= ~PORT_CONTROL_EGRESS_FLOODS_MASK; + + if (unicast && multicast) + reg |= PORT_CONTROL_EGRESS_FLOODS_ALL_UNKNOWN_DA; + else if (unicast) + reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_MC_DA; + else if (multicast) + reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_UC_DA; else - reg &= ~PORT_CONTROL_EGRESS_ALL_UNKNOWN_DA; + reg |= PORT_CONTROL_EGRESS_FLOODS_NO_UNKNOWN_DA; return mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg); } /* Offset 0x05: Port Control 1 */ +int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port, + bool message_port) +{ + u16 val; + int err; + + err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &val); + if (err) + return err; + + if (message_port) + val |= PORT_CONTROL_1_MESSAGE_PORT; + else + val &= ~PORT_CONTROL_1_MESSAGE_PORT; + + return mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, val); +} + /* Offset 0x06: Port Based VLAN Map */ int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map) { - const u16 mask = GENMASK(mv88e6xxx_num_ports(chip) - 1, 0); + const u16 mask = mv88e6xxx_port_mask(chip); u16 reg; int err; @@ -672,8 +697,8 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = { [PORT_CONTROL_2_8021Q_SECURE] = "Secure", }; -int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, - bool on) +static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip, + int port, bool multicast) { int err; u16 reg; @@ -682,14 +707,26 @@ int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, if (err) return err; - if (on) - reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; + if (multicast) + reg |= PORT_CONTROL_2_DEFAULT_FORWARD; else - reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN; + reg &= ~PORT_CONTROL_2_DEFAULT_FORWARD; return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg); } +int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port, + bool unicast, bool multicast) +{ + int err; + + err = mv88e6185_port_set_forward_unknown(chip, port, unicast); + if (err) + return err; + + return mv88e6185_port_set_default_forward(chip, port, multicast); +} + int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, int upstream_port) { @@ -769,6 +806,20 @@ 
int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port) return mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL, 0x0001); } +/* Offset 0x0C: Port ATU Control */ + +int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL, 0); +} + +/* Offset 0x0D: (Priority) Override Register */ + +int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE, 0); +} + /* Offset 0x0f: Port Ether type */ int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index c83cbb3f449182317a21c9fb894a114d99a08ebb..86f40887b6d28d998316706fe36333c471ec1c25 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -3,7 +3,8 @@ * * Copyright (c) 2008 Marvell Semiconductor * - * Copyright (c) 2016 Vivien Didelot + * Copyright (c) 2016-2017 Savoir-faire Linux Inc. + * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -56,14 +57,14 @@ int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_frame_mode mode); int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port, enum mv88e6xxx_frame_mode mode); -int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, - bool on); -int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, - bool on); -int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port, - bool on); +int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port, + bool unicast, bool multicast); +int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port, + bool unicast, bool multicast); int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, u16 etype); +int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port, + bool message_port); int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port); int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port); @@ -75,4 +76,8 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, int upstream_port); + +int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port); +int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port); + #endif /* _MV88E6XXX_PORT_H */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 2c80611b94aef3c5ce9d0f98e8d92e497542123a..149244aac20aa765551b93ca25d78018b28f17f9 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -125,6 +126,7 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) dstats->tx_bytes += skb->len; u64_stats_update_end(&dstats->syncp); + skb_tx_timestamp(skb); dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -304,8 +306,21 @@ static void dummy_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } +static int dummy_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *ts_info) 
+{ + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + ts_info->phc_index = -1; + + return 0; +}; + static const struct ethtool_ops dummy_ethtool_ops = { .get_drvinfo = dummy_get_drvinfo, + .get_ts_info = dummy_get_ts_info, }; static void dummy_free_netdev(struct net_device *dev) diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 084a6d58543a71b35e0aa184cf03a9f9e5af3044..be823c186517a54e6d0317263008267f30bfce9b 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -283,7 +283,6 @@ struct typhoon { spinlock_t command_lock ____cacheline_aligned; struct basic_ring cmdRing; struct basic_ring respRing; - struct net_device_stats stats; struct net_device_stats stats_saved; struct typhoon_shared * shared; dma_addr_t shared_dma; @@ -898,7 +897,7 @@ typhoon_set_rx_mode(struct net_device *dev) static int typhoon_do_get_stats(struct typhoon *tp) { - struct net_device_stats *stats = &tp->stats; + struct net_device_stats *stats = &tp->dev->stats; struct net_device_stats *saved = &tp->stats_saved; struct cmd_desc xp_cmd; struct resp_desc xp_resp[7]; @@ -951,7 +950,7 @@ static struct net_device_stats * typhoon_get_stats(struct net_device *dev) { struct typhoon *tp = netdev_priv(dev); - struct net_device_stats *stats = &tp->stats; + struct net_device_stats *stats = &tp->dev->stats; struct net_device_stats *saved = &tp->stats_saved; smp_rmb(); @@ -1991,7 +1990,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type) tp->card_state = Sleeping; smp_wmb(); typhoon_do_get_stats(tp); - memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats)); + memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats)); INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT); typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 8c08f9deef9268e4cacc939a2534110a42be6c3b..edae15ac0e982e7a1678627253dc5bcd696737bc 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -180,5 +180,6 @@ source "drivers/net/ethernet/via/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" +source "drivers/net/ethernet/synopsys/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 26dce5bf2c18c966c5b378cf79b385aa726f9b4f..bf7f4502cabcf2b40f735d3a928a475f6d03c061 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -91,3 +91,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ +obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h index 8c3b56198e4b731125648284086850eab1fdf0f2..4ad5b9be3f84c52b9e04f6919431c198a6d5a011 100644 --- a/drivers/net/ethernet/adi/bfin_mac.h +++ b/drivers/net/ethernet/adi/bfin_mac.h @@ -68,13 +68,6 @@ struct net_dma_desc_tx { }; struct bfin_mac_local { - /* - * these are things that the kernel wants me to keep, so users - * can find out semi-useless statistics of how well the card is - * performing - */ - struct net_device_stats stats; - spinlock_t lock; int wol; /* Wake On Lan */ diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 
9f7422ada704e9484b79d336ece46a0547667abb..d8e133ced7b8a026682ee5f60396e380b4fa5ee6 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -1454,11 +1455,10 @@ static int greth_of_probe(struct platform_device *ofdev) break; } if (i == 6) { - const unsigned char *addr; - int len; - addr = of_get_property(ofdev->dev.of_node, "local-mac-address", - &len); - if (addr != NULL && len == 6) { + const u8 *addr; + + addr = of_get_mac_address(ofdev->dev.of_node); + if (addr) { for (i = 0; i < 6; i++) macaddr[i] = (unsigned int) addr[i]; } else { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 35f19430c84ae2dc7ed0e098acbe3516f3e166b5..7c1214d7885566ded4dfc05e85c2ee86b8d3c949 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -133,7 +133,7 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) int irq_idx = ENA_IO_IRQ_IDX(i); rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, - adapter->msix_entries[irq_idx].vector); + pci_irq_vector(adapter->pdev, irq_idx)); if (rc) { free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); adapter->netdev->rx_cpu_rmap = NULL; @@ -1208,13 +1208,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data) static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) { - int i, msix_vecs, rc; - - if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { - netif_err(adapter, probe, adapter->netdev, - "Error, MSI-X is already enabled\n"); - return -EPERM; - } + int msix_vecs, rc; /* Reserved the max msix vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(num_queues); @@ -1222,16 +1216,9 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) netif_dbg(adapter, probe, adapter->netdev, "trying to enable MSI-X, vectors %d\n", msix_vecs); - adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry)); - - if (!adapter->msix_entries) - return -ENOMEM; - - for (i = 0; i < msix_vecs; i++) - adapter->msix_entries[i].entry = i; - - rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs); - if (rc != 0) { + rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs, + PCI_IRQ_MSIX); + if (rc < 0) { netif_err(adapter, probe, adapter->netdev, "Failed to enable MSI-X, vectors %d rc %d\n", msix_vecs, rc); @@ -1248,7 +1235,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) } adapter->msix_vecs = msix_vecs; - set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); return 0; } @@ -1264,7 +1250,7 @@ static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) ena_intr_msix_mgmnt; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = - adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; + pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); cpu = cpumask_first(cpu_online_mask); adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; cpumask_set_cpu(cpu, @@ -1287,7 +1273,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter) adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; adapter->irq_tbl[irq_idx].vector = - adapter->msix_entries[irq_idx].vector; + pci_irq_vector(adapter->pdev, irq_idx); adapter->irq_tbl[irq_idx].cpu = cpu; cpumask_set_cpu(cpu, @@ -1325,12 +1311,6 @@ static int ena_request_io_irq(struct ena_adapter *adapter) struct ena_irq *irq; int rc = 0, i, k; - if 
(!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { - netif_err(adapter, ifup, adapter->netdev, - "Failed to request I/O IRQ: MSI-X is not enabled\n"); - return -EINVAL; - } - for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; rc = request_irq(irq->vector, irq->handler, flags, irq->name, @@ -1389,16 +1369,6 @@ static void ena_free_io_irq(struct ena_adapter *adapter) } } -static void ena_disable_msix(struct ena_adapter *adapter) -{ - if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) - pci_disable_msix(adapter->pdev); - - if (adapter->msix_entries) - vfree(adapter->msix_entries); - adapter->msix_entries = NULL; -} - static void ena_disable_io_intr_sync(struct ena_adapter *adapter) { int i; @@ -2479,8 +2449,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, return 0; err_disable_msix: - ena_disable_msix(adapter); - + pci_free_irq_vectors(adapter->pdev); return rc; } @@ -2518,7 +2487,7 @@ static void ena_fw_reset_device(struct work_struct *work) ena_free_mgmnt_irq(adapter); - ena_disable_msix(adapter); + pci_free_irq_vectors(adapter->pdev); ena_com_abort_admin_commands(ena_dev); @@ -2569,7 +2538,7 @@ static void ena_fw_reset_device(struct work_struct *work) return; err_disable_msix: ena_free_mgmnt_irq(adapter); - ena_disable_msix(adapter); + pci_free_irq_vectors(adapter->pdev); err_device_destroy: ena_com_admin_destroy(ena_dev); err: @@ -3103,7 +3072,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_free_msix: ena_com_dev_reset(ena_dev); ena_free_mgmnt_irq(adapter); - ena_disable_msix(adapter); + pci_free_irq_vectors(adapter->pdev); err_worker_destroy: ena_com_destroy_interrupt_moderation(ena_dev); del_timer(&adapter->timer_service); @@ -3188,7 +3157,7 @@ static void ena_remove(struct pci_dev *pdev) ena_free_mgmnt_irq(adapter); - ena_disable_msix(adapter); + pci_free_irq_vectors(adapter->pdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index ed62d8e231a155a693a1278862a967f3594635b4..0e22bce6239d0e06c73a366e0d98a2348a9b7fa9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -248,7 +248,6 @@ enum ena_flags_t { ENA_FLAG_DEVICE_RUNNING, ENA_FLAG_DEV_UP, ENA_FLAG_LINK_UP, - ENA_FLAG_MSIX_ENABLED, ENA_FLAG_TRIGGER_RESET }; @@ -267,7 +266,6 @@ struct ena_adapter { int num_queues; - struct msix_entry *msix_entries; int msix_vecs; u32 tx_usecs, rx_usecs; /* interrupt moderation */ diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index b556c926557a10599b1148ba52d7fcf39d6030bd..9c152d85840d79add7dc32bd1ee47579208ced25 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -359,7 +359,6 @@ typedef struct _mace_statistics { typedef struct _mace_private { struct pcmcia_device *p_dev; - struct net_device_stats linux_stats; /* Linux statistics counters */ mace_statistics mace_stats; /* MACE chip statistics counters */ /* restore_multicast_list() state variables */ @@ -879,7 +878,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb, service a transmit interrupt while we are in here. */ - lp->linux_stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; lp->tx_free_frames--; /* WARNING: Write the _exact_ number of bytes written in the header! 
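
For reference, the stats conversions in this and the following hunks all follow one pattern: struct net_device already embeds a net_device_stats, so the driver's private copy can be dropped and the counters bumped on dev->stats directly. A generic sketch (not from this patch):

	/* Generic sketch, not from this patch: with the private copy gone,
	 * counters are bumped on the net_device_stats embedded in struct
	 * net_device, and no ndo_get_stats implementation is strictly
	 * needed for the simple case.
	 */
	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *dev)
	{
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;

		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
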
*/ @@ -967,7 +966,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC); if ((fifofc & MACE_FIFOFC_XMTFC)==0) { - lp->linux_stats.tx_errors++; + dev->stats.tx_errors++; outb(0xFF, ioaddr + AM2150_XMT_SKIP); } @@ -1016,7 +1015,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) } /* if (xmtfs & MACE_XMTFS_XMTSV) */ - lp->linux_stats.tx_packets++; + dev->stats.tx_packets++; lp->tx_free_frames++; netif_wake_queue(dev); } /* if (status & MACE_IR_XMTINT) */ @@ -1077,7 +1076,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) " 0x%X.\n", dev->name, rx_framecnt, rx_status); if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ - lp->linux_stats.rx_errors++; + dev->stats.rx_errors++; if (rx_status & MACE_RCVFS_OFLO) { lp->mace_stats.oflo++; } @@ -1114,14 +1113,14 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt) netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ - lp->linux_stats.rx_packets++; - lp->linux_stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ continue; } else { pr_debug("%s: couldn't allocate a sk_buff of size" " %d.\n", dev->name, pkt_len); - lp->linux_stats.rx_dropped++; + dev->stats.rx_dropped++; } } outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ @@ -1231,13 +1230,13 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev) lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC); lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC); /* At this point, mace_stats is fully updated for this call. - We may now update the linux_stats. */ + We may now update the netdev stats. */ - /* The MACE has no equivalent for linux_stats field which are commented + /* The MACE has no equivalents for the netdev stats fields which are commented out. */ - /* lp->linux_stats.multicast; */ - lp->linux_stats.collisions = + /* dev->stats.multicast; */ + dev->stats.collisions = lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc; /* Collision: The MACE may retry sending a packet 15 times before giving up. The retry count is in XMTRC. @@ -1245,22 +1244,22 @@ If so, why doesn't the RCVCC record these collisions? */ /* detailed rx_errors: */ - lp->linux_stats.rx_length_errors = + dev->stats.rx_length_errors = lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc; - /* lp->linux_stats.rx_over_errors */ - lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs; - lp->linux_stats.rx_frame_errors = lp->mace_stats.fram; - lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo; - lp->linux_stats.rx_missed_errors = + /* dev->stats.rx_over_errors */ + dev->stats.rx_crc_errors = lp->mace_stats.fcs; + dev->stats.rx_frame_errors = lp->mace_stats.fram; + dev->stats.rx_fifo_errors = lp->mace_stats.oflo; + dev->stats.rx_missed_errors = lp->mace_stats.mpco * 256 + lp->mace_stats.mpc; /* detailed tx_errors */ - lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry; - lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar; + dev->stats.tx_aborted_errors = lp->mace_stats.rtry; + dev->stats.tx_carrier_errors = lp->mace_stats.lcar; /* LCAR usually results from bad cabling.
*/ - lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo; - lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr; - /* lp->linux_stats.tx_window_errors; */ + dev->stats.tx_fifo_errors = lp->mace_stats.uflo; + dev->stats.tx_heartbeat_errors = lp->mace_stats.cerr; + /* dev->stats.tx_window_errors; */ } /* update_stats */ /* ---------------------------------------------------------------------------- @@ -1274,10 +1273,10 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev) update_stats(dev->base_addr, dev); pr_debug("%s: updating the statistics.\n", dev->name); - pr_linux_stats(&lp->linux_stats); + pr_linux_stats(&dev->stats); pr_mace_stats(&lp->mace_stats); - return &lp->linux_stats; + return &dev->stats; } /* net_device_stats */ /* ---------------------------------------------------------------------------- diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a713abd9d03e63aea8aab96c4f5569d1d6b3e44d..c772420fa41caf57907610bce4528da9c70be63e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -118,6 +118,7 @@ #include #include #include +#include #include #include #include @@ -1854,7 +1855,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, if (tc_to_netdev->type != TC_SETUP_MQPRIO) return -EINVAL; - tc = tc_to_netdev->tc; + tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + tc = tc_to_netdev->mqprio->num_tc; if (tc > pdata->hw_feat.tc_cnt) return -EINVAL; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c index 0c7088a426e90ddaa32c09795506bc8805a377f6..417bdb5982a93ca252962f7fcd6141a2f53fca37 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c @@ -115,6 +115,7 @@ */ #include +#include #include #include #include diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 4c5b90eea4af2e389decec1d80b31e7134cee140..b672d92495397bb3132c90e25db17872c4589fa6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -114,6 +114,7 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
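The xgbe_setup_tc() hunk above follows the mqprio rework: the traffic-class count now arrives through tc_to_netdev->mqprio rather than the old tc byte, and the driver records which offload level it honoured via the hw field. A hedged sketch of that handler shape; example_setup_tc and EXAMPLE_MAX_TCS are invented names standing in for a driver's own limit:

#define EXAMPLE_MAX_TCS 8       /* hypothetical hardware TC limit */

static int example_setup_tc(struct net_device *dev, u32 handle,
                            __be16 proto, struct tc_to_netdev *tc)
{
        u8 num_tc;

        if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;

        /* report to the stack that only the TC count is offloaded */
        tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
        num_tc = tc->mqprio->num_tc;

        if (num_tc > EXAMPLE_MAX_TCS)
                return -EINVAL;

        return netdev_set_num_tc(dev, num_tc);
}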
*/ +#include #include #include diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig index ec63d706d464710af057591df62cfd0c4500b14b..59efe5b145ddf562e11fda61acad07b2da82548e 100644 --- a/drivers/net/ethernet/apm/Kconfig +++ b/drivers/net/ethernet/apm/Kconfig @@ -1 +1,2 @@ source "drivers/net/ethernet/apm/xgene/Kconfig" +source "drivers/net/ethernet/apm/xgene-v2/Kconfig" diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile index 65ce32ad1b2cc66a2c017cf8a06f8e2099bbaa9e..946b2a4c882d3cb627849ac25817e9d4e45591de 100644 --- a/drivers/net/ethernet/apm/Makefile +++ b/drivers/net/ethernet/apm/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_NET_XGENE) += xgene/ +obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/ diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..1205861b631896a0fc6b19ac608483d8e4b27d4e --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig @@ -0,0 +1,11 @@ +config NET_XGENE_V2 + tristate "APM X-Gene SoC Ethernet-v2 Driver" + depends on HAS_DMA + depends on ARCH_XGENE || COMPILE_TEST + help + This is the Ethernet driver for the on-chip Ethernet interface + which uses a linked-list DMA descriptor architecture (v2) on + APM X-Gene SoCs. + + To compile this driver as a module, choose M here. This module will + be called xgene-enet-v2. diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f16a2b3dde8b2707add2c4e57f3f000c2f645d08 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for APM X-Gene Ethernet v2 driver +# + +xgene-enet-v2-objs := main.o mac.o enet.o ring.o mdio.o ethtool.o +obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.c b/drivers/net/ethernet/apm/xgene-v2/enet.c new file mode 100644 index 0000000000000000000000000000000000000000..5998da01492356a8536ff3673855f1d821bd2394 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/enet.c @@ -0,0 +1,83 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .
+ */ + +#include "main.h" + +void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val) +{ + void __iomem *addr = pdata->resources.base_addr + offset; + + iowrite32(val, addr); +} + +u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset) +{ + void __iomem *addr = pdata->resources.base_addr + offset; + + return ioread32(addr); +} + +int xge_port_reset(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + u32 data, wait = 10; + + xge_wr_csr(pdata, ENET_CLKEN, 0x3); + xge_wr_csr(pdata, ENET_SRST, 0xf); + xge_wr_csr(pdata, ENET_SRST, 0); + xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 1); + xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 0); + + do { + usleep_range(100, 110); + data = xge_rd_csr(pdata, BLOCK_MEM_RDY); + } while (data != MEM_RDY && wait--); + + if (data != MEM_RDY) { + dev_err(dev, "ECC init failed: %x\n", data); + return -ETIMEDOUT; + } + + xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH); + + return 0; +} + +static void xge_traffic_resume(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + + xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1); + xge_wr_csr(pdata, FORCE_LINK_STATUS, 1); + + xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1); + xge_wr_csr(pdata, RX_DV_GATE_REG, 1); +} + +void xge_port_init(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + + pdata->phy_speed = SPEED_1000; + xge_mac_init(pdata); + xge_traffic_resume(ndev); +} diff --git a/drivers/net/ethernet/apm/xgene-v2/enet.h b/drivers/net/ethernet/apm/xgene-v2/enet.h new file mode 100644 index 0000000000000000000000000000000000000000..3fd36dc66a23d44ab878ca348890ad9511f30147 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/enet.h @@ -0,0 +1,45 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
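xge_port_reset() above open-codes a bounded poll on BLOCK_MEM_RDY (up to ten 100us sleeps). The same wait could be written with the iopoll helpers; a sketch under the same register layout, with example_wait_mem_rdy an invented name and not part of the patch:

#include <linux/iopoll.h>

static int example_wait_mem_rdy(struct xge_pdata *pdata)
{
        u32 data;

        /* poll BLOCK_MEM_RDY roughly every 100us, give up after 1ms */
        return readx_poll_timeout(ioread32,
                                  pdata->resources.base_addr + BLOCK_MEM_RDY,
                                  data, data == MEM_RDY, 100, 1000);
}

readx_poll_timeout() returns 0 on success and -ETIMEDOUT otherwise, so the caller's error handling stays the same as with the open-coded loop.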
+ */ + +#ifndef __XGENE_ENET_V2_ENET_H__ +#define __XGENE_ENET_V2_ENET_H__ + +#define ENET_CLKEN 0xc008 +#define ENET_SRST 0xc000 +#define ENET_SHIM 0xc010 +#define CFG_MEM_RAM_SHUTDOWN 0xd070 +#define BLOCK_MEM_RDY 0xd074 + +#define MEM_RDY 0xffffffff +#define DEVM_ARAUX_COH BIT(19) +#define DEVM_AWAUX_COH BIT(3) + +#define CFG_FORCE_LINK_STATUS_EN 0x229c +#define FORCE_LINK_STATUS 0x22a0 +#define CFG_LINK_AGGR_RESUME 0x27c8 +#define RX_DV_GATE_REG 0x2dfc + +void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val); +u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset); +int xge_port_reset(struct net_device *ndev); +void xge_port_init(struct net_device *ndev); + +#endif /* __XGENE_ENET_V2_ENET_H__ */ diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..b6666e418e79fd1c0bfc4f33a9b17e4c094c36b9 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c @@ -0,0 +1,187 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .
+ */ + +#include "main.h" + +#define XGE_STAT(m) { #m, offsetof(struct xge_pdata, stats.m) } +#define XGE_EXTD_STAT(m, n) \ + { \ + #m, \ + n, \ + 0 \ + } + +static const struct xge_gstrings_stats gstrings_stats[] = { + XGE_STAT(rx_packets), + XGE_STAT(tx_packets), + XGE_STAT(rx_bytes), + XGE_STAT(tx_bytes), + XGE_STAT(rx_errors) +}; + +static struct xge_gstrings_extd_stats gstrings_extd_stats[] = { + XGE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64), + XGE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127), + XGE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255), + XGE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511), + XGE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K), + XGE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX), + XGE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV), + XGE_EXTD_STAT(rx_fcs_error_cntr, RFCS), + XGE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA), + XGE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA), + XGE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF), + XGE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF), + XGE_EXTD_STAT(rx_unk_opcode_cntr, RXUO), + XGE_EXTD_STAT(rx_align_err_cntr, RALN), + XGE_EXTD_STAT(rx_frame_len_err_cntr, RFLR), + XGE_EXTD_STAT(rx_code_err_cntr, RCDE), + XGE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE), + XGE_EXTD_STAT(rx_undersize_pkt_cntr, RUND), + XGE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR), + XGE_EXTD_STAT(rx_fragments_cntr, RFRG), + XGE_EXTD_STAT(rx_jabber_cntr, RJBR), + XGE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP), + XGE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA), + XGE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA), + XGE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF), + XGE_EXTD_STAT(tx_defer_pkt_cntr, TDFR), + XGE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF), + XGE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL), + XGE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL), + XGE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL), + XGE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL), + XGE_EXTD_STAT(tx_total_col_cntr, TNCL), + XGE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH), + XGE_EXTD_STAT(tx_drop_frame_cntr, TDRP), + XGE_EXTD_STAT(tx_jabber_frame_cntr, TJBR), + XGE_EXTD_STAT(tx_fcs_error_cntr, TFCS), + XGE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF), + XGE_EXTD_STAT(tx_oversize_frame_cntr, TOVR), + XGE_EXTD_STAT(tx_undersize_frame_cntr, TUND), + XGE_EXTD_STAT(tx_fragments_cntr, TFRG) +}; + +#define XGE_STATS_LEN ARRAY_SIZE(gstrings_stats) +#define XGE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats) + +static void xge_mac_get_extd_stats(struct xge_pdata *pdata) +{ + u32 data; + int i; + + for (i = 0; i < XGE_EXTD_STATS_LEN; i++) { + data = xge_rd_csr(pdata, gstrings_extd_stats[i].addr); + gstrings_extd_stats[i].value += data; + } +} + +static void xge_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct platform_device *pdev = pdata->pdev; + + strcpy(info->driver, "xgene-enet-v2"); + strcpy(info->version, XGENE_ENET_V2_VERSION); + snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A"); + sprintf(info->bus_info, "%s", pdev->name); +} + +static void xge_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < XGE_STATS_LEN; i++) { + memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < XGE_EXTD_STATS_LEN; i++) { + memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +} + +static int xge_get_sset_count(struct net_device *ndev, int sset) +{ + if (sset != ETH_SS_STATS) + return -EINVAL; + + return XGE_STATS_LEN + XGE_EXTD_STATS_LEN; +} + +static void 
xge_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *dummy, + u64 *data) +{ + void *pdata = netdev_priv(ndev); + int i; + + for (i = 0; i < XGE_STATS_LEN; i++) + *data++ = *(u64 *)(pdata + gstrings_stats[i].offset); + + xge_mac_get_extd_stats(pdata); + + for (i = 0; i < XGE_EXTD_STATS_LEN; i++) + *data++ = gstrings_extd_stats[i].value; +} + +static int xge_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct phy_device *phydev = ndev->phydev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_ksettings_get(phydev, cmd); +} + +static int xge_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct phy_device *phydev = ndev->phydev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(phydev, cmd); +} + +static const struct ethtool_ops xge_ethtool_ops = { + .get_drvinfo = xge_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = xge_get_strings, + .get_sset_count = xge_get_sset_count, + .get_ethtool_stats = xge_get_ethtool_stats, + .get_link_ksettings = xge_get_link_ksettings, + .set_link_ksettings = xge_set_link_ksettings, +}; + +void xge_set_ethtool_ops(struct net_device *ndev) +{ + ndev->ethtool_ops = &xge_ethtool_ops; +} diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.h b/drivers/net/ethernet/apm/xgene-v2/ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..54b48d5561b8b388fb7da4fec3f1ef9136d762e8 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.h @@ -0,0 +1,78 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
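The two ksettings wrappers above duplicate the NULL-phydev check that the generic phylib wrappers already perform. Assuming those net_device-level helpers are available in this kernel, the ops table could plausibly point at them directly; a sketch of that alternative (not what this patch does), with example_ethtool_ops an invented name:

static const struct ethtool_ops example_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        /* phylib helpers: return -ENODEV when no PHY is attached */
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};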
+ */ + +#ifndef __XGENE_ENET_V2_ETHTOOL_H__ +#define __XGENE_ENET_V2_ETHTOOL_H__ + +struct xge_gstrings_stats { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +struct xge_gstrings_extd_stats { + char name[ETH_GSTRING_LEN]; + u32 addr; + u32 value; +}; + +#define TR64 0xa080 +#define TR127 0xa084 +#define TR255 0xa088 +#define TR511 0xa08c +#define TR1K 0xa090 +#define TRMAX 0xa094 +#define TRMGV 0xa098 +#define RFCS 0xa0a4 +#define RMCA 0xa0a8 +#define RBCA 0xa0ac +#define RXCF 0xa0b0 +#define RXPF 0xa0b4 +#define RXUO 0xa0b8 +#define RALN 0xa0bc +#define RFLR 0xa0c0 +#define RCDE 0xa0c4 +#define RCSE 0xa0c8 +#define RUND 0xa0cc +#define ROVR 0xa0d0 +#define RFRG 0xa0d4 +#define RJBR 0xa0d8 +#define RDRP 0xa0dc +#define TMCA 0xa0e8 +#define TBCA 0xa0ec +#define TXPF 0xa0f0 +#define TDFR 0xa0f4 +#define TEDF 0xa0f8 +#define TSCL 0xa0fc +#define TMCL 0xa100 +#define TLCL 0xa104 +#define TXCL 0xa108 +#define TNCL 0xa10c +#define TPFH 0xa110 +#define TDRP 0xa114 +#define TJBR 0xa118 +#define TFCS 0xa11c +#define TXCF 0xa120 +#define TOVR 0xa124 +#define TUND 0xa128 +#define TFRG 0xa12c + +void xge_set_ethtool_ops(struct net_device *ndev); + +#endif /* __XGENE_ENET_V2_ETHTOOL_H__ */ diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c new file mode 100644 index 0000000000000000000000000000000000000000..ee431e397e570ff3270a561aeae51f72f0e7a1a1 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/mac.c @@ -0,0 +1,116 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "main.h" + +void xge_mac_reset(struct xge_pdata *pdata) +{ + xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET); + xge_wr_csr(pdata, MAC_CONFIG_1, 0); +} + +void xge_mac_set_speed(struct xge_pdata *pdata) +{ + u32 icm0, icm2, ecm0, mc2; + u32 intf_ctrl, rgmii; + + icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0); + icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0); + ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0); + rgmii = xge_rd_csr(pdata, RGMII_REG_0); + mc2 = xge_rd_csr(pdata, MAC_CONFIG_2); + intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL); + icm2 |= CFG_WAITASYNCRD_EN; + + switch (pdata->phy_speed) { + case SPEED_10: + SET_REG_BITS(&mc2, INTF_MODE, 1); + SET_REG_BITS(&intf_ctrl, HD_MODE, 0); + SET_REG_BITS(&icm0, CFG_MACMODE, 0); + SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500); + SET_REG_BIT(&rgmii, CFG_SPEED_125, 0); + break; + case SPEED_100: + SET_REG_BITS(&mc2, INTF_MODE, 1); + SET_REG_BITS(&intf_ctrl, HD_MODE, 1); + SET_REG_BITS(&icm0, CFG_MACMODE, 1); + SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80); + SET_REG_BIT(&rgmii, CFG_SPEED_125, 0); + break; + default: + SET_REG_BITS(&mc2, INTF_MODE, 2); + SET_REG_BITS(&intf_ctrl, HD_MODE, 2); + SET_REG_BITS(&icm0, CFG_MACMODE, 2); + SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16); + SET_REG_BIT(&rgmii, CFG_SPEED_125, 1); + break; + } + + mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC; + SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32); + + xge_wr_csr(pdata, MAC_CONFIG_2, mc2); + xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl); + xge_wr_csr(pdata, RGMII_REG_0, rgmii); + xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0); + xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2); + xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0); +} + +void xge_mac_set_station_addr(struct xge_pdata *pdata) +{ + u8 *dev_addr = pdata->ndev->dev_addr; + u32 addr0, addr1; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); + + xge_wr_csr(pdata, STATION_ADDR0, addr0); + xge_wr_csr(pdata, STATION_ADDR1, addr1); +} + +void xge_mac_init(struct xge_pdata *pdata) +{ + xge_mac_reset(pdata); + xge_mac_set_speed(pdata); + xge_mac_set_station_addr(pdata); +} + +void xge_mac_enable(struct xge_pdata *pdata) +{ + u32 data; + + data = xge_rd_csr(pdata, MAC_CONFIG_1); + data |= TX_EN | RX_EN; + xge_wr_csr(pdata, MAC_CONFIG_1, data); + + data = xge_rd_csr(pdata, MAC_CONFIG_1); +} + +void xge_mac_disable(struct xge_pdata *pdata) +{ + u32 data; + + data = xge_rd_csr(pdata, MAC_CONFIG_1); + data &= ~(TX_EN | RX_EN); + xge_wr_csr(pdata, MAC_CONFIG_1, data); +} diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.h b/drivers/net/ethernet/apm/xgene-v2/mac.h new file mode 100644 index 0000000000000000000000000000000000000000..3c83fa6173567e614a4a1a01254625f9ba1b187f --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/mac.h @@ -0,0 +1,107 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __XGENE_ENET_V2_MAC_H__ +#define __XGENE_ENET_V2_MAC_H__ + +/* Register offsets */ +#define MAC_CONFIG_1 0xa000 +#define MAC_CONFIG_2 0xa004 +#define MII_MGMT_CONFIG 0xa020 +#define MII_MGMT_COMMAND 0xa024 +#define MII_MGMT_ADDRESS 0xa028 +#define MII_MGMT_CONTROL 0xa02c +#define MII_MGMT_STATUS 0xa030 +#define MII_MGMT_INDICATORS 0xa034 +#define INTERFACE_CONTROL 0xa038 +#define STATION_ADDR0 0xa040 +#define STATION_ADDR1 0xa044 + +#define RGMII_REG_0 0x27e0 +#define ICM_CONFIG0_REG_0 0x2c00 +#define ICM_CONFIG2_REG_0 0x2c08 +#define ECM_CONFIG0_REG_0 0x2d00 + +/* Register fields */ +#define SOFT_RESET BIT(31) +#define TX_EN BIT(0) +#define RX_EN BIT(2) +#define PAD_CRC BIT(2) +#define CRC_EN BIT(1) +#define FULL_DUPLEX BIT(0) + +#define INTF_MODE_POS 8 +#define INTF_MODE_LEN 2 +#define HD_MODE_POS 25 +#define HD_MODE_LEN 2 +#define CFG_MACMODE_POS 18 +#define CFG_MACMODE_LEN 2 +#define CFG_WAITASYNCRD_POS 0 +#define CFG_WAITASYNCRD_LEN 16 +#define CFG_SPEED_125_POS 24 +#define CFG_WFIFOFULLTHR_POS 0 +#define CFG_WFIFOFULLTHR_LEN 7 +#define MGMT_CLOCK_SEL_POS 0 +#define MGMT_CLOCK_SEL_LEN 3 +#define PHY_ADDR_POS 8 +#define PHY_ADDR_LEN 5 +#define REG_ADDR_POS 0 +#define REG_ADDR_LEN 5 +#define MII_MGMT_BUSY BIT(0) +#define MII_READ_CYCLE BIT(0) +#define CFG_WAITASYNCRD_EN BIT(16) + +static inline void xgene_set_reg_bits(u32 *var, int pos, int len, u32 val) +{ + u32 mask = GENMASK(pos + len - 1, pos); + + *var &= ~mask; + *var |= ((val << pos) & mask); +} + +static inline u32 xgene_get_reg_bits(u32 var, int pos, int len) +{ + u32 mask = GENMASK(pos + len - 1, pos); + + return (var & mask) >> pos; +} + +#define SET_REG_BITS(var, field, val) \ + xgene_set_reg_bits(var, field ## _POS, field ## _LEN, val) + +#define SET_REG_BIT(var, field, val) \ + xgene_set_reg_bits(var, field ## _POS, 1, val) + +#define GET_REG_BITS(var, field) \ + xgene_get_reg_bits(var, field ## _POS, field ## _LEN) + +#define GET_REG_BIT(var, field) ((var) & (field)) + +struct xge_pdata; + +void xge_mac_reset(struct xge_pdata *pdata); +void xge_mac_set_speed(struct xge_pdata *pdata); +void xge_mac_enable(struct xge_pdata *pdata); +void xge_mac_disable(struct xge_pdata *pdata); +void xge_mac_init(struct xge_pdata *pdata); +void xge_mac_set_station_addr(struct xge_pdata *pdata); + +#endif /* __XGENE_ENET_V2_MAC_H__ */ diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c new file mode 100644 index 0000000000000000000000000000000000000000..0f2ad50f3bd7831694e0b2d0f6d09b711ec3ff20 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -0,0 +1,759 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .
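A field of len bits starting at pos occupies bits [pos + len - 1 : pos], which is why the mask in the helpers above must be GENMASK(pos + len - 1, pos); a mask of GENMASK(pos + len, pos) would clear one extra neighbouring bit on every write. A small illustrative self-check using the INTF_MODE field at bits [9:8]; example_reg_bits is an invented name, not part of the patch:

static void example_reg_bits(void)
{
        u32 reg = 0xffffffff;

        /* write 2 into INTF_MODE (bits 9:8): mask 0x300, value 0x200 */
        SET_REG_BITS(&reg, INTF_MODE, 2);

        /* the field reads back, and no neighbouring bit was disturbed */
        WARN_ON(GET_REG_BITS(reg, INTF_MODE) != 2);
        WARN_ON(reg != 0xfffffeff);
}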
+ */ + +#include "main.h" + +static const struct acpi_device_id xge_acpi_match[]; + +static int xge_get_resources(struct xge_pdata *pdata) +{ + struct platform_device *pdev; + struct net_device *ndev; + int phy_mode, ret = 0; + struct resource *res; + struct device *dev; + + pdev = pdata->pdev; + dev = &pdev->dev; + ndev = pdata->ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Resource enet_csr not defined\n"); + return -ENODEV; + } + + pdata->resources.base_addr = devm_ioremap(dev, res->start, + resource_size(res)); + if (!pdata->resources.base_addr) { + dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); + return -ENOMEM; + } + + if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) + eth_hw_addr_random(ndev); + + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + + phy_mode = device_get_phy_mode(dev); + if (phy_mode < 0) { + dev_err(dev, "Unable to get phy-connection-type\n"); + return phy_mode; + } + pdata->resources.phy_mode = phy_mode; + + if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) { + dev_err(dev, "Incorrect phy-connection-type specified\n"); + return -ENODEV; + } + + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "Unable to get irq\n"); + return ret; + } + pdata->resources.irq = ret; + + return 0; +} + +static int xge_refill_buffers(struct net_device *ndev, u32 nbuf) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct xge_desc_ring *ring = pdata->rx_ring; + const u8 slots = XGENE_ENET_NUM_DESC - 1; + struct device *dev = &pdata->pdev->dev; + struct xge_raw_desc *raw_desc; + u64 addr_lo, addr_hi; + u8 tail = ring->tail; + struct sk_buff *skb; + dma_addr_t dma_addr; + u16 len; + int i; + + for (i = 0; i < nbuf; i++) { + raw_desc = &ring->raw_desc[tail]; + + len = XGENE_ENET_STD_MTU; + skb = netdev_alloc_skb(ndev, len); + if (unlikely(!skb)) + return -ENOMEM; + + dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(ndev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + ring->pkt_info[tail].skb = skb; + ring->pkt_info[tail].dma_addr = dma_addr; + + addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1)); + addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1)); + raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) | + SET_BITS(NEXT_DESC_ADDRH, addr_hi) | + SET_BITS(PKT_ADDRH, + upper_32_bits(dma_addr))); + + dma_wmb(); + raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) | + SET_BITS(E, 1)); + tail = (tail + 1) & slots; + } + + ring->tail = tail; + + return 0; +} + +static int xge_init_hw(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + int ret; + + ret = xge_port_reset(ndev); + if (ret) + return ret; + + xge_port_init(ndev); + pdata->nbufs = NUM_BUFS; + + return 0; +} + +static irqreturn_t xge_irq(const int irq, void *data) +{ + struct xge_pdata *pdata = data; + + if (napi_schedule_prep(&pdata->napi)) { + xge_intr_disable(pdata); + __napi_schedule(&pdata->napi); + } + + return IRQ_HANDLED; +} + +static int xge_request_irq(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + int ret; + + snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name); + + ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name, + pdata); + if (ret) + netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name); + + return ret; +} + +static void xge_free_irq(struct net_device *ndev) +{ + struct xge_pdata *pdata = 
netdev_priv(ndev); + + free_irq(pdata->resources.irq, pdata); +} + +static bool is_tx_slot_available(struct xge_raw_desc *raw_desc) +{ + if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) && + (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY)) + return true; + + return false; +} + +static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + struct xge_desc_ring *tx_ring; + struct xge_raw_desc *raw_desc; + dma_addr_t dma_addr; + u64 addr_lo, addr_hi; + void *pkt_buf; + u8 tail; + u16 len; + + tx_ring = pdata->tx_ring; + tail = tx_ring->tail; + len = skb_headlen(skb); + raw_desc = &tx_ring->raw_desc[tail]; + + if (!is_tx_slot_available(raw_desc)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + /* Packet buffers should be 64B aligned */ + pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, + GFP_ATOMIC); + if (unlikely(!pkt_buf)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memcpy(pkt_buf, skb->data, len); + + addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1)); + addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1)); + raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) | + SET_BITS(NEXT_DESC_ADDRH, addr_hi) | + SET_BITS(PKT_ADDRH, + upper_32_bits(dma_addr))); + + tx_ring->pkt_info[tail].skb = skb; + tx_ring->pkt_info[tail].dma_addr = dma_addr; + tx_ring->pkt_info[tail].pkt_buf = pkt_buf; + + dma_wmb(); + + raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) | + SET_BITS(PKT_SIZE, len) | + SET_BITS(E, 0)); + skb_tx_timestamp(skb); + xge_wr_csr(pdata, DMATXCTRL, 1); + + tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1); + + return NETDEV_TX_OK; +} + +static bool is_tx_hw_done(struct xge_raw_desc *raw_desc) +{ + if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) && + !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0))) + return true; + + return false; +} + +static void xge_txc_poll(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + struct xge_desc_ring *tx_ring; + struct xge_raw_desc *raw_desc; + dma_addr_t dma_addr; + struct sk_buff *skb; + void *pkt_buf; + u32 data; + u8 head; + + tx_ring = pdata->tx_ring; + head = tx_ring->head; + + data = xge_rd_csr(pdata, DMATXSTATUS); + if (!GET_BITS(TXPKTCOUNT, data)) + return; + + while (1) { + raw_desc = &tx_ring->raw_desc[head]; + + if (!is_tx_hw_done(raw_desc)) + break; + + dma_rmb(); + + skb = tx_ring->pkt_info[head].skb; + dma_addr = tx_ring->pkt_info[head].dma_addr; + pkt_buf = tx_ring->pkt_info[head].pkt_buf; + pdata->stats.tx_packets++; + pdata->stats.tx_bytes += skb->len; + dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr); + dev_kfree_skb_any(skb); + + /* clear pktstart address and pktsize */ + raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) | + SET_BITS(PKT_SIZE, SLOT_EMPTY)); + xge_wr_csr(pdata, DMATXSTATUS, 1); + + head = (head + 1) & (XGENE_ENET_NUM_DESC - 1); + } + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + + tx_ring->head = head; +} + +static int xge_rx_poll(struct net_device *ndev, unsigned int budget) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + struct xge_desc_ring *rx_ring; + struct xge_raw_desc *raw_desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + int processed = 0; + u8 head, rx_error; + int i, ret; + u32 data; + u16 len; + + rx_ring = pdata->rx_ring; + head = rx_ring->head; + + data =
xge_rd_csr(pdata, DMARXSTATUS); + if (!GET_BITS(RXPKTCOUNT, data)) + return 0; + + for (i = 0; i < budget; i++) { + raw_desc = &rx_ring->raw_desc[head]; + + if (GET_BITS(E, le64_to_cpu(raw_desc->m0))) + break; + + dma_rmb(); + + skb = rx_ring->pkt_info[head].skb; + rx_ring->pkt_info[head].skb = NULL; + dma_addr = rx_ring->pkt_info[head].dma_addr; + len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)); + dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU, + DMA_FROM_DEVICE); + + rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2)); + if (unlikely(rx_error)) { + pdata->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto out; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + + pdata->stats.rx_packets++; + pdata->stats.rx_bytes += len; + napi_gro_receive(&pdata->napi, skb); +out: + ret = xge_refill_buffers(ndev, 1); + xge_wr_csr(pdata, DMARXSTATUS, 1); + xge_wr_csr(pdata, DMARXCTRL, 1); + + if (ret) + break; + + head = (head + 1) & (XGENE_ENET_NUM_DESC - 1); + processed++; + } + + rx_ring->head = head; + + return processed; +} + +static void xge_delete_desc_ring(struct net_device *ndev, + struct xge_desc_ring *ring) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + u16 size; + + if (!ring) + return; + + size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; + if (ring->desc_addr) + dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr); + + kfree(ring->pkt_info); + kfree(ring); +} + +static void xge_free_buffers(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct xge_desc_ring *ring = pdata->rx_ring; + struct device *dev = &pdata->pdev->dev; + struct sk_buff *skb; + dma_addr_t dma_addr; + int i; + + for (i = 0; i < XGENE_ENET_NUM_DESC; i++) { + skb = ring->pkt_info[i].skb; + dma_addr = ring->pkt_info[i].dma_addr; + + if (!skb) + continue; + + dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void xge_delete_desc_rings(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + + xge_txc_poll(ndev); + xge_delete_desc_ring(ndev, pdata->tx_ring); + + xge_rx_poll(ndev, 64); + xge_free_buffers(ndev); + xge_delete_desc_ring(ndev, pdata->rx_ring); +} + +static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + struct xge_desc_ring *ring; + u16 size; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + return NULL; + + ring->ndev = ndev; + + size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; + ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, + GFP_KERNEL); + if (!ring->desc_addr) + goto err; + + ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info), + GFP_KERNEL); + if (!ring->pkt_info) + goto err; + + xge_setup_desc(ring); + + return ring; + +err: + xge_delete_desc_ring(ndev, ring); + + return NULL; +} + +static int xge_create_desc_rings(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct xge_desc_ring *ring; + int ret; + + /* create tx ring */ + ring = xge_create_desc_ring(ndev); + if (!ring) + goto err; + + pdata->tx_ring = ring; + xge_update_tx_desc_addr(pdata); + + /* create rx ring */ + ring = xge_create_desc_ring(ndev); + if (!ring) + goto err; + + pdata->rx_ring = ring; + xge_update_rx_desc_addr(pdata); + + ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC); + if (ret) + goto err; + + return 0; +err: + xge_delete_desc_rings(ndev); + 
+ return -ENOMEM; +} + +static int xge_open(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + int ret; + + ret = xge_create_desc_rings(ndev); + if (ret) + return ret; + + napi_enable(&pdata->napi); + ret = xge_request_irq(ndev); + if (ret) + return ret; + + xge_intr_enable(pdata); + xge_wr_csr(pdata, DMARXCTRL, 1); + + phy_start(ndev->phydev); + xge_mac_enable(pdata); + netif_start_queue(ndev); + + return 0; +} + +static int xge_close(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + + netif_stop_queue(ndev); + xge_mac_disable(pdata); + phy_stop(ndev->phydev); + + xge_intr_disable(pdata); + xge_free_irq(ndev); + napi_disable(&pdata->napi); + xge_delete_desc_rings(ndev); + + return 0; +} + +static int xge_napi(struct napi_struct *napi, const int budget) +{ + struct net_device *ndev = napi->dev; + struct xge_pdata *pdata; + int processed; + + pdata = netdev_priv(ndev); + + xge_txc_poll(ndev); + processed = xge_rx_poll(ndev, budget); + + if (processed < budget) { + napi_complete_done(napi, processed); + xge_intr_enable(pdata); + } + + return processed; +} + +static int xge_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + int ret; + + ret = eth_mac_addr(ndev, addr); + if (ret) + return ret; + + xge_mac_set_station_addr(pdata); + + return 0; +} + +static bool is_tx_pending(struct xge_raw_desc *raw_desc) +{ + if (!GET_BITS(E, le64_to_cpu(raw_desc->m0))) + return true; + + return false; +} + +static void xge_free_pending_skb(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + struct xge_desc_ring *tx_ring; + struct xge_raw_desc *raw_desc; + dma_addr_t dma_addr; + struct sk_buff *skb; + void *pkt_buf; + int i; + + tx_ring = pdata->tx_ring; + + for (i = 0; i < XGENE_ENET_NUM_DESC; i++) { + raw_desc = &tx_ring->raw_desc[i]; + + if (!is_tx_pending(raw_desc)) + continue; + + skb = tx_ring->pkt_info[i].skb; + dma_addr = tx_ring->pkt_info[i].dma_addr; + pkt_buf = tx_ring->pkt_info[i].pkt_buf; + dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr); + dev_kfree_skb_any(skb); + } +} + +static void xge_timeout(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + + rtnl_lock(); + + if (!netif_running(ndev)) + goto out; + + netif_stop_queue(ndev); + xge_intr_disable(pdata); + napi_disable(&pdata->napi); + + xge_wr_csr(pdata, DMATXCTRL, 0); + xge_txc_poll(ndev); + xge_free_pending_skb(ndev); + xge_wr_csr(pdata, DMATXSTATUS, ~0U); + + xge_setup_desc(pdata->tx_ring); + xge_update_tx_desc_addr(pdata); + xge_mac_init(pdata); + + napi_enable(&pdata->napi); + xge_intr_enable(pdata); + xge_mac_enable(pdata); + netif_start_queue(ndev); + +out: + rtnl_unlock(); +} + +static void xge_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *storage) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct xge_stats *stats = &pdata->stats; + + storage->tx_packets += stats->tx_packets; + storage->tx_bytes += stats->tx_bytes; + + storage->rx_packets += stats->rx_packets; + storage->rx_bytes += stats->rx_bytes; + storage->rx_errors += stats->rx_errors; +} + +static const struct net_device_ops xgene_ndev_ops = { + .ndo_open = xge_open, + .ndo_stop = xge_close, + .ndo_start_xmit = xge_start_xmit, + .ndo_set_mac_address = xge_set_mac_addr, + .ndo_tx_timeout = xge_timeout, + .ndo_get_stats64 = xge_get_stats64, +}; + +static int xge_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct 
net_device *ndev; + struct xge_pdata *pdata; + int ret; + + ndev = alloc_etherdev(sizeof(*pdata)); + if (!ndev) + return -ENOMEM; + + pdata = netdev_priv(ndev); + + pdata->pdev = pdev; + pdata->ndev = ndev; + SET_NETDEV_DEV(ndev, dev); + platform_set_drvdata(pdev, pdata); + ndev->netdev_ops = &xgene_ndev_ops; + + ndev->features |= NETIF_F_GSO | + NETIF_F_GRO; + + ret = xge_get_resources(pdata); + if (ret) + goto err; + + ndev->hw_features = ndev->features; + xge_set_ethtool_ops(ndev); + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) { + netdev_err(ndev, "No usable DMA configuration\n"); + goto err; + } + + ret = xge_init_hw(ndev); + if (ret) + goto err; + + ret = xge_mdio_config(ndev); + if (ret) + goto err; + + netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT); + + ret = register_netdev(ndev); + if (ret) { + netdev_err(ndev, "Failed to register netdev\n"); + goto err; + } + + return 0; + +err: + free_netdev(ndev); + + return ret; +} + +static int xge_remove(struct platform_device *pdev) +{ + struct xge_pdata *pdata; + struct net_device *ndev; + + pdata = platform_get_drvdata(pdev); + ndev = pdata->ndev; + + rtnl_lock(); + if (netif_running(ndev)) + dev_close(ndev); + rtnl_unlock(); + + xge_mdio_remove(ndev); + unregister_netdev(ndev); + free_netdev(ndev); + + return 0; +} + +static void xge_shutdown(struct platform_device *pdev) +{ + struct xge_pdata *pdata; + + pdata = platform_get_drvdata(pdev); + if (!pdata) + return; + + if (!pdata->ndev) + return; + + xge_remove(pdev); +} + +static const struct acpi_device_id xge_acpi_match[] = { + { "APMC0D80" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, xge_acpi_match); + +static struct platform_driver xge_driver = { + .driver = { + .name = "xgene-enet-v2", + .acpi_match_table = ACPI_PTR(xge_acpi_match), + }, + .probe = xge_probe, + .remove = xge_remove, + .shutdown = xge_shutdown, +}; +module_platform_driver(xge_driver); + +MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver"); +MODULE_AUTHOR("Iyappan Subramanian "); +MODULE_VERSION(XGENE_ENET_V2_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h new file mode 100644 index 0000000000000000000000000000000000000000..969b258cb7ded0cd624f5698f1d0d8d9f7a3db1e --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/main.h @@ -0,0 +1,80 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_V2_MAIN_H__ +#define __XGENE_ENET_V2_MAIN_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mac.h" +#include "enet.h" +#include "ring.h" +#include "ethtool.h" + +#define XGENE_ENET_V2_VERSION "v1.0" +#define XGENE_ENET_STD_MTU 1536 +#define XGENE_ENET_MIN_FRAME 60 +#define IRQ_ID_SIZE 16 + +struct xge_resource { + void __iomem *base_addr; + int phy_mode; + u32 irq; +}; + +struct xge_stats { + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 rx_errors; +}; + +/* ethernet private data */ +struct xge_pdata { + struct xge_resource resources; + struct xge_desc_ring *tx_ring; + struct xge_desc_ring *rx_ring; + struct platform_device *pdev; + char irq_name[IRQ_ID_SIZE]; + struct mii_bus *mdio_bus; + struct net_device *ndev; + struct napi_struct napi; + struct xge_stats stats; + int phy_speed; + u8 nbufs; +}; + +int xge_mdio_config(struct net_device *ndev); +void xge_mdio_remove(struct net_device *ndev); + +#endif /* __XGENE_ENET_V2_MAIN_H__ */ diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c new file mode 100644 index 0000000000000000000000000000000000000000..f5fe3bb2e59d9a9eecfcfb358af0a15785d69cfa --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c @@ -0,0 +1,168 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "main.h" + +static int xge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) +{ + struct xge_pdata *pdata = bus->priv; + u32 done, val = 0; + u8 wait = 10; + + SET_REG_BITS(&val, PHY_ADDR, phy_id); + SET_REG_BITS(&val, REG_ADDR, reg); + xge_wr_csr(pdata, MII_MGMT_ADDRESS, val); + + xge_wr_csr(pdata, MII_MGMT_CONTROL, data); + do { + usleep_range(5, 10); + done = xge_rd_csr(pdata, MII_MGMT_INDICATORS); + } while ((done & MII_MGMT_BUSY) && wait--); + + if (done & MII_MGMT_BUSY) { + dev_err(&bus->dev, "MII_MGMT write failed\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int xge_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct xge_pdata *pdata = bus->priv; + u32 data, done, val = 0; + u8 wait = 10; + + SET_REG_BITS(&val, PHY_ADDR, phy_id); + SET_REG_BITS(&val, REG_ADDR, reg); + xge_wr_csr(pdata, MII_MGMT_ADDRESS, val); + + xge_wr_csr(pdata, MII_MGMT_COMMAND, MII_READ_CYCLE); + do { + usleep_range(5, 10); + done = xge_rd_csr(pdata, MII_MGMT_INDICATORS); + } while ((done & MII_MGMT_BUSY) && wait--); + + if (done & MII_MGMT_BUSY) { + dev_err(&bus->dev, "MII_MGMT read failed\n"); + return -ETIMEDOUT; + } + + data = xge_rd_csr(pdata, MII_MGMT_STATUS); + xge_wr_csr(pdata, MII_MGMT_COMMAND, 0); + + return data; +} + +static void xge_adjust_link(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + + if (phydev->link) { + if (pdata->phy_speed != phydev->speed) { + pdata->phy_speed = phydev->speed; + xge_mac_set_speed(pdata); + xge_mac_enable(pdata); + phy_print_status(phydev); + } + } else { + if (pdata->phy_speed != SPEED_UNKNOWN) { + pdata->phy_speed = SPEED_UNKNOWN; + xge_mac_disable(pdata); + phy_print_status(phydev); + } + } +} + +void xge_mdio_remove(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct mii_bus *mdio_bus = pdata->mdio_bus; + + if (ndev->phydev) + phy_disconnect(ndev->phydev); + + if (mdio_bus->state == MDIOBUS_REGISTERED) + mdiobus_unregister(mdio_bus); + + mdiobus_free(mdio_bus); +} + +int xge_mdio_config(struct net_device *ndev) +{ + struct xge_pdata *pdata = netdev_priv(ndev); + struct device *dev = &pdata->pdev->dev; + struct mii_bus *mdio_bus; + struct phy_device *phydev; + int ret; + + mdio_bus = mdiobus_alloc(); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "APM X-Gene Ethernet (v2) MDIO Bus"; + mdio_bus->read = xge_mdio_read; + mdio_bus->write = xge_mdio_write; + mdio_bus->priv = pdata; + mdio_bus->parent = dev; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev)); + pdata->mdio_bus = mdio_bus; + + mdio_bus->phy_mask = 0x1; + ret = mdiobus_register(mdio_bus); + if (ret) + goto err; + + phydev = phy_find_first(mdio_bus); + if (!phydev) { + dev_err(dev, "no PHY found\n"); + ret = -ENODEV; + goto err; + } + phydev = phy_connect(ndev, phydev_name(phydev), + &xge_adjust_link, + pdata->resources.phy_mode); + + if (IS_ERR(phydev)) { + netdev_err(ndev, "Could not attach to PHY\n"); + ret = PTR_ERR(phydev); + goto err; + } + + phydev->supported &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_AUI | + SUPPORTED_MII | + SUPPORTED_FIBRE | + SUPPORTED_BNC); + phydev->advertising = phydev->supported; + pdata->phy_speed = SPEED_UNKNOWN; + + return 0; +err: + xge_mdio_remove(ndev); + + return ret; +} diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.c b/drivers/net/ethernet/apm/xgene-v2/ring.c new file mode 
100644 index 0000000000000000000000000000000000000000..38810828f8f043a1f4f9031b42e70d00a2c50d94 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/ring.c @@ -0,0 +1,81 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "main.h" + +/* create circular linked list of descriptors */ +void xge_setup_desc(struct xge_desc_ring *ring) +{ + struct xge_raw_desc *raw_desc; + dma_addr_t dma_h, next_dma; + u16 offset; + int i; + + for (i = 0; i < XGENE_ENET_NUM_DESC; i++) { + raw_desc = &ring->raw_desc[i]; + + offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1); + next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE); + + raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) | + SET_BITS(PKT_SIZE, SLOT_EMPTY)); + dma_h = upper_32_bits(next_dma); + raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) | + SET_BITS(NEXT_DESC_ADDRH, dma_h)); + } +} + +void xge_update_tx_desc_addr(struct xge_pdata *pdata) +{ + struct xge_desc_ring *ring = pdata->tx_ring; + dma_addr_t dma_addr = ring->dma_addr; + + xge_wr_csr(pdata, DMATXDESCL, dma_addr); + xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr)); + + ring->head = 0; + ring->tail = 0; +} + +void xge_update_rx_desc_addr(struct xge_pdata *pdata) +{ + struct xge_desc_ring *ring = pdata->rx_ring; + dma_addr_t dma_addr = ring->dma_addr; + + xge_wr_csr(pdata, DMARXDESCL, dma_addr); + xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr)); + + ring->head = 0; + ring->tail = 0; +} + +void xge_intr_enable(struct xge_pdata *pdata) +{ + u32 data; + + data = RX_PKT_RCVD | TX_PKT_SENT; + xge_wr_csr(pdata, DMAINTRMASK, data); +} + +void xge_intr_disable(struct xge_pdata *pdata) +{ + xge_wr_csr(pdata, DMAINTRMASK, 0); +} diff --git a/drivers/net/ethernet/apm/xgene-v2/ring.h b/drivers/net/ethernet/apm/xgene-v2/ring.h new file mode 100644 index 0000000000000000000000000000000000000000..abc8c9a84954b3840c066d2b0471dc1c15da46d8 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene-v2/ring.h @@ -0,0 +1,119 @@ +/* + * Applied Micro X-Gene SoC Ethernet v2 Driver + * + * Copyright (c) 2017, Applied Micro Circuits Corporation + * Author(s): Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
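xge_setup_desc() above stamps every 64-byte descriptor as an empty slot (E = 1, PKT_SIZE = SLOT_EMPTY) and chains it to the next descriptor's DMA address, wrapping at the end to form a ring. A small self-check showing how the SET_BITS()/GET_BITS() helpers and constants from ring.h (below) pack the m0 word; example_desc_m0 is an invented name for illustration only:

static void example_desc_m0(void)
{
        __le64 m0;
        u64 v;

        /* an empty slot as xge_setup_desc() writes it */
        m0 = cpu_to_le64(SET_BITS(E, 1) | SET_BITS(PKT_SIZE, SLOT_EMPTY));

        /* the fields decode back from the little-endian word */
        v = le64_to_cpu(m0);
        WARN_ON(GET_BITS(E, v) != 1);
        WARN_ON(GET_BITS(PKT_SIZE, v) != SLOT_EMPTY);
}

On receive completion the hardware clears E and replaces SLOT_EMPTY with the real packet length, which is exactly what is_tx_slot_available() and the rx poll loop in main.c test for.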
+ */ + +#ifndef __XGENE_ENET_V2_RING_H__ +#define __XGENE_ENET_V2_RING_H__ + +#define XGENE_ENET_DESC_SIZE 64 +#define XGENE_ENET_NUM_DESC 256 +#define NUM_BUFS 8 +#define SLOT_EMPTY 0xfff + +#define DMATXCTRL 0xa180 +#define DMATXDESCL 0xa184 +#define DMATXDESCH 0xa1a0 +#define DMATXSTATUS 0xa188 +#define DMARXCTRL 0xa18c +#define DMARXDESCL 0xa190 +#define DMARXDESCH 0xa1a4 +#define DMARXSTATUS 0xa194 +#define DMAINTRMASK 0xa198 +#define DMAINTERRUPT 0xa19c + +#define D_POS 62 +#define D_LEN 2 +#define E_POS 63 +#define E_LEN 1 +#define PKT_ADDRL_POS 0 +#define PKT_ADDRL_LEN 32 +#define PKT_ADDRH_POS 32 +#define PKT_ADDRH_LEN 10 +#define PKT_SIZE_POS 32 +#define PKT_SIZE_LEN 12 +#define NEXT_DESC_ADDRL_POS 0 +#define NEXT_DESC_ADDRL_LEN 32 +#define NEXT_DESC_ADDRH_POS 48 +#define NEXT_DESC_ADDRH_LEN 10 + +#define TXPKTCOUNT_POS 16 +#define TXPKTCOUNT_LEN 8 +#define RXPKTCOUNT_POS 16 +#define RXPKTCOUNT_LEN 8 + +#define TX_PKT_SENT BIT(0) +#define TX_BUS_ERROR BIT(3) +#define RX_PKT_RCVD BIT(4) +#define RX_BUS_ERROR BIT(7) +#define RXSTATUS_RXPKTRCVD BIT(0) + +struct xge_raw_desc { + __le64 m0; + __le64 m1; + __le64 m2; + __le64 m3; + __le64 m4; + __le64 m5; + __le64 m6; + __le64 m7; +}; + +struct pkt_info { + struct sk_buff *skb; + dma_addr_t dma_addr; + void *pkt_buf; +}; + +/* software context of a descriptor ring */ +struct xge_desc_ring { + struct net_device *ndev; + dma_addr_t dma_addr; + u8 head; + u8 tail; + union { + void *desc_addr; + struct xge_raw_desc *raw_desc; + }; + struct pkt_info (*pkt_info); +}; + +static inline u64 xge_set_desc_bits(int pos, int len, u64 val) +{ + return (val & ((1ULL << len) - 1)) << pos; +} + +static inline u64 xge_get_desc_bits(int pos, int len, u64 src) +{ + return (src >> pos) & ((1ULL << len) - 1); +} + +#define SET_BITS(field, val) \ + xge_set_desc_bits(field ## _POS, field ## _LEN, val) + +#define GET_BITS(field, src) \ + xge_get_desc_bits(field ## _POS, field ## _LEN, src) + +void xge_setup_desc(struct xge_desc_ring *ring); +void xge_update_tx_desc_addr(struct xge_pdata *pdata); +void xge_update_rx_desc_addr(struct xge_pdata *pdata); +void xge_intr_enable(struct xge_pdata *pdata); +void xge_intr_disable(struct xge_pdata *pdata); + +#endif /* __XGENE_ENET_V2_RING_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 06e681697c1734872b2317550f17f28c306a63bc..2a835e07adfb58b2b6e4021e4b93a2555be8ff73 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -494,7 +494,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata) break; } - mc2 |= FULL_DUPLEX2 | PAD_CRC; + mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK; xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); @@ -623,6 +623,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); cb |= CFG_CLE_BYPASS_EN0; CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + CFG_CLE_IP_HDR_LEN_SET(&cb, 0); xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 5f83037bb96b5f4e78b3ad9e7afc110112fe5ff3..d250bfe94d24cb8f2a3a1f19487ac98a9659332e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ 
b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -163,6 +163,7 @@ enum xgene_enet_rm { #define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3) #define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) +#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5) #define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) #define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) #define CFG_CLE_NXTFPSEL0_SET(dst, val) xgene_set_bits(dst, val, 20, 4) @@ -215,6 +216,7 @@ enum xgene_enet_rm { #define ENET_GHD_MODE BIT(26) #define FULL_DUPLEX2 BIT(0) #define PAD_CRC BIT(2) +#define LENGTH_CHK BIT(4) #define SCAN_AUTO_INCR BIT(5) #define TBYT_ADDR 0x38 #define TPKT_ADDR 0x39 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index b3568c453b1451f179a3c6ebe18ac524825840ac..5f37ed3506d571d6ddaad170f2147f430a88e51c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -601,14 +601,24 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static void xgene_enet_skip_csum(struct sk_buff *skb) +static void xgene_enet_rx_csum(struct sk_buff *skb) { + struct net_device *ndev = skb->dev; struct iphdr *iph = ip_hdr(skb); - if (!ip_is_fragment(iph) || - (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } + if (!(ndev->features & NETIF_F_RXCSUM)) + return; + + if (skb->protocol != htons(ETH_P_IP)) + return; + + if (ip_is_fragment(iph)) + return; + + if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP) + return; + + skb->ip_summed = CHECKSUM_UNNECESSARY; } static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool, @@ -648,12 +658,24 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool, buf_pool->head = head; } +/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */ +static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status) +{ + if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) { + if (ntohs(eth_hdr(skb)->h_proto) < 46) + return true; + } + + return false; +} + static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, struct xgene_enet_raw_desc *raw_desc, struct xgene_enet_raw_desc *exp_desc) { struct xgene_enet_desc_ring *buf_pool, *page_pool; u32 datalen, frag_size, skb_index; + struct xgene_enet_pdata *pdata; struct net_device *ndev; dma_addr_t dma_addr; struct sk_buff *skb; @@ -666,6 +688,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, bool nv; ndev = rx_ring->ndev; + pdata = netdev_priv(ndev); dev = ndev_to_dev(rx_ring->ndev); buf_pool = rx_ring->buf_pool; page_pool = rx_ring->page_pool; @@ -676,30 +699,29 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, skb = buf_pool->rx_skb[skb_index]; buf_pool->rx_skb[skb_index] = NULL; + datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1)); + skb_put(skb, datalen); + prefetch(skb->data - NET_IP_ALIGN); + skb->protocol = eth_type_trans(skb, ndev); + /* checking for error */ - status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) || + status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) | GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); - if (unlikely(status > 2)) { - dev_kfree_skb_any(skb); - xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc); - xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), 
- status); - ret = -EIO; - goto out; + if (unlikely(status)) { + if (!xgene_enet_errata_10GE_8(skb, datalen, status)) { + dev_kfree_skb_any(skb); + xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc); + xgene_enet_parse_error(rx_ring, pdata, status); + goto out; + } } - /* strip off CRC as HW isn't doing this */ - datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1)); - nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); - if (!nv) + if (!nv) { + /* strip off CRC as HW isn't doing this */ datalen -= 4; - - skb_put(skb, datalen); - prefetch(skb->data - NET_IP_ALIGN); - - if (!nv) goto skip_jumbo; + } slots = page_pool->slots - 1; head = page_pool->head; @@ -728,11 +750,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, skip_jumbo: skb_checksum_none_assert(skb); - skb->protocol = eth_type_trans(skb, ndev); - if (likely((ndev->features & NETIF_F_IP_CSUM) && - skb->protocol == htons(ETH_P_IP))) { - xgene_enet_skip_csum(skb); - } + xgene_enet_rx_csum(skb); rx_ring->rx_packets++; rx_ring->rx_bytes += datalen; @@ -2039,7 +2057,7 @@ static int xgene_enet_probe(struct platform_device *pdev) xgene_enet_setup_ops(pdata); if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { - ndev->features |= NETIF_F_TSO; + ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM; spin_lock_init(&pdata->mss_lock); } ndev->hw_features = ndev->features; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 52571741da9f5559145e5ce172a7ee910818bbc4..0d4be2425ebc2c24b4ecfa26fc4637f76ff76f91 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -41,6 +41,7 @@ #include "../../../phy/mdio-xgene.h" #define XGENE_DRV_VERSION "v1.0" +#define ETHER_MIN_PACKET 64 #define XGENE_ENET_STD_MTU 1536 #define XGENE_ENET_MAX_MTU 9600 #define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index ece19e6d68e3bc53e0dddc8dbbb65e1bdf4af0e1..423240c97d398735d649d401dd6c1825911735cf 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -341,8 +341,15 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data); data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + /* Errata 10GE_1 - FIFO threshold default value incorrect */ + RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH); xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); + /* Errata 10GE_1 - FIFO threshold default value incorrect */ + xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data); + RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH); + xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data); + xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); data |= BIT(12); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index 03b847ad89370ec2f79d2b731f9dbc24760a7224..e644a429ebf448dbba856b40a3bec57912cf1b8c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -65,6 +65,11 @@ #define XG_DEF_PAUSE_THRES 0x390 #define XG_DEF_PAUSE_OFF_THRES 0x2c0 #define XG_RSIF_CONFIG_REG_ADDR 0x00a0 +#define XG_RSIF_CLE_BUFF_THRESH 0x3 +#define RSIF_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 3) 
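
The *_SET accessors used in the errata fixes above (CFG_CLE_IP_HDR_LEN_SET, RSIF_CLE_BUFF_THRESH_SET, and the RSIF_PLC_CLE_BUFF_THRESH_SET define that follows) all expand to xgene_set_bits(dst, val, start, len). Its body is not part of these hunks; a minimal sketch of the read-modify-write semantics the call sites assume:

	/* Sketch only; the driver's real helper may differ in detail.
	 * Replaces the 'len'-bit field starting at bit 'start' of the CSR
	 * shadow value with 'val', leaving every other bit untouched.
	 */
	static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
	{
		u32 mask = GENMASK(start + len - 1, start);	/* <linux/bitops.h> */

		*dst &= ~mask;
		*dst |= (val << start) & mask;
	}

Under that reading, RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH) rewrites bits [2:0] of the XG_RSIF_CONFIG_REG_ADDR shadow to 0x3 before xgene_enet_wr_csr() writes it back, the same read-modify-write pattern both 10GE_1 threshold fixes follow.
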
+#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8 +#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1 +#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2) #define XCLE_BYPASS_REG0_ADDR 0x0160 #define XCLE_BYPASS_REG1_ADDR 0x0164 #define XG_CFG_BYPASS_ADDR 0x0204 diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 23873395f100b06763e03e12b6a91685aba71bbf..68de2f2652f2bdb562793d9ca8810f06bded1088 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -434,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev) /* Enable EMAC */ arc_reg_or(priv, R_CTRL, EN_MASK); - phy_start_aneg(ndev->phydev); + phy_start(ndev->phydev); netif_start_queue(ndev); @@ -556,6 +556,8 @@ static int arc_emac_stop(struct net_device *ndev) napi_disable(&priv->napi); netif_stop_queue(ndev); + phy_stop(ndev->phydev); + /* Disable interrupts */ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index d4a409139ea2c9cc13a03d7d63511d322210cc7a..78c5de467426f1e4276cebe8ee81cc0091d4c6fa 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -102,9 +102,6 @@ struct alx_napi { #define ALX_MAX_NAPIS 8 -#define ALX_FLAG_USING_MSIX BIT(0) -#define ALX_FLAG_USING_MSI BIT(1) - struct alx_priv { struct net_device *dev; @@ -112,7 +109,6 @@ struct alx_priv { /* msi-x vectors */ int num_vec; - struct msix_entry *msix_entries; /* all descriptor memory */ struct { @@ -139,8 +135,6 @@ struct alx_priv { u16 msg_enable; - int flags; - /* protects hw.stats */ spinlock_t stats_lock; }; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 6a27c266267587685956b8720cd0a6bc9b3fcb69..a8c2db881b75354f6297094ddedbd26773e65601 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -314,7 +314,7 @@ static int alx_poll(struct napi_struct *napi, int budget) napi_complete_done(&np->napi, work); /* enable interrupt */ - if (alx->flags & ALX_FLAG_USING_MSIX) { + if (alx->hw.pdev->msix_enabled) { alx_mask_msix(hw, np->vec_idx, false); } else { spin_lock_irqsave(&alx->irq_lock, flags); @@ -811,7 +811,7 @@ static void alx_config_vector_mapping(struct alx_priv *alx) u32 tbl[2] = {0, 0}; int i, vector, idx, shift; - if (alx->flags & ALX_FLAG_USING_MSIX) { + if (alx->hw.pdev->msix_enabled) { /* tx mappings */ for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) { idx = txq_vec_mapping_shift[i * 2]; @@ -828,29 +828,19 @@ static void alx_config_vector_mapping(struct alx_priv *alx) alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); } -static bool alx_enable_msix(struct alx_priv *alx) +static int alx_enable_msix(struct alx_priv *alx) { - int i, err, num_vec, num_txq, num_rxq; + int err, num_vec, num_txq, num_rxq; num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES); num_rxq = 1; num_vec = max_t(int, num_txq, num_rxq) + 1; - alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry), - GFP_KERNEL); - if (!alx->msix_entries) { - netdev_warn(alx->dev, "Allocation of msix entries failed!\n"); - return false; - } - - for (i = 0; i < num_vec; i++) - alx->msix_entries[i].entry = i; - - err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec); + err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec, + PCI_IRQ_MSIX); if (err) { - kfree(alx->msix_entries); netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n"); - return 
false; + return err; } alx->num_vec = num_vec; @@ -858,7 +848,7 @@ static bool alx_enable_msix(struct alx_priv *alx) alx->num_txq = num_txq; alx->num_rxq = num_rxq; - return true; + return err; } static int alx_request_msix(struct alx_priv *alx) @@ -866,7 +856,7 @@ static int alx_request_msix(struct alx_priv *alx) struct net_device *netdev = alx->dev; int i, err, vector = 0, free_vector = 0; - err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc, + err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc, 0, netdev->name, alx); if (err) goto out_err; @@ -889,7 +879,7 @@ static int alx_request_msix(struct alx_priv *alx) sprintf(np->irq_lbl, "%s-unused", netdev->name); np->vec_idx = vector; - err = request_irq(alx->msix_entries[vector].vector, + err = request_irq(pci_irq_vector(alx->hw.pdev, vector), alx_intr_msix_ring, 0, np->irq_lbl, np); if (err) goto out_free; @@ -897,47 +887,31 @@ static int alx_request_msix(struct alx_priv *alx) return 0; out_free: - free_irq(alx->msix_entries[free_vector++].vector, alx); + free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx); vector--; for (i = 0; i < vector; i++) - free_irq(alx->msix_entries[free_vector++].vector, + free_irq(pci_irq_vector(alx->hw.pdev,free_vector++), alx->qnapi[i]); out_err: return err; } -static void alx_init_intr(struct alx_priv *alx, bool msix) +static int alx_init_intr(struct alx_priv *alx) { - if (msix) { - if (alx_enable_msix(alx)) - alx->flags |= ALX_FLAG_USING_MSIX; - } + int ret; - if (!(alx->flags & ALX_FLAG_USING_MSIX)) { - alx->num_vec = 1; - alx->num_napi = 1; - alx->num_txq = 1; - alx->num_rxq = 1; - - if (!pci_enable_msi(alx->hw.pdev)) - alx->flags |= ALX_FLAG_USING_MSI; - } -} - -static void alx_disable_advanced_intr(struct alx_priv *alx) -{ - if (alx->flags & ALX_FLAG_USING_MSIX) { - kfree(alx->msix_entries); - pci_disable_msix(alx->hw.pdev); - alx->flags &= ~ALX_FLAG_USING_MSIX; - } + ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_LEGACY); + if (ret) + return ret; - if (alx->flags & ALX_FLAG_USING_MSI) { - pci_disable_msi(alx->hw.pdev); - alx->flags &= ~ALX_FLAG_USING_MSI; - } + alx->num_vec = 1; + alx->num_napi = 1; + alx->num_txq = 1; + alx->num_rxq = 1; + return 0; } static void alx_irq_enable(struct alx_priv *alx) @@ -950,10 +924,11 @@ static void alx_irq_enable(struct alx_priv *alx) alx_write_mem32(hw, ALX_IMR, alx->int_mask); alx_post_write(hw); - if (alx->flags & ALX_FLAG_USING_MSIX) + if (alx->hw.pdev->msix_enabled) { /* enable all msix irqs */ for (i = 0; i < alx->num_vec; i++) alx_mask_msix(hw, i, false); + } } static void alx_irq_disable(struct alx_priv *alx) @@ -965,13 +940,13 @@ static void alx_irq_disable(struct alx_priv *alx) alx_write_mem32(hw, ALX_IMR, 0); alx_post_write(hw); - if (alx->flags & ALX_FLAG_USING_MSIX) { + if (alx->hw.pdev->msix_enabled) { for (i = 0; i < alx->num_vec; i++) { alx_mask_msix(hw, i, true); - synchronize_irq(alx->msix_entries[i].vector); + synchronize_irq(pci_irq_vector(alx->hw.pdev, i)); } } else { - synchronize_irq(alx->hw.pdev->irq); + synchronize_irq(pci_irq_vector(alx->hw.pdev, 0)); } } @@ -981,8 +956,11 @@ static int alx_realloc_resources(struct alx_priv *alx) alx_free_rings(alx); alx_free_napis(alx); - alx_disable_advanced_intr(alx); - alx_init_intr(alx, false); + pci_free_irq_vectors(alx->hw.pdev); + + err = alx_init_intr(alx); + if (err) + return err; err = alx_alloc_napis(alx); if (err) @@ -1004,7 +982,7 @@ static int alx_request_irq(struct alx_priv *alx) msi_ctrl = (hw->imt >> 1) << 
ALX_MSI_RETRANS_TM_SHIFT; - if (alx->flags & ALX_FLAG_USING_MSIX) { + if (alx->hw.pdev->msix_enabled) { alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl); err = alx_request_msix(alx); if (!err) @@ -1016,20 +994,20 @@ static int alx_request_irq(struct alx_priv *alx) goto out; } - if (alx->flags & ALX_FLAG_USING_MSI) { + if (alx->hw.pdev->msi_enabled) { alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl | ALX_MSI_MASK_SEL_LINE); - err = request_irq(pdev->irq, alx_intr_msi, 0, + err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0, alx->dev->name, alx); if (!err) goto out; + /* fall back to legacy interrupt */ - alx->flags &= ~ALX_FLAG_USING_MSI; - pci_disable_msi(alx->hw.pdev); + pci_free_irq_vectors(alx->hw.pdev); } alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); - err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, + err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED, alx->dev->name, alx); out: if (!err) @@ -1042,18 +1020,15 @@ static int alx_request_irq(struct alx_priv *alx) static void alx_free_irq(struct alx_priv *alx) { struct pci_dev *pdev = alx->hw.pdev; - int i, vector = 0; + int i; - if (alx->flags & ALX_FLAG_USING_MSIX) { - free_irq(alx->msix_entries[vector++].vector, alx); + free_irq(pci_irq_vector(pdev, 0), alx); + if (alx->hw.pdev->msix_enabled) { for (i = 0; i < alx->num_napi; i++) - free_irq(alx->msix_entries[vector++].vector, - alx->qnapi[i]); - } else { - free_irq(pdev->irq, alx); + free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]); } - alx_disable_advanced_intr(alx); + pci_free_irq_vectors(pdev); } static int alx_identify_hw(struct alx_priv *alx) @@ -1221,7 +1196,12 @@ static int __alx_open(struct alx_priv *alx, bool resume) { int err; - alx_init_intr(alx, true); + err = alx_enable_msix(alx); + if (err < 0) { + err = alx_init_intr(alx); + if (err) + return err; + } if (!resume) netif_carrier_off(alx->dev); @@ -1264,7 +1244,7 @@ static int __alx_open(struct alx_priv *alx, bool resume) alx_free_rings(alx); alx_free_napis(alx); out_disable_adv_intr: - alx_disable_advanced_intr(alx); + pci_free_irq_vectors(alx->hw.pdev); return err; } @@ -1637,11 +1617,11 @@ static void alx_poll_controller(struct net_device *netdev) struct alx_priv *alx = netdev_priv(netdev); int i; - if (alx->flags & ALX_FLAG_USING_MSIX) { + if (alx->hw.pdev->msix_enabled) { alx_intr_msix_misc(0, alx); for (i = 0; i < alx->num_txq; i++) alx_intr_msix_ring(0, alx->qnapi[i]); - } else if (alx->flags & ALX_FLAG_USING_MSI) + } else if (alx->hw.pdev->msi_enabled) alx_intr_msi(0, alx); else alx_intr_legacy(0, alx); @@ -1783,7 +1763,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &alx_netdev_ops; netdev->ethtool_ops = &alx_ethtool_ops; - netdev->irq = pdev->irq; + netdev->irq = pci_irq_vector(pdev, 0); netdev->watchdog_timeo = ALX_WATCHDOG_TIME; if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 022772e1e24988deec39b46c69a45349d7dba055..83d2db2abb45535c43f5921ab75791f9869ed4c0 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -1886,7 +1886,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) buffer_info->skb = skb; buffer_info->length = (u16) adapter->rx_buffer_len; page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; + offset = offset_in_page(skb->data); buffer_info->dma = pci_map_page(pdev, page, offset, 
adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); @@ -2230,7 +2230,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); buffer_info->length = hdr_len; page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; + offset = offset_in_page(skb->data); buffer_info->dma = pci_map_page(adapter->pdev, page, offset, hdr_len, PCI_DMA_TODEVICE); @@ -2254,9 +2254,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, data_len -= buffer_info->length; page = virt_to_page(skb->data + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); - offset = (unsigned long)(skb->data + - (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & - ~PAGE_MASK; + offset = offset_in_page(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); buffer_info->dma = pci_map_page(adapter->pdev, page, offset, buffer_info->length, PCI_DMA_TODEVICE); @@ -2268,7 +2267,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, /* not TSO */ buffer_info->length = buf_len; page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; + offset = offset_in_page(skb->data); buffer_info->dma = pci_map_page(adapter->pdev, page, offset, buf_len, PCI_DMA_TODEVICE); if (++next_to_use == tpd_ring->count) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 940fb24bba210ecd73f968082fefda0697628be1..96413808c72699319573e82481b73f587db612d8 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -109,7 +109,6 @@ config TIGON3 tristate "Broadcom Tigon3 support" depends on PCI select PHYLIB - select HWMON imply PTP_1588_CLOCK ---help--- This driver supports Broadcom Tigon3 based gigabit Ethernet cards. @@ -117,6 +116,13 @@ config TIGON3 To compile this driver as a module, choose M here: the module will be called tg3. This is recommended. +config TIGON3_HWMON + bool "Broadcom Tigon3 HWMON support" + default y + depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m) + ---help--- + Say Y if you want to expose the thermal sensor on Tigon3 devices. 
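
The TIGON3_HWMON dependency line is the standard Kconfig idiom for an optional hook into a possibly-modular subsystem: the "!(TIGON3=y && HWMON=m)" clause forbids exactly the combination in which built-in tg3 code would reference hwmon symbols living in a module and fail to link. In the driver, the new bool symbol would then gate the sensor hooks, roughly along these lines (a hypothetical sketch; the hook name, the hwmon_dev field, and the tg3_groups attribute table are assumptions, not taken from this patch):

	#ifdef CONFIG_TIGON3_HWMON
	static void tg3_hwmon_open(struct tg3 *tp)
	{
		struct device *dev = &tp->pdev->dev;

		/* tg3_groups is assumed to describe the temperature sensor */
		tp->hwmon_dev = hwmon_device_register_with_groups(dev, "tg3",
								  tp, tg3_groups);
		if (IS_ERR(tp->hwmon_dev)) {
			tp->hwmon_dev = NULL;
			dev_err(dev, "Cannot register hwmon device\n");
		}
	}
	#else
	/* no-op stub keeps callers unconditional when the sensor is configured out */
	static inline void tg3_hwmon_open(struct tg3 *tp) { }
	#endif

The stub variant lets the rest of the driver call tg3_hwmon_open() without its own ifdefs, which is the usual payoff of this pattern.
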
+ config BNX2X tristate "Broadcom NetXtremeII 10Gb support" depends on PCI diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a68d4889f5db74d895f1bfb9e74c46bd2b892dbc..099b374c1b17bbd8e9cabe68cdc7cd991a258737 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -284,6 +285,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), + /* Per TX-queue statistics are dynamically appended */ }; #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) @@ -338,7 +340,8 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) continue; j++; } - return j; + /* Include per-queue statistics */ + return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; default: return -EOPNOTSUPP; } @@ -349,6 +352,7 @@ static void bcm_sysport_get_strings(struct net_device *dev, { struct bcm_sysport_priv *priv = netdev_priv(dev); const struct bcm_sysport_stats *s; + char buf[128]; int i, j; switch (stringset) { @@ -363,6 +367,18 @@ static void bcm_sysport_get_strings(struct net_device *dev, ETH_GSTRING_LEN); j++; } + + for (i = 0; i < dev->num_tx_queues; i++) { + snprintf(buf, sizeof(buf), "txq%d_packets", i); + memcpy(data + j * ETH_GSTRING_LEN, buf, + ETH_GSTRING_LEN); + j++; + + snprintf(buf, sizeof(buf), "txq%d_bytes", i); + memcpy(data + j * ETH_GSTRING_LEN, buf, + ETH_GSTRING_LEN); + j++; + } break; default: break; @@ -418,6 +434,7 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *ring; int i, j; if (netif_running(dev)) @@ -436,6 +453,22 @@ static void bcm_sysport_get_stats(struct net_device *dev, data[j] = *(unsigned long *)p; j++; } + + /* For SYSTEMPORT Lite since we have holes in our statistics, j would + * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it + * needs to point to how many total statistics we have minus the + * number of per TX queue statistics + */ + j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) - + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; + + for (i = 0; i < dev->num_tx_queues; i++) { + ring = &priv->tx_rings[i]; + data[j] = ring->packets; + j++; + data[j] = ring->bytes; + j++; + } } static void bcm_sysport_get_wol(struct net_device *dev, @@ -637,6 +670,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, u16 len, status; struct bcm_rsb *rsb; + /* Clear status before servicing to reduce spurious interrupts */ + intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR); + /* Determine how much we should process since last call, SYSTEMPORT Lite * groups the producer and consumer indexes into the same 32-bit * which we access using RDMA_CONS_INDEX @@ -647,11 +683,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, p_index = rdma_readl(priv, RDMA_CONS_INDEX); p_index &= RDMA_PROD_INDEX_MASK; - if (p_index < priv->rx_c_index) - to_process = (RDMA_CONS_INDEX_MASK + 1) - - priv->rx_c_index + p_index; - else - to_process = p_index - priv->rx_c_index; + to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK; netif_dbg(priv, rx_status, ndev, "p_index=%d rx_c_index=%d to_process=%d\n", @@ -746,26 
+778,26 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, return processed; } -static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, +static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, struct bcm_sysport_cb *cb, unsigned int *bytes_compl, unsigned int *pkts_compl) { + struct bcm_sysport_priv *priv = ring->priv; struct device *kdev = &priv->pdev->dev; - struct net_device *ndev = priv->netdev; if (cb->skb) { - ndev->stats.tx_bytes += cb->skb->len; + ring->bytes += cb->skb->len; *bytes_compl += cb->skb->len; dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); - ndev->stats.tx_packets++; + ring->packets++; (*pkts_compl)++; bcm_sysport_free_cb(cb); /* SKB fragment */ } else if (dma_unmap_addr(cb, dma_addr)) { - ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len); + ring->bytes += dma_unmap_len(cb, dma_len); dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); dma_unmap_addr_set(cb, dma_addr, 0); @@ -782,6 +814,13 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_cb *cb; u32 hw_ind; + /* Clear status before servicing to reduce spurious interrupts */ + if (!ring->priv->is_lite) + intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR); + else + intrl2_0_writel(ring->priv, BIT(ring->index + + INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR); + /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; @@ -803,7 +842,7 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, while (last_tx_cn-- > 0) { cb = ring->cbs + last_c_index; - bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl); + bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); ring->desc_count++; last_c_index++; @@ -1632,6 +1671,24 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p) return 0; } +static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + struct bcm_sysport_tx_ring *ring; + unsigned int q; + + for (q = 0; q < dev->num_tx_queues; q++) { + ring = &priv->tx_rings[q]; + tx_bytes += ring->bytes; + tx_packets += ring->packets; + } + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + return &dev->stats; +} + static void bcm_sysport_netif_start(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -1893,6 +1950,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcm_sysport_poll_controller, #endif + .ndo_get_stats = bcm_sysport_get_nstats, }; #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 863ddd7870b77d2ce963685098fff71211f395a8..77a51c167a694734b5983f524464e9b94725e1d7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -647,6 +647,9 @@ enum bcm_sysport_stat_type { .reg_offset = ofs, \ } +/* TX bytes and packets */ +#define NUM_SYSPORT_TXQ_STAT 2 + struct bcm_sysport_stats { char stat_string[ETH_GSTRING_LEN]; int stat_sizeof; @@ -690,6 +693,8 @@ struct bcm_sysport_tx_ring { struct bcm_sysport_cb *cbs; /* Transmit control blocks */ struct 
dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ + unsigned long packets; /* packets statistics */ + unsigned long bytes; /* bytes statistics */ }; /* Driver private structure */ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index d59cfcc4c4d596d48957ecd06a77cf8a475090b7..6322594ab2600ac100a63140c9420855f07c86dc 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "bgmac.h" static inline bool bgmac_is_bcm4707_family(struct bcma_device *core) @@ -114,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core) struct ssb_sprom *sprom = &core->bus->sprom; struct mii_bus *mii_bus; struct bgmac *bgmac; - u8 *mac; + const u8 *mac = NULL; int err; bgmac = bgmac_alloc(&core->dev); @@ -127,21 +128,27 @@ static int bgmac_probe(struct bcma_device *core) bcma_set_drvdata(core, bgmac); - switch (core->core_unit) { - case 0: - mac = sprom->et0mac; - break; - case 1: - mac = sprom->et1mac; - break; - case 2: - mac = sprom->et2mac; - break; - default: - dev_err(bgmac->dev, "Unsupported core_unit %d\n", - core->core_unit); - err = -ENOTSUPP; - goto err; + if (bgmac->dev->of_node) + mac = of_get_mac_address(bgmac->dev->of_node); + + /* If no MAC address assigned via device tree, check SPROM */ + if (!mac) { + switch (core->core_unit) { + case 0: + mac = sprom->et0mac; + break; + case 1: + mac = sprom->et1mac; + break; + case 2: + mac = sprom->et2mac; + break; + default: + dev_err(bgmac->dev, "Unsupported core_unit %d\n", + core->core_unit); + err = -ENOTSUPP; + goto err; + } } ether_addr_copy(bgmac->net_dev->dev_addr, mac); @@ -192,36 +199,50 @@ static int bgmac_probe(struct bcma_device *core) goto err1; } - bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & - BGMAC_BFL_ENETROBO); + bgmac->has_robosw = !!(sprom->boardflags_lo & BGMAC_BFL_ENETROBO); if (bgmac->has_robosw) dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n"); - if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) + if (sprom->boardflags_lo & BGMAC_BFL_ENETADM) dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n"); /* Feature Flags */ - switch (core->bus->chipinfo.id) { + switch (ci->id) { + /* BCM 471X/535X family */ + case BCMA_CHIP_ID_BCM4716: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + /* fallthrough */ + case BCMA_CHIP_ID_BCM47162: + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + break; case BCMA_CHIP_ID_BCM5357: + case BCMA_CHIP_ID_BCM53572: bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; - if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) { - bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + if (ci->pkg == BCMA_PKG_ID_BCM47188 || + ci->pkg == BCMA_PKG_ID_BCM47186) { bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; } - if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358) + if (ci->pkg == BCMA_PKG_ID_BCM5358) bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII; break; - case BCMA_CHIP_ID_BCM53572: - bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + case BCMA_CHIP_ID_BCM53573: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; - bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; - bgmac->feature_flags |= 
BGMAC_FEAT_SW_TYPE_PHY; - if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) { - bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + if (ci->pkg == BCMA_PKG_ID_BCM47189) bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + if (core->core_unit == 0) { + bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= + BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII; + } else if (core->core_unit == 1) { + bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6; + bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII; } break; case BCMA_CHIP_ID_BCM4749: @@ -229,18 +250,11 @@ static int bgmac_probe(struct bcma_device *core) bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; - if (core->bus->chipinfo.pkg == 10) { + if (ci->pkg == 10) { bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; } break; - case BCMA_CHIP_ID_BCM4716: - bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; - /* fallthrough */ - case BCMA_CHIP_ID_BCM47162: - bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2; - bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; - break; /* bcm4707_family */ case BCMA_CHIP_ID_BCM4707: case BCMA_CHIP_ID_BCM47094: @@ -249,21 +263,6 @@ static int bgmac_probe(struct bcma_device *core) bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; break; - case BCMA_CHIP_ID_BCM53573: - bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; - bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; - if (ci->pkg == BCMA_PKG_ID_BCM47189) - bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; - if (core->core_unit == 0) { - bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE; - if (ci->pkg == BCMA_PKG_ID_BCM47189) - bgmac->feature_flags |= - BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII; - } else if (core->core_unit == 1) { - bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6; - bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII; - } - break; default: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index da1b8b225eb9d31e001435ee33c7362f3c5565b5..73aca97a96bc70fb1e79f04ec2585589d0616cc5 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -21,8 +21,12 @@ #include #include "bgmac.h" +#define NICPM_PADRING_CFG 0x00000004 #define NICPM_IOMUX_CTRL 0x00000008 +#define NICPM_PADRING_CFG_INIT_VAL 0x74000000 +#define NICPM_IOMUX_CTRL_INIT_VAL_AX 0x21880000 + #define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000 #define NICPM_IOMUX_CTRL_SPD_SHIFT 10 #define NICPM_IOMUX_CTRL_SPD_10M 0 @@ -113,6 +117,10 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev) if (!bgmac->plat.nicpm_base) return; + /* SET RGMII IO CONFIG */ + writel(NICPM_PADRING_CFG_INIT_VAL, + bgmac->plat.nicpm_base + NICPM_PADRING_CFG); + val = NICPM_IOMUX_CTRL_INIT_VAL; switch (bgmac->net_dev->phydev->speed) { default: @@ -244,6 +252,31 @@ static int bgmac_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int bgmac_suspend(struct device *dev) +{ + struct bgmac *bgmac = dev_get_drvdata(dev); + + return bgmac_enet_suspend(bgmac); +} + +static int bgmac_resume(struct device *dev) +{ + struct bgmac *bgmac = dev_get_drvdata(dev); + + return bgmac_enet_resume(bgmac); +} + +static const struct dev_pm_ops bgmac_pm_ops = { + 
.suspend = bgmac_suspend, + .resume = bgmac_resume +}; + +#define BGMAC_PM_OPS (&bgmac_pm_ops) +#else +#define BGMAC_PM_OPS NULL +#endif /* CONFIG_PM */ + static const struct of_device_id bgmac_of_enet_match[] = { {.compatible = "brcm,amac",}, {.compatible = "brcm,nsp-amac",}, @@ -257,6 +290,7 @@ static struct platform_driver bgmac_enet_driver = { .driver = { .name = "bgmac-enet", .of_match_table = bgmac_of_enet_match, + .pm = BGMAC_PM_OPS }, .probe = bgmac_probe, .remove = bgmac_remove, diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index fd66fca00e0177aa6f2ebf8e01d59420cb14353e..ba4d2e145bb9bb81c32ade2e3855743e493c1c59 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -1480,6 +1481,7 @@ int bgmac_enet_probe(struct bgmac *bgmac) net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); + dev_set_drvdata(bgmac->dev, bgmac); if (!is_valid_ether_addr(net_dev->dev_addr)) { dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", @@ -1552,5 +1554,55 @@ void bgmac_enet_remove(struct bgmac *bgmac) } EXPORT_SYMBOL_GPL(bgmac_enet_remove); +int bgmac_enet_suspend(struct bgmac *bgmac) +{ + if (!netif_running(bgmac->net_dev)) + return 0; + + phy_stop(bgmac->net_dev->phydev); + + netif_stop_queue(bgmac->net_dev); + + napi_disable(&bgmac->napi); + + netif_tx_lock(bgmac->net_dev); + netif_device_detach(bgmac->net_dev); + netif_tx_unlock(bgmac->net_dev); + + bgmac_chip_intrs_off(bgmac); + bgmac_chip_reset(bgmac); + bgmac_dma_cleanup(bgmac); + + return 0; +} +EXPORT_SYMBOL_GPL(bgmac_enet_suspend); + +int bgmac_enet_resume(struct bgmac *bgmac) +{ + int rc; + + if (!netif_running(bgmac->net_dev)) + return 0; + + rc = bgmac_dma_init(bgmac); + if (rc) + return rc; + + bgmac_chip_init(bgmac); + + napi_enable(&bgmac->napi); + + netif_tx_lock(bgmac->net_dev); + netif_device_attach(bgmac->net_dev); + netif_tx_unlock(bgmac->net_dev); + + netif_start_queue(bgmac->net_dev); + + phy_start(bgmac->net_dev->phydev); + + return 0; +} +EXPORT_SYMBOL_GPL(bgmac_enet_resume); + MODULE_AUTHOR("Rafał Miłecki"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 6d1c6ff1ed963ef6bd672b8cd792b7b529c09b27..c1818766c501f8a33b309495b02566c2c1a1cb5e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -402,7 +402,7 @@ #define BGMAC_WEIGHT 64 -#define ETHER_MAX_LEN 1518 +#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN) /* Feature Flags */ #define BGMAC_FEAT_TX_MASK_SETUP BIT(0) @@ -537,6 +537,8 @@ int bgmac_enet_probe(struct bgmac *bgmac); void bgmac_enet_remove(struct bgmac *bgmac); void bgmac_adjust_link(struct net_device *net_dev); int bgmac_phy_connect_direct(struct bgmac *bgmac); +int bgmac_enet_suspend(struct bgmac *bgmac); +int bgmac_enet_resume(struct bgmac *bgmac); struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac); void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 9e8c06130c092d3f061089448797c1da74e15043..eccb3d1b6abb748c14567d0efcdc28a405b64fb5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2021,6 +2021,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVERHEAD + mtu + BNX2X_FW_RX_ALIGN_END; + fp->rx_buf_size = 
SKB_DATA_ALIGN(fp->rx_buf_size); /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; @@ -4277,7 +4278,10 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, { if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return bnx2x_setup_tc(dev, tc->tc); + + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnx2x_setup_tc(dev, tc->mqprio->num_tc); } /* called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index b209b7f6093e29da98be58312d9c375c04125ab3..7dd83d0ef0a0be93e63576d1ba1c5c9f30cdb63f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6161,103 +6161,40 @@ static void bnx2x_link_int_ack(struct link_params *params, } } +static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) +{ + str[0] = '\0'; + (*len)--; + return 0; +} + static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) { - u8 *str_ptr = str; - u32 mask = 0xf0000000; - u8 shift = 8*4; - u8 digit; - u8 remove_leading_zeros = 1; + u16 ret; + if (*len < 10) { /* Need more than 10chars for this format */ - *str_ptr = '\0'; - (*len)--; + bnx2x_null_format_ver(num, str, len); return -EINVAL; } - while (shift > 0) { - shift -= 4; - digit = ((num & mask) >> shift); - if (digit == 0 && remove_leading_zeros) { - *str_ptr = '0'; - } else { - if (digit < 0xa) - *str_ptr = digit + '0'; - else - *str_ptr = digit - 0xa + 'a'; - - remove_leading_zeros = 0; - str_ptr++; - (*len)--; - } - mask = mask >> 4; - if (shift == 4*4) { - if (remove_leading_zeros) { - str_ptr++; - (*len)--; - } - *str_ptr = '.'; - str_ptr++; - (*len)--; - remove_leading_zeros = 1; - } - } - if (remove_leading_zeros) - (*len)--; + ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num); + *len -= ret; return 0; } static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len) { - u8 *str_ptr = str; - u32 mask = 0x00f00000; - u8 shift = 8*3; - u8 digit; - u8 remove_leading_zeros = 1; + u16 ret; if (*len < 10) { /* Need more than 10chars for this format */ - *str_ptr = '\0'; - (*len)--; + bnx2x_null_format_ver(num, str, len); return -EINVAL; } - while (shift > 0) { - shift -= 4; - digit = ((num & mask) >> shift); - if (digit == 0 && remove_leading_zeros) { - *str_ptr = '0'; - } else { - if (digit < 0xa) - *str_ptr = digit + '0'; - else - *str_ptr = digit - 0xa + 'a'; - - remove_leading_zeros = 0; - str_ptr++; - (*len)--; - } - mask = mask >> 4; - if ((shift == 4*4) || (shift == 4*2)) { - if (remove_leading_zeros) { - str_ptr++; - (*len)--; - } - *str_ptr = '.'; - str_ptr++; - (*len)--; - remove_leading_zeros = 1; - } - } - if (remove_leading_zeros) - (*len)--; - return 0; -} - -static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) -{ - str[0] = '\0'; - (*len)--; + ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num); + *len -= ret; return 0; } @@ -10681,22 +10618,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len) { - int status = 0; u32 num; num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) | ((raw_ver & 0xF000) >> 12); - status = bnx2x_3_seq_format_ver(num, str, len); - return status; + return bnx2x_3_seq_format_ver(num, str, len); } static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) { - int status = 0; u32 spirom_ver; + spirom_ver = ((raw_ver & 0xF80) 
>> 7) << 16 | (raw_ver & 0x7F); - status = bnx2x_format_ver(spirom_ver, str, len); - return status; + return bnx2x_format_ver(spirom_ver, str, len); } static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, @@ -12547,13 +12481,12 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, u32 shmem2_base, u8 port, struct bnx2x_phy *phy) { - int status = 0; phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; if (phy_index == INT_PHY) return bnx2x_populate_int_phy(bp, shmem_base, port, phy); - status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, + + return bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, port, phy); - return status; } static void bnx2x_phy_def_cfg(struct link_params *params, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1f1e54ba0ecb31ffd053161e458b4d6817cb510b..b3ba66032980274c49313c580ec2dd6590ae77a6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4482,9 +4482,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; } #endif - if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) & - FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)) - bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + if (BNXT_PF(bp)) { + u16 flags = le16_to_cpu(resp->flags); + + if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | + FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) + bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST) + bp->flags |= BNXT_FLAG_MULTI_HOST; + } switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: @@ -4549,6 +4555,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + if (resp->flags & + cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)) + bp->flags |= BNXT_FLAG_WOL_CAP; } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; @@ -5198,9 +5207,10 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { #if defined(CONFIG_BNXT_SRIOV) if (BNXT_VF(bp)) - return bp->vf.max_irqs; + return min_t(unsigned int, bp->vf.max_irqs, + bp->vf.max_cp_rings); #endif - return bp->pf.max_irqs; + return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings); } void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) @@ -5467,7 +5477,8 @@ static void bnxt_report_link(struct bnxt *bp) if (bp->link_info.link_up) { const char *duplex; const char *flow_ctrl; - u16 speed, fec; + u32 speed; + u16 fec; netif_carrier_on(bp->dev); if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) @@ -5483,7 +5494,7 @@ static void bnxt_report_link(struct bnxt *bp) else flow_ctrl = "none"; speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); - netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", + netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n", speed, duplex, flow_ctrl); if (bp->flags & BNXT_FLAG_EEE_CAP) netdev_info(bp->dev, "EEE is %s\n", @@ -5857,6 +5868,76 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) return 0; } +int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) +{ + struct hwrm_wol_filter_alloc_input req = {0}; + struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, 
HWRM_WOL_FILTER_ALLOC, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; + req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); + memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + bp->wol_filter_id = resp->wol_filter_id; + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) +{ + struct hwrm_wol_filter_free_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); + req.wol_filter_id = bp->wol_filter_id; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return rc; +} + +static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) +{ + struct hwrm_wol_filter_qcfg_input req = {0}; + struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u16 next_handle = 0; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.handle = cpu_to_le16(handle); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + next_handle = le16_to_cpu(resp->next_handle); + if (next_handle != 0) { + if (resp->wol_type == + WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { + bp->wol = 1; + bp->wol_filter_id = resp->wol_filter_id; + } + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return next_handle; +} + +static void bnxt_get_wol_settings(struct bnxt *bp) +{ + u16 handle = 0; + + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) + return; + + do { + handle = bnxt_hwrm_get_wol_fltrs(bp, handle); + } while (handle && handle != 0xffff); +} + static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -6042,6 +6123,43 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) return rc; } +/* rtnl_lock held, open the NIC half way by allocating all resources, but + * NAPI, IRQ, and TX are not enabled. This is mainly used for offline + * self tests. + */ +int bnxt_half_open_nic(struct bnxt *bp) +{ + int rc = 0; + + rc = bnxt_alloc_mem(bp, false); + if (rc) { + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); + goto half_open_err; + } + rc = bnxt_init_nic(bp, false); + if (rc) { + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); + goto half_open_err; + } + return 0; + +half_open_err: + bnxt_free_skbs(bp); + bnxt_free_mem(bp, false); + dev_close(bp->dev); + return rc; +} + +/* rtnl_lock held, this call can only be made after a previous successful + * call to bnxt_half_open_nic(). 
+ */ +void bnxt_half_close_nic(struct bnxt *bp) +{ + bnxt_hwrm_resource_free(bp, false, false); + bnxt_free_skbs(bp); + bnxt_free_mem(bp, false); +} + static int bnxt_open(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -6923,7 +7041,9 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (ntc->type != TC_SETUP_MQPRIO) return -EINVAL; - return bnxt_setup_mq_tc(dev, ntc->tc); + ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc); } #ifdef CONFIG_RFS_ACCEL @@ -7224,6 +7344,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); + bnxt_ethtool_free(bp); bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; @@ -7546,6 +7667,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_port_led_qcaps(bp); + bnxt_ethtool_init(bp); bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); @@ -7591,6 +7713,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + bnxt_get_wol_settings(bp); + if (bp->flags & BNXT_FLAG_WOL_CAP) + device_set_wakeup_enable(&pdev->dev, bp->wol); + else + device_set_wakeup_capable(&pdev->dev, false); + rc = register_netdev(dev); if (rc) goto init_err_clr_int; @@ -7614,6 +7742,88 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } +static void bnxt_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp; + + if (!dev) + return; + + rtnl_lock(); + bp = netdev_priv(dev); + if (!bp) + goto shutdown_exit; + + if (netif_running(dev)) + dev_close(dev); + + if (system_state == SYSTEM_POWER_OFF) { + bnxt_clear_int_mode(bp); + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } + +shutdown_exit: + rtnl_unlock(); +} + +#ifdef CONFIG_PM_SLEEP +static int bnxt_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + rtnl_lock(); + if (netif_running(dev)) { + netif_device_detach(dev); + rc = bnxt_close(dev); + } + bnxt_hwrm_func_drv_unrgtr(bp); + rtnl_unlock(); + return rc; +} + +static int bnxt_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + rtnl_lock(); + if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) { + rc = -ENODEV; + goto resume_exit; + } + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + rc = -EBUSY; + goto resume_exit; + } + bnxt_get_wol_settings(bp); + if (netif_running(dev)) { + rc = bnxt_open(dev); + if (!rc) + netif_device_attach(dev); + } + +resume_exit: + rtnl_unlock(); + return rc; +} + +static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); +#define BNXT_PM_OPS (&bnxt_pm_ops) + +#else + +#define BNXT_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + /** * bnxt_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -7730,6 +7940,8 @@ static struct pci_driver bnxt_pci_driver = { .id_table = bnxt_pci_tbl, .probe = bnxt_init_one, .remove = bnxt_remove_one, + .shutdown = bnxt_shutdown, + .driver.pm = BNXT_PM_OPS, .err_handler = &bnxt_err_handler, #if defined(CONFIG_BNXT_SRIOV) .sriov_configure = bnxt_sriov_configure, diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c7a5b84a5cb20ecb1112f831d868238f9cead76b..3ef42dbc63273f756bb7e52fe7da65ba1968eea4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -18,6 +18,8 @@ #define DRV_VER_MIN 7 #define DRV_VER_UPD 0 +#include + struct tx_bd { __le32 tx_bd_len_flags_type; #define TX_BD_TYPE (0x3f << 0) @@ -424,8 +426,6 @@ struct rx_tpa_end_cmp_ext { #define BNXT_MIN_PKT_SIZE 52 -#define BNXT_NUM_TESTS(bp) 0 - #define BNXT_DEFAULT_RX_RING_SIZE 511 #define BNXT_DEFAULT_TX_RING_SIZE 511 @@ -851,6 +851,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB #define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB #define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB +#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB u16 support_speeds; u16 auto_link_speeds; /* fw adv setting */ #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB @@ -862,6 +863,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB +#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB u16 support_auto_speeds; u16 lp_auto_link_speeds; u16 force_link_speed; @@ -909,6 +911,14 @@ struct bnxt_led_info { __le16 led_color_caps; }; +#define BNXT_MAX_TEST 8 + +struct bnxt_test_info { + u8 offline_mask; + u16 timeout; + char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; +}; + #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 @@ -987,6 +997,7 @@ struct bnxt { #define BNXT_FLAG_UDP_RSS_CAP 0x800 #define BNXT_FLAG_EEE_CAP 0x1000 #define BNXT_FLAG_NEW_RSS_CAP 0x2000 + #define BNXT_FLAG_WOL_CAP 0x4000 #define BNXT_FLAG_ROCEV1_CAP 0x8000 #define BNXT_FLAG_ROCEV2_CAP 0x10000 #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ @@ -994,6 +1005,7 @@ struct bnxt { #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_FW_LLDP_AGENT 0x80000 + #define BNXT_FLAG_MULTI_HOST 0x100000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ @@ -1003,7 +1015,8 @@ struct bnxt { #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) #define BNXT_NPAR(bp) ((bp)->port_partition_type) -#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp)) +#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST) +#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp)) #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) @@ -1178,6 +1191,12 @@ struct bnxt { u32 lpi_tmr_lo; u32 lpi_tmr_hi; + u8 num_tests; + struct bnxt_test_info *test_info; + + u8 wol_filter_id; + u8 wol; + u8 num_leds; struct bnxt_led_info leds[BNXT_MAX_LED]; @@ -1236,8 +1255,12 @@ void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); int bnxt_hwrm_set_pause(struct bnxt *); int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); +int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); +int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); +int bnxt_half_open_nic(struct bnxt *bp); +void 
bnxt_half_close_nic(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 03532061d211b168d1bd7774d2aeea9b4ed3a776..46de2f8ff024aad59341ecf5963ccbd9082dae9e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -1,6 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,6 +15,7 @@ #include #include #include +#include #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_dcb.h" @@ -241,6 +243,92 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) return 0; } +static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, + bool add) +{ + struct hwrm_fw_set_structured_data_input set = {0}; + struct hwrm_fw_get_structured_data_input get = {0}; + struct hwrm_struct_data_dcbx_app *fw_app; + struct hwrm_struct_hdr *data; + dma_addr_t mapping; + size_t data_len; + int rc, n, i; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + n = IEEE_8021QAZ_MAX_TCS; + data_len = sizeof(*data) + sizeof(*fw_app) * n; + data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, + GFP_KERNEL); + if (!data) + return -ENOMEM; + + memset(data, 0, data_len); + bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); + get.dest_data_addr = cpu_to_le64(mapping); + get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); + get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + get.count = 0; + rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT); + if (rc) + goto set_app_exit; + + fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1); + + if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) { + rc = -ENODEV; + goto set_app_exit; + } + + n = data->count; + for (i = 0; i < n; i++, fw_app++) { + if (fw_app->protocol_id == cpu_to_be16(app->protocol) && + fw_app->protocol_selector == app->selector && + fw_app->priority == app->priority) { + if (add) + goto set_app_exit; + else + break; + } + } + if (add) { + /* append */ + n++; + fw_app->protocol_id = cpu_to_be16(app->protocol); + fw_app->protocol_selector = app->selector; + fw_app->priority = app->priority; + fw_app->valid = 1; + } else { + size_t len = 0; + + /* not found, nothing to delete */ + if (n == i) + goto set_app_exit; + + len = (n - 1 - i) * sizeof(*fw_app); + if (len) + memmove(fw_app, fw_app + 1, len); + n--; + memset(fw_app + n, 0, sizeof(*fw_app)); + } + data->count = n; + data->len = cpu_to_le16(sizeof(*fw_app) * n); + data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + + bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1); + set.src_data_addr = cpu_to_le64(mapping); + set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); + set.hdr_cnt = 1; + rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + +set_app_exit: + dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); + return rc; +} + static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) { int total_ets_bw = 0; @@ -417,6 +505,15 @@ static int 
 		return -EINVAL;

 	rc = dcb_ieee_setapp(dev, app);
+	if (rc)
+		return rc;
+
+	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	     app->protocol == ETH_P_IBOE) ||
+	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+	     app->protocol == ROCE_V2_UDP_DPORT))
+		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+
 	return rc;
 }

@@ -425,10 +522,19 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
 	struct bnxt *bp = netdev_priv(dev);
 	int rc;

-	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
 		return -EINVAL;

 	rc = dcb_ieee_delapp(dev, app);
+	if (rc)
+		return rc;
+	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	     app->protocol == ETH_P_IBOE) ||
+	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+	     app->protocol == ROCE_V2_UDP_DPORT))
+		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+
 	return rc;
 }

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 35a0d28cf2fdc140c9f1a07b1734c4b89b8b271f..ecd0a5e46a493bf325e93deba1d973a9d1667dbf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6903a873f072ae14f4a7638514446d6ad7b1c6a0..11ddf0adc6e1e2bad4d681014136eb2706d3bca9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,6 +18,7 @@
 #include <linux/firmware.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
+#include "bnxt_xdp.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
 #include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
@@ -209,6 +211,10 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)

 		return num_stats;
 	}
+	case ETH_SS_TEST:
+		if (!bp->num_tests)
+			return -EOPNOTSUPP;
+		return bp->num_tests;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -306,6 +312,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 			}
 		}
 		break;
+	case ETH_SS_TEST:
+		if (bp->num_tests)
+			memcpy(buf, bp->test_info->string,
+			       bp->num_tests * ETH_GSTRING_LEN);
+		break;
 	default:
 		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
 			   stringset);
@@ -824,7 +835,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
 		sizeof(info->fw_version));
 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
 	info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
-	info->testinfo_len = BNXT_NUM_TESTS(bp);
+	info->testinfo_len = bp->num_tests;
 	/* TODO CHIMP_FW: eeprom dump details */
 	info->eedump_len = 0;
 	/* TODO CHIMP FW: reg dump details */
@@ -832,6 +843,45 @@ static void bnxt_get_drvinfo(struct net_device *dev,
 	kfree(pkglog);
 }

+static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	wol->supported = 0;
+	wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+	if (bp->flags & BNXT_FLAG_WOL_CAP) {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+	}
+}
+
+static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
+			return -EINVAL;
+		if (!bp->wol) {
+			if (bnxt_hwrm_alloc_wol_fltr(bp))
+				return -EBUSY;
+			bp->wol = 1;
+		}
+	} else {
+		if (bp->wol) {
+			if (bnxt_hwrm_free_wol_fltr(bp))
+				return -EBUSY;
+			bp->wol = 0;
+		}
+	}
+	return 0;
+}
+
 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
 {
 	u32 speed_mask = 0;
@@ -879,6 +929,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
 	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
 		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
 						     50000baseCR2_Full);\
+	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
+		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+						     100000baseCR4_Full);\
 	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
 		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
 						     Pause);		\
@@ -915,6 +968,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
 	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
 						  50000baseCR2_Full))	\
 		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
+	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
+						  100000baseCR4_Full))	\
+		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
 }

 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
@@ -977,6 +1033,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
 		return SPEED_40000;
 	case BNXT_LINK_SPEED_50GB:
 		return SPEED_50000;
+	case BNXT_LINK_SPEED_100GB:
+		return SPEED_100000;
 	default:
 		return SPEED_UNKNOWN;
 	}
@@ -1042,7 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
 	return 0;
 }

-static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_link_info *link_info = &bp->link_info;
@@ -1082,6 +1140,10 @@ static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
 		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
 			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
 		break;
+	case SPEED_100000:
+		if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+		break;
 	default:
 		netdev_err(dev, "unsupported speed!\n");
 		break;
@@ -2128,12 +2190,372 @@ static int bnxt_set_phys_id(struct net_device *dev,
 	return rc;
 }

+static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
+{
+	struct hwrm_selftest_irq_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_test_irq(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
+		int rc;
+
+		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
+{
+	struct hwrm_port_mac_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+
+	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+	if (enable)
+		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+	else
+		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
+				    struct hwrm_port_phy_cfg_input *req)
+{
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u16 fw_advertising = link_info->advertising;
+	u16 fw_speed;
+	int rc;
+
+	if (!link_info->autoneg)
+		return 0;
+
+	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
+	if (netif_carrier_ok(bp->dev))
+		fw_speed = bp->link_info.link_speed;
+	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
+		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
+		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
+		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
+		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+
+	req->force_link_speed = cpu_to_le16(fw_speed);
+	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
+				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+	req->flags = 0;
+	req->force_link_speed = cpu_to_le16(0);
+	return rc;
+}
+
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+
+	if (enable) {
+		bnxt_disable_an_for_lpbk(bp, &req);
+		req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+	} else {
+		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+	}
+	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+			    u32 raw_cons, int pkt_size)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct rx_cmp *rxcmp;
+	u16 cp_cons, cons;
+	u8 *data;
+	u32 len;
+	int i;
+
+	cp_cons = RING_CMP(raw_cons);
+	rxcmp = (struct rx_cmp *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+	cons = rxcmp->rx_cmp_opaque;
+	rx_buf = &rxr->rx_buf_ring[cons];
+	data = rx_buf->data_ptr;
+	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+	if (len != pkt_size)
+		return -EIO;
+	i = ETH_ALEN;
+	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
+		return -EIO;
+	i += ETH_ALEN;
+	for ( ; i < pkt_size; i++) {
+		if (data[i] != (u8)(i & 0xff))
+			return -EIO;
+	}
+	return 0;
+}
+
+static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+{
+	struct bnxt_napi *bnapi = bp->bnapi[0];
+	struct bnxt_cp_ring_info *cpr;
+	struct tx_cmp *txcmp;
+	int rc = -EIO;
+	u32 raw_cons;
+	u32 cons;
+	int i;
+
+	cpr = &bnapi->cp_ring;
+	raw_cons = cpr->cp_raw_cons;
+	for (i = 0; i < 200; i++) {
+		cons = RING_CMP(raw_cons);
+		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+		if (!TX_CMP_VALID(txcmp, raw_cons)) {
+			udelay(5);
+			continue;
+		}
+
+		/* The valid test of the entry must be done first before
+		 * reading any further.
+		 */
+		dma_rmb();
+		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+			rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+			raw_cons = NEXT_RAW_CMP(raw_cons);
+			raw_cons = NEXT_RAW_CMP(raw_cons);
+			break;
+		}
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+	}
+	cpr->cp_raw_cons = raw_cons;
+	return rc;
+}
+
+static int bnxt_run_loopback(struct bnxt *bp)
+{
+	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+	int pkt_size, i = 0;
+	struct sk_buff *skb;
+	dma_addr_t map;
+	u8 *data;
+	int rc;
+
+	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+	skb = netdev_alloc_skb(bp->dev, pkt_size);
+	if (!skb)
+		return -ENOMEM;
+	data = skb_put(skb, pkt_size);
+	eth_broadcast_addr(data);
+	i += ETH_ALEN;
+	ether_addr_copy(&data[i], bp->dev->dev_addr);
+	i += ETH_ALEN;
+	for ( ; i < pkt_size; i++)
+		data[i] = (u8)(i & 0xff);
+
+	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+			     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, map)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+	bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+
+	/* Sync BD data before updating doorbell */
+	wmb();
+
+	writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+	writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+	rc = bnxt_poll_loopback(bp, pkt_size);
+
+	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+	dev_kfree_skb(skb);
+	return rc;
+}
+
+static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
+{
+	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_selftest_exec_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	resp->test_success = 0;
+	req.flags = test_mask;
+	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+	*test_results = resp->test_success;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+#define BNXT_DRV_TESTS			3
+#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
+#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
+#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
+
+static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+			   u64 *buf)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	bool offline = false;
+	u8 test_results = 0;
+	u8 test_mask = 0;
+	int rc, i;
+
+	if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+		return;
+	memset(buf, 0, sizeof(u64) * bp->num_tests);
+	if (!netif_running(dev)) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		if (bp->pf.active_vfs) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+			return;
+		}
+		offline = true;
+	}
+
+	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+		u8 bit_val = 1 << i;
+
+		if (!(bp->test_info->offline_mask & bit_val))
+			test_mask |= bit_val;
+		else if (offline)
+			test_mask |= bit_val;
+	}
+	if (!offline) {
+		bnxt_run_fw_tests(bp, test_mask, &test_results);
+	} else {
+		rc = bnxt_close_nic(bp, false, false);
+		if (rc)
+			return;
+		bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+		buf[BNXT_MACLPBK_TEST_IDX] = 1;
+		bnxt_hwrm_mac_loopback(bp, true);
+		msleep(250);
+		rc = bnxt_half_open_nic(bp);
+		if (rc) {
+			bnxt_hwrm_mac_loopback(bp, false);
+			etest->flags |= ETH_TEST_FL_FAILED;
+			return;
+		}
+		if (bnxt_run_loopback(bp))
+			etest->flags |= ETH_TEST_FL_FAILED;
+		else
+			buf[BNXT_MACLPBK_TEST_IDX] = 0;
+
+		bnxt_hwrm_mac_loopback(bp, false);
+		bnxt_hwrm_phy_loopback(bp, true);
+		msleep(1000);
+		if (bnxt_run_loopback(bp)) {
+			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		bnxt_hwrm_phy_loopback(bp, false);
+		bnxt_half_close_nic(bp);
+		bnxt_open_nic(bp, false, true);
+	}
+	if (bnxt_test_irq(bp)) {
+		buf[BNXT_IRQ_TEST_IDX] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+		u8 bit_val = 1 << i;
+
+		if ((test_mask & bit_val) && !(test_results & bit_val)) {
+			buf[i] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+	}
+}
+
+void bnxt_ethtool_init(struct bnxt *bp)
+{
+	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_selftest_qlist_input req = {0};
+	struct bnxt_test_info *test_info;
+	int i, rc;
+
+	if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+		return;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto ethtool_init_exit;
+
+	test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+	if (!test_info)
+		goto ethtool_init_exit;
+
+	bp->test_info = test_info;
+	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
+	if (bp->num_tests > BNXT_MAX_TEST)
+		bp->num_tests = BNXT_MAX_TEST;
+
+	test_info->offline_mask = resp->offline_tests;
+	test_info->timeout = le16_to_cpu(resp->test_timeout);
+	if (!test_info->timeout)
+		test_info->timeout = HWRM_CMD_TIMEOUT;
+	for (i = 0; i < bp->num_tests; i++) {
+		char *str = test_info->string[i];
+		char *fw_str = resp->test0_name + i * 32;
+
+		if (i == BNXT_MACLPBK_TEST_IDX) {
+			strcpy(str, "Mac loopback test (offline)");
+		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
+			strcpy(str, "Phy loopback test (offline)");
+		} else if (i == BNXT_IRQ_TEST_IDX) {
+			strcpy(str, "Interrupt_test (offline)");
+		} else {
+			strlcpy(str, fw_str, ETH_GSTRING_LEN);
+			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+			if (test_info->offline_mask & (1 << i))
+				strncat(str, " (offline)",
+					ETH_GSTRING_LEN - strlen(str));
+			else
+				strncat(str, " (online)",
+					ETH_GSTRING_LEN - strlen(str));
+		}
+	}
+
+ethtool_init_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+void bnxt_ethtool_free(struct bnxt *bp)
+{
+	kfree(bp->test_info);
+	bp->test_info = NULL;
+}
+
 const struct ethtool_ops bnxt_ethtool_ops = {
 	.get_link_ksettings = bnxt_get_link_ksettings,
 	.set_link_ksettings = bnxt_set_link_ksettings,
 	.get_pauseparam = bnxt_get_pauseparam,
 	.set_pauseparam = bnxt_set_pauseparam,
 	.get_drvinfo = bnxt_get_drvinfo,
+	.get_wol = bnxt_get_wol,
+	.set_wol = bnxt_set_wol,
 	.get_coalesce = bnxt_get_coalesce,
 	.set_coalesce = bnxt_set_coalesce,
 	.get_msglevel = bnxt_get_msglevel,
@@ -2161,4 +2583,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
 	.get_module_eeprom = bnxt_get_module_eeprom,
 	.nway_reset = bnxt_nway_reset,
 	.set_phys_id = bnxt_set_phys_id,
+	.self_test = bnxt_self_test,
 };
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ed1e555292e9ce404b44017eca84fac5edb23d85..f1bc90b6fb5bdbec7fbc7896fa32c46b71d104e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -38,5 +39,7 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
 u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
 u32 bnxt_fw_to_ethtool_speed(u16);
 u16 bnxt_get_fw_auto_link_speeds(u32);
+void bnxt_ethtool_init(struct bnxt *bp);
+void bnxt_ethtool_free(struct bnxt *bp);

 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 6e275c23d68bfe22561903e9b1d538a160ab8948..7dc71bb95837b8c48eb1668fc094ece6fba07b4b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -11,19 +11,21 @@
 #ifndef BNXT_HSI_H
 #define BNXT_HSI_H

-/* HSI and HWRM Specification 1.7.0 */
+/* HSI and HWRM Specification 1.7.6 */
 #define HWRM_VERSION_MAJOR	1
 #define HWRM_VERSION_MINOR	7
-#define HWRM_VERSION_UPDATE	0
+#define HWRM_VERSION_UPDATE	6

-#define HWRM_VERSION_STR	"1.7.0"
+#define HWRM_VERSION_RSVD	2 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR	"1.7.6.2"
 /*
  * Following is the signature for HWRM message field that indicates not
  * applicable (All F's). Need to cast it the size of the field if needed.
  */
 #define HWRM_NA_SIGNATURE	((__le32)(-1))
 #define HWRM_MAX_REQ_LEN	(128)  /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN	(176)  /* hwrm_func_qstats */
+#define HWRM_MAX_RESP_LEN	(248)  /* hwrm_selftest_qlist */
 #define HW_HASH_INDEX_SIZE	0x80   /* 7 bit indirection table index. */
 #define HW_HASH_KEY_SIZE	40
 #define HWRM_RESP_VALID_KEY	1 /* valid key for HWRM response */
@@ -571,9 +573,10 @@ struct hwrm_ver_get_output {
 	__le16 max_req_win_len;
 	__le16 max_resp_len;
 	__le16 def_req_timeout;
+	u8 init_pending;
+	#define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
 	u8 unused_0;
 	u8 unused_1;
-	u8 unused_2;
 	u8 valid;
 };

@@ -809,6 +812,8 @@ struct hwrm_func_qcfg_output {
 	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
 	#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
 	#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+	#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+	#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
 	u8 mac_address[6];
 	__le16 pci_id;
 	__le16 alloc_rsscos_ctx;
@@ -827,10 +832,12 @@ struct hwrm_func_qcfg_output {
 	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
 	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
 	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
-	u8 unused_0;
+	u8 port_pf_cnt;
+	#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
 	__le16 dflt_vnic_id;
-	u8 unused_1;
-	u8 unused_2;
+	u8 host_cnt;
+	#define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL
+	u8 unused_0;
 	__le32 min_bw;
 	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
 	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -867,12 +874,12 @@ struct hwrm_func_qcfg_output {
 	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
 	#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
 	#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
-	u8 unused_3;
+	u8 unused_1;
 	__le16 alloc_vfs;
 	__le32 alloc_mcast_filters;
 	__le32 alloc_hw_ring_grps;
 	__le16 alloc_sp_tx_rings;
-	u8 unused_4;
+	u8 unused_2;
 	u8 valid;
 };

@@ -888,16 +895,13 @@ struct hwrm_func_cfg_input {
 	u8 unused_0;
 	u8 unused_1;
 	__le32 flags;
-	#define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
-	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
-	#define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
-	#define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
-	#define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
-	#define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
-	#define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
-	#define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
-	#define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
-	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE 0x200UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+	#define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+	#define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+	#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
 	__le32 enables;
 	#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
 	#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1013,7 +1017,7 @@ struct hwrm_func_qstats_output {
 	__le64 tx_ucast_pkts;
 	__le64 tx_mcast_pkts;
 	__le64 tx_bcast_pkts;
-	__le64 tx_err_pkts;
+	__le64 tx_discard_pkts;
 	__le64 tx_drop_pkts;
 	__le64 tx_ucast_bytes;
 	__le64 tx_mcast_bytes;
@@ -1021,7 +1025,7 @@ struct hwrm_func_qstats_output {
 	__le64 rx_ucast_pkts;
 	__le64 rx_mcast_pkts;
 	__le64 rx_bcast_pkts;
-	__le64 rx_err_pkts;
+	__le64 rx_discard_pkts;
 	__le64 rx_drop_pkts;
 	__le64 rx_ucast_bytes;
 	__le64 rx_mcast_bytes;
@@ -4743,25 +4747,72 @@ struct hwrm_temp_monitor_query_output {
 	u8 valid;
 };

-/* hwrm_nvm_read */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
+/* hwrm_wol_filter_alloc */
+/* Input (64 bytes) */
+struct hwrm_wol_filter_alloc_input {
 	__le16 req_type;
 	__le16 cmpl_ring;
 	__le16 seq_id;
 	__le16 target_id;
 	__le64 resp_addr;
-	__le64 host_dest_addr;
-	__le16 dir_idx;
+	__le32 flags;
+	__le32 enables;
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+	__le16 port_id;
+	u8 wol_type;
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
 	u8 unused_0;
-	u8 unused_1;
-	__le32 offset;
-	__le32 len;
+	__le32 unused_1;
+	u8 mac_address[6];
+	__le16 pattern_offset;
+	__le16 pattern_buf_size;
+	__le16 pattern_mask_size;
 	__le32 unused_2;
+	__le64 pattern_buf_addr;
+	__le64 pattern_mask_addr;
 };

 /* Output (16 bytes) */
-struct hwrm_nvm_read_output {
+struct hwrm_wol_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 wol_filter_id;
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_wol_filter_free */
+/* Input (32 bytes) */
+struct hwrm_wol_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+	__le32 enables;
+	#define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+	__le16 port_id;
+	u8 wol_filter_id;
+	u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_filter_free_output {
 	__le16 error_code;
 	__le16 req_type;
 	__le16 seq_id;
@@ -4773,21 +4824,107 @@ struct hwrm_nvm_read_output {
 	u8 valid;
 };

-/* hwrm_nvm_raw_dump */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_dump_input {
+/* hwrm_wol_filter_qcfg */
+/* Input (56 bytes) */
+struct hwrm_wol_filter_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 handle;
+	__le32 unused_0;
+	__le64 pattern_buf_addr;
+	__le16 pattern_buf_size;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3[3];
+	u8 unused_4;
+	__le64 pattern_mask_addr;
+	__le16 pattern_mask_size;
+	__le16 unused_5[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_wol_filter_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 next_handle;
+	u8 wol_filter_id;
+	u8 wol_type;
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+	__le32 unused_0;
+	u8 mac_address[6];
+	__le16 pattern_offset;
+	__le16 pattern_size;
+	__le16 pattern_mask_size;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg */
+/* Input (40 bytes) */
+struct hwrm_wol_reason_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2[3];
+	u8 unused_3;
+	__le64 wol_pkt_buf_addr;
+	__le16 wol_pkt_buf_size;
+	__le16 unused_4[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_reason_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 wol_filter_id;
+	u8 wol_reason;
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+	u8 wol_pkt_len;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
 	__le16 req_type;
 	__le16 cmpl_ring;
 	__le16 seq_id;
 	__le16 target_id;
 	__le64 resp_addr;
 	__le64 host_dest_addr;
+	__le16 dir_idx;
+	u8 unused_0;
+	u8 unused_1;
 	__le32 offset;
 	__le32 len;
+	__le32 unused_2;
 };

 /* Output (16 bytes) */
-struct hwrm_nvm_raw_dump_output {
+struct hwrm_nvm_read_output {
 	__le16 error_code;
 	__le16 req_type;
 	__le16 seq_id;
@@ -4881,6 +5018,15 @@ struct hwrm_nvm_write_output {
 	u8 valid;
 };

+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_write_cmd_err {
+	u8 code;
+	#define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+	#define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+	u8 unused_0[7];
+};
+
 /* hwrm_nvm_modify */
 /* Input (40 bytes) */
 struct hwrm_nvm_modify_input {
@@ -5112,6 +5258,100 @@ struct hwrm_nvm_install_update_cmd_err {
 	u8 unused_0[7];
 };

+/* hwrm_selftest_qlist */
+/* Input (16 bytes) */
+struct hwrm_selftest_qlist_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (248 bytes) */
+struct hwrm_selftest_qlist_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 num_tests;
+	u8 available_tests;
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+	u8 offline_tests;
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+	u8 unused_0;
+	__le16 test_timeout;
+	u8 unused_1;
+	u8 unused_2;
+	char test0_name[32];
+	char test1_name[32];
+	char test2_name[32];
+	char test3_name[32];
+	char test4_name[32];
+	char test5_name[32];
+	char test6_name[32];
+	char test7_name[32];
+};
+
+/* hwrm_selftest_exec */
+/* Input (24 bytes) */
+struct hwrm_selftest_exec_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 flags;
+	#define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+	#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+	#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+	#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_exec_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 requested_tests;
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+	u8 test_success;
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+	__le16 unused_0[3];
+};
+
+/* hwrm_selftest_irq */
+/* Input (16 bytes) */
+struct hwrm_selftest_irq_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct hwrm_selftest_irq_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+};
+
 /* Hardware Resource Manager Specification */
 /* Input (16 bytes) */
 struct input {
@@ -5130,6 +5370,16 @@ struct output {
 	__le16 resp_len;
 };

+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+	__le16 req_type;
+	__le16 signature;
+	#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+	__le16 unused_0;
+	__le16 size;
+	__le64 req_addr;
+};
+
 /* Command numbering (8 bytes) */
 struct cmd_nums {
 	__le16 req_type;
@@ -5252,11 +5502,15 @@ struct cmd_nums {
 	#define HWRM_CFA_FLOW_FLUSH (0x105UL)
 	#define HWRM_CFA_FLOW_STATS (0x106UL)
 	#define HWRM_CFA_FLOW_INFO (0x107UL)
+	#define HWRM_SELFTEST_QLIST (0x200UL)
+	#define HWRM_SELFTEST_EXEC (0x201UL)
+	#define HWRM_SELFTEST_IRQ (0x202UL)
 	#define HWRM_DBG_READ_DIRECT (0xff10UL)
 	#define HWRM_DBG_READ_INDIRECT (0xff11UL)
 	#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
 	#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
 	#define HWRM_DBG_DUMP (0xff14UL)
+	#define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
 	#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
 	#define HWRM_NVM_FLUSH (0xfff0UL)
 	#define HWRM_NVM_GET_VARIABLE (0xfff1UL)
@@ -5464,6 +5718,7 @@ struct hwrm_struct_hdr {
 	#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
 	#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
 	#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+	#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
 	#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
 	__le16 len;
 	u8 version;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0b8cd7443843241efcff2e632869c6ccc7a40fbe..b8e7248294d9b093129945c61ac4ca34a2667213 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -84,6 +85,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	u32 func_flags;
 	int rc;

+	if (bp->hwrm_spec_code < 0x10701)
+		return -ENOTSUPP;
+
 	rc = bnxt_vf_ndo_prep(bp, vf_id);
 	if (rc)
 		return rc;
@@ -96,9 +100,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	func_flags = vf->func_flags;
 	if (setting)
-		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
 	else
-		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
 	/*TODO: if the driver supports VLAN filter on guest VLAN,
 	 * the spoof check should also include vlan anti-spoofing
 	 */
@@ -134,8 +138,11 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 	ivi->max_tx_rate = vf->max_tx_rate;
 	ivi->min_tx_rate = vf->min_tx_rate;
 	ivi->vlan = vf->vlan;
-	ivi->qos = vf->flags & BNXT_VF_QOS;
-	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+	if (vf->flags & BNXT_VF_QOS)
+		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
+	else
+		ivi->qos = 0;
+	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
 	if (!(vf->flags & BNXT_VF_LINK_FORCED))
 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 	else if (vf->flags & BNXT_VF_LINK_UP)
@@ -300,7 +307,6 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 	for (i = 0; i < num_vfs; i++) {
 		vf = &bp->pf.vf[i];
 		memset(vf, 0, sizeof(*vf));
-		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
 	}
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 1ab72e4820af18bde648c992f8ced320ca35fe1b..dbc8d977fc5a3a06a9a23fc86585326bb4284f58 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 899c30fb51886d78f4de3d0f89124116b7bb4ef1..9dae32756767cff8c4869b90c5c702bcc5ff00b9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -19,11 +19,10 @@
 #include "bnxt.h"
 #include "bnxt_xdp.h"

-static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
-			  dma_addr_t mapping, u32 len, u16 rx_prod)
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+		   dma_addr_t mapping, u32 len, u16 rx_prod)
 {
 	struct bnxt_sw_tx_bd *tx_buf;
-	struct tx_bd_ext *txbd1;
 	struct tx_bd *txbd;
 	u32 flags;
 	u16 prod;
@@ -33,22 +32,12 @@ static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 	tx_buf->rx_prod = rx_prod;

 	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
-		(2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
 		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 	txbd->tx_bd_opaque = prod;
 	txbd->tx_bd_haddr = cpu_to_le64(mapping);

-	prod = NEXT_TX(prod);
-	txbd1 = (struct tx_bd_ext *)
-		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-
-	txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
-	txbd1->tx_bd_mss = cpu_to_le32(0);
-	txbd1->tx_bd_cfa_action = cpu_to_le32(0);
-	txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
-
 	prod = NEXT_TX(prod);
 	txr->tx_prod = prod;
 }
@@ -66,7 +55,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	for (i = 0; i < nr_pkts; i++) {
 		last_tx_cons = tx_cons;
 		tx_cons = NEXT_TX(tx_cons);
-		tx_cons = NEXT_TX(tx_cons);
 	}
 	txr->tx_cons = tx_cons;
 	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
@@ -133,7 +121,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		return false;

 	case XDP_TX:
-		if (tx_avail < 2) {
+		if (tx_avail < 1) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
 			bnxt_reuse_rx_data(rxr, cons, page);
 			return true;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index b529f2c5355b4361b93086c44fe0a94eb2cac512..12a5ad66b564cbfdbbf20c7753fe0a679e88e065 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,6 +10,8 @@
 #ifndef BNXT_XDP_H
 #define BNXT_XDP_H

+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+		   dma_addr_t mapping, u32 len, u16 rx_prod);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		 struct page *page, u8 **data_ptr, unsigned int *len,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 365895ed3c3e240584da21fad2caebc6384482d5..a205a9ff9e179ba9b5f854b9e9707a29b36a45bb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -621,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,

 	/* GENET TDMA hardware does not support a configurable timeout, but will
 	 * always generate an interrupt either after MBDONE packets have been
-	 * transmitted, or when the ring is emtpy.
+	 * transmitted, or when the ring is empty.
 	 */
 	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
 	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -707,6 +707,19 @@ struct bcmgenet_stats {
 	.reg_offset = offset, \
 }

+#define STAT_GENET_Q(num) \
+	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
+			tx_rings[num].packets), \
+	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
+			tx_rings[num].bytes), \
+	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
+			rx_rings[num].bytes), \
+	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
+			rx_rings[num].packets), \
+	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
+			rx_rings[num].errors), \
+	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
+			rx_rings[num].dropped)

 /* There is a 0xC gap between the end of RX and beginning of TX stats and then
  * between the end of TX stats and the beginning of the RX RUNT
@@ -801,6 +814,12 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
 	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+	/* Per TX queues */
+	STAT_GENET_Q(0),
+	STAT_GENET_Q(1),
+	STAT_GENET_Q(2),
+	STAT_GENET_Q(3),
+	STAT_GENET_Q(16),
 };

 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1078,8 +1097,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
 	/* Power down LED */
 	if (priv->hw_params->flags & GENET_HAS_EXT) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-		reg |= (EXT_PWR_DOWN_PHY |
-			EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+		if (GENET_IS_V5(priv))
+			reg |= EXT_PWR_DOWN_PHY_EN |
+			       EXT_PWR_DOWN_PHY_RD |
+			       EXT_PWR_DOWN_PHY_SD |
+			       EXT_PWR_DOWN_PHY_RX |
+			       EXT_PWR_DOWN_PHY_TX |
+			       EXT_IDDQ_GLBL_PWR;
+		else
+			reg |= EXT_PWR_DOWN_PHY;
+
+		reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

 		bcmgenet_phy_power_set(priv->dev, false);
@@ -1104,12 +1132,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,

 	switch (mode) {
 	case GENET_POWER_PASSIVE:
-		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
-			 EXT_PWR_DOWN_BIAS);
-		/* fallthrough */
+		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+		if (GENET_IS_V5(priv)) {
+			reg &= ~(EXT_PWR_DOWN_PHY_EN |
+				 EXT_PWR_DOWN_PHY_RD |
+				 EXT_PWR_DOWN_PHY_SD |
+				 EXT_PWR_DOWN_PHY_RX |
+				 EXT_PWR_DOWN_PHY_TX |
+				 EXT_IDDQ_GLBL_PWR);
+			reg |= EXT_PHY_RESET;
+			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+			mdelay(1);
+
+			reg &= ~EXT_PHY_RESET;
+		} else {
+			reg &= ~EXT_PWR_DOWN_PHY;
+			reg |= EXT_PWR_DN_EN_LD;
+		}
+		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+		bcmgenet_phy_power_set(priv->dev, true);
+		bcmgenet_mii_reset(priv->dev);
+		break;
+
 	case GENET_POWER_CABLE_SENSE:
 		/* enable APD */
-		reg |= EXT_PWR_DN_EN_LD;
+		if (!GENET_IS_V5(priv)) {
+			reg |= EXT_PWR_DN_EN_LD;
+			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+		}
 		break;
 	case GENET_POWER_WOL_MAGIC:
 		bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1117,39 +1167,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 	default:
 		break;
 	}
-
-	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	if (mode == GENET_POWER_PASSIVE) {
-		bcmgenet_phy_power_set(priv->dev, true);
-		bcmgenet_mii_reset(priv->dev);
-	}
 }

 /* ioctl handle special commands that are not present in ethtool. */
 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	int val = 0;

 	if (!netif_running(dev))
 		return -EINVAL;

-	switch (cmd) {
-	case SIOCGMIIPHY:
-	case SIOCGMIIREG:
-	case SIOCSMIIREG:
-		if (!priv->phydev)
-			val = -ENODEV;
-		else
-			val = phy_mii_ioctl(priv->phydev, rq, cmd);
-		break;
-
-	default:
-		val = -EINVAL;
-		break;
-	}
+	if (!priv->phydev)
+		return -ENODEV;

-	return val;
+	return phy_mii_ioctl(priv->phydev, rq, cmd);
 }

 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1240,14 +1271,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 	unsigned int txbds_ready;
 	unsigned int txbds_processed = 0;

-	/* Compute how many buffers are transmitted since last xmit call */
-	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-	c_index &= DMA_C_INDEX_MASK;
-
-	if (likely(c_index >= ring->c_index))
-		txbds_ready = c_index - ring->c_index;
+	/* Clear status before servicing to reduce spurious interrupts */
+	if (ring->index == DESC_INDEX)
+		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+					 INTRL2_CPU_CLEAR);
 	else
-		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+					 INTRL2_CPU_CLEAR);
+
+	/* Compute how many buffers are transmitted since last xmit call */
+	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+		& DMA_C_INDEX_MASK;
+	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

 	netif_dbg(priv, tx_done, dev,
 		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1280,15 +1315,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 	}

 	ring->free_bds += txbds_processed;
-	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+	ring->c_index = c_index;

-	dev->stats.tx_packets += pkts_compl;
-	dev->stats.tx_bytes += bytes_compl;
+	ring->packets += pkts_compl;
+	ring->bytes += bytes_compl;

 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
 				  pkts_compl, bytes_compl);

-	return pkts_compl;
+	return txbds_processed;
 }

 static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1657,18 +1692,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 	unsigned long dma_flag;
 	int len;
 	unsigned int rxpktprocessed = 0, rxpkttoprocess;
-	unsigned int p_index;
+	unsigned int p_index, mask;
 	unsigned int discards;
 	unsigned int chksum_ok = 0;

+	/* Clear status before servicing to reduce spurious interrupts */
+	if (ring->index == DESC_INDEX) {
+		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+					 INTRL2_CPU_CLEAR);
+	} else {
+		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+		bcmgenet_intrl2_1_writel(priv,
+					 mask,
+					 INTRL2_CPU_CLEAR);
+	}
+
 	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

 	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
 		   DMA_P_INDEX_DISCARD_CNT_MASK;
 	if (discards > ring->old_discards) {
 		discards = discards - ring->old_discards;
-		dev->stats.rx_missed_errors += discards;
-		dev->stats.rx_errors += discards;
+		ring->errors += discards;
 		ring->old_discards += discards;

 		/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1680,12 +1725,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 	}

 	p_index &= DMA_P_INDEX_MASK;
-
-	if (likely(p_index >= ring->c_index))
-		rxpkttoprocess = p_index - ring->c_index;
-	else
-		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
-				 p_index;
+	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;

 	netif_dbg(priv, rx_status, dev,
 		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1696,7 +1736,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,

 		skb = bcmgenet_rx_refill(priv, cb);
 		if (unlikely(!skb)) {
-			dev->stats.rx_dropped++;
+			ring->dropped++;
 			goto next;
 		}

@@ -1724,7 +1764,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
 				  "dropping fragmented packet!\n");
-			dev->stats.rx_errors++;
+			ring->errors++;
 			dev_kfree_skb_any(skb);
 			goto next;
 		}
@@ -1773,8 +1813,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,

 		/*Finish setting up the received SKB and send it to the kernel*/
 		skb->protocol = eth_type_trans(skb, priv->dev);
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += len;
+		ring->packets++;
+		ring->bytes += len;
 		if (dma_flag & DMA_RX_MULT)
 			dev->stats.multicast++;

@@ -1912,10 +1952,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
 	/* Mask all interrupts.*/
 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }

 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1942,8 +1980,6 @@ static int init_umac(struct bcmgenet_priv *priv)
 	int ret;
 	u32 reg;
 	u32 int0_enable = 0;
-	u32 int1_enable = 0;
-	int i;

 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

@@ -1970,12 +2006,6 @@ static int init_umac(struct bcmgenet_priv *priv)

 	bcmgenet_intr_disable(priv);

-	/* Enable Rx default queue 16 interrupts */
-	int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
-	/* Enable Tx default queue 16 interrupts */
-	int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
 	/* Configure backpressure vectors for MoCA */
 	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		reg = bcmgenet_bp_mc_get(priv);
@@ -1993,18 +2023,8 @@ static int init_umac(struct bcmgenet_priv *priv)
 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
 		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

-	/* Enable Rx priority queue interrupts */
-	for (i = 0; i < priv->hw_params->rx_queues; ++i)
-		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
-	/* Enable Tx priority queue interrupts */
-	for (i = 0; i < priv->hw_params->tx_queues; ++i)
-		int1_enable |= (1 << i);
-
 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

-	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
-
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");

 	return 0;
@@ -2136,22 +2156,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+	u32 int1_enable = 0;
 	struct bcmgenet_tx_ring *ring;

 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
 		ring = &priv->tx_rings[i];
 		napi_enable(&ring->napi);
+		int1_enable |= (1 << i);
 	}

 	ring = &priv->tx_rings[DESC_INDEX];
 	napi_enable(&ring->napi);
+
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 }

 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+	u32 int1_disable = 0xffff;
 	struct bcmgenet_tx_ring *ring;

+	bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
 		ring = &priv->tx_rings[i];
 		napi_disable(&ring->napi);
@@ -2264,22 +2295,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+	u32 int1_enable = 0;
 	struct bcmgenet_rx_ring *ring;

 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
 		ring = &priv->rx_rings[i];
 		napi_enable(&ring->napi);
+		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
 	}

 	ring = &priv->rx_rings[DESC_INDEX];
 	napi_enable(&ring->napi);
+
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 }

 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
 {
 	unsigned int i;
+	u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+	u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
 	struct bcmgenet_rx_ring *ring;

+	bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
 		ring = &priv->rx_rings[i];
 		napi_disable(&ring->napi);
@@ -2634,6 +2676,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		}
 	}

+	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+			       UMAC_IRQ_PHY_DET_F |
+			       UMAC_IRQ_LINK_EVENT |
+			       UMAC_IRQ_HFB_SM |
+			       UMAC_IRQ_HFB_MM)) {
+		/* all other interested interrupts handled in bottom half */
+		schedule_work(&priv->bcmgenet_irq_work);
+	}
+
 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
 	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
 		wake_up(&priv->wq);
@@ -2921,7 +2972,7 @@ static int bcmgenet_close(struct net_device *dev)
 	if (ret)
 		return ret;

-	/* Disable MAC transmit. TX DMA disabled have to done before this */
+	/* Disable MAC transmit. TX DMA disabled must be done before this */
 	umac_enable_set(priv, CMD_TX_EN, false);

 	/* tx reclaim */
@@ -3101,6 +3152,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
 	return 0;
 }

+static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	unsigned long tx_bytes = 0, tx_packets = 0;
+	unsigned long rx_bytes = 0, rx_packets = 0;
+	unsigned long rx_errors = 0, rx_dropped = 0;
+	struct bcmgenet_tx_ring *tx_ring;
+	struct bcmgenet_rx_ring *rx_ring;
+	unsigned int q;
+
+	for (q = 0; q < priv->hw_params->tx_queues; q++) {
+		tx_ring = &priv->tx_rings[q];
+		tx_bytes += tx_ring->bytes;
+		tx_packets += tx_ring->packets;
+	}
+	tx_ring = &priv->tx_rings[DESC_INDEX];
+	tx_bytes += tx_ring->bytes;
+	tx_packets += tx_ring->packets;
+
+	for (q = 0; q < priv->hw_params->rx_queues; q++) {
+		rx_ring = &priv->rx_rings[q];
+
+		rx_bytes += rx_ring->bytes;
+		rx_packets += rx_ring->packets;
+		rx_errors += rx_ring->errors;
+		rx_dropped += rx_ring->dropped;
+	}
+	rx_ring = &priv->rx_rings[DESC_INDEX];
+	rx_bytes += rx_ring->bytes;
+	rx_packets += rx_ring->packets;
+	rx_errors += rx_ring->errors;
+	rx_dropped += rx_ring->dropped;
+
+	dev->stats.tx_bytes = tx_bytes;
+	dev->stats.tx_packets = tx_packets;
+	dev->stats.rx_bytes = rx_bytes;
+	dev->stats.rx_packets = rx_packets;
+	dev->stats.rx_errors = rx_errors;
+	dev->stats.rx_missed_errors = rx_errors;
+	return &dev->stats;
+}
+
 static const struct net_device_ops bcmgenet_netdev_ops = {
 	.ndo_open		= bcmgenet_open,
 	.ndo_stop		= bcmgenet_close,
@@ -3113,6 +3206,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= bcmgenet_poll_controller,
 #endif
+	.ndo_get_stats		= bcmgenet_get_stats,
 };

 /* Array of GENET hardware parameters/characteristics */
@@ -3186,6 +3280,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
 		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
 	},
+	[GENET_V5] = {
+		.tx_queues = 4,
+		.tx_bds_per_q = 32,
+		.rx_queues = 0,
+		.rx_bds_per_q = 0,
+		.bp_in_en_shift = 17,
+		.bp_in_mask = 0x1ffff,
+		.hfb_filter_cnt = 48,
+		.hfb_filter_size = 128,
+		.qtag_mask = 0x3F,
+		.tbuf_offset = 0x0600,
+		.hfb_offset = 0x8000,
+		.hfb_reg_offset = 0xfc00,
+		.rdma_offset = 0x2000,
+		.tdma_offset = 0x4000,
+		.words_per_bd = 3,
+		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+	},
 };

 /* Infer hardware parameters from the detected GENET version */
@@ -3196,26 +3309,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	u8 major;
 	u16 gphy_rev;

-	if (GENET_IS_V4(priv)) {
+	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 		genet_dma_ring_regs = genet_dma_ring_regs_v4;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
-		priv->version = GENET_V4;
 	} else if (GENET_IS_V3(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
-		priv->version = GENET_V3;
 	} else if (GENET_IS_V2(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
-		priv->version = GENET_V2;
 	} else if (GENET_IS_V1(priv)) {
 		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
 		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
-		priv->version = GENET_V1;
 	}

 	/* enum genet_version starts at 1 */
@@ -3225,7 +3334,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	/* Read GENET HW version */
 	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
 	major = (reg >> 24 & 0x0f);
-	if (major == 5)
+	if (major == 6)
+		major = 5;
+	else if (major == 5)
 		major = 4;
 	else if (major == 0)
 		major = 1;
@@ -3253,19 +3364,25 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	 */
 	gphy_rev = reg & 0xffff;

+	if (GENET_IS_V5(priv)) {
+		/* The EPHY revision should come from the MDIO registers of
+		 * the PHY not from GENET.
+		 */
+		if (gphy_rev != 0) {
+			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+				gphy_rev);
+		}
 	/* This is reserved so should require special treatment */
-	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
 		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
 		return;
-	}

-	/* This is the good old scheme, just GPHY major, no minor nor patch */
-	if ((gphy_rev & 0xf0) != 0)
+	} else if ((gphy_rev & 0xf0) != 0) {
 		priv->gphy_rev = gphy_rev << 8;

-	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
-	else if ((gphy_rev & 0xff00) != 0)
+	} else if ((gphy_rev & 0xff00) != 0) {
 		priv->gphy_rev = gphy_rev;
+	}

 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (!(params->flags & GENET_HAS_40BITS))
@@ -3295,6 +3412,7 @@ static const struct of_device_id bcmgenet_match[] = {
 	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
 	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
 	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+	{ .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3493,7 +3611,7 @@ static int bcmgenet_suspend(struct device *d)
 	if (ret)
 		return ret;

-	/* Disable MAC transmit. TX DMA disabled have to done before this */
+	/* Disable MAC transmit. TX DMA disabled must be done before this */
 	umac_enable_set(priv, CMD_TX_EN, false);

 	/* tx reclaim */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289d65ae2abd1589446ee0cadc00ffbf0254..efd07020b89fc3a7bd3c68fce1bbd7fe406acfcf 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -355,8 +355,14 @@ struct bcmgenet_mib_counters {
 #define EXT_PWR_DN_EN_LD		(1 << 3)
 #define EXT_ENERGY_DET			(1 << 4)
 #define EXT_IDDQ_FROM_PHY		(1 << 5)
+#define EXT_IDDQ_GLBL_PWR		(1 << 7)
 #define EXT_PHY_RESET			(1 << 8)
 #define EXT_ENERGY_DET_MASK		(1 << 12)
+#define EXT_PWR_DOWN_PHY_TX		(1 << 16)
+#define EXT_PWR_DOWN_PHY_RX		(1 << 17)
+#define EXT_PWR_DOWN_PHY_SD		(1 << 18)
+#define EXT_PWR_DOWN_PHY_RD		(1 << 19)
+#define EXT_PWR_DOWN_PHY_EN		(1 << 20)

 #define EXT_RGMII_OOB_CTRL		0x0C
 #define RGMII_LINK			(1 << 4)
@@ -499,13 +505,15 @@ enum bcmgenet_version {
 	GENET_V1 = 1,
 	GENET_V2,
 	GENET_V3,
-	GENET_V4
+	GENET_V4,
+	GENET_V5
 };

 #define GENET_IS_V1(p)	((p)->version == GENET_V1)
 #define GENET_IS_V2(p)	((p)->version == GENET_V2)
 #define GENET_IS_V3(p)	((p)->version == GENET_V3)
 #define GENET_IS_V4(p)	((p)->version == GENET_V4)
+#define GENET_IS_V5(p)	((p)->version == GENET_V5)

 /* Hardware flags */
 #define GENET_HAS_40BITS	(1 << 0)
@@ -544,6 +552,8 @@ struct bcmgenet_skb_cb {
 struct bcmgenet_tx_ring {
 	spinlock_t	lock;		/* ring lock */
 	struct napi_struct napi;	/* NAPI per tx queue */
+	unsigned long	packets;
+	unsigned long	bytes;
 	unsigned int	index;		/* ring index */
 	unsigned int	queue;		/* queue index */
 	struct enet_cb	*cbs;		/* tx ring buffer control block*/
@@ -562,6 +572,10 @@ struct bcmgenet_tx_ring {

 struct bcmgenet_rx_ring {
 	struct napi_struct napi;	/* Rx NAPI struct */
+	unsigned long	bytes;
+	unsigned long	packets;
+	unsigned long	errors;
+	unsigned long	dropped;
 	unsigned int	index;		/* Rx ring index */
 	struct enet_cb	*cbs;		/* Rx ring buffer control block */
 	unsigned int	size;		/* Rx ring size */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b97122926d3aa91210a8945d45f268d370c86ee4..2fbd027f0148f96003a08405177883e9df832769 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
 				enum bcmgenet_power_mode mode)
 {
 	struct net_device *dev = priv->dev;
-	u32 cpu_mask_clear;
 	int retries = 0;
 	u32 reg;

@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 	}

-	/* Enable the MPD interrupt */
-	cpu_mask_clear = UMAC_IRQ_MPD_R;
-
-	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
-
 	return 0;
 }

 void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
 {
-	u32 cpu_mask_set;
 	u32 reg;

 	if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
 	reg &= ~CMD_CRC_FWD;
 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
 	priv->crc_fwd_en = 0;
-
-	/* Stop monitoring magic packet IRQ */
-	cpu_mask_set = UMAC_IRQ_MPD_R;
-
-	/* Stop monitoring magic packet IRQ */
Stop monitoring magic packet IRQ */ - bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 2f9281936f0e434a328e1c716a29a76be2d9090c..285676f8da6bf3b9b0bb42e8a5407be56e478796 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -1,7 +1,7 @@ /* * Broadcom GENET MDIO routines * - * Copyright (c) 2014 Broadcom Corporation + * Copyright (c) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -195,39 +195,43 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) u32 reg = 0; /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ - if (!GENET_IS_V4(priv)) - return; - - reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); - if (enable) { - reg &= ~EXT_CK25_DIS; - bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); - mdelay(1); - - reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); - reg |= EXT_GPHY_RESET; + if (GENET_IS_V4(priv)) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); - mdelay(1); - - reg &= ~EXT_GPHY_RESET; + udelay(60); } else { - reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET; - bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); mdelay(1); - reg |= EXT_CK25_DIS; } - bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); - udelay(60); } static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) { u32 reg; - /* Speed settings are set in bcmgenet_mii_setup() */ - reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); - reg |= LED_ACT_SOURCE_MAC; - bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + if (!GENET_IS_V5(priv)) { + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + } if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) fixed_phy_set_link_update(priv->phydev, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 30d1eb9ebec9afab2271db1f8c0b4f448b64da08..f395b951f5e77bca9a926ea3f1210bf1fcb13ded 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us) return timeout_us ? 
0 : -EBUSY; } +#ifdef CONFIG_TIGON3_HWMON static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us) { u32 i, apedata; @@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, return 0; } +#endif static int tg3_ape_send_event(struct tg3 *tp, u32 event) { @@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy) return tg3_reset_hw(tp, reset_phy); } +#ifdef CONFIG_TIGON3_HWMON static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) { int i; @@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp) dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); } } +#else +static inline void tg3_hwmon_close(struct tg3 *tp) { } +static inline void tg3_hwmon_open(struct tg3 *tp) { } +#endif /* CONFIG_TIGON3_HWMON */ #define TG3_STAT_ADD32(PSTAT, REG) \ diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 30606b11b128e9d169f421cacbe6b80b962ed821..91f7492623d3f1d614afeb7f525ecfb491e5965e 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -432,15 +432,17 @@ static int macb_mii_probe(struct net_device *dev) } pdata = dev_get_platdata(&bp->pdev->dev); - if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { - ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, - "phy int"); - if (!ret) { - phy_irq = gpio_to_irq(pdata->phy_irq_pin); - phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; + if (pdata) { + if (gpio_is_valid(pdata->phy_irq_pin)) { + ret = devm_gpio_request(&bp->pdev->dev, + pdata->phy_irq_pin, "phy int"); + if (!ret) { + phy_irq = gpio_to_irq(pdata->phy_irq_pin); + phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; + } + } else { + phydev->irq = PHY_POLL; } - } else { - phydev->irq = PHY_POLL; } /* attach the mac to the phy */ @@ -684,8 +686,8 @@ static void macb_tx_error_task(struct work_struct *work) netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", macb_tx_ring_wrap(bp, tail), skb->data); - bp->stats.tx_packets++; - bp->stats.tx_bytes += skb->len; + bp->dev->stats.tx_packets++; + bp->dev->stats.tx_bytes += skb->len; } } else { /* "Buffers exhausted mid-frame" errors may only happen @@ -778,8 +780,8 @@ static void macb_tx_interrupt(struct macb_queue *queue) netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", macb_tx_ring_wrap(bp, tail), skb->data); - bp->stats.tx_packets++; - bp->stats.tx_bytes += skb->len; + bp->dev->stats.tx_packets++; + bp->dev->stats.tx_bytes += skb->len; } /* Now we can safely release resources */ @@ -911,14 +913,14 @@ static int gem_rx(struct macb *bp, int budget) if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { netdev_err(bp->dev, "not whole frame pointed by descriptor\n"); - bp->stats.rx_dropped++; + bp->dev->stats.rx_dropped++; break; } skb = bp->rx_skbuff[entry]; if (unlikely(!skb)) { netdev_err(bp->dev, "inconsistent Rx descriptor chain\n"); - bp->stats.rx_dropped++; + bp->dev->stats.rx_dropped++; break; } /* now everything is ready for receiving packet */ @@ -938,8 +940,8 @@ static int gem_rx(struct macb *bp, int budget) GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) skb->ip_summed = CHECKSUM_UNNECESSARY; - bp->stats.rx_packets++; - bp->stats.rx_bytes += skb->len; + bp->dev->stats.rx_packets++; + bp->dev->stats.rx_bytes += skb->len; #if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", @@ -984,7 +986,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, */ skb = 
netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); if (!skb) { - bp->stats.rx_dropped++; + bp->dev->stats.rx_dropped++; for (frag = first_frag; ; frag++) { desc = macb_rx_desc(bp, frag); desc->addr &= ~MACB_BIT(RX_USED); @@ -1030,8 +1032,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, __skb_pull(skb, NET_IP_ALIGN); skb->protocol = eth_type_trans(skb, bp->dev); - bp->stats.rx_packets++; - bp->stats.rx_bytes += skb->len; + bp->dev->stats.rx_packets++; + bp->dev->stats.rx_bytes += skb->len; netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", skb->len, skb->csum); netif_receive_skb(skb); @@ -2210,7 +2212,7 @@ static void gem_update_stats(struct macb *bp) static struct net_device_stats *gem_get_stats(struct macb *bp) { struct gem_stats *hwstat = &bp->hw_stats.gem; - struct net_device_stats *nstat = &bp->stats; + struct net_device_stats *nstat = &bp->dev->stats; gem_update_stats(bp); @@ -2281,7 +2283,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) static struct net_device_stats *macb_get_stats(struct net_device *dev) { struct macb *bp = netdev_priv(dev); - struct net_device_stats *nstat = &bp->stats; + struct net_device_stats *nstat = &bp->dev->stats; struct macb_stats *hwstat = &bp->hw_stats.macb; if (macb_is_gem(bp)) @@ -2993,15 +2995,15 @@ static void at91ether_rx(struct net_device *dev) memcpy(skb_put(skb, pktlen), p_recv, pktlen); skb->protocol = eth_type_trans(skb, dev); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pktlen; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pktlen; netif_rx(skb); } else { - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) - lp->stats.multicast++; + dev->stats.multicast++; /* reset ownership bit */ desc->addr &= ~MACB_BIT(RX_USED); @@ -3036,15 +3038,15 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) if (intstatus & MACB_BIT(TCOMP)) { /* The TCOM bit is set even if the transmission failed */ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (lp->skb) { dev_kfree_skb_irq(lp->skb); lp->skb = NULL; dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); - lp->stats.tx_packets++; - lp->stats.tx_bytes += lp->skb_length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += lp->skb_length; } netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 234a49eaccfd2dd2f80f5e402e262af957285cec..ec037b0fa2a4d53ae77ff5ff5ed5fb5eb9866319 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -919,7 +919,6 @@ struct macb { struct clk *rx_clk; struct net_device *dev; struct napi_struct napi; - struct net_device_stats stats; union { struct macb_stats macb; struct gem_stats gem; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 2fedd91f3df88fb5ea88f288e5121a8fc8fb3cf0..dee604651ba7d309686cca04f1b41222221adfe1 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -43,6 +43,8 @@ struct octeon_cn23xx_pf { struct octeon_config *conf; }; +#define CN23XX_SLI_DEF_BP 0x40 + int setup_cn23xx_octeon_pf_device(struct octeon_device *oct); int validate_cn23xx_pf_config_info(struct octeon_device *oct, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 
f629c2fe04a44b16794db66d2a3e4c6bf5c09876..796c2cbc11f6b5de044c5c465806184d5ad17825 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -26,6 +26,9 @@ #include "octeon_main.h" #include "octeon_network.h" +/* OOM task polling interval */ +#define LIO_OOM_POLL_INTERVAL_MS 250 + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); @@ -124,6 +127,17 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) struct octeon_device *oct = lio->oct_dev; u8 *mac; + if (nctrl->completion && nctrl->response_code) { + /* Signal whoever is interested that the response code from the + * firmware has arrived. + */ + WRITE_ONCE(*nctrl->response_code, nctrl->status); + complete(nctrl->completion); + } + + if (nctrl->status) + return; + switch (nctrl->ncmd.s.cmd) { case OCTNET_CMD_CHANGE_DEVFLAGS: case OCTNET_CMD_SET_MULTI_LIST: @@ -131,11 +145,20 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) case OCTNET_CMD_CHANGE_MACADDR: mac = ((u8 *)&nctrl->udd[0]) + 2; - netif_info(lio, probe, lio->netdev, - "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", - mac[0], mac[1], - mac[2], mac[3], - mac[4], mac[5]); + if (nctrl->ncmd.s.param1) { + /* vfidx is 0 based, but vf_num (param1) is 1 based */ + int vfidx = nctrl->ncmd.s.param1 - 1; + bool mac_is_admin_assigned = nctrl->ncmd.s.param2; + + if (mac_is_admin_assigned) + netif_info(lio, probe, lio->netdev, + "MAC Address %pM is configured for VF %d\n", + mac, vfidx); + } else { + netif_info(lio, probe, lio->netdev, + " MACAddr changed to %pM\n", + mac); + } break; case OCTNET_CMD_CHANGE_MTU: @@ -284,3 +307,56 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) * the PF did that already */ } + +static void octnet_poll_check_rxq_oom_status(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + struct octeon_device *oct = lio->oct_dev; + struct octeon_droq *droq; + int q, q_no = 0; + + if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) { + for (q = 0; q < lio->linfo.num_rxpciq; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + droq = oct->droq[q_no]; + if (!droq) + continue; + octeon_droq_check_oom(droq); + } + } + queue_delayed_work(lio->rxq_status_wq.wq, + &lio->rxq_status_wq.wk.work, + msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); +} + +int setup_rx_oom_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status", + WQ_MEM_RECLAIM, 0); + if (!lio->rxq_status_wq.wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); + return -ENOMEM; + } + INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work, + octnet_poll_check_rxq_oom_status); + lio->rxq_status_wq.wk.ctxptr = lio; + queue_delayed_work(lio->rxq_status_wq.wq, + &lio->rxq_status_wq.wk.work, + msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + return 0; +} + +void cleanup_rx_oom_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (lio->rxq_status_wq.wq) { + cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work); + flush_workqueue(lio->rxq_status_wq.wq); + destroy_workqueue(lio->rxq_status_wq.wq); + } +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 50384cede8be9b84431690074022bbff4bbc9199..579dc7336f58d712c4b04b8c8d1492f48d64ed08 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -33,6 +33,19 @@ static int octnet_get_link_stats(struct net_device *netdev); +struct oct_intrmod_context { + int octeon_id; + wait_queue_head_t wc; + int cond; + int status; +}; + +struct oct_intrmod_resp { + u64 rh; + struct oct_intrmod_cfg intrmod; + u64 status; +}; + struct oct_mdio_cmd_context { int octeon_id; wait_queue_head_t wc; @@ -213,17 +226,23 @@ static int lio_get_link_ksettings(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; - u32 supported, advertising; + u32 supported = 0, advertising = 0; linfo = &lio->linfo; if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || linfo->link.s.if_mode == INTERFACE_MODE_XFI) { ecmd->base.port = PORT_FIBRE; - supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | - SUPPORTED_Pause); - advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); + + if (linfo->link.s.speed == SPEED_10000) { + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; + } + + supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising |= ADVERTISED_Pause; ethtool_convert_legacy_u32_to_link_mode( ecmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode( @@ -1292,95 +1311,101 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset) } } -static int lio_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *intr_coal) +/* Callback function for intrmod */ +static void octnet_intrmod_callback(struct octeon_device *oct_dev, + u32 status, + void *ptr) { - struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; - struct octeon_instr_queue *iq; - struct oct_intrmod_cfg *intrmod_cfg; + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct oct_intrmod_context *ctx; - intrmod_cfg = &oct->intrmod; + ctx = (struct oct_intrmod_context *)sc->ctxptr; - switch (oct->chip_id) { - case OCTEON_CN23XX_PF_VID: - case OCTEON_CN23XX_VF_VID: - if (!intrmod_cfg->rx_enable) { - intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs; - intr_coal->rx_max_coalesced_frames = - intrmod_cfg->rx_frames; - } - if (!intrmod_cfg->tx_enable) - intr_coal->tx_max_coalesced_frames = - intrmod_cfg->tx_frames; - break; - case OCTEON_CN68XX: - case OCTEON_CN66XX: { - struct octeon_cn6xxx *cn6xxx = - (struct octeon_cn6xxx *)oct->chip; + ctx->status = status; - if (!intrmod_cfg->rx_enable) { - intr_coal->rx_coalesce_usecs = - CFG_GET_OQ_INTR_TIME(cn6xxx->conf); - intr_coal->rx_max_coalesced_frames = - CFG_GET_OQ_INTR_PKT(cn6xxx->conf); - } - iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; - intr_coal->tx_max_coalesced_frames = iq->fill_threshold; - break; - } - default: - netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); + WRITE_ONCE(ctx->cond, 1); + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + +/* get interrupt moderation parameters */ +static int octnet_get_intrmod_cfg(struct lio *lio, + struct oct_intrmod_cfg *intr_cfg) +{ + struct octeon_soft_command *sc; + struct oct_intrmod_context *ctx; + struct oct_intrmod_resp *resp; + int retval; + struct octeon_device *oct_dev = lio->oct_dev; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + 
octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_intrmod_resp), + sizeof(struct oct_intrmod_context)); + + if (!sc) + return -ENOMEM; + + resp = (struct oct_intrmod_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_intrmod_resp)); + + ctx = (struct oct_intrmod_context *)sc->ctxptr; + memset(ctx, 0, sizeof(struct oct_intrmod_context)); + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct_dev); + init_waitqueue_head(&ctx->wc); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); + + sc->callback = octnet_intrmod_callback; + sc->callback_arg = sc; + sc->wait_time = 1000; + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); return -EINVAL; } - if (intrmod_cfg->rx_enable) { - intr_coal->use_adaptive_rx_coalesce = - intrmod_cfg->rx_enable; - intr_coal->rate_sample_interval = - intrmod_cfg->check_intrvl; - intr_coal->pkt_rate_high = - intrmod_cfg->maxpkt_ratethr; - intr_coal->pkt_rate_low = - intrmod_cfg->minpkt_ratethr; - intr_coal->rx_max_coalesced_frames_high = - intrmod_cfg->rx_maxcnt_trigger; - intr_coal->rx_coalesce_usecs_high = - intrmod_cfg->rx_maxtmr_trigger; - intr_coal->rx_coalesce_usecs_low = - intrmod_cfg->rx_mintmr_trigger; - intr_coal->rx_max_coalesced_frames_low = - intrmod_cfg->rx_mincnt_trigger; + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { + dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); + goto intrmod_info_wait_intr; } - if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && - (intrmod_cfg->tx_enable)) { - intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable; - intr_coal->tx_max_coalesced_frames_high = - intrmod_cfg->tx_maxcnt_trigger; - intr_coal->tx_max_coalesced_frames_low = - intrmod_cfg->tx_mincnt_trigger; + + retval = ctx->status || resp->status; + if (retval) { + dev_err(&oct_dev->pci_dev->dev, + "Get interrupt moderation parameters failed\n"); + goto intrmod_info_wait_fail; } - return 0; -} -/* Callback function for intrmod */ -static void octnet_intrmod_callback(struct octeon_device *oct_dev, - u32 status, - void *ptr) -{ - struct oct_intrmod_cmd *cmd = ptr; - struct octeon_soft_command *sc = cmd->sc; + octeon_swap_8B_data((u64 *)&resp->intrmod, + (sizeof(struct oct_intrmod_cfg)) / 8); + memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); + octeon_free_soft_command(oct_dev, sc); - oct_dev = cmd->oct_dev; + return 0; - if (status) - dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. 
Status: %llx\n", - CVM_CAST64(status)); - else - dev_info(&oct_dev->pci_dev->dev, - "Rx-Adaptive Interrupt moderation enabled:%llx\n", - oct_dev->intrmod.rx_enable); +intrmod_info_wait_fail: octeon_free_soft_command(oct_dev, sc); + +intrmod_info_wait_intr: + + return -ENODEV; } /* Configure interrupt moderation parameters */ @@ -1388,7 +1413,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_cmd *cmd; + struct oct_intrmod_context *ctx; struct oct_intrmod_cfg *cfg; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -1398,19 +1423,21 @@ static int octnet_set_intrmod_cfg(struct lio *lio, octeon_alloc_soft_command(oct_dev, sizeof(struct oct_intrmod_cfg), 0, - sizeof(struct oct_intrmod_cmd)); + sizeof(struct oct_intrmod_context)); if (!sc) return -ENOMEM; - cmd = (struct oct_intrmod_cmd *)sc->ctxptr; + ctx = (struct oct_intrmod_context *)sc->ctxptr; + + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct_dev); + init_waitqueue_head(&ctx->wc); + cfg = (struct oct_intrmod_cfg *)sc->virtdptr; memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8); - cmd->sc = sc; - cmd->cfg = cfg; - cmd->oct_dev = oct_dev; sc->iq_no = lio->linfo.txpciq[0].s.q_no; @@ -1418,7 +1445,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio, OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); sc->callback = octnet_intrmod_callback; - sc->callback_arg = cmd; + sc->callback_arg = sc; sc->wait_time = 1000; retval = octeon_send_soft_command(oct_dev, sc); @@ -1427,7 +1454,29 @@ static int octnet_set_intrmod_cfg(struct lio *lio, return -EINVAL; } - return 0; + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { + retval = ctx->status; + if (retval) + dev_err(&oct_dev->pci_dev->dev, + "intrmod config failed. Status: %llx\n", + CVM_CAST64(retval)); + else + dev_info(&oct_dev->pci_dev->dev, + "Rx-Adaptive Interrupt moderation %s\n", + (intr_cfg->rx_enable) ? + "enabled" : "disabled"); + + octeon_free_soft_command(oct_dev, sc); + + return ((retval) ? 
-ENODEV : 0); + } + + dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); + + return -EINTR; } static void @@ -1584,80 +1633,106 @@ static int octnet_get_link_stats(struct net_device *netdev) return 0; } -/* Enable/Disable auto interrupt Moderation */ -static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce - *intr_coal) +static int lio_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *intr_coal) { - int ret = 0; + struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct oct_intrmod_cfg *intrmod_cfg; - - intrmod_cfg = &oct->intrmod; - - if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) { - if (intr_coal->rate_sample_interval) - intrmod_cfg->check_intrvl = - intr_coal->rate_sample_interval; - else - intrmod_cfg->check_intrvl = - LIO_INTRMOD_CHECK_INTERVAL; + struct octeon_instr_queue *iq; + struct oct_intrmod_cfg intrmod_cfg; - if (intr_coal->pkt_rate_high) - intrmod_cfg->maxpkt_ratethr = - intr_coal->pkt_rate_high; - else - intrmod_cfg->maxpkt_ratethr = - LIO_INTRMOD_MAXPKT_RATETHR; + if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) + return -ENODEV; - if (intr_coal->pkt_rate_low) - intrmod_cfg->minpkt_ratethr = - intr_coal->pkt_rate_low; - else - intrmod_cfg->minpkt_ratethr = - LIO_INTRMOD_MINPKT_RATETHR; + switch (oct->chip_id) { + case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: { + if (!intrmod_cfg.rx_enable) { + intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; + intr_coal->rx_max_coalesced_frames = + oct->rx_max_coalesced_frames; + } + if (!intrmod_cfg.tx_enable) + intr_coal->tx_max_coalesced_frames = + oct->tx_max_coalesced_frames; + break; } - if (oct->intrmod.rx_enable) { - if (intr_coal->rx_max_coalesced_frames_high) - intrmod_cfg->rx_maxcnt_trigger = - intr_coal->rx_max_coalesced_frames_high; - else - intrmod_cfg->rx_maxcnt_trigger = - LIO_INTRMOD_RXMAXCNT_TRIGGER; + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; - if (intr_coal->rx_coalesce_usecs_high) - intrmod_cfg->rx_maxtmr_trigger = - intr_coal->rx_coalesce_usecs_high; - else - intrmod_cfg->rx_maxtmr_trigger = - LIO_INTRMOD_RXMAXTMR_TRIGGER; + if (!intrmod_cfg.rx_enable) { + intr_coal->rx_coalesce_usecs = + CFG_GET_OQ_INTR_TIME(cn6xxx->conf); + intr_coal->rx_max_coalesced_frames = + CFG_GET_OQ_INTR_PKT(cn6xxx->conf); + } + iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; + intr_coal->tx_max_coalesced_frames = iq->fill_threshold; + break; + } + default: + netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); + return -EINVAL; + } + if (intrmod_cfg.rx_enable) { + intr_coal->use_adaptive_rx_coalesce = + intrmod_cfg.rx_enable; + intr_coal->rate_sample_interval = + intrmod_cfg.check_intrvl; + intr_coal->pkt_rate_high = + intrmod_cfg.maxpkt_ratethr; + intr_coal->pkt_rate_low = + intrmod_cfg.minpkt_ratethr; + intr_coal->rx_max_coalesced_frames_high = + intrmod_cfg.rx_maxcnt_trigger; + intr_coal->rx_coalesce_usecs_high = + intrmod_cfg.rx_maxtmr_trigger; + intr_coal->rx_coalesce_usecs_low = + intrmod_cfg.rx_mintmr_trigger; + intr_coal->rx_max_coalesced_frames_low = + intrmod_cfg.rx_mincnt_trigger; + } + if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && + (intrmod_cfg.tx_enable)) { + intr_coal->use_adaptive_tx_coalesce = + intrmod_cfg.tx_enable; + intr_coal->tx_max_coalesced_frames_high = + intrmod_cfg.tx_maxcnt_trigger; + intr_coal->tx_max_coalesced_frames_low = + intrmod_cfg.tx_mincnt_trigger; + } + return 0; +} - if (intr_coal->rx_coalesce_usecs_low) - 
intrmod_cfg->rx_mintmr_trigger = - intr_coal->rx_coalesce_usecs_low; - else - intrmod_cfg->rx_mintmr_trigger = - LIO_INTRMOD_RXMINTMR_TRIGGER; +/* Enable/Disable auto interrupt Moderation */ +static int oct_cfg_adaptive_intr(struct lio *lio, + struct oct_intrmod_cfg *intrmod_cfg, + struct ethtool_coalesce *intr_coal) +{ + int ret = 0; - if (intr_coal->rx_max_coalesced_frames_low) - intrmod_cfg->rx_mincnt_trigger = - intr_coal->rx_max_coalesced_frames_low; - else - intrmod_cfg->rx_mincnt_trigger = - LIO_INTRMOD_RXMINCNT_TRIGGER; + if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { + intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; + intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; + intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; } - if (oct->intrmod.tx_enable) { - if (intr_coal->tx_max_coalesced_frames_high) - intrmod_cfg->tx_maxcnt_trigger = - intr_coal->tx_max_coalesced_frames_high; - else - intrmod_cfg->tx_maxcnt_trigger = - LIO_INTRMOD_TXMAXCNT_TRIGGER; - if (intr_coal->tx_max_coalesced_frames_low) - intrmod_cfg->tx_mincnt_trigger = - intr_coal->tx_max_coalesced_frames_low; - else - intrmod_cfg->tx_mincnt_trigger = - LIO_INTRMOD_TXMINCNT_TRIGGER; + if (intrmod_cfg->rx_enable) { + intrmod_cfg->rx_maxcnt_trigger = + intr_coal->rx_max_coalesced_frames_high; + intrmod_cfg->rx_maxtmr_trigger = + intr_coal->rx_coalesce_usecs_high; + intrmod_cfg->rx_mintmr_trigger = + intr_coal->rx_coalesce_usecs_low; + intrmod_cfg->rx_mincnt_trigger = + intr_coal->rx_max_coalesced_frames_low; + } + if (intrmod_cfg->tx_enable) { + intrmod_cfg->tx_maxcnt_trigger = + intr_coal->tx_max_coalesced_frames_high; + intrmod_cfg->tx_mincnt_trigger = + intr_coal->tx_max_coalesced_frames_low; } ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); @@ -1666,7 +1741,9 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce } static int -oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) +oct_cfg_rx_intrcnt(struct lio *lio, + struct oct_intrmod_cfg *intrmod, + struct ethtool_coalesce *intr_coal) { struct octeon_device *oct = lio->oct_dev; u32 rx_max_coalesced_frames; @@ -1692,7 +1769,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) int q_no; if (!intr_coal->rx_max_coalesced_frames) - rx_max_coalesced_frames = oct->intrmod.rx_frames; + rx_max_coalesced_frames = intrmod->rx_frames; else rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames; @@ -1703,17 +1780,18 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) (octeon_read_csr64( oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & (0x3fffff00000000UL)) | - rx_max_coalesced_frames); + (rx_max_coalesced_frames - 1)); /*consider setting resend bit*/ } - oct->intrmod.rx_frames = rx_max_coalesced_frames; + intrmod->rx_frames = rx_max_coalesced_frames; + oct->rx_max_coalesced_frames = rx_max_coalesced_frames; break; } case OCTEON_CN23XX_VF_VID: { int q_no; if (!intr_coal->rx_max_coalesced_frames) - rx_max_coalesced_frames = oct->intrmod.rx_frames; + rx_max_coalesced_frames = intrmod->rx_frames; else rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames; @@ -1724,9 +1802,10 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & (0x3fffff00000000UL)) | rx_max_coalesced_frames); - /* consider writing to resend bit here */ + /*consider writing to resend bit here*/ } - oct->intrmod.rx_frames = rx_max_coalesced_frames; + intrmod->rx_frames = rx_max_coalesced_frames; + oct->rx_max_coalesced_frames = 
rx_max_coalesced_frames; break; } default: @@ -1736,6 +1815,7 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) } static int oct_cfg_rx_intrtime(struct lio *lio, + struct oct_intrmod_cfg *intrmod, struct ethtool_coalesce *intr_coal) { struct octeon_device *oct = lio->oct_dev; @@ -1766,7 +1846,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio, int q_no; if (!intr_coal->rx_coalesce_usecs) - rx_coalesce_usecs = oct->intrmod.rx_usecs; + rx_coalesce_usecs = intrmod->rx_usecs; else rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; time_threshold = @@ -1775,11 +1855,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio, q_no += oct->sriov_info.pf_srn; octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), - (oct->intrmod.rx_frames | - (time_threshold << 32))); + (intrmod->rx_frames | + ((u64)time_threshold << 32))); /*consider writing to resend bit here*/ } - oct->intrmod.rx_usecs = rx_coalesce_usecs; + intrmod->rx_usecs = rx_coalesce_usecs; + oct->rx_coalesce_usecs = rx_coalesce_usecs; break; } case OCTEON_CN23XX_VF_VID: { @@ -1787,7 +1868,7 @@ static int oct_cfg_rx_intrtime(struct lio *lio, int q_no; if (!intr_coal->rx_coalesce_usecs) - rx_coalesce_usecs = oct->intrmod.rx_usecs; + rx_coalesce_usecs = intrmod->rx_usecs; else rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; @@ -1796,11 +1877,12 @@ static int oct_cfg_rx_intrtime(struct lio *lio, for (q_no = 0; q_no < oct->num_oqs; q_no++) { octeon_write_csr64( oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), - (oct->intrmod.rx_frames | - (time_threshold << 32))); - /* consider setting resend bit */ + (intrmod->rx_frames | + ((u64)time_threshold << 32))); + /*consider setting resend bit*/ } - oct->intrmod.rx_usecs = rx_coalesce_usecs; + intrmod->rx_usecs = rx_coalesce_usecs; + oct->rx_coalesce_usecs = rx_coalesce_usecs; break; } default: @@ -1811,8 +1893,9 @@ static int oct_cfg_rx_intrtime(struct lio *lio, } static int -oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal - __attribute__((unused))) +oct_cfg_tx_intrcnt(struct lio *lio, + struct oct_intrmod_cfg *intrmod, + struct ethtool_coalesce *intr_coal) { struct octeon_device *oct = lio->oct_dev; u32 iq_intr_pkt; @@ -1839,12 +1922,13 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal val = readq(inst_cnt_reg); /*clear wmark and count.dont want to write count back*/ val = (val & 0xFFFF000000000000ULL) | - ((u64)iq_intr_pkt + ((u64)(iq_intr_pkt - 1) << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); writeq(val, inst_cnt_reg); /*consider setting resend bit*/ } - oct->intrmod.tx_frames = iq_intr_pkt; + intrmod->tx_frames = iq_intr_pkt; + oct->tx_max_coalesced_frames = iq_intr_pkt; break; } default: @@ -1859,6 +1943,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); int ret; struct octeon_device *oct = lio->oct_dev; + struct oct_intrmod_cfg intrmod = {0}; u32 j, q_no; int db_max, db_min; @@ -1877,8 +1962,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev, } else { dev_err(&oct->pci_dev->dev, "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", - intr_coal->tx_max_coalesced_frames, db_min, - db_max); + intr_coal->tx_max_coalesced_frames, + db_min, db_max); return -EINVAL; } break; @@ -1889,24 +1974,36 @@ static int lio_set_intr_coalesce(struct net_device *netdev, return -EINVAL; } - oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; - oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 
1 : 0; + intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; + intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0; + intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); + intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); - ret = oct_cfg_adaptive_intr(lio, intr_coal); + ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); if (!intr_coal->use_adaptive_rx_coalesce) { - ret = oct_cfg_rx_intrtime(lio, intr_coal); + ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); if (ret) goto ret_intrmod; - ret = oct_cfg_rx_intrcnt(lio, intr_coal); + ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); if (ret) goto ret_intrmod; + } else { + oct->rx_coalesce_usecs = + CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); + oct->rx_max_coalesced_frames = + CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); } + if (!intr_coal->use_adaptive_tx_coalesce) { - ret = oct_cfg_tx_intrcnt(lio, intr_coal); + ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); if (ret) goto ret_intrmod; + } else { + oct->tx_max_coalesced_frames = + CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); } return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 92f46b1375c32527b29e24a4476d6b455835bd46..927617cbf6a91a4c8738ffcb161eed0d61e9483d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -16,6 +16,7 @@ * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ #include +#include #include #include #include @@ -60,12 +61,6 @@ MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\""); static int ptp_enable = 1; -/* Bit mask values for lio->ifstate */ -#define LIO_IFSTATE_DROQ_OPS 0x01 -#define LIO_IFSTATE_REGISTERED 0x02 -#define LIO_IFSTATE_RUNNING 0x04 -#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 - /* Polling interval for determining when NIC application is alive */ #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 @@ -178,6 +173,8 @@ static int liquidio_stop(struct net_device *netdev); static void liquidio_remove(struct pci_dev *pdev); static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, + int linkstate); static struct handshake handshake[MAX_OCTEON_DEVICES]; static struct completion first_stage; @@ -530,36 +527,6 @@ static void liquidio_deinit_pci(void) pci_unregister_driver(&liquidio_pci_driver); } -/** - * \brief check interface state - * @param lio per-network private data - * @param state_flag flag state to check - */ -static inline int ifstate_check(struct lio *lio, int state_flag) -{ - return atomic_read(&lio->ifstate) & state_flag; -} - -/** - * \brief set interface state - * @param lio per-network private data - * @param state_flag flag state to set - */ -static inline void ifstate_set(struct lio *lio, int state_flag) -{ - atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); -} - -/** - * \brief clear interface state - * @param lio per-network private data - * @param state_flag flag state to clear - */ -static inline void ifstate_reset(struct lio *lio, int state_flag) -{ - atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); -} - /** * \brief Stop Tx queues * @param netdev network device @@ -748,7 +715,8 @@ static void delete_glists(struct lio *lio) kfree(g); } 
while (g); - if (lio->glists_virt_base && lio->glists_virt_base[i]) { + if (lio->glists_virt_base && lio->glists_virt_base[i] && + lio->glists_dma_base && lio->glists_dma_base[i]) { lio_dma_free(lio->oct_dev, lio->glist_entry_size * lio->tx_qsize, lio->glists_virt_base[i], @@ -805,7 +773,7 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) } for (i = 0; i < num_iqs; i++) { - int numa_node = cpu_to_node(i % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); spin_lock_init(&lio->glist_lock[i]); @@ -967,14 +935,13 @@ static void update_txq_status(struct octeon_device *oct, int iq_num) INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, tx_restart, 1); netif_wake_subqueue(netdev, iq->q_index); - } else { - if (!octnet_iq_is_full(oct, lio->txq)) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, - lio->txq, - tx_restart, 1); - wake_q(netdev, lio->txq); - } } + } else if (netif_queue_stopped(netdev) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, lio->txq))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, + lio->txq, tx_restart, 1); + netif_wake_queue(netdev); } } @@ -1084,16 +1051,35 @@ static int octeon_setup_interrupt(struct octeon_device *oct) int i; int num_ioq_vectors; int num_alloc_ioq_vectors; + char *queue_irq_names = NULL; + char *aux_irq_name = NULL; if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { oct->num_msix_irqs = oct->sriov_info.num_pf_rings; /* one non ioq interrupt for handling sli_mac_pf_int_sum */ oct->num_msix_irqs += 1; + /* allocate storage for the names assigned to each irq */ + oct->irq_name_storage = + kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ, + GFP_KERNEL); + if (!oct->irq_name_storage) { + dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); + return -ENOMEM; + } + + queue_irq_names = oct->irq_name_storage; + aux_irq_name = &queue_irq_names + [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; + oct->msix_entries = kcalloc( oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); - if (!oct->msix_entries) - return 1; + if (!oct->msix_entries) { + dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return -ENOMEM; + } msix_entries = (struct msix_entry *)oct->msix_entries; /*Assumption is that pf msix vectors start from pf srn to pf to @@ -1111,7 +1097,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct) dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); kfree(oct->msix_entries); oct->msix_entries = NULL; - return 1; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return num_alloc_ioq_vectors; } dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); @@ -1119,9 +1107,12 @@ static int octeon_setup_interrupt(struct octeon_device *oct) /** For PF, there is one non-ioq interrupt handler */ num_ioq_vectors -= 1; + + snprintf(aux_irq_name, INTRNAMSIZ, + "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num); irqret = request_irq(msix_entries[num_ioq_vectors].vector, - liquidio_legacy_intr_handler, 0, "octeon", - oct); + liquidio_legacy_intr_handler, 0, + aux_irq_name, oct); if (irqret) { dev_err(&oct->pci_dev->dev, "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", @@ -1129,13 +1120,20 @@ static int octeon_setup_interrupt(struct octeon_device *oct) pci_disable_msix(oct->pci_dev); kfree(oct->msix_entries); oct->msix_entries = NULL; - return 1; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; } for (i = 0; i < num_ioq_vectors; 
i++) { + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, + "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, i); + irqret = request_irq(msix_entries[i].vector, liquidio_msix_intr_handler, 0, - "octeon", &oct->ioq_vector[i]); + &queue_irq_names[IRQ_NAME_OFF(i)], + &oct->ioq_vector[i]); if (irqret) { dev_err(&oct->pci_dev->dev, "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", @@ -1155,7 +1153,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct) pci_disable_msix(oct->pci_dev); kfree(oct->msix_entries); oct->msix_entries = NULL; - return 1; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; } oct->ioq_vector[i].vector = msix_entries[i].vector; /* assign the cpu mask for this msix interrupt vector */ @@ -1173,111 +1173,150 @@ static int octeon_setup_interrupt(struct octeon_device *oct) else oct->flags |= LIO_FLAG_MSI_ENABLED; + /* allocate storage for the names assigned to the irq */ + oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) + return -ENOMEM; + + queue_irq_names = oct->irq_name_storage; + + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, 0); + irqret = request_irq(oct->pci_dev->irq, - liquidio_legacy_intr_handler, IRQF_SHARED, - "octeon", oct); + liquidio_legacy_intr_handler, + IRQF_SHARED, + &queue_irq_names[IRQ_NAME_OFF(0)], oct); if (irqret) { if (oct->flags & LIO_FLAG_MSI_ENABLED) pci_disable_msi(oct->pci_dev); dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", irqret); - return 1; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; } } return 0; } +static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) +{ + struct octeon_device *other_oct; + + other_oct = lio_get_device(oct->octeon_id + 1); + + if (other_oct && other_oct->pci_dev) { + int oct_busnum, other_oct_busnum; + + oct_busnum = oct->pci_dev->bus->number; + other_oct_busnum = other_oct->pci_dev->bus->number; + + if (oct_busnum == other_oct_busnum) { + int oct_slot, other_oct_slot; + + oct_slot = PCI_SLOT(oct->pci_dev->devfn); + other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); + + if (oct_slot == other_oct_slot) + return other_oct; + } + } + + return NULL; +} + +static void disable_all_vf_links(struct octeon_device *oct) +{ + struct net_device *netdev; + int max_vfs, vf, i; + + if (!oct) + return; + + max_vfs = oct->sriov_info.max_vfs; + + for (i = 0; i < oct->ifcount; i++) { + netdev = oct->props[i].netdev; + if (!netdev) + continue; + + for (vf = 0; vf < max_vfs; vf++) + liquidio_set_vf_link_state(netdev, vf, + IFLA_VF_LINK_STATE_DISABLE); + } +} + static int liquidio_watchdog(void *param) { - u64 wdog; - u16 mask_of_stuck_cores = 0; - u16 mask_of_crashed_cores = 0; - int core_num; - u8 core_is_stuck[LIO_MAX_CORES]; - u8 core_crashed[LIO_MAX_CORES]; + bool err_msg_was_printed[LIO_MAX_CORES]; + u16 mask_of_crashed_or_stuck_cores = 0; + bool all_vf_links_are_disabled = false; struct octeon_device *oct = param; + struct octeon_device *other_oct; +#ifdef CONFIG_MODULE_UNLOAD + long refcount, vfs_referencing_pf; + u64 vfs_mask1, vfs_mask2; +#endif + int core; - memset(core_is_stuck, 0, sizeof(core_is_stuck)); - memset(core_crashed, 0, sizeof(core_crashed)); + memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); while (!kthread_should_stop()) { - mask_of_crashed_cores = + /* sleep for a couple of seconds so that we don't hog the CPU */ + 
set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(2000)); + + mask_of_crashed_or_stuck_cores = (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); - for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) { - if (!core_is_stuck[core_num]) { - wdog = lio_pci_readq(oct, CIU3_WDOG(core_num)); - - /* look at watchdog state field */ - wdog &= CIU3_WDOG_MASK; - if (wdog) { - /* this watchdog timer has expired */ - core_is_stuck[core_num] = - LIO_MONITOR_WDOG_EXPIRE; - mask_of_stuck_cores |= (1 << core_num); - } - } + if (!mask_of_crashed_or_stuck_cores) + continue; - if (!core_crashed[core_num]) - core_crashed[core_num] = - (mask_of_crashed_cores >> core_num) & 1; - } + WRITE_ONCE(oct->cores_crashed, true); + other_oct = get_other_octeon_device(oct); + if (other_oct) + WRITE_ONCE(other_oct->cores_crashed, true); - if (mask_of_stuck_cores) { - for (core_num = 0; core_num < LIO_MAX_CORES; - core_num++) { - if (core_is_stuck[core_num] == 1) { - dev_err(&oct->pci_dev->dev, - "ERROR: Octeon core %d is stuck!\n", - core_num); - /* 2 means we have printk'd an error - * so no need to repeat the same printk - */ - core_is_stuck[core_num] = - LIO_MONITOR_CORE_STUCK_MSGD; - } - } - } + for (core = 0; core < LIO_MAX_CORES; core++) { + bool core_crashed_or_got_stuck; - if (mask_of_crashed_cores) { - for (core_num = 0; core_num < LIO_MAX_CORES; - core_num++) { - if (core_crashed[core_num] == 1) { - dev_err(&oct->pci_dev->dev, - "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n", - core_num); - /* 2 means we have printk'd an error - * so no need to repeat the same printk - */ - core_crashed[core_num] = - LIO_MONITOR_CORE_STUCK_MSGD; - } + core_crashed_or_got_stuck = + (mask_of_crashed_or_stuck_cores + >> core) & 1; + + if (core_crashed_or_got_stuck && + !err_msg_was_printed[core]) { + dev_err(&oct->pci_dev->dev, + "ERROR: Octeon core %d crashed or got stuck! 
See oct-fwdump for details.\n", + core); + err_msg_was_printed[core] = true; } } + + if (all_vf_links_are_disabled) + continue; + + disable_all_vf_links(oct); + disable_all_vf_links(other_oct); + all_vf_links_are_disabled = true; + #ifdef CONFIG_MODULE_UNLOAD - if (mask_of_stuck_cores || mask_of_crashed_cores) { - /* make module refcount=0 so that rmmod will work */ - long refcount; + vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); + vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); - refcount = module_refcount(THIS_MODULE); + vfs_referencing_pf = hweight64(vfs_mask1); + vfs_referencing_pf += hweight64(vfs_mask2); - while (refcount > 0) { + refcount = module_refcount(THIS_MODULE); + if (refcount >= vfs_referencing_pf) { + while (vfs_referencing_pf) { module_put(THIS_MODULE); - refcount = module_refcount(THIS_MODULE); - } - - /* compensate for and withstand an unlikely (but still - * possible) race condition - */ - while (refcount < 0) { - try_module_get(THIS_MODULE); - refcount = module_refcount(THIS_MODULE); + vfs_referencing_pf--; } } #endif - /* sleep for two seconds */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(2 * HZ); } return 0; @@ -1369,6 +1408,12 @@ liquidio_probe(struct pci_dev *pdev, return 0; } +static bool fw_type_is_none(void) +{ + return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, + sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; +} + /** *\brief Destroy resources associated with octeon device * @param pdev PCI device structure @@ -1449,6 +1494,9 @@ static void octeon_destroy_resources(struct octeon_device *oct) pci_disable_msi(oct->pci_dev); } + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + /* fallthrough */ case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: if (OCTEON_CN23XX_PF(oct)) @@ -1508,9 +1556,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_PCI_MAP_DONE: - /* Soft reset the octeon device before exiting */ - if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id) - oct->fn_list.soft_reset(oct); + if (!fw_type_is_none()) { + /* Soft reset the octeon device before exiting */ + if (!OCTEON_CN23XX_PF(oct) || + (OCTEON_CN23XX_PF(oct) && !oct->octeon_id)) + oct->fn_list.soft_reset(oct); + } octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); @@ -1643,6 +1694,15 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) liquidio_stop(netdev); + if (fw_type_is_none()) { + struct octnic_ctrl_pkt nctrl; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + octnet_send_nic_ctrl_pkt(oct, &nctrl); + } + if (oct->props[lio->ifidx].napi_enabled == 1) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); @@ -1658,6 +1718,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_link_status_change_wq(netdev); + cleanup_rx_oom_poll_fn(netdev); + delete_glists(lio); free_netdev(netdev); @@ -2126,8 +2188,7 @@ static int load_firmware(struct octeon_device *oct) char fw_name[LIO_MAX_FW_FILENAME_LEN]; char *tmp_fw_type; - if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, - sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) { + if (fw_type_is_none()) { dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); return ret; } @@ -2211,8 +2272,8 @@ static void if_cfg_callback(struct octeon_device *oct, oct = lio_get_device(ctx->octeon_id); if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg 
instruction failed. Status: %llx\n", - CVM_CAST64(resp->status)); + dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n", + CVM_CAST64(resp->status), status); WRITE_ONCE(ctx->cond, 1); snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", @@ -2437,8 +2498,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) /* Flush the instruction queue */ iq = oct->instr_queue[iq_no]; if (iq) { - /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, budget); + if (atomic_read(&iq->instr_pending)) + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, budget); + else + tx_done = 1; /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. */ @@ -2555,6 +2619,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, __func__); return 1; } + + if (octeon_dev->ioq_vector) { + struct octeon_ioq_vector *ioq_vector; + + ioq_vector = &octeon_dev->ioq_vector[q]; + netif_set_xps_queue(netdev, + &ioq_vector->affinity_mask, + ioq_vector->iq_index); + } } return 0; @@ -3426,6 +3499,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, struct octnic_ctrl_pkt nctrl; int ret = 0; + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; @@ -3459,6 +3534,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, struct octnic_ctrl_pkt nctrl; int ret = 0; + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.more = vxlan_cmd_bit; @@ -3596,7 +3673,8 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, nctrl.ncmd.s.param2 = (is_admin_assigned ? 
1 : 0); nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; nctrl.wait_time = LIO_CMD_WAIT_TM; nctrl.udd[0] = 0; @@ -4122,6 +4200,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (setup_link_status_change_wq(netdev)) goto setup_nic_dev_fail; + if (setup_rx_oom_poll_fn(netdev)) + goto setup_nic_dev_fail; + /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); @@ -4271,7 +4352,6 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) */ static int liquidio_init_nic_module(struct octeon_device *oct) { - struct oct_intrmod_cfg *intrmod_cfg; int i, retval = 0; int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); @@ -4296,22 +4376,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct) liquidio_ptp_init(oct); - /* Initialize interrupt moderation params */ - intrmod_cfg = &((struct octeon_device *)oct)->intrmod; - intrmod_cfg->rx_enable = 1; - intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; - intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; - intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; - intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; - intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; - intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; - intrmod_cfg->tx_enable = 1; - intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER; - intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; - intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); - intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); - intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); return retval; @@ -4373,6 +4437,7 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) struct octeon_device *oct = (struct octeon_device *)buf; struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; int i, notice, vf_idx; + bool cores_crashed; u64 *data, vf_num; notice = recv_pkt->rh.r.ossp; @@ -4383,19 +4448,23 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) octeon_swap_8B_data(&vf_num, 1); vf_idx = (int)vf_num - 1; + cores_crashed = READ_ONCE(oct->cores_crashed); + if (notice == VF_DRV_LOADED) { if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); dev_info(&oct->pci_dev->dev, "driver for VF%d was loaded\n", vf_idx); - try_module_get(THIS_MODULE); + if (!cores_crashed) + try_module_get(THIS_MODULE); } } else if (notice == VF_DRV_REMOVED) { if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); dev_info(&oct->pci_dev->dev, "driver for VF%d was removed\n", vf_idx); - module_put(THIS_MODULE); + if (!cores_crashed) + module_put(THIS_MODULE); } } else if (notice == VF_DRV_MACADDR_CHANGED) { u8 *b = (u8 *)&data[1]; @@ -4447,14 +4516,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev) if (OCTEON_CN23XX_PF(octeon_dev)) { if (!cn23xx_fw_loaded(octeon_dev)) { fw_loaded = 0; - /* Do a soft reset of the Octeon device. 
*/ - if (octeon_dev->fn_list.soft_reset(octeon_dev)) - return 1; - /* things might have changed */ - if (!cn23xx_fw_loaded(octeon_dev)) - fw_loaded = 0; - else - fw_loaded = 1; + if (!fw_type_is_none()) { + /* Do a soft reset of the Octeon device. */ + if (octeon_dev->fn_list.soft_reset(octeon_dev)) + return 1; + /* things might have changed */ + if (!cn23xx_fw_loaded(octeon_dev)) + fw_loaded = 0; + else + fw_loaded = 1; + } } else { fw_loaded = 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 7b83be4ce1fe0ce5cab0c7ff889edbf334a5a065..34c77821fad9f2a49d49841bef2b0c601ca402aa 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -16,6 +16,7 @@ * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ #include +#include #include #include #include "liquidio_common.h" @@ -39,12 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -/* Bit mask values for lio->ifstate */ -#define LIO_IFSTATE_DROQ_OPS 0x01 -#define LIO_IFSTATE_REGISTERED 0x02 -#define LIO_IFSTATE_RUNNING 0x04 -#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 - struct liquidio_if_cfg_context { int octeon_id; @@ -335,36 +330,6 @@ static struct pci_driver liquidio_vf_pci_driver = { .err_handler = &liquidio_vf_err_handler, /* For AER */ }; -/** - * \brief check interface state - * @param lio per-network private data - * @param state_flag flag state to check - */ -static int ifstate_check(struct lio *lio, int state_flag) -{ - return atomic_read(&lio->ifstate) & state_flag; -} - -/** - * \brief set interface state - * @param lio per-network private data - * @param state_flag flag state to set - */ -static void ifstate_set(struct lio *lio, int state_flag) -{ - atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); -} - -/** - * \brief clear interface state - * @param lio per-network private data - * @param state_flag flag state to clear - */ -static void ifstate_reset(struct lio *lio, int state_flag) -{ - atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); -} - /** * \brief Stop Tx queues * @param netdev network device @@ -506,7 +471,8 @@ static void delete_glists(struct lio *lio) kfree(g); } while (g); - if (lio->glists_virt_base && lio->glists_virt_base[i]) { + if (lio->glists_virt_base && lio->glists_virt_base[i] && + lio->glists_dma_base && lio->glists_dma_base[i]) { lio_dma_free(lio->oct_dev, lio->glist_entry_size * lio->tx_qsize, lio->glists_virt_base[i], @@ -722,13 +688,12 @@ static void update_txq_status(struct octeon_device *oct, int iq_num) netif_wake_subqueue(netdev, iq->q_index); INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, tx_restart, 1); - } else { - if (!octnet_iq_is_full(oct, lio->txq)) { - INCR_INSTRQUEUE_PKT_COUNT( - lio->oct_dev, lio->txq, tx_restart, 1); - wake_q(netdev, lio->txq); - } } + } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, lio->txq))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, + lio->txq, tx_restart, 1); + netif_wake_queue(netdev); } } @@ -780,6 +745,7 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) static int octeon_setup_interrupt(struct octeon_device *oct) { struct msix_entry *msix_entries; + char *queue_irq_names = NULL; int num_alloc_ioq_vectors; int num_ioq_vectors; int 
irqret; @@ -788,10 +754,25 @@ static int octeon_setup_interrupt(struct octeon_device *oct) if (oct->msix_on) { oct->num_msix_irqs = oct->sriov_info.rings_per_vf; + /* allocate storage for the names assigned to each irq */ + oct->irq_name_storage = + kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, + GFP_KERNEL); + if (!oct->irq_name_storage) { + dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); + return -ENOMEM; + } + + queue_irq_names = oct->irq_name_storage; + oct->msix_entries = kcalloc( oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); - if (!oct->msix_entries) - return 1; + if (!oct->msix_entries) { + dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return -ENOMEM; + } msix_entries = (struct msix_entry *)oct->msix_entries; @@ -805,16 +786,23 @@ static int octeon_setup_interrupt(struct octeon_device *oct) dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); kfree(oct->msix_entries); oct->msix_entries = NULL; - return 1; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return num_alloc_ioq_vectors; } dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); num_ioq_vectors = oct->num_msix_irqs; for (i = 0; i < num_ioq_vectors; i++) { + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, + "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, i); + irqret = request_irq(msix_entries[i].vector, liquidio_msix_intr_handler, 0, - "octeon", &oct->ioq_vector[i]); + &queue_irq_names[IRQ_NAME_OFF(i)], + &oct->ioq_vector[i]); if (irqret) { dev_err(&oct->pci_dev->dev, "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", @@ -830,7 +818,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct) pci_disable_msix(oct->pci_dev); kfree(oct->msix_entries); oct->msix_entries = NULL; - return 1; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; } oct->ioq_vector[i].vector = msix_entries[i].vector; /* assign the cpu mask for this msix interrupt vector */ @@ -975,6 +965,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) pci_disable_msix(oct->pci_dev); kfree(oct->msix_entries); oct->msix_entries = NULL; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; } /* Soft reset the octeon device before exiting */ if (oct->pci_dev->reset_fn) @@ -1163,6 +1155,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); + cleanup_rx_oom_poll_fn(netdev); + cleanup_link_status_change_wq(netdev); delete_glists(lio); @@ -1642,8 +1636,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) /* Flush the instruction queue */ iq = oct->instr_queue[iq_no]; if (iq) { - /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, budget); + if (atomic_read(&iq->instr_pending)) + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, budget); + else + tx_done = 1; + /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. 
*/ @@ -2486,6 +2484,8 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + struct completion compl; + u16 response_code; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); @@ -2497,14 +2497,25 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + init_completion(&compl); + nctrl.completion = &compl; + nctrl.response_code = &response_code; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); + return -EIO; } - return ret; + if (!wait_for_completion_timeout(&compl, + msecs_to_jiffies(nctrl.wait_time))) + return -EPERM; + + if (READ_ONCE(response_code)) + return -EPERM; + + return 0; } static int @@ -2549,6 +2560,8 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, struct octnic_ctrl_pkt nctrl; int ret = 0; + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; @@ -2581,6 +2594,8 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, struct octnic_ctrl_pkt nctrl; int ret = 0; + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.more = vxlan_cmd_bit; @@ -3003,6 +3018,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (setup_link_status_change_wq(netdev)) goto setup_nic_dev_fail; + if (setup_rx_oom_poll_fn(netdev)) + goto setup_nic_dev_fail; + /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); @@ -3057,7 +3075,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) */ static int liquidio_init_nic_module(struct octeon_device *oct) { - struct oct_intrmod_cfg *intrmod_cfg; int num_nic_ports = 1; int i, retval = 0; @@ -3079,22 +3096,6 @@ static int liquidio_init_nic_module(struct octeon_device *oct) goto octnet_init_failure; } - /* Initialize interrupt moderation params */ - intrmod_cfg = &((struct octeon_device *)oct)->intrmod; - intrmod_cfg->rx_enable = 1; - intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; - intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; - intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; - intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; - intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; - intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; - intrmod_cfg->tx_enable = 1; - intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER; - intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; - intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); - intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); - intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); return retval; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 294c6f3c6b48254044c610c78625c9c3c86e9b1f..8ea2323d8d676a0b96b5653b3fba966bfd5dcd5d 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h 
@@ -27,7 +27,7 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 -#define LIQUIDIO_BASE_MINOR_VERSION 4 +#define LIQUIDIO_BASE_MINOR_VERSION 5 #define LIQUIDIO_BASE_MICRO_VERSION 1 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) @@ -83,6 +83,7 @@ enum octeon_tag_type { #define OPCODE_NIC_INTRMOD_CFG 0x08 #define OPCODE_NIC_IF_CFG 0x09 #define OPCODE_NIC_VF_DRV_NOTICE 0x0A +#define OPCODE_NIC_INTRMOD_PARAMS 0x0B #define VF_DRV_LOADED 1 #define VF_DRV_REMOVED -1 #define VF_DRV_MACADDR_CHANGED 2 @@ -100,6 +101,11 @@ enum octeon_tag_type { #define BYTES_PER_DHLEN_UNIT 8 #define MAX_REG_CNT 2000000U +#define INTRNAMSIZ 32 +#define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ) +#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2) +#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2) + static inline u32 incr_index(u32 index, u32 count, u32 max) { @@ -181,6 +187,7 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_Q 0 /* NIC Command types */ +#define OCTNET_CMD_RESET_PF 0x0 #define OCTNET_CMD_CHANGE_MTU 0x1 #define OCTNET_CMD_CHANGE_MACADDR 0x2 #define OCTNET_CMD_CHANGE_DEVFLAGS 0x3 @@ -845,29 +852,6 @@ struct oct_mdio_cmd { #define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) -/* intrmod: max. packet rate threshold */ -#define LIO_INTRMOD_MAXPKT_RATETHR 196608 -/* intrmod: min. packet rate threshold */ -#define LIO_INTRMOD_MINPKT_RATETHR 9216 -/* intrmod: max. packets to trigger interrupt */ -#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384 -/* intrmod: min. packets to trigger interrupt */ -#define LIO_INTRMOD_RXMINCNT_TRIGGER 0 -/* intrmod: max. time to trigger interrupt */ -#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128 -/* 66xx:intrmod: min. time to trigger interrupt - * (value of 1 is optimum for TCP_RR) - */ -#define LIO_INTRMOD_RXMINTMR_TRIGGER 1 - -/* intrmod: max. packets to trigger interrupt */ -#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64 -/* intrmod: min. 
packets to trigger interrupt */ -#define LIO_INTRMOD_TXMINCNT_TRIGGER 0 - -/* intrmod: poll interval in seconds */ -#define LIO_INTRMOD_CHECK_INTERVAL 1 - struct oct_intrmod_cfg { u64 rx_enable; u64 tx_enable; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 9675ffbf25e6bd9bf34d346204f1b0fbcbcfa185..e21b477d0159f1e17570259e2db3bcb155378728 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -793,7 +793,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct) u32 num_descs = 0; u32 iq_no = 0; union oct_txpciq txpciq; - int numa_node = cpu_to_node(iq_no % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); if (OCTEON_CN6XXX(oct)) num_descs = @@ -837,7 +837,7 @@ int octeon_setup_output_queues(struct octeon_device *oct) u32 num_descs = 0; u32 desc_size = 0; u32 oq_no = 0; - int numa_node = cpu_to_node(oq_no % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); if (OCTEON_CN6XXX(oct)) { num_descs = diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index c301a3852482845ee65bf260c48dbd69853e9522..92f67de111aa0ac942a92fbff2120141e10f32b8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -453,9 +453,6 @@ struct octeon_device { /** List of dispatch functions */ struct octeon_dispatch_list dispatch; - /* Interrupt Moderation */ - struct oct_intrmod_cfg intrmod; - u32 int_status; u64 droq_intr; @@ -517,6 +514,9 @@ struct octeon_device { void *msix_entries; + /* when requesting IRQs, the names are stored here */ + void *irq_name_storage; + struct octeon_sriov_info sriov_info; struct octeon_pf_vf_hs_word pfvf_hsword; @@ -538,6 +538,12 @@ struct octeon_device { u32 priv_flags; void *watchdog_task; + + u32 rx_coalesce_usecs; + u32 rx_max_coalesced_frames; + u32 tx_max_coalesced_frames; + + bool cores_crashed; }; #define OCT_DRV_ONLINE 1 @@ -551,12 +557,6 @@ struct octeon_device { #define CHIP_CONF(oct, TYPE) \ (((struct octeon_ ## TYPE *)((oct)->chip))->conf) -struct oct_intrmod_cmd { - struct octeon_device *oct_dev; - struct octeon_soft_command *sc; - struct oct_intrmod_cfg *cfg; -}; - /*------------------ Function Prototypes ----------------------*/ /** Initialize device list memory */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 79f809479af6e7d865cc7c280c84232622af982e..286be5539cef707c9464f1c4488cdd6134e9b343 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -226,8 +226,7 @@ int octeon_init_droq(struct octeon_device *oct, struct octeon_droq *droq; u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; u32 c_pkts_per_intr = 0, c_refill_threshold = 0; - int orig_node = dev_to_node(&oct->pci_dev->dev); - int numa_node = cpu_to_node(q_no % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); @@ -267,13 +266,8 @@ int octeon_init_droq(struct octeon_device *oct, droq->buffer_size = c_buf_size; desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; - set_dev_node(&oct->pci_dev->dev, numa_node); droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, (dma_addr_t *)&droq->desc_ring_dma); - set_dev_node(&oct->pci_dev->dev, orig_node); - if 
(!droq->desc_ring) - droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, - (dma_addr_t *)&droq->desc_ring_dma); if (!droq->desc_ring) { dev_err(&oct->pci_dev->dev, @@ -519,6 +513,32 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) return desc_refilled; } +/** check if we can allocate packets to get out of oom. + * @param droq - Droq being checked. + * @return does not return anything + */ +void octeon_droq_check_oom(struct octeon_droq *droq) +{ + int desc_refilled; + struct octeon_device *oct = droq->oct_dev; + + if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) { + spin_lock_bh(&droq->lock); + desc_refilled = octeon_droq_refill(oct, droq); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory + * is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); + } + spin_unlock_bh(&droq->lock); + } +} + static inline u32 octeon_droq_get_bufcount(u32 buf_size, u32 total_len) { @@ -970,7 +990,7 @@ int octeon_create_droq(struct octeon_device *oct, u32 desc_size, void *app_ctx) { struct octeon_droq *droq; - int numa_node = cpu_to_node(q_no % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); if (oct->droq[q_no]) { dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index 6982c0af5eccb7129123fcbb4ba8363bb7f9710a..9781577115e76ff7d1ef27966bf3cbdba84ee7b4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -426,4 +426,6 @@ int octeon_droq_process_packets(struct octeon_device *oct, int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, u32 arg); +void octeon_droq_check_oom(struct octeon_droq *droq); + #endif /*__OCTEON_DROQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 4608a5af35a3204b54378dc03eef94c976370ac1..5063a12613e53646b9930f10090eae13c32301e3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -152,7 +152,7 @@ struct octeon_instr_queue { struct oct_iq_stats stats; /** DMA mapped base address of the input descriptor ring. 
*/ - u64 base_addr_dma; + dma_addr_t base_addr_dma; /** Application context */ void *app_ctx; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 201b9875f9bbef778d2687b7750424ba55320de8..5cca73b8880b0dc1a95de176b31b0278bc0d2fb2 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -313,6 +313,7 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox) return 0; } + spin_unlock_irqrestore(&mbox->lock, flags); WARN_ON(1); return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index eef2a1e8a7e3f96b26f004ec0eec93e447a5d61f..bf483932ff25021652ecf44e550426713c3af1d8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -28,6 +28,12 @@ #define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) #define LIO_MIN_MTU_SIZE ETH_MIN_MTU +/* Bit mask values for lio->ifstate */ +#define LIO_IFSTATE_DROQ_OPS 0x01 +#define LIO_IFSTATE_REGISTERED 0x02 +#define LIO_IFSTATE_RUNNING 0x04 +#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 + struct oct_nic_stats_resp { u64 rh; struct oct_link_stats stats; @@ -123,6 +129,9 @@ struct lio { /* work queue for txq status */ struct cavium_wq txq_status_wq; + /* work queue for rxq oom status */ + struct cavium_wq rxq_status_wq; + /* work queue for link status */ struct cavium_wq link_status_wq; @@ -132,10 +141,6 @@ struct lio { #define LIO_SIZE (sizeof(struct lio)) #define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev)) -#define CIU3_WDOG(c) (0x1010000020000ULL + ((c) << 3)) -#define CIU3_WDOG_MASK 12ULL -#define LIO_MONITOR_WDOG_EXPIRE 1 -#define LIO_MONITOR_CORE_STUCK_MSGD 2 #define LIO_MAX_CORES 12 /** @@ -146,6 +151,10 @@ struct lio { */ int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1); +int setup_rx_oom_poll_fn(struct net_device *netdev); + +void cleanup_rx_oom_poll_fn(struct net_device *netdev); + /** * \brief Link control command completion callback * @param nctrl_ptr pointer to control packet structure @@ -438,4 +447,34 @@ static inline void octeon_fast_packet_next(struct octeon_droq *droq, get_rbd(droq->recv_buf_list[idx].buffer), copy_len); } +/** + * \brief check interface state + * @param lio per-network private data + * @param state_flag flag state to check + */ +static inline int ifstate_check(struct lio *lio, int state_flag) +{ + return atomic_read(&lio->ifstate) & state_flag; +} + +/** + * \brief set interface state + * @param lio per-network private data + * @param state_flag flag state to set + */ +static inline void ifstate_set(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); +} + +/** + * \brief clear interface state + * @param lio per-network private data + * @param state_flag flag state to clear + */ +static inline void ifstate_reset(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); +} + #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 0243be8dd56fc32736c13cef0487514efcfe6a68..b457cf23fce6196e1f5ec5cc144644a5441d641c 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -100,14 +100,16 @@ static void octnet_link_ctrl_callback(struct 
octeon_device *oct, nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr; - /* Call the callback function if status is OK. - * Status is OK only if a response was expected and core returned - * success. + /* Call the callback function if status is zero (meaning OK) or status + * contains a firmware status code bigger than zero (meaning the + * firmware is reporting an error). * If no response was expected, status is OK if the command was posted * successfully. */ - if (!status && nctrl->cb_fn) + if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) { + nctrl->status = status; nctrl->cb_fn(nctrl); + } octeon_free_soft_command(oct, sc); } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index 0c7a5c9b2932d4db89066d3496b28d134a8bcedc..6480ef8634418dca43ab0a4187ee046276dac7ad 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -62,6 +62,10 @@ struct octnic_ctrl_pkt { /** Callback function called when the command has been fetched */ octnic_ctrl_pkt_cb_fn_t cb_fn; + + u32 status; + u16 *response_code; + struct completion *completion; }; #define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd)) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 707bc15adec61351c1384b8454c85a87a2c4b437..261f448f9de23d059d0e5a5f1552bef5069e4c65 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -62,8 +62,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, u32 iq_no = (u32)txpciq.s.q_no; u32 q_size; struct cavium_wq *db_wq; - int orig_node = dev_to_node(&oct->pci_dev->dev); - int numa_node = cpu_to_node(iq_no % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); if (OCTEON_CN6XXX(oct)) conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx))); @@ -91,13 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, iq->oct_dev = oct; - set_dev_node(&oct->pci_dev->dev, numa_node); - iq->base_addr = lio_dma_alloc(oct, q_size, - (dma_addr_t *)&iq->base_addr_dma); - set_dev_node(&oct->pci_dev->dev, orig_node); - if (!iq->base_addr) - iq->base_addr = lio_dma_alloc(oct, q_size, - (dma_addr_t *)&iq->base_addr_dma); + iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma); if (!iq->base_addr) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n", iq_no); @@ -211,7 +204,7 @@ int octeon_setup_iq(struct octeon_device *oct, void *app_ctx) { u32 iq_no = (u32)txpciq.s.q_no; - int numa_node = cpu_to_node(iq_no % num_online_cpus()); + int numa_node = dev_to_node(&oct->pci_dev->dev); if (oct->instr_queue[iq_no]) { dev_dbg(&oct->pci_dev->dev, "IQ is in use. 
Cannot create the IQ: %d again\n", diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index 2fbaae96b505fbd0dcd45e85d366ee2585ef9e55..3d691c69f74d1cf7567b2d58730dcfd656c0eefd 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -69,50 +69,53 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, int resp_to_process = MAX_ORD_REQS_TO_PROCESS; u32 status; u64 status64; - struct octeon_instr_rdp *rdp; - u64 rptr; ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; do { spin_lock_bh(&ordered_sc_list->lock); - if (ordered_sc_list->head.next == &ordered_sc_list->head) { + if (list_empty(&ordered_sc_list->head)) { spin_unlock_bh(&ordered_sc_list->lock); return 1; } - sc = (struct octeon_soft_command *)ordered_sc_list-> - head.next; - if (OCTEON_CN23XX_PF(octeon_dev) || - OCTEON_CN23XX_VF(octeon_dev)) { - rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; - rptr = sc->cmd.cmd3.rptr; - } else { - rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; - rptr = sc->cmd.cmd2.rptr; - } + sc = list_first_entry(&ordered_sc_list->head, + struct octeon_soft_command, node); status = OCTEON_REQUEST_PENDING; /* check if octeon has finished DMA'ing a response * to where rptr is pointing to */ - dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev, - rptr, rdp->rlen, - DMA_FROM_DEVICE); status64 = *sc->status_word; if (status64 != COMPLETION_WORD_INIT) { + /* This logic ensures that all 64b have been written. + * 1. check byte 0 for non-FF + * 2. if non-FF, then swap result from BE to host order + * 3. check byte 7 (swapped to 0) for non-FF + * 4. if non-FF, use the low 32-bit status code + * 5. if either byte 0 or byte 7 is FF, don't use status + */ if ((status64 & 0xff) != 0xff) { octeon_swap_8B_data(&status64, 1); if (((status64 & 0xff) != 0xff)) { - status = (u32)(status64 & - 0xffffffffULL); + /* retrieve 16-bit firmware status */ + status = (u32)(status64 & 0xffffULL); + if (status) { + status = + FIRMWARE_STATUS_CODE(status); + } else { + /* i.e. no error */ + status = OCTEON_REQUEST_DONE; + } } } } else if (force_quit || (sc->timeout && time_after(jiffies, (unsigned long)sc->timeout))) { + dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n", + __func__, (long)jiffies, (long)sc->timeout); status = OCTEON_REQUEST_TIMEOUT; } diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h index cbb2d84e89323aea4852c31c164c2ff49c40257c..9169c2815dba36c59b7b8cea5642a7fb27bff439 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.h +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h @@ -78,6 +78,8 @@ enum { /*------------ Error codes used by host driver -----------------*/ #define DRIVER_MAJOR_ERROR_CODE 0x0000 +/*------ Error codes used by firmware (bits 15..0 set by firmware */ +#define FIRMWARE_MAJOR_ERROR_CODE 0x0001 /** A value of 0x00000000 indicates no error i.e. success */ #define DRIVER_ERROR_NONE 0x00000000 @@ -116,6 +118,9 @@ enum { }; +#define FIRMWARE_STATUS_CODE(status) \ + ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status)) + /** Initialize the response lists. The number of response lists to create is * given by count. * @param octeon_dev - the octeon device structure. 
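Annotation: the response_manager changes above replace the old "any non-zero completion word is the status" scheme with an explicit firmware error space. A non-zero 16-bit firmware status is wrapped as (FIRMWARE_MAJOR_ERROR_CODE << 16) | status, so host-driver errors (major code 0x0000) and firmware errors (0x0001) can no longer collide. Below is a minimal sketch of the completion-word decode, assuming, as the patch's own comment block states, that the word is initialized to all-0xFF and written big-endian by the device; the demo_* names are illustrative and swab64() stands in for the driver's octeon_swap_8B_data() helper.

#include <linux/types.h>
#include <linux/swab.h>

#define DEMO_COMPLETION_WORD_INIT       0xFFFFFFFFFFFFFFFFULL
#define DEMO_FW_MAJOR_ERROR_CODE        0x0001
#define DEMO_FW_STATUS_CODE(s)  ((DEMO_FW_MAJOR_ERROR_CODE << 16) | (s))

/* < 0: response still pending; 0: success; > 0: encoded firmware error */
static int demo_decode_completion_word(u64 status64)
{
        u16 fw_status;

        if ((status64 & 0xff) == 0xff)
                return -1;      /* byte 0 untouched: response not written */

        status64 = swab64(status64);    /* device wrote it big-endian */
        if ((status64 & 0xff) == 0xff)
                return -1;      /* original byte 7 untouched: incomplete */

        fw_status = status64 & 0xffff;  /* low 16 bits carry fw status */
        return fw_status ? DEMO_FW_STATUS_CODE(fw_status) : 0;
}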
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 2269ff562d9562eede1d1d02c2e66ee3dcfed89b..4a02e618e318fa69c186b4271c167da6f3496d68 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -252,12 +252,14 @@ struct nicvf_drv_stats { u64 tx_csum_overflow; /* driver debug stats */ - u64 rcv_buffer_alloc_failures; u64 tx_tso; u64 tx_timeout; u64 txq_stop; u64 txq_wake; + u64 rcv_buffer_alloc_failures; + u64 page_alloc; + struct u64_stats_sync syncp; }; @@ -266,9 +268,9 @@ struct nicvf { struct net_device *netdev; struct pci_dev *pdev; void __iomem *reg_base; + struct bpf_prog *xdp_prog; #define MAX_QUEUES_PER_QSET 8 struct queue_set *qs; - struct nicvf_cq_poll *napi[8]; void *iommu_domain; u8 vf_id; u8 sqs_id; @@ -294,6 +296,7 @@ struct nicvf { /* Queue count */ u8 rx_queues; u8 tx_queues; + u8 xdp_tx_queues; u8 max_queues; u8 node; @@ -318,10 +321,11 @@ struct nicvf { struct nicvf_drv_stats __percpu *drv_stats; struct bgx_stats bgx_stats; + /* Napi */ + struct nicvf_cq_poll *napi[8]; + /* MSI-X */ - bool msix_enabled; u8 num_vec; - struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15]; bool irq_allocated[NIC_VF_MSIX_VECTORS]; cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS]; diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 767234e2e8f94bb0520bee29e1813f8934cae7a8..fb770b0182d3766324fe88f13d8f8506afd29134 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -65,9 +65,7 @@ struct nicpf { bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; /* MSI-X */ - bool msix_enabled; u8 num_vec; - struct msix_entry *msix_entries; bool irq_allocated[NIC_PF_MSIX_VECTORS]; char irq_name[NIC_PF_MSIX_VECTORS][20]; }; @@ -1088,7 +1086,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) u64 intr; u8 vf, vf_per_mbx_reg = 64; - if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector) + if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0)) mbx = 0; else mbx = 1; @@ -1107,51 +1105,13 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) return IRQ_HANDLED; } -static int nic_enable_msix(struct nicpf *nic) -{ - int i, ret; - - nic->num_vec = pci_msix_vec_count(nic->pdev); - - nic->msix_entries = kmalloc_array(nic->num_vec, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!nic->msix_entries) - return -ENOMEM; - - for (i = 0; i < nic->num_vec; i++) - nic->msix_entries[i].entry = i; - - ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); - if (ret) { - dev_err(&nic->pdev->dev, - "Request for #%d msix vectors failed, returned %d\n", - nic->num_vec, ret); - kfree(nic->msix_entries); - return ret; - } - - nic->msix_enabled = 1; - return 0; -} - -static void nic_disable_msix(struct nicpf *nic) -{ - if (nic->msix_enabled) { - pci_disable_msix(nic->pdev); - kfree(nic->msix_entries); - nic->msix_enabled = 0; - nic->num_vec = 0; - } -} - static void nic_free_all_interrupts(struct nicpf *nic) { int irq; for (irq = 0; irq < nic->num_vec; irq++) { if (nic->irq_allocated[irq]) - free_irq(nic->msix_entries[irq].vector, nic); + free_irq(pci_irq_vector(nic->pdev, irq), nic); nic->irq_allocated[irq] = false; } } @@ -1159,18 +1119,24 @@ static void nic_free_all_interrupts(struct nicpf *nic) static int nic_register_interrupts(struct nicpf *nic) { int i, ret; + nic->num_vec = pci_msix_vec_count(nic->pdev); /* Enable MSI-X 
*/ - ret = nic_enable_msix(nic); - if (ret) - return ret; + ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec, + PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(&nic->pdev->dev, + "Request for #%d msix vectors failed, returned %d\n", + nic->num_vec, ret); + return 1; + } /* Register mailbox interrupt handler */ for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) { sprintf(nic->irq_name[i], "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); - ret = request_irq(nic->msix_entries[i].vector, + ret = request_irq(pci_irq_vector(nic->pdev, i), nic_mbx_intr_handler, 0, nic->irq_name[i], nic); if (ret) @@ -1186,14 +1152,16 @@ static int nic_register_interrupts(struct nicpf *nic) fail: dev_err(&nic->pdev->dev, "Request irq failed\n"); nic_free_all_interrupts(nic); - nic_disable_msix(nic); + pci_free_irq_vectors(nic->pdev); + nic->num_vec = 0; return ret; } static void nic_unregister_interrupts(struct nicpf *nic) { nic_free_all_interrupts(nic); - nic_disable_msix(nic); + pci_free_irq_vectors(nic->pdev); + nic->num_vec = 0; } static int nic_num_sqs_en(struct nicpf *nic, int vf_en) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 02a986cdbb39cf96ed8f28c6a9326d05af60a1f3..b9ece9cbf98b9c0de97e355bf0cfd39e46970def 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -100,11 +100,12 @@ static const struct nicvf_stat nicvf_drv_stats[] = { NICVF_DRV_STAT(tx_csum_overlap), NICVF_DRV_STAT(tx_csum_overflow), - NICVF_DRV_STAT(rcv_buffer_alloc_failures), NICVF_DRV_STAT(tx_tso), NICVF_DRV_STAT(tx_timeout), NICVF_DRV_STAT(txq_stop), NICVF_DRV_STAT(txq_wake), + NICVF_DRV_STAT(rcv_buffer_alloc_failures), + NICVF_DRV_STAT(page_alloc), }; static const struct nicvf_stat nicvf_queue_stats[] = { @@ -720,7 +721,7 @@ static int nicvf_set_channels(struct net_device *dev, struct nicvf *nic = netdev_priv(dev); int err = 0; bool if_up = netif_running(dev); - int cqcount; + u8 cqcount, txq_count; if (!channel->rx_count || !channel->tx_count) return -EINVAL; @@ -729,10 +730,26 @@ static int nicvf_set_channels(struct net_device *dev, if (channel->tx_count > nic->max_queues) return -EINVAL; + if (nic->xdp_prog && + ((channel->tx_count + channel->rx_count) > nic->max_queues)) { + netdev_err(nic->netdev, + "XDP mode, RXQs + TXQs > Max %d\n", + nic->max_queues); + return -EINVAL; + } + if (if_up) nicvf_stop(dev); - cqcount = max(channel->rx_count, channel->tx_count); + nic->rx_queues = channel->rx_count; + nic->tx_queues = channel->tx_count; + if (!nic->xdp_prog) + nic->xdp_tx_queues = 0; + else + nic->xdp_tx_queues = channel->rx_count; + + txq_count = nic->xdp_tx_queues + nic->tx_queues; + cqcount = max(nic->rx_queues, txq_count); if (cqcount > MAX_CMP_QUEUES_PER_QS) { nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS); @@ -741,12 +758,10 @@ static int nicvf_set_channels(struct net_device *dev, nic->sqs_count = 0; } - nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS); - nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS); + nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS); + nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS); nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); - nic->rx_queues = channel->rx_count; - nic->tx_queues = channel->tx_count; err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues); if (err) return err; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c 
b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 24017588f5317107142897ca77aa0668046532d5..d6477af880858240a25fb7037f17d118abdf1275 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -17,6 +17,9 @@ #include #include #include +#include +#include +#include #include "nic_reg.h" #include "nic.h" @@ -397,8 +400,10 @@ static void nicvf_request_sqs(struct nicvf *nic) if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS) rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS; - if (nic->tx_queues > MAX_SND_QUEUES_PER_QS) - tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS; + + tx_queues = nic->tx_queues + nic->xdp_tx_queues; + if (tx_queues > MAX_SND_QUEUES_PER_QS) + tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS; /* Set no of Rx/Tx queues in each of the SQsets */ for (sqs = 0; sqs < nic->sqs_count; sqs++) { @@ -496,12 +501,99 @@ static int nicvf_init_resources(struct nicvf *nic) return 0; } +static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, + struct cqe_rx_t *cqe_rx, struct snd_queue *sq, + struct sk_buff **skb) +{ + struct xdp_buff xdp; + struct page *page; + u32 action; + u16 len, offset = 0; + u64 dma_addr, cpu_addr; + void *orig_data; + + /* Retrieve packet buffer's DMA address and length */ + len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64)))); + dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64)))); + + cpu_addr = nicvf_iova_to_phys(nic, dma_addr); + if (!cpu_addr) + return false; + cpu_addr = (u64)phys_to_virt(cpu_addr); + page = virt_to_page((void *)cpu_addr); + + xdp.data_hard_start = page_address(page); + xdp.data = (void *)cpu_addr; + xdp.data_end = xdp.data + len; + orig_data = xdp.data; + + rcu_read_lock(); + action = bpf_prog_run_xdp(prog, &xdp); + rcu_read_unlock(); + + /* Check if XDP program has changed headers */ + if (orig_data != xdp.data) { + len = xdp.data_end - xdp.data; + offset = orig_data - xdp.data; + dma_addr -= offset; + } + + switch (action) { + case XDP_PASS: + /* Check if it's a recycled page, if not + * unmap the DMA mapping. + * + * Recycled page holds an extra reference. + */ + if (page_ref_count(page) == 1) { + dma_addr &= PAGE_MASK; + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, + RCV_FRAG_LEN + XDP_PACKET_HEADROOM, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } + + /* Build SKB and pass on packet to network stack */ + *skb = build_skb(xdp.data, + RCV_FRAG_LEN - cqe_rx->align_pad + offset); + if (!*skb) + put_page(page); + else + skb_put(*skb, len); + return false; + case XDP_TX: + nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); + return true; + default: + bpf_warn_invalid_xdp_action(action); + case XDP_ABORTED: + trace_xdp_exception(nic->netdev, prog, action); + case XDP_DROP: + /* Check if it's a recycled page, if not + * unmap the DMA mapping. + * + * Recycled page holds an extra reference. 
+ */ + if (page_ref_count(page) == 1) { + dma_addr &= PAGE_MASK; + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, + RCV_FRAG_LEN + XDP_PACKET_HEADROOM, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + } + put_page(page); + return true; + } + return false; +} + static void nicvf_snd_pkt_handler(struct net_device *netdev, struct cqe_send_t *cqe_tx, - int cqe_type, int budget, + int budget, int *subdesc_cnt, unsigned int *tx_pkts, unsigned int *tx_bytes) { struct sk_buff *skb = NULL; + struct page *page; struct nicvf *nic = netdev_priv(netdev); struct snd_queue *sq; struct sq_hdr_subdesc *hdr; @@ -513,12 +605,26 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) return; - netdev_dbg(nic->netdev, - "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", - __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, - cqe_tx->sqe_ptr, hdr->subdesc_cnt); + /* Check for errors */ + if (cqe_tx->send_status) + nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx); + + /* Is this a XDP designated Tx queue */ + if (sq->is_xdp) { + page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr]; + /* Check if it's recycled page or else unmap DMA mapping */ + if (page && (page_ref_count(page) == 1)) + nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr, + hdr->subdesc_cnt); + + /* Release page reference for recycling */ + if (page) + put_page(page); + sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL; + *subdesc_cnt += hdr->subdesc_cnt + 1; + return; + } - nicvf_check_cqe_tx_errs(nic, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; if (skb) { /* Check for dummy descriptor used for HW TSO offload on 88xx */ @@ -528,12 +634,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2, tso_sqe->subdesc_cnt); - nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); + *subdesc_cnt += tso_sqe->subdesc_cnt + 1; } else { nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr, hdr->subdesc_cnt); } - nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); + *subdesc_cnt += hdr->subdesc_cnt + 1; prefetch(skb); (*tx_pkts)++; *tx_bytes += skb->len; @@ -544,7 +650,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, * a SKB attached, so just free SQEs here. */ if (!nic->hw_tso) - nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); + *subdesc_cnt += hdr->subdesc_cnt + 1; } } @@ -578,9 +684,9 @@ static inline void nicvf_set_rxhash(struct net_device *netdev, static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct napi_struct *napi, - struct cqe_rx_t *cqe_rx) + struct cqe_rx_t *cqe_rx, struct snd_queue *sq) { - struct sk_buff *skb; + struct sk_buff *skb = NULL; struct nicvf *nic = netdev_priv(netdev); struct nicvf *snic = nic; int err = 0; @@ -595,16 +701,25 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, } /* Check for errors */ - err = nicvf_check_cqe_rx_errs(nic, cqe_rx); - if (err && !cqe_rx->rb_cnt) - return; + if (cqe_rx->err_level || cqe_rx->err_opcode) { + err = nicvf_check_cqe_rx_errs(nic, cqe_rx); + if (err && !cqe_rx->rb_cnt) + return; + } - skb = nicvf_get_rcv_skb(snic, cqe_rx); - if (!skb) { - netdev_dbg(nic->netdev, "Packet not received\n"); - return; + /* For XDP, ignore pkts spanning multiple pages */ + if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) { + /* Packet consumed by XDP */ + if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb)) + return; + } else { + skb = nicvf_get_rcv_skb(snic, cqe_rx, + nic->xdp_prog ? 
true : false); } + if (!skb) + return; + if (netif_msg_pktdata(nic)) { netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name, skb, skb->len); @@ -646,13 +761,14 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, { int processed_cqe, work_done = 0, tx_done = 0; int cqe_count, cqe_head; + int subdesc_cnt = 0; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct cmp_queue *cq = &qs->cq[cq_idx]; struct cqe_rx_t *cq_desc; struct netdev_queue *txq; - struct snd_queue *sq; - unsigned int tx_pkts = 0, tx_bytes = 0; + struct snd_queue *sq = &qs->sq[cq_idx]; + unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx; spin_lock_bh(&cq->lock); loop: @@ -667,8 +783,6 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; cqe_head &= 0xFFFF; - netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n", - __func__, cq_idx, cqe_count, cqe_head); while (processed_cqe < cqe_count) { /* Get the CQ descriptor */ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); @@ -682,17 +796,15 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, break; } - netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n", - cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: - nicvf_rcv_pkt_handler(netdev, napi, cq_desc); + nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq); work_done++; break; case CQE_TYPE_SEND: - nicvf_snd_pkt_handler(netdev, - (void *)cq_desc, CQE_TYPE_SEND, - budget, &tx_pkts, &tx_bytes); + nicvf_snd_pkt_handler(netdev, (void *)cq_desc, + budget, &subdesc_cnt, + &tx_pkts, &tx_bytes); tx_done++; break; case CQE_TYPE_INVALID: @@ -704,9 +816,6 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, } processed_cqe++; } - netdev_dbg(nic->netdev, - "%s CQ%d processed_cqe %d work_done %d budget %d\n", - __func__, cq_idx, processed_cqe, work_done, budget); /* Ring doorbell to inform H/W to reuse processed CQEs */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, @@ -716,13 +825,26 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, goto loop; done: + /* Update SQ's descriptor free count */ + if (subdesc_cnt) + nicvf_put_sq_desc(sq, subdesc_cnt); + + txq_idx = nicvf_netdev_qidx(nic, cq_idx); + /* Handle XDP TX queues */ + if (nic->pnicvf->xdp_prog) { + if (txq_idx < nic->pnicvf->xdp_tx_queues) { + nicvf_xdp_sq_doorbell(nic, sq, cq_idx); + goto out; + } + nic = nic->pnicvf; + txq_idx -= nic->pnicvf->xdp_tx_queues; + } + /* Wakeup TXQ if its stopped earlier due to SQ full */ - sq = &nic->qs->sq[cq_idx]; if (tx_done || (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) { netdev = nic->pnicvf->netdev; - txq = netdev_get_tx_queue(netdev, - nicvf_netdev_qidx(nic, cq_idx)); + txq = netdev_get_tx_queue(netdev, txq_idx); if (tx_pkts) netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); @@ -735,10 +857,11 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit queue wakeup SQ%d\n", - netdev->name, cq_idx); + netdev->name, txq_idx); } } +out: spin_unlock_bh(&cq->lock); return work_done; } @@ -882,38 +1005,9 @@ static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq) return IRQ_HANDLED; } -static int nicvf_enable_msix(struct nicvf *nic) -{ - int ret, vec; - - nic->num_vec = NIC_VF_MSIX_VECTORS; - - for (vec = 0; vec < nic->num_vec; vec++) - nic->msix_entries[vec].entry = vec; - - ret = 
pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); - if (ret) { - netdev_err(nic->netdev, - "Req for #%d msix vectors failed\n", nic->num_vec); - return 0; - } - nic->msix_enabled = 1; - return 1; -} - -static void nicvf_disable_msix(struct nicvf *nic) -{ - if (nic->msix_enabled) { - pci_disable_msix(nic->pdev); - nic->msix_enabled = 0; - nic->num_vec = 0; - } -} - static void nicvf_set_irq_affinity(struct nicvf *nic) { int vec, cpu; - int irqnum; for (vec = 0; vec < nic->num_vec; vec++) { if (!nic->irq_allocated[vec]) @@ -930,15 +1024,14 @@ static void nicvf_set_irq_affinity(struct nicvf *nic) cpumask_set_cpu(cpumask_local_spread(cpu, nic->node), nic->affinity_mask[vec]); - irqnum = nic->msix_entries[vec].vector; - irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]); + irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec), + nic->affinity_mask[vec]); } } static int nicvf_register_interrupts(struct nicvf *nic) { int irq, ret = 0; - int vector; for_each_cq_irq(irq) sprintf(nic->irq_name[irq], "%s-rxtx-%d", @@ -957,8 +1050,8 @@ static int nicvf_register_interrupts(struct nicvf *nic) /* Register CQ interrupts */ for (irq = 0; irq < nic->qs->cq_cnt; irq++) { - vector = nic->msix_entries[irq].vector; - ret = request_irq(vector, nicvf_intr_handler, + ret = request_irq(pci_irq_vector(nic->pdev, irq), + nicvf_intr_handler, 0, nic->irq_name[irq], nic->napi[irq]); if (ret) goto err; @@ -968,8 +1061,8 @@ static int nicvf_register_interrupts(struct nicvf *nic) /* Register RBDR interrupt */ for (irq = NICVF_INTR_ID_RBDR; irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) { - vector = nic->msix_entries[irq].vector; - ret = request_irq(vector, nicvf_rbdr_intr_handler, + ret = request_irq(pci_irq_vector(nic->pdev, irq), + nicvf_rbdr_intr_handler, 0, nic->irq_name[irq], nic); if (ret) goto err; @@ -981,7 +1074,7 @@ static int nicvf_register_interrupts(struct nicvf *nic) nic->pnicvf->netdev->name, nic->sqs_mode ? (nic->sqs_id + 1) : 0); irq = NICVF_INTR_ID_QS_ERR; - ret = request_irq(nic->msix_entries[irq].vector, + ret = request_irq(pci_irq_vector(nic->pdev, irq), nicvf_qs_err_intr_handler, 0, nic->irq_name[irq], nic); if (ret) @@ -1001,6 +1094,7 @@ static int nicvf_register_interrupts(struct nicvf *nic) static void nicvf_unregister_interrupts(struct nicvf *nic) { + struct pci_dev *pdev = nic->pdev; int irq; /* Free registered interrupts */ @@ -1008,19 +1102,20 @@ static void nicvf_unregister_interrupts(struct nicvf *nic) if (!nic->irq_allocated[irq]) continue; - irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL); + irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL); free_cpumask_var(nic->affinity_mask[irq]); if (irq < NICVF_INTR_ID_SQ) - free_irq(nic->msix_entries[irq].vector, nic->napi[irq]); + free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]); else - free_irq(nic->msix_entries[irq].vector, nic); + free_irq(pci_irq_vector(pdev, irq), nic); nic->irq_allocated[irq] = false; } /* Disable MSI-X */ - nicvf_disable_msix(nic); + pci_free_irq_vectors(pdev); + nic->num_vec = 0; } /* Initialize MSIX vectors and register MISC interrupt. 
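Annotation: the nic_main.c and nicvf_main.c hunks in this stretch all follow one conversion: the hand-rolled msix_entry array, pci_enable_msix() call, and msix_enabled flag are dropped in favor of the managed pci_alloc_irq_vectors()/pci_irq_vector()/pci_free_irq_vectors() API. A compressed sketch of the resulting shape, with hypothetical demo_* names standing in for the driver's handlers and private data:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_intr(int irq, void *data)
{
        return IRQ_HANDLED;     /* placeholder handler for the sketch */
}

static int demo_setup_irqs(struct pci_dev *pdev, void *priv, int nvec)
{
        int i, ret;

        /* one call replaces the msix_entry array + pci_enable_msix() dance */
        ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (ret < 0)
                return ret;

        for (i = 0; i < nvec; i++) {
                /* pci_irq_vector() maps a vector index to a Linux IRQ number */
                ret = request_irq(pci_irq_vector(pdev, i), demo_intr, 0,
                                  "demo", priv);
                if (ret)
                        goto err;
        }
        return 0;

err:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), priv);
        pci_free_irq_vectors(pdev);
        return ret;
}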
@@ -1032,16 +1127,22 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic) int irq = NICVF_INTR_ID_MISC; /* Return if mailbox interrupt is already registered */ - if (nic->msix_enabled) + if (nic->pdev->msix_enabled) return 0; /* Enable MSI-X */ - if (!nicvf_enable_msix(nic)) + nic->num_vec = pci_msix_vec_count(nic->pdev); + ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec, + PCI_IRQ_MSIX); + if (ret < 0) { + netdev_err(nic->netdev, + "Req for #%d msix vectors failed\n", nic->num_vec); return 1; + } sprintf(nic->irq_name[irq], "%s Mbox", "NICVF"); /* Register Misc interrupt */ - ret = request_irq(nic->msix_entries[irq].vector, + ret = request_irq(pci_irq_vector(nic->pdev, irq), nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic); if (ret) @@ -1076,6 +1177,13 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } + /* In XDP case, initial HW tx queues are used for XDP, + * but stack's queue mapping starts at '0', so skip the + * Tx queues attached to Rx queues for XDP. + */ + if (nic->xdp_prog) + qid += nic->xdp_tx_queues; + snic = nic; /* Get secondary Qset's SQ structure */ if (qid >= MAX_SND_QUEUES_PER_QS) { @@ -1164,7 +1272,7 @@ int nicvf_stop(struct net_device *netdev) /* Wait for pending IRQ handlers to finish */ for (irq = 0; irq < nic->num_vec; irq++) - synchronize_irq(nic->msix_entries[irq].vector); + synchronize_irq(pci_irq_vector(nic->pdev, irq)); tasklet_kill(&nic->rbdr_task); tasklet_kill(&nic->qs_err_task); @@ -1365,7 +1473,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - if (nic->msix_enabled) { + if (nic->pdev->msix_enabled) { if (nicvf_hw_set_mac_addr(nic, netdev)) return -EBUSY; } else { @@ -1553,6 +1661,114 @@ static int nicvf_set_features(struct net_device *netdev, return 0; } +static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached) +{ + u8 cq_count, txq_count; + + /* Set XDP Tx queue count same as Rx queue count */ + if (!bpf_attached) + nic->xdp_tx_queues = 0; + else + nic->xdp_tx_queues = nic->rx_queues; + + /* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets + * needs to be allocated, check how many. + */ + txq_count = nic->xdp_tx_queues + nic->tx_queues; + cq_count = max(nic->rx_queues, txq_count); + if (cq_count > MAX_CMP_QUEUES_PER_QS) { + nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS); + nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1; + } else { + nic->sqs_count = 0; + } + + /* Set primary Qset's resources */ + nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS); + nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS); + nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt); + + /* Update stack */ + nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues); +} + +static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) +{ + struct net_device *dev = nic->netdev; + bool if_up = netif_running(nic->netdev); + struct bpf_prog *old_prog; + bool bpf_attached = false; + + /* For now just support only the usual MTU sized frames */ + if (prog && (dev->mtu > 1500)) { + netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + dev->mtu); + return -EOPNOTSUPP; + } + + /* ALL SQs attached to CQs i.e same as RQs, are treated as + * XDP Tx queues and more Tx queues are allocated for + * network stack to send pkts out. 
+ * + * No of Tx queues are either same as Rx queues or whatever + * is left in max no of queues possible. + */ + if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) { + netdev_warn(dev, + "Failed to attach BPF prog, RXQs + TXQs > Max %d\n", + nic->max_queues); + return -ENOMEM; + } + + if (if_up) + nicvf_stop(nic->netdev); + + old_prog = xchg(&nic->xdp_prog, prog); + /* Detach old prog, if any */ + if (old_prog) + bpf_prog_put(old_prog); + + if (nic->xdp_prog) { + /* Attach BPF program */ + nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); + if (!IS_ERR(nic->xdp_prog)) + bpf_attached = true; + } + + /* Calculate Tx queues needed for XDP and network stack */ + nicvf_set_xdp_queues(nic, bpf_attached); + + if (if_up) { + /* Reinitialize interface, clean slate */ + nicvf_open(nic->netdev); + netif_trans_update(nic->netdev); + } + + return 0; +} + +static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +{ + struct nicvf *nic = netdev_priv(netdev); + + /* To avoid checks while retrieving buffer address from CQE_RX, + * do not support XDP for T88 pass1.x silicons which are anyway + * not in use widely. + */ + if (pass1_silicon(nic->pdev)) + return -EOPNOTSUPP; + + switch (xdp->command) { + case XDP_SETUP_PROG: + return nicvf_xdp_setup(nic, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!nic->xdp_prog; + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops nicvf_netdev_ops = { .ndo_open = nicvf_open, .ndo_stop = nicvf_stop, @@ -1563,6 +1779,7 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_tx_timeout = nicvf_tx_timeout, .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, + .ndo_xdp = nicvf_xdp, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1665,8 +1882,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_unregister_interrupts; - netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_GRO | + netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_RX); netdev->hw_features |= NETIF_F_RXHASH; @@ -1674,7 +1892,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features |= netdev->hw_features; netdev->hw_features |= NETIF_F_LOOPBACK; - netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index f13289f0d2386d09e348b13c310346d42e20b74d..2b181762ad4908476c4fb78acd7ed6ea650762c5 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -19,16 +19,8 @@ #include "q_struct.h" #include "nicvf_queues.h" -#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? 
PAGE_ALLOC_COSTLY_ORDER : 0) - -static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr) -{ - /* Translation is installed only when IOMMU is present */ - if (nic->iommu_domain) - return iommu_iova_to_phys(nic->iommu_domain, dma_addr); - return dma_addr; -} - +static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, + int size, u64 data); static void nicvf_get_page(struct nicvf *nic) { if (!nic->rb_pageref || !nic->rb_page) @@ -90,46 +82,152 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) dmem->base = NULL; } -/* Allocate buffer for packet reception - * HW returns memory address where packet is DMA'ed but not a pointer - * into RBDR ring, so save buffer address at the start of fragment and - * align the start address to a cache aligned address +#define XDP_PAGE_REFCNT_REFILL 256 + +/* Allocate a new page or recycle one if possible + * + * We cannot optimize dma mapping here, since + * 1. It's only one RBDR ring for 8 Rx queues. + * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed + * and not idx into RBDR ring, so can't refer to saved info. + * 3. There are multiple receive buffers per page */ -static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, - u32 buf_len, u64 **rbuf) +static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, + struct rbdr *rbdr, gfp_t gfp) +{ + int ref_count; + struct page *page = NULL; + struct pgcache *pgcache, *next; + + /* Check if page is already allocated */ + pgcache = &rbdr->pgcache[rbdr->pgidx]; + page = pgcache->page; + /* Check if page can be recycled */ + if (page) { + ref_count = page_ref_count(page); + /* Check if this page has been used once i.e 'put_page' + * called after packet transmission i.e internal ref_count + * and page's ref_count are equal i.e page can be recycled. + */ + if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) + pgcache->ref_count--; + else + page = NULL; + + /* In non-XDP mode, page's ref_count needs to be '1' for it + * to be recycled. + */ + if (!rbdr->is_xdp && (ref_count != 1)) + page = NULL; + } + + if (!page) { + page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0); + if (!page) + return NULL; + + this_cpu_inc(nic->pnicvf->drv_stats->page_alloc); + + /* Check for space */ + if (rbdr->pgalloc >= rbdr->pgcnt) { + /* Page can still be used */ + nic->rb_page = page; + return NULL; + } + + /* Save the page in page cache */ + pgcache->page = page; + pgcache->dma_addr = 0; + pgcache->ref_count = 0; + rbdr->pgalloc++; + } + + /* Take additional page references for recycling */ + if (rbdr->is_xdp) { + /* Since there is single RBDR (i.e single core doing + * page recycling) per 8 Rx queues, in XDP mode adjusting + * page references atomically is the biggest bottleneck, so + * take bunch of references at a time. + * + * So here, below reference counts defer by '1'. + */ + if (!pgcache->ref_count) { + pgcache->ref_count = XDP_PAGE_REFCNT_REFILL; + page_ref_add(page, XDP_PAGE_REFCNT_REFILL); + } + } else { + /* In non-XDP case, single 64K page is divided across multiple + * receive buffers, so cost of recycling is less anyway. + * So we can do with just one extra reference. 
+ */ + page_ref_add(page, 1); + } + + rbdr->pgidx++; + rbdr->pgidx &= (rbdr->pgcnt - 1); + + /* Prefetch refcount of next page in page cache */ + next = &rbdr->pgcache[rbdr->pgidx]; + page = next->page; + if (page) + prefetch(&page->_refcount); + + return pgcache; +} + +/* Allocate buffer for packet reception */ +static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, + gfp_t gfp, u32 buf_len, u64 *rbuf) { - int order = NICVF_PAGE_ORDER; + struct pgcache *pgcache = NULL; - /* Check if request can be accomodated in previous allocated page */ - if (nic->rb_page && - ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) { + /* Check if request can be accomodated in previous allocated page. + * But in XDP mode only one buffer per page is permitted. + */ + if (!rbdr->is_xdp && nic->rb_page && + ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) { nic->rb_pageref++; goto ret; } nicvf_get_page(nic); + nic->rb_page = NULL; - /* Allocate a new page */ - nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, - order); - if (!nic->rb_page) { + /* Get new page, either recycled or new one */ + pgcache = nicvf_alloc_page(nic, rbdr, gfp); + if (!pgcache && !nic->rb_page) { this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures); return -ENOMEM; } + nic->rb_page_offset = 0; + + /* Reserve space for header modifications by BPF program */ + if (rbdr->is_xdp) + buf_len += XDP_PACKET_HEADROOM; + + /* Check if it's recycled */ + if (pgcache) + nic->rb_page = pgcache->page; ret: - /* HW will ensure data coherency, CPU sync not required */ - *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page, + if (rbdr->is_xdp && pgcache && pgcache->dma_addr) { + *rbuf = pgcache->dma_addr; + } else { + /* HW will ensure data coherency, CPU sync not required */ + *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page, nic->rb_page_offset, buf_len, DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC)); - if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) { - if (!nic->rb_page_offset) - __free_pages(nic->rb_page, order); - nic->rb_page = NULL; - return -ENOMEM; + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) { + if (!nic->rb_page_offset) + __free_pages(nic->rb_page, 0); + nic->rb_page = NULL; + return -ENOMEM; + } + if (pgcache) + pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM; + nic->rb_page_offset += buf_len; } - nic->rb_page_offset += buf_len; return 0; } @@ -159,7 +257,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len, int buf_size) { int idx; - u64 *rbuf; + u64 rbuf; struct rbdr_entry_t *desc; int err; @@ -177,10 +275,34 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, rbdr->head = 0; rbdr->tail = 0; + /* Initialize page recycling stuff. + * + * Can't use single buffer per page especially with 64K pages. + * On embedded platforms i.e 81xx/83xx available memory itself + * is low and minimum ring size of RBDR is 8K, that takes away + * lots of memory. + * + * But for XDP it has to be a single buffer per page. 
+ */ + if (!nic->pnicvf->xdp_prog) { + rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size); + rbdr->is_xdp = false; + } else { + rbdr->pgcnt = ring_len; + rbdr->is_xdp = true; + } + rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt); + rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) * + rbdr->pgcnt, GFP_KERNEL); + if (!rbdr->pgcache) + return -ENOMEM; + rbdr->pgidx = 0; + rbdr->pgalloc = 0; + nic->rb_page = NULL; for (idx = 0; idx < ring_len; idx++) { - err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, - &rbuf); + err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL, + RCV_FRAG_LEN, &rbuf); if (err) { /* To free already allocated and mapped ones */ rbdr->tail = idx - 1; @@ -188,7 +310,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, } desc = GET_RBDR_DESC(rbdr, idx); - desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN; + desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1); } nicvf_get_page(nic); @@ -201,6 +323,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) { int head, tail; u64 buf_addr, phys_addr; + struct pgcache *pgcache; struct rbdr_entry_t *desc; if (!rbdr) @@ -216,7 +339,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) /* Release page references */ while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); - buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN; + buf_addr = desc->buf_addr; phys_addr = nicvf_iova_to_phys(nic, buf_addr); dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); @@ -227,13 +350,31 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) } /* Release buffer of tail desc */ desc = GET_RBDR_DESC(rbdr, tail); - buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN; + buf_addr = desc->buf_addr; phys_addr = nicvf_iova_to_phys(nic, buf_addr); dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (phys_addr) put_page(virt_to_page(phys_to_virt(phys_addr))); + /* Sync page cache info */ + smp_rmb(); + + /* Release additional page references held for recycling */ + head = 0; + while (head < rbdr->pgcnt) { + pgcache = &rbdr->pgcache[head]; + if (pgcache->page && page_ref_count(pgcache->page) != 0) { + if (!rbdr->is_xdp) { + put_page(pgcache->page); + continue; + } + page_ref_sub(pgcache->page, pgcache->ref_count - 1); + put_page(pgcache->page); + } + head++; + } + /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); } @@ -248,7 +389,7 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) int refill_rb_cnt; struct rbdr *rbdr; struct rbdr_entry_t *desc; - u64 *rbuf; + u64 rbuf; int new_rb = 0; refill: @@ -269,17 +410,20 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) else refill_rb_cnt = qs->rbdr_len - qcount - 1; + /* Sync page cache info */ + smp_rmb(); + /* Start filling descs from tail */ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; while (refill_rb_cnt) { tail++; tail &= (rbdr->dmem.q_len - 1); - if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) + if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf)) break; desc = GET_RBDR_DESC(rbdr, tail); - desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN; + desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1); refill_rb_cnt--; new_rb++; } @@ -362,7 +506,7 @@ static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) /* Initialize transmit queue */ static int nicvf_init_snd_queue(struct nicvf *nic, - struct snd_queue *sq, int q_len) + struct 
snd_queue *sq, int q_len, int qidx) { int err; @@ -375,17 +519,38 @@ static int nicvf_init_snd_queue(struct nicvf *nic, sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); if (!sq->skbuff) return -ENOMEM; + sq->head = 0; sq->tail = 0; - atomic_set(&sq->free_cnt, q_len - 1); sq->thresh = SND_QUEUE_THRESH; - /* Preallocate memory for TSO segment's header */ - sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, - q_len * TSO_HEADER_SIZE, - &sq->tso_hdrs_phys, GFP_KERNEL); - if (!sq->tso_hdrs) - return -ENOMEM; + /* Check if this SQ is a XDP TX queue */ + if (nic->sqs_mode) + qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS); + if (qidx < nic->pnicvf->xdp_tx_queues) { + /* Alloc memory to save page pointers for XDP_TX */ + sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL); + if (!sq->xdp_page) + return -ENOMEM; + sq->xdp_desc_cnt = 0; + sq->xdp_free_cnt = q_len - 1; + sq->is_xdp = true; + } else { + sq->xdp_page = NULL; + sq->xdp_desc_cnt = 0; + sq->xdp_free_cnt = 0; + sq->is_xdp = false; + + atomic_set(&sq->free_cnt, q_len - 1); + + /* Preallocate memory for TSO segment's header */ + sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, + q_len * TSO_HEADER_SIZE, + &sq->tso_hdrs_phys, + GFP_KERNEL); + if (!sq->tso_hdrs) + return -ENOMEM; + } return 0; } @@ -411,6 +576,7 @@ void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq, static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { struct sk_buff *skb; + struct page *page; struct sq_hdr_subdesc *hdr; struct sq_hdr_subdesc *tso_sqe; @@ -428,8 +594,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) smp_rmb(); while (sq->head != sq->tail) { skb = (struct sk_buff *)sq->skbuff[sq->head]; - if (!skb) + if (!skb || !sq->xdp_page) + goto next; + + page = (struct page *)sq->xdp_page[sq->head]; + if (!page) goto next; + else + put_page(page); + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); /* Check for dummy descriptor used for HW TSO offload on 88xx */ if (hdr->dont_send) { @@ -442,12 +615,14 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) nicvf_unmap_sndq_buffers(nic, sq, sq->head, hdr->subdesc_cnt); } - dev_kfree_skb_any(skb); + if (skb) + dev_kfree_skb_any(skb); next: sq->head++; sq->head &= (sq->dmem.q_len - 1); } kfree(sq->skbuff); + kfree(sq->xdp_page); nicvf_free_q_desc_mem(nic, &sq->dmem); } @@ -838,7 +1013,7 @@ static int nicvf_alloc_resources(struct nicvf *nic) /* Alloc send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) { - if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) + if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) goto alloc_fail; } @@ -876,6 +1051,7 @@ int nicvf_set_qset_resources(struct nicvf *nic) nic->rx_queues = qs->rq_cnt; nic->tx_queues = qs->sq_cnt; + nic->xdp_tx_queues = 0; return 0; } @@ -940,7 +1116,10 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) int qentry; qentry = sq->tail; - atomic_sub(desc_cnt, &sq->free_cnt); + if (!sq->is_xdp) + atomic_sub(desc_cnt, &sq->free_cnt); + else + sq->xdp_free_cnt -= desc_cnt; sq->tail += desc_cnt; sq->tail &= (sq->dmem.q_len - 1); @@ -958,7 +1137,10 @@ static inline void nicvf_rollback_sq_desc(struct snd_queue *sq, /* Free descriptor back to SQ for future use */ void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) { - atomic_add(desc_cnt, &sq->free_cnt); + if (!sq->is_xdp) + atomic_add(desc_cnt, &sq->free_cnt); + else + sq->xdp_free_cnt += desc_cnt; sq->head += desc_cnt; sq->head &= (sq->dmem.q_len - 1); } @@ 
-1016,6 +1198,58 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, } } +/* XDP Transmit APIs */ +void nicvf_xdp_sq_doorbell(struct nicvf *nic, + struct snd_queue *sq, int sq_num) +{ + if (!sq->xdp_desc_cnt) + return; + + /* make sure all memory stores are done before ringing doorbell */ + wmb(); + + /* Inform HW to xmit all queued XDP descriptors */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, + sq_num, sq->xdp_desc_cnt); + sq->xdp_desc_cnt = 0; +} + +static inline void +nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, + int subdesc_cnt, u64 data, int len) +{ + struct sq_hdr_subdesc *hdr; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); + memset(hdr, 0, SND_QUEUE_DESC_SIZE); + hdr->subdesc_type = SQ_DESC_TYPE_HEADER; + hdr->subdesc_cnt = subdesc_cnt; + hdr->tot_len = len; + hdr->post_cqe = 1; + sq->xdp_page[qentry] = (u64)virt_to_page((void *)data); +} + +int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, + u64 bufaddr, u64 dma_addr, u16 len) +{ + int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; + int qentry; + + if (subdesc_cnt > sq->xdp_free_cnt) + return 0; + + qentry = nicvf_get_sq_desc(sq, subdesc_cnt); + + nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len); + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr); + + sq->xdp_desc_cnt += subdesc_cnt; + + return 1; +} + /* Calculate no of SQ subdescriptors needed to transmit all * segments of this TSO packet. * Taken from 'Tilera network driver' with a minor modification. @@ -1094,7 +1328,13 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, { int proto; struct sq_hdr_subdesc *hdr; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + ip.hdr = skb_network_header(skb); hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); memset(hdr, 0, SND_QUEUE_DESC_SIZE); hdr->subdesc_type = SQ_DESC_TYPE_HEADER; @@ -1119,7 +1359,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); - proto = ip_hdr(skb)->protocol; + proto = (ip.v4->version == 4) ? ip.v4->protocol : + ip.v6->nexthdr; + switch (proto) { case IPPROTO_TCP: hdr->csum_l4 = SEND_L4_CSUM_TCP; @@ -1366,8 +1608,33 @@ static inline unsigned frag_num(unsigned i) #endif } +static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, + u64 buf_addr, bool xdp) +{ + struct page *page = NULL; + int len = RCV_FRAG_LEN; + + if (xdp) { + page = virt_to_page(phys_to_virt(buf_addr)); + /* Check if it's a recycled page; if not, + * unmap the DMA mapping. + * + * A recycled page holds an extra reference.
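The XDP transmit path added above batches doorbell writes: descriptors are queued in software (sq->xdp_desc_cnt) and one register write flushes the whole batch from nicvf_xdp_sq_doorbell(). A toy model of that pattern, with made-up names and a counter standing in for the MMIO write:

#include <stdio.h>

/* Toy model of deferred doorbells: queue descriptors cheaply, ring
 * the (expensive) doorbell once per batch. */
struct toy_sq { int pending; int doorbell_writes; };

static void queue_pkt(struct toy_sq *sq)
{
	sq->pending += 2;	/* header subdesc + gather subdesc */
}

static void ring_doorbell(struct toy_sq *sq)
{
	if (!sq->pending)
		return;
	/* in the driver: wmb(), then one register write of the count */
	sq->doorbell_writes++;
	sq->pending = 0;
}

int main(void)
{
	struct toy_sq sq = { 0, 0 };

	for (int i = 0; i < 64; i++) {
		queue_pkt(&sq);
		if ((i & 15) == 15)	/* flush every 16 packets */
			ring_doorbell(&sq);
	}
	printf("64 pkts, %d doorbell MMIOs\n", sq.doorbell_writes);
	return 0;
}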
+ */ + if (page_ref_count(page) != 1) + return; + + len += XDP_PACKET_HEADROOM; + /* Receive buffers in XDP mode are mapped from page start */ + dma_addr &= PAGE_MASK; + } + dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); +} + /* Returns SKB for a received packet */ -struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) +struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, + struct cqe_rx_t *cqe_rx, bool xdp) { int frag; int payload_len = 0; @@ -1402,10 +1669,9 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) if (!frag) { /* First fragment */ - dma_unmap_page_attrs(&nic->pdev->dev, - *rb_ptrs - cqe_rx->align_pad, - RCV_FRAG_LEN, DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); + nicvf_unmap_rcv_buffer(nic, + *rb_ptrs - cqe_rx->align_pad, + phys_addr, xdp); skb = nicvf_rb_ptr_to_skb(nic, phys_addr - cqe_rx->align_pad, payload_len); @@ -1415,9 +1681,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) skb_put(skb, payload_len); } else { /* Add fragments */ - dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs, - RCV_FRAG_LEN, DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); + nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp); page = virt_to_page(phys_to_virt(phys_addr)); offset = phys_to_virt(phys_addr) - page_address(page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, @@ -1547,9 +1811,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) /* Check for errors in the receive cmp.queue entry */ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { - if (!cqe_rx->err_level && !cqe_rx->err_opcode) - return 0; - if (netif_msg_rx_err(nic)) netdev_err(nic->netdev, "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", @@ -1638,8 +1899,6 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) { switch (cqe_tx->send_status) { - case CQ_TX_ERROP_GOOD: - return 0; case CQ_TX_ERROP_DESC_FAULT: this_cpu_inc(nic->drv_stats->tx_desc_fault); break; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 10cb4b84625b14a0446996776689ae6733f4ccee..57858522c33c88cb1e2b1e04a8477a27efafaa0f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -10,6 +10,7 @@ #define NICVF_QUEUES_H #include +#include #include "q_struct.h" #define MAX_QUEUE_SET 128 @@ -213,6 +214,12 @@ struct q_desc_mem { void *unalign_base; }; +struct pgcache { + struct page *page; + int ref_count; + u64 dma_addr; +}; + struct rbdr { bool enable; u32 dma_size; @@ -222,6 +229,13 @@ struct rbdr { u32 head; u32 tail; struct q_desc_mem dmem; + bool is_xdp; + + /* For page recycling */ + int pgidx; + int pgcnt; + int pgalloc; + struct pgcache *pgcache; } ____cacheline_aligned_in_smp; struct rcv_queue { @@ -258,6 +272,10 @@ struct snd_queue { u32 tail; u64 *skbuff; void *desc; + u64 *xdp_page; + u16 xdp_desc_cnt; + u16 xdp_free_cnt; + bool is_xdp; #define TSO_HEADER_SIZE 128 /* For TSO segment's header */ @@ -301,6 +319,14 @@ struct queue_set { #define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) +static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr) +{ + /* Translation is installed only when IOMMU is present */ + if (nic->iommu_domain) + return iommu_iova_to_phys(nic->iommu_domain, dma_addr); + return dma_addr; +} + void 
nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq, int hdr_sqe, u8 subdesc_cnt); void nicvf_config_vlan_stripping(struct nicvf *nic, @@ -318,8 +344,12 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, int qidx); int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, struct sk_buff *skb, u8 sq_num); +int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, + u64 bufaddr, u64 dma_addr, u16 len); +void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num); -struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx); +struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, + struct cqe_rx_t *cqe_rx, bool xdp); void nicvf_rbdr_task(unsigned long data); void nicvf_rbdr_work(struct work_struct *work); diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h index f36347237a5425c7baf48540ad3fa2fd555b882d..e47205aa87eabf7605df93fc465ffe1a1a24083b 100644 --- a/drivers/net/ethernet/cavium/thunder/q_struct.h +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h @@ -359,15 +359,7 @@ union cq_desc_t { }; struct rbdr_entry_t { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 rsvd0:15; - u64 buf_addr:42; - u64 cache_align:7; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 cache_align:7; - u64 buf_addr:42; - u64 rsvd0:15; -#endif + u64 buf_addr; }; /* TCP reassembly context */ diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h index 6916c62f2487dd9a9bca381685cea6b4c771ef1f..94b9482f14a55da8dc015c6b2e25e23d1309e00f 100644 --- a/drivers/net/ethernet/chelsio/cxgb/common.h +++ b/drivers/net/ethernet/chelsio/cxgb/common.h @@ -223,7 +223,6 @@ struct port_info { struct cmac *mac; struct cphy *phy; struct link_config link_config; - struct net_device_stats netstats; }; struct sge; diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index d8aff7a4b3c7cc087139cfefc022b4e8cf75fda5..8623be13bf86f2cc82f579daadc8121dcfb8432d 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -296,7 +296,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; - struct net_device_stats *ns = &p->netstats; + struct net_device_stats *ns = &dev->stats; const struct cmac_statistics *pstats; /* Do a full update of the MAC stats */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h index 8b395b537330507a21df8a0705f292e077b77e36..087ff0ffb59777e625395b6caf9753116316289c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h @@ -72,7 +72,6 @@ struct port_info { struct cphy phy; struct cmac mac; struct link_config link_config; - struct net_device_stats netstats; int activity; __be32 iscsi_ipv4addr; struct iscsi_config iscsic; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index d76491676b5175e52b383e8c17760859b4f69040..2ff6bd139c96ccf077acab356a4464b13fa61b94 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1489,7 +1489,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - struct net_device_stats *ns = 
&pi->netstats; + struct net_device_stats *ns = &dev->stats; const struct mac_stats *pstats; spin_lock(&adapter->stats_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6faaca1b48b3d95fb76d0c9a00d41d6945038a2b..c12c4a3b82b567df405a24dc72331c008973d5ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2338,6 +2338,10 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, f->locked = 1; f->fs.rpttid = 1; + /* Save the actual tid. We need this to get the corresponding + * filter entry structure in filter_rpl. + */ + f->tid = stid + adap->tids.ftid_base; ret = set_filter_wr(adap, stid); if (ret) { clear_filter(adap, f); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 87000cd397372ab999d25ea2c935f0385fa941ee..0de8eb72325c53ae50cd3ec535915486a9fb5064 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -6369,7 +6369,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, unsigned int stat_len = cache_line_size > 64 ? 128 : 64; unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; unsigned int fl_align_log = fls(fl_align) - 1; - unsigned int ingpad; t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, HOSTPAGESIZEPF0_V(sge_hps) | @@ -6389,6 +6388,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, INGPADBOUNDARY_SHIFT_X) | EGRSTATUSPAGESIZE_V(stat_len != 64)); } else { + unsigned int pack_align; + unsigned int ingpad, ingpack; + unsigned int pcie_cap; + /* T5 introduced the separation of the Free List Padding and * Packing Boundaries. Thus, we can select a smaller Padding * Boundary to avoid uselessly chewing up PCIe Link and Memory @@ -6401,27 +6404,62 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, * Size (the minimum unit of transfer to/from Memory). If we * have a Padding Boundary which is smaller than the Memory * Line Size, that'll involve a Read-Modify-Write cycle on the - * Memory Controller which is never good. For T5 the smallest - * Padding Boundary which we can select is 32 bytes which is - * larger than any known Memory Controller Line Size so we'll - * use that. - * - * T5 has a different interpretation of the "0" value for the - * Packing Boundary. This corresponds to 16 bytes instead of - * the expected 32 bytes. We never have a Packing Boundary - * less than 32 bytes so we can't use that special value but - * on the other hand, if we wanted 32 bytes, the best we can - * really do is 64 bytes. - */ - if (fl_align <= 32) { + * Memory Controller which is never good. + */ + + /* We want the Packing Boundary to be based on the Cache Line + * Size in order to help avoid False Sharing performance + * issues between CPUs, etc. We also want the Packing + * Boundary to incorporate the PCI-E Maximum Payload Size. We + * get best performance when the Packing Boundary is a + * multiple of the Maximum Payload Size. + */ + pack_align = fl_align; + pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP); + if (pcie_cap) { + unsigned int mps, mps_log; + u16 devctl; + + /* The PCIe Device Control Maximum Payload Size field + * [bits 7:5] encodes sizes as powers of 2 starting at + * 128 bytes. 
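The payload-size decode described in the comment above is worth seeing in isolation: the 3-bit field selects a power of two starting at 128 bytes, which is exactly the mps_log = field + 7 arithmetic in the hunk that follows. A self-contained sketch of the same bit math (PCI_EXP_DEVCTL_PAYLOAD matches the kernel's mask; the sample devctl value is made up):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0	/* MPS field, bits 7:5 */

/* Decode Maximum Payload Size from a PCIe Device Control value:
 * field 0 -> 128 bytes, 1 -> 256, 2 -> 512, ... */
static unsigned int devctl_to_mps(uint16_t devctl)
{
	unsigned int mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;

	return 1u << mps_log;
}

int main(void)
{
	uint16_t devctl = 1 << 5;	/* payload field = 1 */

	printf("MPS = %u bytes\n", devctl_to_mps(devctl));	/* 256 */
	return 0;
}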
+ */ + pci_read_config_word(adap->pdev, + pcie_cap + PCI_EXP_DEVCTL, + &devctl); + mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; + mps = 1 << mps_log; + if (mps > pack_align) + pack_align = mps; + } + + /* N.B. T5/T6 have a crazy special interpretation of the "0" + * value for the Packing Boundary. This corresponds to 16 + * bytes instead of the expected 32 bytes. So if we want 32 + * bytes, the best we can really do is 64 bytes ... + */ + if (pack_align <= 16) { + ingpack = INGPACKBOUNDARY_16B_X; + fl_align = 16; + } else if (pack_align == 32) { + ingpack = INGPACKBOUNDARY_64B_X; fl_align = 64; - fl_align_log = 6; + } else { + unsigned int pack_align_log = fls(pack_align) - 1; + + ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X; + fl_align = pack_align; } + /* Use the smallest Ingress Padding which isn't smaller than + * the Memory Controller Read/Write Size. We'll take that as + * being 8 bytes since we don't know of any system with a + * wider Memory Controller Bus Width. + */ if (is_t5(adap->params.chip)) - ingpad = INGPCIEBOUNDARY_32B_X; + ingpad = INGPADBOUNDARY_32B_X; else - ingpad = T6_INGPADBOUNDARY_32B_X; + ingpad = T6_INGPADBOUNDARY_8B_X; t4_set_reg_field(adap, SGE_CONTROL_A, INGPADBOUNDARY_V(INGPADBOUNDARY_M) | @@ -6430,8 +6468,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, EGRSTATUSPAGESIZE_V(stat_len != 64)); t4_set_reg_field(adap, SGE_CONTROL2_A, INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), - INGPACKBOUNDARY_V(fl_align_log - - INGPACKBOUNDARY_SHIFT_X)); + INGPACKBOUNDARY_V(ingpack)); } /* * Adjust various SGE Free List Host Buffer Sizes. diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index 36cf3073ca37d59c2cf3a129603637fc54619c13..f6558cbfc54ec3f7369699d137003cafb003f4c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -54,11 +54,15 @@ #define INGPADBOUNDARY_SHIFT_X 5 #define T6_INGPADBOUNDARY_SHIFT_X 3 +#define T6_INGPADBOUNDARY_8B_X 0 #define T6_INGPADBOUNDARY_32B_X 2 +#define INGPADBOUNDARY_32B_X 0 + /* CONTROL2 register */ #define INGPACKBOUNDARY_SHIFT_X 5 #define INGPACKBOUNDARY_16B_X 0 +#define INGPACKBOUNDARY_64B_X 1 /* GTS register */ #define SGE_TIMERREGS 6 diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 3647b28e8de019839e38234ff0a1b495ce4b05fa..47384f7323acb974eba4beb2f7de29ebd34ff953 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1896,7 +1896,7 @@ static int cs89x0_platform_remove(struct platform_device *pdev) return 0; } -static const struct __maybe_unused of_device_id cs89x0_match[] = { +static const struct of_device_id __maybe_unused cs89x0_match[] = { { .compatible = "cirrus,cs8900", }, { .compatible = "cirrus,cs8920", }, { }, diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 127ce9707378c151c3037c92dcfc6eb8da16dcb3..91b8f6f5a7653cd74b40f77c1b0e71180c2bbcfd 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -312,8 +312,6 @@ struct de_private { u32 msg_enable; - struct net_device_stats net_stats; - struct pci_dev *pdev; u16 setup_frame[DE_SETUP_FRAME_WORDS]; @@ -388,14 +386,14 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail, netif_warn(de, rx_err, de->dev, "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", status); - de->net_stats.rx_length_errors++; + 
de->dev->stats.rx_length_errors++; } } else if (status & RxError) { /* There was a fatal error. */ - de->net_stats.rx_errors++; /* end of a packet.*/ - if (status & 0x0890) de->net_stats.rx_length_errors++; - if (status & RxErrCRC) de->net_stats.rx_crc_errors++; - if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++; + de->dev->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x0890) de->dev->stats.rx_length_errors++; + if (status & RxErrCRC) de->dev->stats.rx_crc_errors++; + if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++; } } @@ -423,7 +421,7 @@ static void de_rx (struct de_private *de) mapping = de->rx_skb[rx_tail].mapping; if (unlikely(drop)) { - de->net_stats.rx_dropped++; + de->dev->stats.rx_dropped++; goto rx_next; } @@ -441,7 +439,7 @@ static void de_rx (struct de_private *de) buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz; copy_skb = netdev_alloc_skb(de->dev, buflen); if (unlikely(!copy_skb)) { - de->net_stats.rx_dropped++; + de->dev->stats.rx_dropped++; drop = 1; rx_work = 100; goto rx_next; @@ -470,8 +468,8 @@ static void de_rx (struct de_private *de) skb->protocol = eth_type_trans (skb, de->dev); - de->net_stats.rx_packets++; - de->net_stats.rx_bytes += skb->len; + de->dev->stats.rx_packets++; + de->dev->stats.rx_bytes += skb->len; rc = netif_rx (skb); if (rc == NET_RX_DROP) drop = 1; @@ -572,18 +570,18 @@ static void de_tx (struct de_private *de) netif_dbg(de, tx_err, de->dev, "tx err, status 0x%x\n", status); - de->net_stats.tx_errors++; + de->dev->stats.tx_errors++; if (status & TxOWC) - de->net_stats.tx_window_errors++; + de->dev->stats.tx_window_errors++; if (status & TxMaxCol) - de->net_stats.tx_aborted_errors++; + de->dev->stats.tx_aborted_errors++; if (status & TxLinkFail) - de->net_stats.tx_carrier_errors++; + de->dev->stats.tx_carrier_errors++; if (status & TxFIFOUnder) - de->net_stats.tx_fifo_errors++; + de->dev->stats.tx_fifo_errors++; } else { - de->net_stats.tx_packets++; - de->net_stats.tx_bytes += skb->len; + de->dev->stats.tx_packets++; + de->dev->stats.tx_bytes += skb->len; netif_dbg(de, tx_done, de->dev, "tx done, slot %d\n", tx_tail); } @@ -814,9 +812,9 @@ static void de_set_rx_mode (struct net_device *dev) static inline void de_rx_missed(struct de_private *de, u32 rx_missed) { if (unlikely(rx_missed & RxMissedOver)) - de->net_stats.rx_missed_errors += RxMissedMask; + de->dev->stats.rx_missed_errors += RxMissedMask; else - de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask); + de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask); } static void __de_get_stats(struct de_private *de) @@ -836,7 +834,7 @@ static struct net_device_stats *de_get_stats(struct net_device *dev) __de_get_stats(de); spin_unlock_irq(&de->lock); - return &de->net_stats; + return &dev->stats; } static inline int de_is_running (struct de_private *de) @@ -1348,7 +1346,7 @@ static void de_clean_rings (struct de_private *de) struct sk_buff *skb = de->tx_skb[i].skb; if ((skb) && (skb != DE_DUMMY_SKB)) { if (skb != DE_SETUP_SKB) { - de->net_stats.tx_dropped++; + de->dev->stats.tx_dropped++; pci_unmap_single(de->pdev, de->tx_skb[i].mapping, skb->len, PCI_DMA_TODEVICE); diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 1e350135f11d9a3b13b8e0300f063d927f65a365..778f974e2928bfdd32391f3ecb8b424843f72374 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -878,10 +878,10 @@ tx_error (struct net_device *dev, int tx_status) frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: 
Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); - np->stats.tx_errors++; + dev->stats.tx_errors++; /* Ttransmit Underrun */ if (tx_status & 0x10) { - np->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ dw16(ASICCtrl + 2, @@ -903,7 +903,7 @@ tx_error (struct net_device *dev, int tx_status) } /* Late Collision */ if (tx_status & 0x04) { - np->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; /* TxReset and clear FIFO */ dw16(ASICCtrl + 2, TxReset | FIFOReset); /* Wait reset done */ @@ -916,13 +916,8 @@ tx_error (struct net_device *dev, int tx_status) /* Let TxStartThresh stay default value */ } /* Maximum Collisions */ -#ifdef ETHER_STATS if (tx_status & 0x08) - np->stats.collisions16++; -#else - if (tx_status & 0x08) - np->stats.collisions++; -#endif + dev->stats.collisions++; /* Restart the Tx */ dw32(MACCtrl, dr16(MACCtrl) | TxEnable); } @@ -952,15 +947,15 @@ receive_packet (struct net_device *dev) break; /* Update rx error statistics, drop packet. */ if (frame_status & RFS_Errors) { - np->stats.rx_errors++; + dev->stats.rx_errors++; if (frame_status & (RxRuntFrame | RxLengthError)) - np->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (frame_status & RxFCSError) - np->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (frame_status & RxAlignmentError && np->speed != 1000) - np->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (frame_status & RxFIFOOverrun) - np->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } else { struct sk_buff *skb; @@ -1096,23 +1091,23 @@ get_stats (struct net_device *dev) /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ - np->stats.rx_packets += dr32(FramesRcvOk); - np->stats.tx_packets += dr32(FramesXmtOk); - np->stats.rx_bytes += dr32(OctetRcvOk); - np->stats.tx_bytes += dr32(OctetXmtOk); + dev->stats.rx_packets += dr32(FramesRcvOk); + dev->stats.tx_packets += dr32(FramesXmtOk); + dev->stats.rx_bytes += dr32(OctetRcvOk); + dev->stats.tx_bytes += dr32(OctetXmtOk); - np->stats.multicast = dr32(McstFramesRcvdOk); - np->stats.collisions += dr32(SingleColFrames) + dev->stats.multicast = dr32(McstFramesRcvdOk); + dev->stats.collisions += dr32(SingleColFrames) + dr32(MultiColFrames); /* detailed tx errors */ stat_reg = dr16(FramesAbortXSColls); - np->stats.tx_aborted_errors += stat_reg; - np->stats.tx_errors += stat_reg; + dev->stats.tx_aborted_errors += stat_reg; + dev->stats.tx_errors += stat_reg; stat_reg = dr16(CarrierSenseErrors); - np->stats.tx_carrier_errors += stat_reg; - np->stats.tx_errors += stat_reg; + dev->stats.tx_carrier_errors += stat_reg; + dev->stats.tx_errors += stat_reg; /* Clear all other statistic register. */ dr32(McstOctetXmtOk); @@ -1142,7 +1137,7 @@ get_stats (struct net_device *dev) dr16(TCPCheckSumErrors); dr16(UDPCheckSumErrors); dr16(IPCheckSumErrors); - return &np->stats; + return &dev->stats; } static int diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 5d8ae5320242635ced82ce1267277c63a9c91a0f..10e98ba33ebf520010a07e53861d769a35b45ec2 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -377,7 +377,6 @@ struct netdev_private { void __iomem *eeprom_addr; spinlock_t tx_lock; spinlock_t rx_lock; - struct net_device_stats stats; unsigned int rx_buf_sz; /* Based on MTU+slack. 
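The get_stats() conversion above also shows the read-to-clear pattern these counters rely on: the comment notes that "all statistics registers need to be acknowledged", i.e. each read clears the hardware register, so every value must be accumulated into a running total such as dev->stats. A toy model of that accumulation, with a plain variable standing in for the MMIO counter:

#include <stdio.h>

static unsigned int hw_reg = 37;	/* pretend read-to-clear counter */

static unsigned int read_to_clear(void)
{
	unsigned int v = hw_reg;

	hw_reg = 0;			/* reading acknowledges it */
	return v;
}

int main(void)
{
	unsigned long rx_packets = 0;

	rx_packets += read_to_clear();	/* first poll: +37 */
	hw_reg = 5;			/* more traffic arrives */
	rx_packets += read_to_clear();	/* second poll: +5 */
	printf("rx_packets=%lu\n", rx_packets);	/* 42 */
	return 0;
}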
*/ unsigned int speed; /* Operating speed */ unsigned int vlan; /* VLAN Id */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index d49528ad7821d202355ef9b1a18d58e56794b162..50566243e6fa2222595866f0789ad495721f93de 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -567,6 +567,12 @@ struct be_error_recovery { /* Ethtool priv_flags */ #define BE_DISABLE_TPE_RECOVERY 0x1 +struct be_vxlan_port { + struct list_head list; + __be16 port; /* VxLAN UDP dst port */ + int port_aliases; /* alias count */ +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -671,9 +677,9 @@ struct be_adapter { u32 sli_family; u8 hba_port_num; u16 pvid; - __be16 vxlan_port; - int vxlan_port_count; - int vxlan_port_aliases; + __be16 vxlan_port; /* offloaded vxlan port num */ + int vxlan_port_count; /* active vxlan port count */ + struct list_head vxlan_port_list; /* vxlan port list */ struct phy_info phy; u8 wol_cap; bool wol_en; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6be3b9aba8ed38875574f9e2fa018e80cc948cab..f3a09ab559004b80106e957674cb0d6acbf3ea65 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3857,6 +3857,44 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } +static int be_enable_vxlan_offloads(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct be_vxlan_port *vxlan_port; + __be16 port; + int status; + + vxlan_port = list_first_entry(&adapter->vxlan_port_list, + struct be_vxlan_port, list); + port = vxlan_port->port; + + status = be_cmd_manage_iface(adapter, adapter->if_handle, + OP_CONVERT_NORMAL_TO_TUNNEL); + if (status) { + dev_warn(dev, "Failed to convert normal interface to tunnel\n"); + return status; + } + adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; + + status = be_cmd_set_vxlan_port(adapter, port); + if (status) { + dev_warn(dev, "Failed to add VxLAN port\n"); + return status; + } + adapter->vxlan_port = port; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->features |= NETIF_F_GSO_UDP_TUNNEL; + + dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", + be16_to_cpu(port)); + return 0; +} + static void be_disable_vxlan_offloads(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -4903,63 +4941,59 @@ static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter, * those other tunnels are unexported on the fly through ndo_features_check(). * * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack - * adds more than one port, disable offloads and don't re-enable them again - * until after all the tunnels are removed. + * adds more than one port, disable offloads and re-enable them again when + * there's only one port left. We maintain a list of ports for this purpose. 
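The list-plus-alias-count bookkeeping this comment describes can be modelled compactly: a duplicate add only bumps an alias count, and a delete removes the entry only once the aliases are gone. A stand-alone sketch with a hand-rolled singly linked list instead of the kernel's list_head (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct port_entry { unsigned short port; int aliases; struct port_entry *next; };
static struct port_entry *ports;

static void add_port(unsigned short port)
{
	struct port_entry *p;

	for (p = ports; p; p = p->next)
		if (p->port == port) { p->aliases++; return; }
	p = calloc(1, sizeof(*p));
	if (!p)
		return;
	p->port = port;
	p->next = ports;
	ports = p;
}

static void del_port(unsigned short port)
{
	for (struct port_entry **pp = &ports; *pp; pp = &(*pp)->next) {
		struct port_entry *p = *pp;

		if (p->port != port)
			continue;
		if (p->aliases) { p->aliases--; return; }
		*pp = p->next;	/* last user: unlink and free */
		free(p);
		return;
	}
}

int main(void)
{
	add_port(4789); add_port(4789);	/* second add is an alias */
	del_port(4789);			/* drops the alias only */
	printf("still tracked: %s\n", ports ? "yes" : "no");
	del_port(4789);
	printf("still tracked: %s\n", ports ? "yes" : "no");
	return 0;
}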
*/ static void be_work_add_vxlan_port(struct work_struct *work) { struct be_cmd_work *cmd_work = container_of(work, struct be_cmd_work, work); struct be_adapter *adapter = cmd_work->adapter; - struct net_device *netdev = adapter->netdev; struct device *dev = &adapter->pdev->dev; __be16 port = cmd_work->info.vxlan_port; + struct be_vxlan_port *vxlan_port; int status; - if (adapter->vxlan_port == port && adapter->vxlan_port_count) { - adapter->vxlan_port_aliases++; - goto done; + /* Bump up the alias count if it is an existing port */ + list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) { + if (vxlan_port->port == port) { + vxlan_port->port_aliases++; + goto done; + } } + /* Add a new port to our list. We don't need a lock here since port + * add/delete are done only in the context of a single-threaded work + * queue (be_wq). + */ + vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL); + if (!vxlan_port) + goto done; + + vxlan_port->port = port; + INIT_LIST_HEAD(&vxlan_port->list); + list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list); + adapter->vxlan_port_count++; + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { dev_info(dev, "Only one UDP port supported for VxLAN offloads\n"); dev_info(dev, "Disabling VxLAN offloads\n"); - adapter->vxlan_port_count++; goto err; } - if (adapter->vxlan_port_count++ >= 1) + if (adapter->vxlan_port_count > 1) goto done; - status = be_cmd_manage_iface(adapter, adapter->if_handle, - OP_CONVERT_NORMAL_TO_TUNNEL); - if (status) { - dev_warn(dev, "Failed to convert normal interface to tunnel\n"); - goto err; - } - - status = be_cmd_set_vxlan_port(adapter, port); - if (status) { - dev_warn(dev, "Failed to add VxLAN port\n"); - goto err; - } - adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; - adapter->vxlan_port = port; - - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->features |= NETIF_F_GSO_UDP_TUNNEL; + status = be_enable_vxlan_offloads(adapter); + if (!status) + goto done; - dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", - be16_to_cpu(port)); - goto done; err: be_disable_vxlan_offloads(adapter); done: kfree(cmd_work); + return; } static void be_work_del_vxlan_port(struct work_struct *work) @@ -4968,23 +5002,40 @@ static void be_work_del_vxlan_port(struct work_struct *work) container_of(work, struct be_cmd_work, work); struct be_adapter *adapter = cmd_work->adapter; __be16 port = cmd_work->info.vxlan_port; + struct be_vxlan_port *vxlan_port; - if (adapter->vxlan_port != port) - goto done; + /* Nothing to be done if a port alias is being deleted */ + list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) { + if (vxlan_port->port == port) { + if (vxlan_port->port_aliases) { + vxlan_port->port_aliases--; + goto done; + } + break; + } + } + + /* No port aliases left; delete the port from the list */ + list_del(&vxlan_port->list); + adapter->vxlan_port_count--; - if (adapter->vxlan_port_aliases) { - adapter->vxlan_port_aliases--; + /* Disable VxLAN offload if this is the offloaded port */ + if (adapter->vxlan_port == vxlan_port->port) { + WARN_ON(adapter->vxlan_port_count); + be_disable_vxlan_offloads(adapter); + dev_info(&adapter->pdev->dev, + "Disabled VxLAN offloads for UDP port %d\n", + be16_to_cpu(port)); goto out; } - be_disable_vxlan_offloads(adapter); + /* If only 1 port is left, re-enable VxLAN offload */ + if (adapter->vxlan_port_count == 1) + be_enable_vxlan_offloads(adapter); - 
dev_info(&adapter->pdev->dev, - "Disabled VxLAN offloads for UDP port %d\n", - be16_to_cpu(port)); -done: - adapter->vxlan_port_count--; out: + kfree(vxlan_port); +done: kfree(cmd_work); } @@ -5217,15 +5268,15 @@ static bool be_err_is_recoverable(struct be_adapter *adapter) dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n", ue_err_code); - if (jiffies - err_rec->probe_time <= initial_idle_time) { + if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) { dev_err(&adapter->pdev->dev, "Cannot recover within %lu sec from driver load\n", jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC); return false; } - if (err_rec->last_recovery_time && - (jiffies - err_rec->last_recovery_time <= recovery_interval)) { + if (err_rec->last_recovery_time && time_before_eq( + jiffies - err_rec->last_recovery_time, recovery_interval)) { dev_err(&adapter->pdev->dev, "Cannot recover within %lu sec from last recovery\n", jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC); @@ -5626,6 +5677,7 @@ static int be_drv_init(struct be_adapter *adapter) /* Must be a power of 2 or else MODULO will BUG_ON */ adapter->be_get_temp_freq = 64; + INIT_LIST_HEAD(&adapter->vxlan_port_list); return 0; free_rx_filter: diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 23d82748f52b9aa19ecd17226444f3326dfaaed4..e863ba74d005d7f255931b336825df2abadd2fc8 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1148,14 +1148,14 @@ static int ethoc_probe(struct platform_device *pdev) /* Allow the platform setup code to pass in a MAC address. */ if (pdata) { - memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); + ether_addr_copy(netdev->dev_addr, pdata->hwaddr); priv->phy_id = pdata->phy_id; } else { const void *mac; mac = of_get_mac_address(pdev->dev.of_node); if (mac) - memcpy(netdev->dev_addr, mac, IFHWADDRLEN); + ether_addr_copy(netdev->dev_addr, mac); priv->phy_id = -1; } diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index f819843e2bae73564e090b8fd9c7a8dfcec1fa12..659f1ad37e96a823a4ee3cba04be905d5c9c5dee 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ade6b3e4ed1326a42aa39d52b2465f1b271c02f8..95bf5e89cfd17b30d3543b33cb069f8aaee69d31 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -32,6 +32,9 @@ #include #include #include +#include +#include +#include #include #include @@ -40,103 +43,137 @@ #define DRV_NAME "ftgmac100" #define DRV_VERSION "0.7" -#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */ -#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */ +/* Arbitrary values, I am not sure the HW has limits */ +#define MAX_RX_QUEUE_ENTRIES 1024 +#define MAX_TX_QUEUE_ENTRIES 1024 +#define MIN_RX_QUEUE_ENTRIES 32 +#define MIN_TX_QUEUE_ENTRIES 32 -#define MAX_PKT_SIZE 1518 -#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */ +/* Defaults */ +#define DEF_RX_QUEUE_ENTRIES 128 +#define DEF_TX_QUEUE_ENTRIES 128 -/****************************************************************************** - * private data - *****************************************************************************/ -struct ftgmac100_descs { - struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; - struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES]; -}; +#define 
MAX_PKT_SIZE 1536 +#define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */ + +/* Min number of tx ring entries before stopping queue */ +#define TX_THRESHOLD (MAX_SKB_FRAGS + 1) struct ftgmac100 { + /* Registers */ struct resource *res; void __iomem *base; - int irq; - - struct ftgmac100_descs *descs; - dma_addr_t descs_dma_addr; - - struct page *rx_pages[RX_QUEUE_ENTRIES]; + /* Rx ring */ + unsigned int rx_q_entries; + struct ftgmac100_rxdes *rxdes; + dma_addr_t rxdes_dma; + struct sk_buff **rx_skbs; unsigned int rx_pointer; + u32 rxdes0_edorr_mask; + + /* Tx ring */ + unsigned int tx_q_entries; + struct ftgmac100_txdes *txdes; + dma_addr_t txdes_dma; + struct sk_buff **tx_skbs; unsigned int tx_clean_pointer; unsigned int tx_pointer; - unsigned int tx_pending; + u32 txdes0_edotr_mask; + + /* Used to signal the reset task of ring change request */ + unsigned int new_rx_q_entries; + unsigned int new_tx_q_entries; - spinlock_t tx_lock; + /* Scratch page to use when rx skb alloc fails */ + void *rx_scratch; + dma_addr_t rx_scratch_dma; + /* Component structures */ struct net_device *netdev; struct device *dev; struct ncsi_dev *ndev; struct napi_struct napi; - + struct work_struct reset_task; struct mii_bus *mii_bus; - int old_speed; - int int_mask_all; - bool use_ncsi; - bool enabled; - - u32 rxdes0_edorr_mask; - u32 txdes0_edotr_mask; -}; - -static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes, gfp_t gfp); -/****************************************************************************** - * internal functions (hardware register access) - *****************************************************************************/ -static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr) -{ - iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR); -} + /* Link management */ + int cur_speed; + int cur_duplex; + bool use_ncsi; -static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv, - unsigned int size) -{ - size = FTGMAC100_RBSR_SIZE(size); - iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR); -} + /* Multicast filter settings */ + u32 maht0; + u32 maht1; -static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv, - dma_addr_t addr) -{ - iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); -} + /* Flow control settings */ + bool tx_pause; + bool rx_pause; + bool aneg_pause; -static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv) -{ - iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); -} + /* Misc */ + bool need_mac_restart; + bool is_aspeed; +}; -static int ftgmac100_reset_hw(struct ftgmac100 *priv) +static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) { struct net_device *netdev = priv->netdev; int i; /* NOTE: reset clears all registers */ - iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR); - for (i = 0; i < 5; i++) { + iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); + iowrite32(maccr | FTGMAC100_MACCR_SW_RST, + priv->base + FTGMAC100_OFFSET_MACCR); + for (i = 0; i < 50; i++) { unsigned int maccr; maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); if (!(maccr & FTGMAC100_MACCR_SW_RST)) return 0; - udelay(1000); + udelay(1); } - netdev_err(netdev, "software reset failed\n"); + netdev_err(netdev, "Hardware reset failed\n"); return -EIO; } -static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac) +static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv) +{ + u32 maccr = 0; + + switch (priv->cur_speed) { + 
case SPEED_10: + case 0: /* no link */ + break; + + case SPEED_100: + maccr |= FTGMAC100_MACCR_FAST_MODE; + break; + + case SPEED_1000: + maccr |= FTGMAC100_MACCR_GIGA_MODE; + break; + default: + netdev_err(priv->netdev, "Unknown speed %d !\n", + priv->cur_speed); + break; + } + + /* (Re)initialize the queue pointers */ + priv->rx_pointer = 0; + priv->tx_clean_pointer = 0; + priv->tx_pointer = 0; + + /* The doc says reset twice with 10us interval */ + if (ftgmac100_reset_mac(priv, maccr)) + return -EIO; + usleep_range(10, 1000); + return ftgmac100_reset_mac(priv, maccr); +} + +static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) { unsigned int maddr = mac[0] << 8 | mac[1]; unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; @@ -145,7 +182,7 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac) iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); } -static void ftgmac100_setup_mac(struct ftgmac100 *priv) +static void ftgmac100_initial_mac(struct ftgmac100 *priv) { u8 mac[ETH_ALEN]; unsigned int m; @@ -189,716 +226,833 @@ static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) return ret; eth_commit_mac_addr_change(dev, p); - ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr); + ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr); return 0; } -static void ftgmac100_init_hw(struct ftgmac100 *priv) -{ - /* setup ring buffer base registers */ - ftgmac100_set_rx_ring_base(priv, - priv->descs_dma_addr + - offsetof(struct ftgmac100_descs, rxdes)); - ftgmac100_set_normal_prio_tx_ring_base(priv, - priv->descs_dma_addr + - offsetof(struct ftgmac100_descs, txdes)); - - ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE); - - iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC); - - ftgmac100_set_mac(priv, priv->netdev->dev_addr); -} - -#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \ - FTGMAC100_MACCR_RXDMA_EN | \ - FTGMAC100_MACCR_TXMAC_EN | \ - FTGMAC100_MACCR_RXMAC_EN | \ - FTGMAC100_MACCR_FULLDUP | \ - FTGMAC100_MACCR_CRC_APD | \ - FTGMAC100_MACCR_RX_RUNT | \ - FTGMAC100_MACCR_RX_BROADPKT) - -static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed) +static void ftgmac100_config_pause(struct ftgmac100 *priv) { - int maccr = MACCR_ENABLE_ALL; + u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16); - switch (speed) { - default: - case 10: - break; + /* Throttle tx queue when receiving pause frames */ + if (priv->rx_pause) + fcr |= FTGMAC100_FCR_FC_EN; - case 100: - maccr |= FTGMAC100_MACCR_FAST_MODE; - break; - - case 1000: - maccr |= FTGMAC100_MACCR_GIGA_MODE; - break; - } + /* Enables sending pause frames when the RX queue is past a + * certain threshold. 
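The ftgmac100_reset_mac() loop above is a bounded poll of a self-clearing bit: up to 50 reads of MACCR with a 1us delay between them. A toy version of that poll-with-timeout pattern, using a fake register that clears after a few reads:

#include <stdio.h>

#define SW_RST (1u << 31)
static unsigned int fake_maccr = SW_RST;	/* pretend register */

static unsigned int read_maccr(void)
{
	static int reads;

	if (++reads == 3)		/* HW clears the bit eventually */
		fake_maccr &= ~SW_RST;
	return fake_maccr;
}

int main(void)
{
	for (int i = 0; i < 50; i++) {
		if (!(read_maccr() & SW_RST)) {
			printf("reset complete after %d polls\n", i + 1);
			return 0;
		}
		/* the driver waits udelay(1) between polls */
	}
	printf("reset timed out\n");
	return 1;
}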
+ */ + if (priv->tx_pause) + fcr |= FTGMAC100_FCR_FCTHR_EN; - iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); + iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR); } -static void ftgmac100_stop_hw(struct ftgmac100 *priv) +static void ftgmac100_init_hw(struct ftgmac100 *priv) { - iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); -} + u32 reg, rfifo_sz, tfifo_sz; -/****************************************************************************** - * internal functions (receive descriptor) - *****************************************************************************/ -static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS); -} + /* Clear stale interrupts */ + reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR); + iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR); -static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS); -} + /* Setup RX ring buffer base */ + iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR); -static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY); -} + /* Setup TX ring buffer base */ + iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); -static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes) -{ - /* clear status bits */ - rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask); -} + /* Configure RX buffer size */ + iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE), + priv->base + FTGMAC100_OFFSET_RBSR); -static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR); -} + /* Set RX descriptor autopoll */ + iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), + priv->base + FTGMAC100_OFFSET_APTC); -static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR); -} + /* Write MAC address */ + ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr); -static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL); -} + /* Write multicast filter */ + iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0); + iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1); -static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT); + /* Configure descriptor sizes and increase burst sizes according + * to values in Aspeed SDK. The FIFO arbitration is enabled and + * the thresholds set based on the recommended values in the + * AST2400 specification. + */ + iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */ + FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */ + FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */ + FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */ + FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */ + FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */ + FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */ + priv->base + FTGMAC100_OFFSET_DBLAC); + + /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt + * mitigation doesn't seem to provide any benefit with NAPI so leave + * it at that. 
+ */ + iowrite32(FTGMAC100_ITC_RXINT_THR(1) | + FTGMAC100_ITC_TXINT_THR(1), + priv->base + FTGMAC100_OFFSET_ITC); + + /* Configure FIFO sizes in the TPAFCR register */ + reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR); + rfifo_sz = reg & 0x00000007; + tfifo_sz = (reg >> 3) & 0x00000007; + reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR); + reg &= ~0x3f000000; + reg |= (tfifo_sz << 27); + reg |= (rfifo_sz << 24); + iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR); +} + +static void ftgmac100_start_hw(struct ftgmac100 *priv) +{ + u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); + + /* Keep the original GMAC and FAST bits */ + maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); + + /* Add all the main enable bits */ + maccr |= FTGMAC100_MACCR_TXDMA_EN | + FTGMAC100_MACCR_RXDMA_EN | + FTGMAC100_MACCR_TXMAC_EN | + FTGMAC100_MACCR_RXMAC_EN | + FTGMAC100_MACCR_CRC_APD | + FTGMAC100_MACCR_PHY_LINK_LEVEL | + FTGMAC100_MACCR_RX_RUNT | + FTGMAC100_MACCR_RX_BROADPKT; + + /* Add other bits as needed */ + if (priv->cur_duplex == DUPLEX_FULL) + maccr |= FTGMAC100_MACCR_FULLDUP; + if (priv->netdev->flags & IFF_PROMISC) + maccr |= FTGMAC100_MACCR_RX_ALL; + if (priv->netdev->flags & IFF_ALLMULTI) + maccr |= FTGMAC100_MACCR_RX_MULTIPKT; + else if (netdev_mc_count(priv->netdev)) + maccr |= FTGMAC100_MACCR_HT_MULTI_EN; + + /* Vlan filtering enabled */ + if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + maccr |= FTGMAC100_MACCR_RM_VLAN; + + /* Hit the HW */ + iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); } -static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes) +static void ftgmac100_stop_hw(struct ftgmac100 *priv) { - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB); + iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); } -static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes) +static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv) { - return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC; -} + struct netdev_hw_addr *ha; -static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST); -} + priv->maht1 = 0; + priv->maht0 = 0; + netdev_for_each_mc_addr(ha, priv->netdev) { + u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr); -static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes) -{ - rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); + crc_val = (~(crc_val >> 2)) & 0x3f; + if (crc_val >= 32) + priv->maht1 |= 1ul << (crc_val - 32); + else + priv->maht0 |= 1ul << (crc_val); + } } -static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes, - dma_addr_t addr) +static void ftgmac100_set_rx_mode(struct net_device *netdev) { - rxdes->rxdes3 = cpu_to_le32(addr); -} + struct ftgmac100 *priv = netdev_priv(netdev); -static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes) -{ - return le32_to_cpu(rxdes->rxdes3); -} + /* Setup the hash filter */ + ftgmac100_calc_mc_hash(priv); -static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes) -{ - return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) == - cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP); -} + /* Interface down ? 
that's all there is to do */ + if (!netif_running(netdev)) + return; -static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes) -{ - return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) == - cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP); -} + /* Update the HW */ + iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0); + iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1); -static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR); + /* Reconfigure MACCR */ + ftgmac100_start_hw(priv); } -static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes) +static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, + struct ftgmac100_rxdes *rxdes, gfp_t gfp) { - return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR); -} + struct net_device *netdev = priv->netdev; + struct sk_buff *skb; + dma_addr_t map; + int err; -static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes) -{ - return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR); -} + skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_warn(netdev, "failed to allocate rx skb\n"); + err = -ENOMEM; + map = priv->rx_scratch_dma; + } else { + map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + if (net_ratelimit()) + netdev_err(netdev, "failed to map rx page\n"); + dev_kfree_skb_any(skb); + map = priv->rx_scratch_dma; + skb = NULL; + err = -ENOMEM; + } + } -static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes) -{ - return &priv->rx_pages[rxdes - priv->descs->rxdes]; -} + /* Store skb */ + priv->rx_skbs[entry] = skb; -/* - * rxdes2 is not used by hardware. We use it to keep track of page. - * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). 
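The rx buffer allocator above never leaves a descriptor without a valid DMA target: when the skb allocation (or its mapping) fails, the slot is pointed at a shared scratch buffer so the hardware can still DMA somewhere harmless, and the slot is retried later instead of stalling the ring. A minimal userspace model of that fallback (malloc stands in for skb allocation; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static char scratch[2048];	/* shared "safe landing" buffer */

struct rx_slot { void *buf; int is_scratch; };

static void fill_slot(struct rx_slot *slot, int simulate_oom)
{
	void *b = simulate_oom ? NULL : malloc(2048);

	if (!b) {
		slot->buf = scratch;	/* data landing here is dropped */
		slot->is_scratch = 1;
		return;
	}
	slot->buf = b;
	slot->is_scratch = 0;
}

int main(void)
{
	struct rx_slot slot;

	fill_slot(&slot, 1);	/* allocation fails: use scratch */
	printf("slot uses scratch: %d\n", slot.is_scratch);
	fill_slot(&slot, 0);	/* retried later and succeeds */
	printf("slot uses scratch: %d\n", slot.is_scratch);
	free(slot.buf);
	return 0;
}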
- */ -static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes, - struct page *page) -{ - *ftgmac100_rxdes_page_slot(priv, rxdes) = page; -} + /* Store DMA address into RX desc */ + rxdes->rxdes3 = cpu_to_le32(map); -static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes) -{ - return *ftgmac100_rxdes_page_slot(priv, rxdes); -} + /* Ensure the above is ordered vs clearing the OWN bit */ + dma_wmb(); -/****************************************************************************** - * internal functions (receive) - *****************************************************************************/ -static int ftgmac100_next_rx_pointer(int pointer) -{ - return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); -} + /* Clean status (which resets own bit) */ + if (entry == (priv->rx_q_entries - 1)) + rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask); + else + rxdes->rxdes0 = 0; -static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv) -{ - priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer); + return 0; } -static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv) +static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, + unsigned int pointer) { - return &priv->descs->rxdes[priv->rx_pointer]; + return (pointer + 1) & (priv->rx_q_entries - 1); } -static struct ftgmac100_rxdes * -ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv) -{ - struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv); - - while (ftgmac100_rxdes_packet_ready(rxdes)) { - if (ftgmac100_rxdes_first_segment(rxdes)) - return rxdes; - - ftgmac100_rxdes_set_dma_own(priv, rxdes); - ftgmac100_rx_pointer_advance(priv); - rxdes = ftgmac100_current_rxdes(priv); - } - - return NULL; -} - -static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes) +static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status) { struct net_device *netdev = priv->netdev; - bool error = false; - - if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) { - if (net_ratelimit()) - netdev_info(netdev, "rx err\n"); + if (status & FTGMAC100_RXDES0_RX_ERR) netdev->stats.rx_errors++; - error = true; - } - - if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) { - if (net_ratelimit()) - netdev_info(netdev, "rx crc err\n"); + if (status & FTGMAC100_RXDES0_CRC_ERR) netdev->stats.rx_crc_errors++; - error = true; - } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) { - if (net_ratelimit()) - netdev_info(netdev, "rx IP checksum err\n"); - - error = true; - } - - if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) { - if (net_ratelimit()) - netdev_info(netdev, "rx frame too long\n"); - - netdev->stats.rx_length_errors++; - error = true; - } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) { - if (net_ratelimit()) - netdev_info(netdev, "rx runt\n"); + if (status & (FTGMAC100_RXDES0_FTL | + FTGMAC100_RXDES0_RUNT | + FTGMAC100_RXDES0_RX_ODD_NB)) netdev->stats.rx_length_errors++; - error = true; - } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) { - if (net_ratelimit()) - netdev_info(netdev, "rx odd nibble\n"); - - netdev->stats.rx_length_errors++; - error = true; - } - - return error; } -static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv) +static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) { struct net_device *netdev = priv->netdev; - struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv); - bool done = false; + struct ftgmac100_rxdes *rxdes; + struct sk_buff 
*skb; + unsigned int pointer, size; + u32 status, csum_vlan; + dma_addr_t map; - if (net_ratelimit()) - netdev_dbg(netdev, "drop packet %p\n", rxdes); + /* Grab next RX descriptor */ + pointer = priv->rx_pointer; + rxdes = &priv->rxdes[pointer]; - do { - if (ftgmac100_rxdes_last_segment(rxdes)) - done = true; + /* Grab descriptor status */ + status = le32_to_cpu(rxdes->rxdes0); - ftgmac100_rxdes_set_dma_own(priv, rxdes); - ftgmac100_rx_pointer_advance(priv); - rxdes = ftgmac100_current_rxdes(priv); - } while (!done && ftgmac100_rxdes_packet_ready(rxdes)); + /* Do we have a packet? */ + if (!(status & FTGMAC100_RXDES0_RXPKT_RDY)) + return false; - netdev->stats.rx_dropped++; -} + /* Order subsequent reads with the test for the ready bit */ + dma_rmb(); -static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) -{ - struct net_device *netdev = priv->netdev; - struct ftgmac100_rxdes *rxdes; - struct sk_buff *skb; - bool done = false; + /* We don't cope with fragmented RX packets */ + if (unlikely(!(status & FTGMAC100_RXDES0_FRS) || + !(status & FTGMAC100_RXDES0_LRS))) + goto drop; - rxdes = ftgmac100_rx_locate_first_segment(priv); - if (!rxdes) - return false; + /* Grab the received size and the csum/vlan field from the descriptor */ + size = status & FTGMAC100_RXDES0_VDBC; + csum_vlan = le32_to_cpu(rxdes->rxdes1); - if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) { - ftgmac100_rx_drop_packet(priv); - return true; + /* Any error (other than csum offload) flagged? */ + if (unlikely(status & RXDES0_ANY_ERROR)) { + /* Correct for incorrect flagging of runt packets + * with vlan tags... Just accept a runt packet that + * has been flagged as vlan and whose size is at + * least 60 bytes. + */ + if ((status & FTGMAC100_RXDES0_RUNT) && + (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) && + (size >= 60)) + status &= ~FTGMAC100_RXDES0_RUNT; + + /* Any error still in there? */ + if (status & RXDES0_ANY_ERROR) { + ftgmac100_rx_packet_error(priv, status); + goto drop; + } } - /* start processing */ - skb = netdev_alloc_skb_ip_align(netdev, 128); - if (unlikely(!skb)) { - if (net_ratelimit()) - netdev_err(netdev, "rx skb alloc failed\n"); - - ftgmac100_rx_drop_packet(priv); - return true; + /* If the packet had no skb (we failed to allocate one earlier), + * try to allocate one now and skip the packet + */ + skb = priv->rx_skbs[pointer]; + if (unlikely(!skb)) { + ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); + goto drop; } - if (unlikely(ftgmac100_rxdes_multicast(rxdes))) + if (unlikely(status & FTGMAC100_RXDES0_MULTICAST)) netdev->stats.multicast++; - /* - * It seems that HW does checksum incorrectly with fragmented packets, - * so we are conservative here - if HW checksum error, let software do - * the checksum again. + /* If the HW found checksum errors, bounce the packet to software. + * + * If we didn't, we need to see if the packet was recognized + * by HW as one of the supported checksummed protocols before + * we accept the HW test results. + */
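As an illustration of the rule spelled out in the comment above, the acceptance test implemented a few lines below can be read as a single predicate. This is a sketch only, not part of the patch; it assumes nothing beyond the FTGMAC100_RXDES1_* bit definitions from ftgmac100.h:

	/* Illustrative sketch, not patch code: HW checksum results are
	 * trusted only when no csum error is flagged AND the MAC
	 * recognized the packet as one of the checksummed protocols
	 * (PROT field non-zero). */
	static inline bool rx_hw_csum_ok(u32 csum_vlan)
	{
		u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
			       FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
			       FTGMAC100_RXDES1_IP_CHKSUM_ERR;

		return !(csum_vlan & err_bits) &&
		       (csum_vlan & FTGMAC100_RXDES1_PROT_MASK);
	}
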
- if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) || - (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - do { - dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes); - struct page *page = ftgmac100_rxdes_get_page(priv, rxdes); - unsigned int size; + if (netdev->features & NETIF_F_RXCSUM) { + u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR | + FTGMAC100_RXDES1_UDP_CHKSUM_ERR | + FTGMAC100_RXDES1_IP_CHKSUM_ERR; + if ((csum_vlan & err_bits) || + !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + } - dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + /* Transfer received size to skb */ + skb_put(skb, size); - size = ftgmac100_rxdes_data_length(rxdes); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size); + /* Extract vlan tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + csum_vlan & 0xffff); - skb->len += size; - skb->data_len += size; - skb->truesize += PAGE_SIZE; + /* Tear down DMA mapping, do necessary cache management */ + map = le32_to_cpu(rxdes->rxdes3); - if (ftgmac100_rxdes_last_segment(rxdes)) - done = true; +#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) + /* When we don't have an iommu, we can save cycles by not + * invalidating the cache for the part of the packet that + * wasn't received. + */ + dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); +#else + dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); +#endif - ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); - ftgmac100_rx_pointer_advance(priv); - rxdes = ftgmac100_current_rxdes(priv); - } while (!done); + /* Replenish the RX ring */ + ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC); + priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); - /* Small frames are copied into linear part of skb to free one page */ - if (skb->len <= 128) { - skb->truesize -= PAGE_SIZE; - __pskb_pull_tail(skb, skb->len); - } else { - /* We pull the minimum amount into linear part */ - __pskb_pull_tail(skb, ETH_HLEN); - } skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_packets++; - netdev->stats.rx_bytes += skb->len; + netdev->stats.rx_bytes += size; /* push packet to protocol stack */ - napi_gro_receive(&priv->napi, skb); + if (skb->ip_summed == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&priv->napi, skb); (*processed)++; return true; + + drop: + /* Clean rxdes0 (which resets the own bit) */ + rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask); + priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer); + netdev->stats.rx_dropped++; + return true; } -/****************************************************************************** - * internal functions (transmit descriptor) - *****************************************************************************/ -static void ftgmac100_txdes_reset(const struct ftgmac100 *priv, - struct ftgmac100_txdes *txdes) +static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv, + unsigned int index) { - /* clear all except end of ring bit */ - txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask); - txdes->txdes1 = 0; - txdes->txdes2 = 0; - txdes->txdes3 = 0; + if (index == (priv->tx_q_entries - 1)) + return priv->txdes0_edotr_mask; + else + return 0; } -static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes
*txdes) +static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv, + unsigned int pointer) { - return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN); + return (pointer + 1) & (priv->tx_q_entries - 1); } -static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes) +static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv) { - /* - * Make sure dma own bit will not be set before any other - * descriptor fields. + /* Returns the number of available slots in the TX queue + * + * This always leaves one free slot so we don't have to + * worry about empty vs. full, and this simplifies the + * test for ftgmac100_tx_buf_cleanable() below */ - wmb(); - txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN); + return (priv->tx_clean_pointer - priv->tx_pointer - 1) & + (priv->tx_q_entries - 1); } -static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv, - struct ftgmac100_txdes *txdes) +static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv) { - txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); + return priv->tx_pointer != priv->tx_clean_pointer; } -static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes) +static void ftgmac100_free_tx_packet(struct ftgmac100 *priv, + unsigned int pointer, + struct sk_buff *skb, + struct ftgmac100_txdes *txdes, + u32 ctl_stat) { - txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS); -} + dma_addr_t map = le32_to_cpu(txdes->txdes3); + size_t len; -static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes) -{ - txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS); -} + if (ctl_stat & FTGMAC100_TXDES0_FTS) { + len = skb_headlen(skb); + dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); + } else { + len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat); + dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); + } -static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes, - unsigned int len) -{ - txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len)); + /* Free SKB on last segment */ + if (ctl_stat & FTGMAC100_TXDES0_LTS) + dev_kfree_skb(skb); + priv->tx_skbs[pointer] = NULL; } -static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes) +static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) { - txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC); -} + struct net_device *netdev = priv->netdev; + struct ftgmac100_txdes *txdes; + struct sk_buff *skb; + unsigned int pointer; + u32 ctl_stat; -static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes) -{ - txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM); -} + pointer = priv->tx_clean_pointer; + txdes = &priv->txdes[pointer]; -static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes) -{ - txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM); -} + ctl_stat = le32_to_cpu(txdes->txdes0); + if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN) + return false; -static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes) -{ - txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM); -} + skb = priv->tx_skbs[pointer]; + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); + txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); -static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes, - dma_addr_t addr) -{ - txdes->txdes3 = cpu_to_le32(addr); -} + priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer); -static dma_addr_t ftgmac100_txdes_get_dma_addr(struct 
ftgmac100_txdes *txdes) -{ - return le32_to_cpu(txdes->txdes3); + return true; } -/* - * txdes2 is not used by hardware. We use it to keep track of socket buffer. - * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). - */ -static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes, - struct sk_buff *skb) +static void ftgmac100_tx_complete(struct ftgmac100 *priv) { - txdes->txdes2 = (unsigned int)skb; -} + struct net_device *netdev = priv->netdev; -static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes) -{ - return (struct sk_buff *)txdes->txdes2; -} + /* Process all completed packets */ + while (ftgmac100_tx_buf_cleanable(priv) && + ftgmac100_tx_complete_packet(priv)) + ; -/****************************************************************************** - * internal functions (transmit) - *****************************************************************************/ -static int ftgmac100_next_tx_pointer(int pointer) -{ - return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); + /* Restart queue if needed */ + smp_mb(); + if (unlikely(netif_queue_stopped(netdev) && + ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(netdev, 0); + __netif_tx_lock(txq, smp_processor_id()); + if (netif_queue_stopped(netdev) && + ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) + netif_wake_queue(netdev); + __netif_tx_unlock(txq); + } } -static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv) +static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan) { - priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer); -} + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + u8 ip_proto = ip_hdr(skb)->protocol; -static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv) -{ - priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer); + *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM; + switch (ip_proto) { + case IPPROTO_TCP: + *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM; + return true; + case IPPROTO_UDP: + *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM; + return true; + case IPPROTO_IP: + return true; + } + } + return skb_checksum_help(skb) == 0; } -static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv) +static int ftgmac100_hard_start_xmit(struct sk_buff *skb, + struct net_device *netdev) { - return &priv->descs->txdes[priv->tx_pointer]; -} + struct ftgmac100 *priv = netdev_priv(netdev); + struct ftgmac100_txdes *txdes, *first; + unsigned int pointer, nfrags, len, i, j; + u32 f_ctl_stat, ctl_stat, csum_vlan; + dma_addr_t map; -static struct ftgmac100_txdes * -ftgmac100_current_clean_txdes(struct ftgmac100 *priv) -{ - return &priv->descs->txdes[priv->tx_clean_pointer]; -} + /* The HW doesn't pad small frames */ + if (eth_skb_pad(skb)) { + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; + } -static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) -{ - struct net_device *netdev = priv->netdev; - struct ftgmac100_txdes *txdes; - struct sk_buff *skb; - dma_addr_t map; + /* Reject oversize packets */ + if (unlikely(skb->len > MAX_PKT_SIZE)) { + if (net_ratelimit()) + netdev_dbg(netdev, "tx packet too big\n"); + goto drop; + } - if (priv->tx_pending == 0) - return false; + /* Is there a limit on the number of fragments? I have yet to get + * a reply from Aspeed; if there is one, I haven't hit it. + */
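A quick aside on the ring accounting this function relies on: ftgmac100_tx_buf_avail() (introduced earlier in this patch) keeps one slot permanently unused so that a full ring and an empty ring remain distinguishable without a separate counter. The following self-contained demonstration of that arithmetic is illustrative only, not patch code; it assumes only power-of-two queue sizes, which set_ringparam enforces:

	#include <stdio.h>

	/* Same formula as ftgmac100_tx_buf_avail(): free slots in a
	 * power-of-two ring, always reserving one slot so "full" and
	 * "empty" are distinguishable. */
	static unsigned int tx_avail(unsigned int clean, unsigned int ptr,
				     unsigned int entries)
	{
		return (clean - ptr - 1) & (entries - 1);
	}

	int main(void)
	{
		printf("%u\n", tx_avail(2, 2, 8)); /* 7: ring empty, one slot reserved */
		printf("%u\n", tx_avail(2, 5, 8)); /* 4: three descriptors in flight */
		printf("%u\n", tx_avail(2, 1, 8)); /* 0: ring full */
		return 0;
	}
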
+ nfrags = skb_shinfo(skb)->nr_frags; - txdes = ftgmac100_current_clean_txdes(priv); + /* Get header len */ + len = skb_headlen(skb); - if (ftgmac100_txdes_owned_by_dma(txdes)) - return false; + /* Map the packet head */ + map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, map)) { + if (net_ratelimit()) + netdev_err(netdev, "map tx packet head failed\n"); + goto drop; + } + + /* Grab the next free tx descriptor */ + pointer = priv->tx_pointer; + txdes = first = &priv->txdes[pointer]; + + /* Set it up with the packet head. Don't write the head to the + * ring just yet + */ + priv->tx_skbs[pointer] = skb; + f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); + f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; + f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); + f_ctl_stat |= FTGMAC100_TXDES0_FTS; + if (nfrags == 0) + f_ctl_stat |= FTGMAC100_TXDES0_LTS; + txdes->txdes3 = cpu_to_le32(map); + + /* Set up HW checksumming */ + csum_vlan = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL && + !ftgmac100_prep_tx_csum(skb, &csum_vlan)) + goto drop; + + /* Add VLAN tag */ + if (skb_vlan_tag_present(skb)) { + csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG; + csum_vlan |= skb_vlan_tag_get(skb) & 0xffff; + } - skb = ftgmac100_txdes_get_skb(txdes); - map = ftgmac100_txdes_get_dma_addr(txdes); + txdes->txdes1 = cpu_to_le32(csum_vlan); + + /* Next descriptor */ + pointer = ftgmac100_next_tx_pointer(priv, pointer); + + /* Add the fragments */ + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + + /* Map it */ + map = skb_frag_dma_map(priv->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, map)) + goto dma_err; + + /* Set up descriptor */ + priv->tx_skbs[pointer] = skb; + txdes = &priv->txdes[pointer]; + ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer); + ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN; + ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len); + if (i == (nfrags - 1)) + ctl_stat |= FTGMAC100_TXDES0_LTS; + txdes->txdes0 = cpu_to_le32(ctl_stat); + txdes->txdes1 = 0; + txdes->txdes3 = cpu_to_le32(map); + + /* Next one */ + pointer = ftgmac100_next_tx_pointer(priv, pointer); + } + + /* Order the previous packet and descriptor updates + * before setting the OWN bit on the first descriptor. + */ + dma_wmb(); + first->txdes0 = cpu_to_le32(f_ctl_stat); + + /* Update next TX pointer */ + priv->tx_pointer = pointer;
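Before the queue-stop logic below, it helps to see the lockless handshake it forms with ftgmac100_tx_complete() above. The following is an illustrative summary only, not additional patch code:

	/* Sketch of the stop/wake handshake (not patch code):
	 *
	 *   xmit (producer)                 tx completion (consumer)
	 *   ---------------                 ------------------------
	 *   netif_stop_queue()              advance tx_clean_pointer
	 *   smp_mb()                        smp_mb()
	 *   re-check tx_buf_avail()         re-check stopped + avail
	 *   -> wake if space appeared       -> wake (under the tx queue lock)
	 *
	 * Whichever side runs second is guaranteed to observe the other
	 * side's update, so the queue can never be left stopped while
	 * space is available.
	 */
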
+ /* If there isn't enough room for all the fragments of a new packet + * in the TX ring, stop the queue. The sequence below is race-free + * vs. a concurrent restart in ftgmac100_poll() + */ + if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) { + netif_stop_queue(netdev); + /* Order the queue stop with the test below */ + smp_mb(); + if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD) + netif_wake_queue(netdev); + } + + /* Poke transmitter to read the updated TX descriptors */ + iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); + + return NETDEV_TX_OK; + + dma_err: + if (net_ratelimit()) + netdev_err(netdev, "map tx fragment failed\n"); + + /* Free head */ + pointer = priv->tx_pointer; + ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat); + first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask); + + /* Then all fragments */ + for (j = 0; j < i; j++) { + pointer = ftgmac100_next_tx_pointer(priv, pointer); + txdes = &priv->txdes[pointer]; + ctl_stat = le32_to_cpu(txdes->txdes0); + ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat); + txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask); + } - netdev->stats.tx_packets++; - netdev->stats.tx_bytes += skb->len; + /* This cannot be reached if we successfully mapped the + * last fragment, so we know ftgmac100_free_tx_packet() + * hasn't freed the skb yet. + */ + drop: + /* Drop the packet */ + dev_kfree_skb_any(skb); + netdev->stats.tx_dropped++; - dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + return NETDEV_TX_OK; +} - dev_kfree_skb(skb); +static void ftgmac100_free_buffers(struct ftgmac100 *priv) +{ + int i; - ftgmac100_txdes_reset(priv, txdes); + /* Free all RX buffers */ + for (i = 0; i < priv->rx_q_entries; i++) { + struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; + struct sk_buff *skb = priv->rx_skbs[i]; + dma_addr_t map = le32_to_cpu(rxdes->rxdes3); - ftgmac100_tx_clean_pointer_advance(priv); + if (!skb) + continue; - spin_lock(&priv->tx_lock); - priv->tx_pending--; - spin_unlock(&priv->tx_lock); - netif_wake_queue(netdev); + priv->rx_skbs[i] = NULL; + dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } - return true; -} + /* Free all TX buffers */ + for (i = 0; i < priv->tx_q_entries; i++) { + struct ftgmac100_txdes *txdes = &priv->txdes[i]; + struct sk_buff *skb = priv->tx_skbs[i]; -static void ftgmac100_tx_complete(struct ftgmac100 *priv) -{ - while (ftgmac100_tx_complete_packet(priv)) - ; + if (!skb) + continue; + ftgmac100_free_tx_packet(priv, i, skb, txdes, + le32_to_cpu(txdes->txdes0)); + } } -static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb, - dma_addr_t map) +static void ftgmac100_free_rings(struct ftgmac100 *priv) { - struct net_device *netdev = priv->netdev; - struct ftgmac100_txdes *txdes; - unsigned int len = (skb->len < ETH_ZLEN) ?
ETH_ZLEN : skb->len; - - txdes = ftgmac100_current_txdes(priv); - ftgmac100_tx_pointer_advance(priv); - - /* setup TX descriptor */ - ftgmac100_txdes_set_skb(txdes, skb); - ftgmac100_txdes_set_dma_addr(txdes, map); - ftgmac100_txdes_set_buffer_size(txdes, len); - - ftgmac100_txdes_set_first_segment(txdes); - ftgmac100_txdes_set_last_segment(txdes); - ftgmac100_txdes_set_txint(txdes); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - __be16 protocol = skb->protocol; - - if (protocol == cpu_to_be16(ETH_P_IP)) { - u8 ip_proto = ip_hdr(skb)->protocol; - - ftgmac100_txdes_set_ipcs(txdes); - if (ip_proto == IPPROTO_TCP) - ftgmac100_txdes_set_tcpcs(txdes); - else if (ip_proto == IPPROTO_UDP) - ftgmac100_txdes_set_udpcs(txdes); - } - } - - spin_lock(&priv->tx_lock); - priv->tx_pending++; - if (priv->tx_pending == TX_QUEUE_ENTRIES) - netif_stop_queue(netdev); + /* Free skb arrays */ + kfree(priv->rx_skbs); + kfree(priv->tx_skbs); - /* start transmit */ - ftgmac100_txdes_set_dma_own(txdes); - spin_unlock(&priv->tx_lock); + /* Free descriptors */ + if (priv->rxdes) + dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES * + sizeof(struct ftgmac100_rxdes), + priv->rxdes, priv->rxdes_dma); + priv->rxdes = NULL; - ftgmac100_txdma_normal_prio_start_polling(priv); + if (priv->txdes) + dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES * + sizeof(struct ftgmac100_txdes), + priv->txdes, priv->txdes_dma); + priv->txdes = NULL; - return NETDEV_TX_OK; + /* Free scratch packet buffer */ + if (priv->rx_scratch) + dma_free_coherent(priv->dev, RX_BUF_SIZE, + priv->rx_scratch, priv->rx_scratch_dma); } -/****************************************************************************** - * internal functions (buffer) - *****************************************************************************/ -static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, - struct ftgmac100_rxdes *rxdes, gfp_t gfp) +static int ftgmac100_alloc_rings(struct ftgmac100 *priv) { - struct net_device *netdev = priv->netdev; - struct page *page; - dma_addr_t map; + /* Allocate skb arrays */ + priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *), + GFP_KERNEL); + if (!priv->rx_skbs) + return -ENOMEM; + priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *), + GFP_KERNEL); + if (!priv->tx_skbs) + return -ENOMEM; - page = alloc_page(gfp); - if (!page) { - if (net_ratelimit()) - netdev_err(netdev, "failed to allocate rx page\n"); + /* Allocate descriptors */ + priv->rxdes = dma_zalloc_coherent(priv->dev, + MAX_RX_QUEUE_ENTRIES * + sizeof(struct ftgmac100_rxdes), + &priv->rxdes_dma, GFP_KERNEL); + if (!priv->rxdes) + return -ENOMEM; + priv->txdes = dma_zalloc_coherent(priv->dev, + MAX_TX_QUEUE_ENTRIES * + sizeof(struct ftgmac100_txdes), + &priv->txdes_dma, GFP_KERNEL); + if (!priv->txdes) return -ENOMEM; - } - map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, map))) { - if (net_ratelimit()) - netdev_err(netdev, "failed to map rx page\n"); - __free_page(page); + /* Allocate scratch packet buffer */ + priv->rx_scratch = dma_alloc_coherent(priv->dev, + RX_BUF_SIZE, + &priv->rx_scratch_dma, + GFP_KERNEL); + if (!priv->rx_scratch) return -ENOMEM; - } - ftgmac100_rxdes_set_page(priv, rxdes, page); - ftgmac100_rxdes_set_dma_addr(rxdes, map); - ftgmac100_rxdes_set_dma_own(priv, rxdes); return 0; } -static void ftgmac100_free_buffers(struct ftgmac100 *priv) +static void ftgmac100_init_rings(struct ftgmac100 *priv) { + struct ftgmac100_rxdes *rxdes = NULL; + struct ftgmac100_txdes *txdes = 
NULL; int i; - for (i = 0; i < RX_QUEUE_ENTRIES; i++) { - struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i]; - struct page *page = ftgmac100_rxdes_get_page(priv, rxdes); - dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes); + /* Update entry counts */ + priv->rx_q_entries = priv->new_rx_q_entries; + priv->tx_q_entries = priv->new_tx_q_entries; - if (!page) - continue; + if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES)) + return; - dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); - __free_page(page); + /* Initialize RX ring */ + for (i = 0; i < priv->rx_q_entries; i++) { + rxdes = &priv->rxdes[i]; + rxdes->rxdes0 = 0; + rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); } + /* Mark the end of the ring */ + rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); - for (i = 0; i < TX_QUEUE_ENTRIES; i++) { - struct ftgmac100_txdes *txdes = &priv->descs->txdes[i]; - struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes); - dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes); - - if (!skb) - continue; + if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES)) + return; - dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); - kfree_skb(skb); + /* Initialize TX ring */ + for (i = 0; i < priv->tx_q_entries; i++) { + txdes = &priv->txdes[i]; + txdes->txdes0 = 0; } - - dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs), - priv->descs, priv->descs_dma_addr); + txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask); } -static int ftgmac100_alloc_buffers(struct ftgmac100 *priv) +static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv) { int i; - priv->descs = dma_zalloc_coherent(priv->dev, - sizeof(struct ftgmac100_descs), - &priv->descs_dma_addr, GFP_KERNEL); - if (!priv->descs) - return -ENOMEM; - - /* initialize RX ring */ - ftgmac100_rxdes_set_end_of_ring(priv, - &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); - - for (i = 0; i < RX_QUEUE_ENTRIES; i++) { - struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + for (i = 0; i < priv->rx_q_entries; i++) { + struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; - if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) - goto err; + if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL)) + return -ENOMEM; } - - /* initialize TX ring */ - ftgmac100_txdes_set_end_of_ring(priv, - &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); return 0; - -err: - ftgmac100_free_buffers(priv); - return -ENOMEM; } -/****************************************************************************** - * internal functions (mdio) - *****************************************************************************/ static void ftgmac100_adjust_link(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct phy_device *phydev = netdev->phydev; - int ier; + bool tx_pause, rx_pause; + int new_speed; + + /* We store "no link" as speed 0 */ + if (!phydev->link) + new_speed = 0; + else + new_speed = phydev->speed; + + /* Grab pause settings from PHY if configured to do so */ + if (priv->aneg_pause) { + rx_pause = tx_pause = phydev->pause; + if (phydev->asym_pause) + tx_pause = !rx_pause; + } else { + rx_pause = priv->rx_pause; + tx_pause = priv->tx_pause; + } - if (phydev->speed == priv->old_speed) + /* Link hasn't changed, do nothing */ + if (phydev->speed == priv->cur_speed && + phydev->duplex == priv->cur_duplex && + rx_pause == priv->rx_pause && + tx_pause == priv->tx_pause) return; - priv->old_speed = phydev->speed; - - ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
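For reference, the pause resolution a few lines up reduces to a small truth table. This is an illustrative note, not patch code; the phydev->pause and phydev->asym_pause flags themselves are filled in by the PHY layer from the autonegotiation results:

	/* Sketch: rx_pause follows 'pause'; tx_pause is 'pause' XOR 'asym_pause'.
	 *
	 *   pause  asym_pause  ->  rx_pause  tx_pause
	 *     0        0              0         0      (no flow control)
	 *     1        0              1         1      (symmetric pause)
	 *     1        1              1         0
	 *     0        1              0         1
	 */
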
lost it, + * don't print otherwise. + */ + if (new_speed || priv->cur_speed) + phy_print_status(phydev); - /* disable all interrupts */ - iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + priv->cur_speed = new_speed; + priv->cur_duplex = phydev->duplex; + priv->rx_pause = rx_pause; + priv->tx_pause = tx_pause; - netif_stop_queue(netdev); - ftgmac100_stop_hw(priv); + /* Link is down, do nothing else */ + if (!new_speed) + return; - netif_start_queue(netdev); - ftgmac100_init_hw(priv); - ftgmac100_start_hw(priv, phydev->speed); + /* Disable all interrupts */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); - /* re-enable interrupts */ - iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER); + /* Reset the adapter asynchronously */ + schedule_work(&priv->reset_task); } -static int ftgmac100_mii_probe(struct ftgmac100 *priv) +static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) { struct net_device *netdev = priv->netdev; struct phy_device *phydev; @@ -910,19 +1064,25 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv) } phydev = phy_connect(netdev, phydev_name(phydev), - &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); + &ftgmac100_adjust_link, intf); if (IS_ERR(phydev)) { netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); return PTR_ERR(phydev); } + /* Indicate that we support PAUSE frames (see comment in + * Documentation/networking/phy.txt) + */ + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + phydev->advertising = phydev->supported; + + /* Display what we found */ + phy_attached_info(phydev); + return 0; } -/****************************************************************************** - * struct mii_bus functions - *****************************************************************************/ static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { struct net_device *netdev = bus->priv; @@ -994,9 +1154,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, return -EIO; } -/****************************************************************************** - * struct ethtool_ops functions - *****************************************************************************/ static void ftgmac100_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -1005,175 +1162,365 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } +static void ftgmac100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + memset(ering, 0, sizeof(*ering)); + ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES; + ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES; + ering->rx_pending = priv->rx_q_entries; + ering->tx_pending = priv->tx_q_entries; +} + +static int ftgmac100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES || + ering->tx_pending > MAX_TX_QUEUE_ENTRIES || + ering->rx_pending < MIN_RX_QUEUE_ENTRIES || + ering->tx_pending < MIN_TX_QUEUE_ENTRIES || + !is_power_of_2(ering->rx_pending) || + !is_power_of_2(ering->tx_pending)) + return -EINVAL; + + priv->new_rx_q_entries = ering->rx_pending; + priv->new_tx_q_entries = ering->tx_pending; + if (netif_running(netdev)) + schedule_work(&priv->reset_task); + + return 0; +} + +static void ftgmac100_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + 
struct ftgmac100 *priv = netdev_priv(netdev); + + pause->autoneg = priv->aneg_pause; + pause->tx_pause = priv->tx_pause; + pause->rx_pause = priv->rx_pause; +} + +static int ftgmac100_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + + priv->aneg_pause = pause->autoneg; + priv->tx_pause = pause->tx_pause; + priv->rx_pause = pause->rx_pause; + + if (phydev) { + phydev->advertising &= ~ADVERTISED_Pause; + phydev->advertising &= ~ADVERTISED_Asym_Pause; + + if (pause->rx_pause) { + phydev->advertising |= ADVERTISED_Pause; + phydev->advertising |= ADVERTISED_Asym_Pause; + } + + if (pause->tx_pause) + phydev->advertising ^= ADVERTISED_Asym_Pause; + } + if (netif_running(netdev)) { + if (phydev && priv->aneg_pause) + phy_start_aneg(phydev); + else + ftgmac100_config_pause(priv); + } + + return 0; +} + static const struct ethtool_ops ftgmac100_ethtool_ops = { .get_drvinfo = ftgmac100_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_ringparam = ftgmac100_get_ringparam, + .set_ringparam = ftgmac100_set_ringparam, + .get_pauseparam = ftgmac100_get_pauseparam, + .set_pauseparam = ftgmac100_set_pauseparam, }; -/****************************************************************************** - * interrupt handler - *****************************************************************************/ static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) { struct net_device *netdev = dev_id; struct ftgmac100 *priv = netdev_priv(netdev); + unsigned int status, new_mask = FTGMAC100_INT_BAD; - /* When running in NCSI mode, the interface should be ready for - * receiving or transmitting NCSI packets before it's opened. - */ - if (likely(priv->use_ncsi || netif_running(netdev))) { - /* Disable interrupts for polling */ - iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); - napi_schedule(&priv->napi); + /* Fetch and clear interrupt bits, process abnormal ones */ + status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); + iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); + if (unlikely(status & FTGMAC100_INT_BAD)) { + + /* RX buffer unavailable */ + if (status & FTGMAC100_INT_NO_RXBUF) + netdev->stats.rx_over_errors++; + + /* received packet lost due to RX FIFO full */ + if (status & FTGMAC100_INT_RPKT_LOST) + netdev->stats.rx_fifo_errors++; + + /* sent packet lost due to excessive TX collision */ + if (status & FTGMAC100_INT_XPKT_LOST) + netdev->stats.tx_fifo_errors++; + + /* AHB error -> Reset the chip */ + if (status & FTGMAC100_INT_AHB_ERR) { + if (net_ratelimit()) + netdev_warn(netdev, + "AHB bus error ! 
Resetting chip.\n"); + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + schedule_work(&priv->reset_task); + return IRQ_HANDLED; + } + + /* We may need to restart the MAC after such errors, delay + * this until after we have freed some Rx buffers though + */ + priv->need_mac_restart = true; + + /* Disable those errors until we restart */ + new_mask &= ~status; } + /* Only enable "bad" interrupts while NAPI is on */ + iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER); + + /* Schedule NAPI bh */ + napi_schedule_irqoff(&priv->napi); + return IRQ_HANDLED; } -/****************************************************************************** - * struct napi_struct functions - *****************************************************************************/ +static bool ftgmac100_check_rx(struct ftgmac100 *priv) +{ + struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer]; + + /* Do we have a packet ? */ + return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY)); +} + static int ftgmac100_poll(struct napi_struct *napi, int budget) { struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi); - struct net_device *netdev = priv->netdev; - unsigned int status; - bool completed = true; - int rx = 0; + int work_done = 0; + bool more; - status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); - iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); + /* Handle TX completions */ + if (ftgmac100_tx_buf_cleanable(priv)) + ftgmac100_tx_complete(priv); - if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) { - /* - * FTGMAC100_INT_RPKT_BUF: - * RX DMA has received packets into RX buffer successfully - * - * FTGMAC100_INT_NO_RXBUF: - * RX buffer unavailable - */ - bool retry; + /* Handle RX packets */ + do { + more = ftgmac100_rx_packet(priv, &work_done); + } while (more && work_done < budget); - do { - retry = ftgmac100_rx_packet(priv, &rx); - } while (retry && rx < budget); - if (retry && rx == budget) - completed = false; - } + /* The interrupt is telling us to kick the MAC back to life + * after an RX overflow + */ + if (unlikely(priv->need_mac_restart)) { + ftgmac100_start_hw(priv); - if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) { - /* - * FTGMAC100_INT_XPKT_ETH: - * packet transmitted to ethernet successfully - * - * FTGMAC100_INT_XPKT_LOST: - * packet transmitted to ethernet lost due to late - * collision or excessive collision - */ - ftgmac100_tx_complete(priv); + /* Re-enable "bad" interrupts */ + iowrite32(FTGMAC100_INT_BAD, + priv->base + FTGMAC100_OFFSET_IER); } - if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF | - FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) { - if (net_ratelimit()) - netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status, - status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "", - status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "", - status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : ""); + /* As long as we are waiting for transmit packets to be + * completed we keep NAPI going + */ + if (ftgmac100_tx_buf_cleanable(priv)) + work_done = budget; + + if (work_done < budget) { + /* We are about to re-enable all interrupts. However + * the HW has been latching RX/TX packet interrupts while + * they were masked. So we clear them first, then we need + * to re-check if there's something to process + */ + iowrite32(FTGMAC100_INT_RXTX, + priv->base + FTGMAC100_OFFSET_ISR); - if (status & FTGMAC100_INT_NO_RXBUF) { - /* RX buffer unavailable */ - netdev->stats.rx_over_errors++; - } + /* Push the above (and provides a barrier vs. 
subsequent + * reads of the descriptor). + */ + ioread32(priv->base + FTGMAC100_OFFSET_ISR); - if (status & FTGMAC100_INT_RPKT_LOST) { - /* received packet lost due to RX FIFO full */ - netdev->stats.rx_fifo_errors++; - } - } + /* Check RX and TX descriptors for more work to do */ + if (ftgmac100_check_rx(priv) || + ftgmac100_tx_buf_cleanable(priv)) + return budget; - if (completed) { + /* deschedule NAPI */ napi_complete(napi); /* enable all interrupts */ - iowrite32(priv->int_mask_all, + iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER); } - return rx; + return work_done; } -/****************************************************************************** - * struct net_device_ops functions - *****************************************************************************/ -static int ftgmac100_open(struct net_device *netdev) +static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err) { - struct ftgmac100 *priv = netdev_priv(netdev); - unsigned int status; + int err = 0; + + /* Re-init descriptors (adjust queue sizes) */ + ftgmac100_init_rings(priv); + + /* Realloc rx descriptors */ + err = ftgmac100_alloc_rx_buffers(priv); + if (err && !ignore_alloc_err) + return err; + + /* Reinit and restart HW */ + ftgmac100_init_hw(priv); + ftgmac100_config_pause(priv); + ftgmac100_start_hw(priv); + + /* Re-enable the device */ + napi_enable(&priv->napi); + netif_start_queue(priv->netdev); + + /* Enable all interrupts */ + iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER); + + return err; +} + +static void ftgmac100_reset_task(struct work_struct *work) +{ + struct ftgmac100 *priv = container_of(work, struct ftgmac100, + reset_task); + struct net_device *netdev = priv->netdev; int err; - err = ftgmac100_alloc_buffers(priv); + netdev_dbg(netdev, "Resetting NIC...\n"); + + /* Lock the world */ + rtnl_lock(); + if (netdev->phydev) + mutex_lock(&netdev->phydev->lock); + if (priv->mii_bus) + mutex_lock(&priv->mii_bus->mdio_lock); + + + /* Check if the interface is still up */ + if (!netif_running(netdev)) + goto bail; + + /* Stop the network stack */ + netif_trans_update(netdev); + napi_disable(&priv->napi); + netif_tx_disable(netdev); + + /* Stop and reset the MAC */ + ftgmac100_stop_hw(priv); + err = ftgmac100_reset_and_config_mac(priv); if (err) { - netdev_err(netdev, "failed to allocate buffers\n"); - goto err_alloc; + /* Not much we can do ... it might come back... 
*/ + netdev_err(netdev, "attempting to continue...\n"); } - err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev); + /* Free all rx and tx buffers */ + ftgmac100_free_buffers(priv); + + /* Set everything up again and restart the chip */ + ftgmac100_init_all(priv, true); + + netdev_dbg(netdev, "Reset done!\n"); + bail: + if (priv->mii_bus) + mutex_unlock(&priv->mii_bus->mdio_lock); + if (netdev->phydev) + mutex_unlock(&netdev->phydev->lock); + rtnl_unlock(); +} + +static int ftgmac100_open(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + int err; + + /* Allocate ring buffers */ + err = ftgmac100_alloc_rings(priv); if (err) { - netdev_err(netdev, "failed to request irq %d\n", priv->irq); - goto err_irq; + netdev_err(netdev, "Failed to allocate descriptors\n"); + return err; } - priv->rx_pointer = 0; - priv->tx_clean_pointer = 0; - priv->tx_pointer = 0; - priv->tx_pending = 0; + /* When using NC-SI we force the speed to 100Mbit/s full duplex. + * + * Otherwise we leave it set to 0 (no link); the link + * message from the PHY layer will handle setting it up to + * something else if needed. + */ + if (priv->use_ncsi) { + priv->cur_duplex = DUPLEX_FULL; + priv->cur_speed = SPEED_100; + } else { + priv->cur_duplex = 0; + priv->cur_speed = 0; + } - err = ftgmac100_reset_hw(priv); + /* Reset the hardware */ + err = ftgmac100_reset_and_config_mac(priv); if (err) goto err_hw; - ftgmac100_init_hw(priv); - ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10); + /* Initialize NAPI */ + netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); - /* Clear stale interrupts */ - status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); - iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); + /* Grab our interrupt */ + err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", netdev->irq); + goto err_irq; + } - if (netdev->phydev) + /* Start things up */ + err = ftgmac100_init_all(priv, false); + if (err) { + netdev_err(netdev, "Failed to allocate packet buffers\n"); + goto err_alloc; + } + + if (netdev->phydev) { + /* If we have a PHY, start polling */ phy_start(netdev->phydev); - else if (priv->use_ncsi) + } else if (priv->use_ncsi) { + /* If using NC-SI, set our carrier on and start the stack */ netif_carrier_on(netdev); - napi_enable(&priv->napi); - netif_start_queue(netdev); - - /* enable all interrupts */ - iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER); - - /* Start the NCSI device */ - if (priv->use_ncsi) { + /* Start the NCSI device */ err = ncsi_start_dev(priv->ndev); if (err) goto err_ncsi; } - priv->enabled = true; - return 0; -err_ncsi: + err_ncsi: napi_disable(&priv->napi); netif_stop_queue(netdev); - iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); -err_hw: - free_irq(priv->irq, netdev); -err_irq: + err_alloc: ftgmac100_free_buffers(priv); -err_alloc: + free_irq(netdev->irq, netdev); + err_irq: + netif_napi_del(&priv->napi); + err_hw: + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + ftgmac100_free_rings(priv); return err; } @@ -1181,64 +1528,87 @@ static int ftgmac100_stop(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); - if (!priv->enabled) - return 0; + /* Note about the reset task: We are called with the rtnl lock + * held, so we are synchronized against the core of the reset + * task. We must not try to synchronously cancel it, otherwise + * we can deadlock. But since it will test for netif_running(), + * which has already been cleared by the net core, we don't have + * anything special to do. + */
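The constraint described in the comment above can be made concrete with a small lock-ordering sketch (illustrative only, not patch code):

	/* Why cancel_work_sync() would deadlock here:
	 *
	 *   ftgmac100_stop()               ftgmac100_reset_task()
	 *   ----------------               ----------------------
	 *   called with rtnl held          rtnl_lock()  <- blocks: stop() holds it
	 *   cancel_work_sync() <- would wait for the work to finish
	 *
	 * Each side would wait on the other forever, so the work is left
	 * pending; its netif_running() test turns it into a no-op once
	 * the interface is down.
	 */
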
/* disable all interrupts */ - priv->enabled = false; iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); netif_stop_queue(netdev); napi_disable(&priv->napi); + netif_napi_del(&priv->napi); if (netdev->phydev) phy_stop(netdev->phydev); else if (priv->use_ncsi) ncsi_stop_dev(priv->ndev); ftgmac100_stop_hw(priv); - free_irq(priv->irq, netdev); + free_irq(netdev->irq, netdev); ftgmac100_free_buffers(priv); + ftgmac100_free_rings(priv); return 0; } -static int ftgmac100_hard_start_xmit(struct sk_buff *skb, - struct net_device *netdev) +/* optional */ +static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + if (!netdev->phydev) + return -ENXIO; + + return phy_mii_ioctl(netdev->phydev, ifr, cmd); +} + +static void ftgmac100_tx_timeout(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); - dma_addr_t map; - if (unlikely(skb->len > MAX_PKT_SIZE)) { - if (net_ratelimit()) - netdev_dbg(netdev, "tx packet too big\n"); + /* Disable all interrupts */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); - netdev->stats.tx_dropped++; - kfree_skb(skb); - return NETDEV_TX_OK; - } + /* Do the reset outside of interrupt context */ + schedule_work(&priv->reset_task); +} - map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, map))) { - /* drop packet */ - if (net_ratelimit()) - netdev_err(netdev, "map socket buffer failed\n"); +static int ftgmac100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; - netdev->stats.tx_dropped++; - kfree_skb(skb); - return NETDEV_TX_OK; + if (!netif_running(netdev)) + return 0; + + /* Update the vlan filtering bit */ + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + u32 maccr; + + maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); + if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + maccr |= FTGMAC100_MACCR_RM_VLAN; + else + maccr &= ~FTGMAC100_MACCR_RM_VLAN; + iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); } - return ftgmac100_xmit(priv, skb, map); + return 0; } -/* optional */ -static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ftgmac100_poll_controller(struct net_device *netdev) { - if (!netdev->phydev) - return -ENXIO; + unsigned long flags; - return phy_mii_ioctl(netdev->phydev, ifr, cmd); + local_irq_save(flags); + ftgmac100_interrupt(netdev->irq, netdev); + local_irq_restore(flags); } +#endif static const struct net_device_ops ftgmac100_netdev_ops = { .ndo_open = ftgmac100_open, @@ -1247,12 +1617,20 @@ static const struct net_device_ops ftgmac100_netdev_ops = { .ndo_set_mac_address = ftgmac100_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = ftgmac100_do_ioctl, + .ndo_tx_timeout = ftgmac100_tx_timeout, + .ndo_set_rx_mode = ftgmac100_set_rx_mode, + .ndo_set_features = ftgmac100_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ftgmac100_poll_controller, +#endif }; static int ftgmac100_setup_mdio(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct platform_device *pdev = to_platform_device(priv->dev); + int phy_intf = PHY_INTERFACE_MODE_RGMII; + struct device_node *np = pdev->dev.of_node; int i, err = 0; u32 reg; @@ -1261,14
+1639,46 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) if (!priv->mii_bus) return -EIO; - if (of_machine_is_compatible("aspeed,ast2400") || - of_machine_is_compatible("aspeed,ast2500")) { + if (priv->is_aspeed) { /* This driver supports the old MDIO interface */ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); }; + /* Get PHY mode from device-tree */ + if (np) { + /* Default to RGMII. It's a gigabit part after all */ + phy_intf = of_get_phy_mode(np); + if (phy_intf < 0) + phy_intf = PHY_INTERFACE_MODE_RGMII; + + /* Aspeed only supports these. I don't know about other IP + * block vendors so I'm going to just let them through for + * now. Note that this is only a warning if for some obscure + * reason the DT really means to lie about it or it's a newer + * part we don't know about. + * + * On the Aspeed SoC there are additionally straps and SCU + * control bits that could tell us what the interface is + * (or allow us to configure it while the IP block is held + * in reset). For now I chose to keep this driver away from + * those SoC specific bits and assume the device-tree is + * right and the SCU has been configured properly by pinmux + * or the firmware. + */ + if (priv->is_aspeed && + phy_intf != PHY_INTERFACE_MODE_RMII && + phy_intf != PHY_INTERFACE_MODE_RGMII && + phy_intf != PHY_INTERFACE_MODE_RGMII_ID && + phy_intf != PHY_INTERFACE_MODE_RGMII_RXID && + phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) { + netdev_warn(netdev, + "Unsupported PHY mode %s !\n", + phy_modes(phy_intf)); + } + } + priv->mii_bus->name = "ftgmac100_mdio"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); @@ -1285,7 +1695,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) goto err_register_mdiobus; } - err = ftgmac100_mii_probe(priv); + err = ftgmac100_mii_probe(priv, phy_intf); if (err) { dev_err(priv->dev, "MII Probe failed!\n"); goto err_mii_probe; @@ -1321,15 +1731,13 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) nd->link_up ? 
"up" : "down"); } -/****************************************************************************** - * struct platform_driver functions - *****************************************************************************/ static int ftgmac100_probe(struct platform_device *pdev) { struct resource *res; int irq; struct net_device *netdev; struct ftgmac100 *priv; + struct device_node *np; int err = 0; if (!pdev) @@ -1354,6 +1762,7 @@ static int ftgmac100_probe(struct platform_device *pdev) netdev->ethtool_ops = &ftgmac100_ethtool_ops; netdev->netdev_ops = &ftgmac100_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; platform_set_drvdata(pdev, netdev); @@ -1361,11 +1770,7 @@ static int ftgmac100_probe(struct platform_device *pdev) priv = netdev_priv(netdev); priv->netdev = netdev; priv->dev = &pdev->dev; - - spin_lock_init(&priv->tx_lock); - - /* initialize NAPI */ - netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); + INIT_WORK(&priv->reset_task, ftgmac100_reset_task); /* map io memory */ priv->res = request_mem_region(res->start, resource_size(res), @@ -1383,29 +1788,28 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_ioremap; } - priv->irq = irq; + netdev->irq = irq; - /* MAC address from chip or random one */ - ftgmac100_setup_mac(priv); + /* Enable pause */ + priv->tx_pause = true; + priv->rx_pause = true; + priv->aneg_pause = true; - priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST | - FTGMAC100_INT_XPKT_ETH | - FTGMAC100_INT_XPKT_LOST | - FTGMAC100_INT_AHB_ERR | - FTGMAC100_INT_RPKT_BUF | - FTGMAC100_INT_NO_RXBUF); + /* MAC address from chip or random one */ + ftgmac100_initial_mac(priv); - if (of_machine_is_compatible("aspeed,ast2400") || - of_machine_is_compatible("aspeed,ast2500")) { + np = pdev->dev.of_node; + if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || + of_device_is_compatible(np, "aspeed,ast2500-mac"))) { priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); + priv->is_aspeed = true; } else { priv->rxdes0_edorr_mask = BIT(15); priv->txdes0_edotr_mask = BIT(15); } - if (pdev->dev.of_node && - of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) { + if (np && of_get_property(np, "use-ncsi", NULL)) { if (!IS_ENABLED(CONFIG_NET_NCSI)) { dev_err(&pdev->dev, "NCSI stack not enabled\n"); goto err_ncsi_dev; @@ -1423,15 +1827,21 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_setup_mdio; } - /* We have to disable on-chip IP checksum functionality - * when NCSI is enabled on the interface. It doesn't work - * in that case. 
- */ - netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; - if (priv->use_ncsi && - of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL)) - netdev->features &= ~NETIF_F_IP_CSUM; + /* Default ring sizes */ + priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES; + priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES; + /* Base feature set */ + netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + /* AST2400 doesn't have working HW checksum generation */ + if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) + netdev->hw_features &= ~NETIF_F_HW_CSUM; + if (np && of_get_property(np, "no-hw-checksum", NULL)) + netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); + netdev->features |= netdev->hw_features; /* register network device */ err = register_netdev(netdev); @@ -1440,7 +1850,7 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_register_netdev; } - netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); + netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base); return 0; @@ -1467,6 +1877,12 @@ static int ftgmac100_remove(struct platform_device *pdev) priv = netdev_priv(netdev); unregister_netdev(netdev); + + /* There's a small chance the reset task will have been re-queued, + * during stop, make sure it's gone before we free the structure. + */ + cancel_work_sync(&priv->reset_task); + ftgmac100_destroy_mdio(netdev); iounmap(priv->base); diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h index a7ce0ac8858a5f1a123be8ca46a03bc5fa18f9f0..0653d8176e6a536601119ffadefcbe55e28c7949 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.h +++ b/drivers/net/ethernet/faraday/ftgmac100.h @@ -86,6 +86,20 @@ #define FTGMAC100_INT_PHYSTS_CHG (1 << 9) #define FTGMAC100_INT_NO_HPTXBUF (1 << 10) +/* Interrupts we care about in NAPI mode */ +#define FTGMAC100_INT_BAD (FTGMAC100_INT_RPKT_LOST | \ + FTGMAC100_INT_XPKT_LOST | \ + FTGMAC100_INT_AHB_ERR | \ + FTGMAC100_INT_NO_RXBUF) + +/* Normal RX/TX interrupts, enabled when NAPI off */ +#define FTGMAC100_INT_RXTX (FTGMAC100_INT_XPKT_ETH | \ + FTGMAC100_INT_RPKT_BUF) + +/* All the interrupts we care about */ +#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \ + FTGMAC100_INT_BAD) + /* * Interrupt timer control register */ @@ -184,14 +198,21 @@ #define FTGMAC100_PHYDATA_MIIWDATA(x) ((x) & 0xffff) #define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff) +/* + * Flow control register + */ +#define FTGMAC100_FCR_FC_EN (1 << 0) +#define FTGMAC100_FCR_FCTHR_EN (1 << 2) +#define FTGMAC100_FCR_PAUSE_TIME(x) (((x) & 0xffff) << 16) + /* * Transmit descriptor, aligned to 16 bytes */ struct ftgmac100_txdes { - unsigned int txdes0; - unsigned int txdes1; - unsigned int txdes2; /* not used by HW */ - unsigned int txdes3; /* TXBUF_BADR */ + __le32 txdes0; /* Control & status bits */ + __le32 txdes1; /* Irq, checksum and vlan control */ + __le32 txdes2; /* Reserved */ + __le32 txdes3; /* DMA buffer address */ } __attribute__ ((aligned(16))); #define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff) @@ -213,10 +234,10 @@ struct ftgmac100_txdes { * Receive descriptor, aligned to 16 bytes */ struct ftgmac100_rxdes { - unsigned int rxdes0; - unsigned int rxdes1; - unsigned int rxdes2; /* not used by HW */ - unsigned int rxdes3; /* RXBUF_BADR */ + __le32 rxdes0; /* Control & status bits */ + __le32 rxdes1; /* Checksum and vlan status */ + 
__le32 rxdes2; /* length/type on AST2500 */ + __le32 rxdes3; /* DMA buffer address */ } __attribute__ ((aligned(16))); #define FTGMAC100_RXDES0_VDBC 0x3fff @@ -234,6 +255,14 @@ struct ftgmac100_rxdes { #define FTGMAC100_RXDES0_FRS (1 << 29) #define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31) +/* Errors we care about for dropping packets */ +#define RXDES0_ANY_ERROR ( \ + FTGMAC100_RXDES0_RX_ERR | \ + FTGMAC100_RXDES0_CRC_ERR | \ + FTGMAC100_RXDES0_FTL | \ + FTGMAC100_RXDES0_RUNT | \ + FTGMAC100_RXDES0_RX_ODD_NB) + #define FTGMAC100_RXDES1_VLANTAG_CI 0xffff #define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20) #define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index e2ca107f9d94f162cdab87e406c72095a6746651..9a520e4f0df9a0d47b75f71f01557414ba3d4eab 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); /* L4 Type field: TCP */ #define FM_L4_PARSE_RESULT_TCP 0x20 +/* FD status field indicating whether the FM Parser has attempted to validate + * the L4 csum of the frame. + * Note that having this bit set doesn't necessarily imply that the checksum + * is valid. One would have to check the parse results to find that out. + */ +#define FM_FD_STAT_L4CV 0x00000004 + #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */ @@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, * For conformity, we'll still declare GSO explicitly. */ net_dev->features |= NETIF_F_GSO; + net_dev->features |= NETIF_F_RXCSUM; net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; /* we do not want shared skbs on TX */ @@ -334,6 +342,45 @@ static void dpaa_get_stats64(struct net_device *net_dev, } } +static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + u8 num_tc; + int i; + + if (tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = tc->mqprio->num_tc; + + if (num_tc == priv->num_tc) + return 0; + + if (!num_tc) { + netdev_reset_tc(net_dev); + goto out; + } + + if (num_tc > DPAA_TC_NUM) { + netdev_err(net_dev, "Too many traffic classes: max %d supported.\n", + DPAA_TC_NUM); + return -EINVAL; + } + + netdev_set_num_tc(net_dev, num_tc); + + for (i = 0; i < num_tc; i++) + netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, + i * DPAA_TC_TXQ_NUM); + +out: + priv->num_tc = num_tc ? : 1; + netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + return 0; +} + static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) { struct platform_device *of_dev; @@ -557,16 +604,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv) /* Use multiple WQs for FQ assignment: * - Tx Confirmation queues go to WQ1. - * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance - * to be scheduled, in case there are many more FQs in WQ3). - * - Rx Default and Tx queues go to WQ3 (no differentiation between - * Rx and Tx traffic). + * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance + * to be scheduled, in case there are many more FQs in WQ6). + * - Rx Default goes to WQ6. + * - Tx queues go to different WQs depending on their priority. 
Equal + chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and + WQ0 (highest priority). * This ensures that Tx-confirmed buffers are timely released. In particular, * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they * are greatly outnumbered by other FQs in the system, while * dequeue scheduling is round-robin. */ -static inline void dpaa_assign_wq(struct dpaa_fq *fq) +static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) { switch (fq->fq_type) { case FQ_TYPE_TX_CONFIRM: @@ -575,11 +624,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq) break; case FQ_TYPE_RX_ERROR: case FQ_TYPE_TX_ERROR: - fq->wq = 2; + fq->wq = 5; break; case FQ_TYPE_RX_DEFAULT: + fq->wq = 6; + break; case FQ_TYPE_TX: - fq->wq = 3; + switch (idx / DPAA_TC_TXQ_NUM) { + case 0: + /* Low priority (best effort) */ + fq->wq = 6; + break; + case 1: + /* Medium priority */ + fq->wq = 2; + break; + case 2: + /* High priority */ + fq->wq = 1; + break; + case 3: + /* Very high priority */ + fq->wq = 0; + break; + default: + WARN(1, "Too many TX FQs: more than %d!\n", + DPAA_ETH_TXQ_NUM); + } break; default: WARN(1, "Invalid FQ type %d for FQID %d!\n", @@ -607,7 +678,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev, } for (i = 0; i < count; i++) - dpaa_assign_wq(dpaa_fq + i); + dpaa_assign_wq(dpaa_fq + i, i); return dpaa_fq; } @@ -903,7 +974,7 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) * Tx Confirmation FQs. */ if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) - initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE); + initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK); /* FQ placement */ initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ); @@ -985,7 +1056,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) /* Initialization common to all ingress queues */ if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA); - initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE); + initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE | + QM_FQCTRL_CTXASTASHING); initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION; @@ -1055,9 +1127,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list) return err; } -static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, - struct dpaa_fq *defq, - struct dpaa_buffer_layout *buf_layout) +static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, + struct dpaa_fq *defq, + struct dpaa_buffer_layout *buf_layout) { struct fman_buffer_prefix_content buf_prefix_content; struct fman_port_params params; @@ -1076,23 +1148,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, params.specific_params.non_rx_params.dflt_fqid = defq->fqid; err = fman_port_config(port, &params); - if (err) + if (err) { pr_err("%s: fman_port_config failed\n", __func__); + return err; + } err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); - if (err) + if (err) { pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", __func__); + return err; + } err = fman_port_init(port); if (err) pr_err("%s: fm_port_init failed\n", __func__); + + return err; } -static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, - size_t count, struct dpaa_fq *errq, - struct dpaa_fq *defq, - struct dpaa_buffer_layout *buf_layout) +static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, + size_t
count, struct dpaa_fq *errq, + struct dpaa_fq *defq, + struct dpaa_buffer_layout *buf_layout) { struct fman_buffer_prefix_content buf_prefix_content; struct fman_port_rx_params *rx_p; @@ -1120,32 +1198,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, } err = fman_port_config(port, &params); - if (err) + if (err) { pr_err("%s: fman_port_config failed\n", __func__); + return err; + } err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content); - if (err) + if (err) { pr_err("%s: fman_port_cfg_buf_prefix_content failed\n", __func__); + return err; + } err = fman_port_init(port); if (err) pr_err("%s: fm_port_init failed\n", __func__); + + return err; } -static void dpaa_eth_init_ports(struct mac_device *mac_dev, - struct dpaa_bp **bps, size_t count, - struct fm_port_fqs *port_fqs, - struct dpaa_buffer_layout *buf_layout, - struct device *dev) +static int dpaa_eth_init_ports(struct mac_device *mac_dev, + struct dpaa_bp **bps, size_t count, + struct fm_port_fqs *port_fqs, + struct dpaa_buffer_layout *buf_layout, + struct device *dev) { struct fman_port *rxport = mac_dev->port[RX]; struct fman_port *txport = mac_dev->port[TX]; + int err; + + err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, + port_fqs->tx_defq, &buf_layout[TX]); + if (err) + return err; + + err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq, + port_fqs->rx_defq, &buf_layout[RX]); - dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, - port_fqs->tx_defq, &buf_layout[TX]); - dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq, - port_fqs->rx_defq, &buf_layout[RX]); + return err; } static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, @@ -1526,6 +1616,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, return skb; } +static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) +{ + /* The parser has run and performed L4 checksum validation. + * We know there were no parser errors (and implicitly no + * L4 csum error), otherwise we wouldn't be here. + */ + if ((priv->net_dev->features & NETIF_F_RXCSUM) && + (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) + return CHECKSUM_UNNECESSARY; + + /* We're here because either the parser didn't run or the L4 checksum + * was not verified. This may include the case of a UDP frame with + * checksum zero or an L4 proto other than TCP/UDP + */ + return CHECKSUM_NONE; +} + /* Build a linear skb around the received buffer. * We are guaranteed there is enough room at the end of the data buffer to * accommodate the shared info area of the skb. */ @@ -1556,7 +1663,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, skb_reserve(skb, fd_off); skb_put(skb, qm_fd_get_length(fd)); - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = rx_csum_offload(priv, fd); return skb; @@ -1616,7 +1723,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, if (WARN_ON(unlikely(!skb))) goto free_buffers; - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = rx_csum_offload(priv, fd); /* Make sure forwarded skbs will have enough space * on Tx, if extra headers are added.
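[Editor's note: rx_csum_offload() above is deliberately conservative — CHECKSUM_UNNECESSARY is returned only when both the RXCSUM feature bit and the parser's L4CV flag agree. A small host-side sketch of the truth table; the constants mirror the kernel's values, the harness itself is hypothetical:]

	#include <stdio.h>

	#define FM_FD_STAT_L4CV		0x00000004
	#define CHECKSUM_NONE		0	/* mirrors the kernel enum */
	#define CHECKSUM_UNNECESSARY	1

	static int csum_for(int rxcsum_enabled, unsigned int fd_status)
	{
		if (rxcsum_enabled && (fd_status & FM_FD_STAT_L4CV))
			return CHECKSUM_UNNECESSARY;
		return CHECKSUM_NONE;
	}

	int main(void)
	{
		printf("%d\n", csum_for(1, FM_FD_STAT_L4CV));	/* 1: skip SW check */
		printf("%d\n", csum_for(1, 0));	/* 0: e.g. UDP with csum zero */
		printf("%d\n", csum_for(0, FM_FD_STAT_L4CV));	/* 0: RXCSUM off */
		return 0;
	}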
@@ -2093,7 +2200,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, dma_addr_t addr = qm_fd_addr(fd); enum qm_fd_format fd_format; struct net_device *net_dev; - u32 fd_status = fd->status; + u32 fd_status; struct dpaa_bp *dpaa_bp; struct dpaa_priv *priv; unsigned int skb_len; @@ -2350,6 +2457,7 @@ static const struct net_device_ops dpaa_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, .ndo_do_ioctl = dpaa_ioctl, + .ndo_setup_tc = dpaa_setup_tc, }; static int dpaa_napi_add(struct net_device *net_dev) @@ -2624,8 +2732,10 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); /* All real interfaces need their ports initialized */ - dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, - &priv->buf_layout[0], dev); + err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, + &priv->buf_layout[0], dev); + if (err) + goto init_ports_failed; priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); if (!priv->percpu_priv) { @@ -2638,6 +2748,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) memset(percpu_priv, 0, sizeof(*percpu_priv)); } + priv->num_tc = 1; + netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + /* Initialize NAPI */ err = dpaa_napi_add(net_dev); if (err < 0) @@ -2658,6 +2771,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) napi_add_failed: dpaa_napi_del(net_dev); alloc_percpu_failed: +init_ports_failed: dpaa_fq_free(dev, &priv->dpaa_fq_list); fq_alloc_failed: qman_delete_cgr_safe(&priv->ingress_cgr); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index 1f9aebf3f3c514517816ae92e379176c6861f219..9941a7866ebea43b8153c8a4150dc17f52e3afb7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -39,7 +39,12 @@ #include "mac.h" #include "dpaa_eth_trace.h" -#define DPAA_ETH_TXQ_NUM NR_CPUS +/* Number of prioritised traffic classes */ +#define DPAA_TC_NUM 4 +/* Number of Tx queues per traffic class */ +#define DPAA_TC_TXQ_NUM NR_CPUS +/* Total number of Tx queues */ +#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM) #define DPAA_BPS_NUM 3 /* number of bpools per interface */ @@ -152,6 +157,7 @@ struct dpaa_priv { u16 channel; struct list_head dpaa_fq_list; + u8 num_tc; u32 msg_enable; /* net_device message level */ struct { diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 91a16641e8514b7f1f99be03fe280d71e54de37d..a92bf94f8e94321899c28da6d36d6b9d30d571a5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -117,8 +117,9 @@ static struct platform_device_id fec_devtype[] = { .name = "imx6ul-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | + FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@ -235,14 +236,14 @@ static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct bufdesc_prop *bd) { return (bdp >= bd->last) ? 
bd->base - : (struct bufdesc *)(((unsigned)bdp) + bd->dsize); + : (struct bufdesc *)(((void *)bdp) + bd->dsize); } static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct bufdesc_prop *bd) { return (bdp <= bd->base) ? bd->last - : (struct bufdesc *)(((unsigned)bdp) - bd->dsize); + : (struct bufdesc *)(((void *)bdp) - bd->dsize); } static int fec_enet_get_bd_index(struct bufdesc *bdp, @@ -1266,7 +1267,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) } } - /* ERR006538: Keep the transmitter going */ + /* ERR006358: Keep the transmitter going */ if (bdp != txq->bd.cur && readl(txq->bd.reg_desc_active) == 0) writel(0, txq->bd.reg_desc_active); @@ -2651,7 +2652,7 @@ static void fec_enet_free_queue(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; - dma_free_coherent(NULL, + dma_free_coherent(&fep->pdev->dev, txq->bd.ring_size * TSO_HEADER_SIZE, txq->tso_hdrs, txq->tso_hdrs_dma); @@ -2685,7 +2686,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev) txq->tx_wake_threshold = (txq->bd.ring_size - txq->tx_stop_threshold) / 2; - txq->tso_hdrs = dma_alloc_coherent(NULL, + txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, txq->bd.ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); @@ -3187,7 +3188,7 @@ static int fec_enet_init(struct net_device *ndev) } #ifdef CONFIG_OF -static void fec_reset_phy(struct platform_device *pdev) +static int fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; bool active_high = false; @@ -3195,16 +3196,18 @@ static void fec_reset_phy(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; if (!np) - return; + return 0; - of_property_read_u32(np, "phy-reset-duration", &msec); + err = of_property_read_u32(np, "phy-reset-duration", &msec); /* A sane reset duration should not be longer than 1s */ - if (msec > 1000) + if (!err && msec > 1000) msec = 1; phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); - if (!gpio_is_valid(phy_reset)) - return; + if (phy_reset == -EPROBE_DEFER) + return phy_reset; + else if (!gpio_is_valid(phy_reset)) + return 0; active_high = of_property_read_bool(np, "phy-reset-active-high"); @@ -3213,7 +3216,7 @@ static void fec_reset_phy(struct platform_device *pdev) "phy-reset"); if (err) { dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); - return; + return err; } if (msec > 20) @@ -3222,14 +3225,17 @@ static void fec_reset_phy(struct platform_device *pdev) usleep_range(msec * 1000, msec * 1000 + 1000); gpio_set_value_cansleep(phy_reset, !active_high); + + return 0; } #else /* CONFIG_OF */ -static void fec_reset_phy(struct platform_device *pdev) +static int fec_reset_phy(struct platform_device *pdev) { /* * In case of platform probe, the reset has been done * by machine code. 
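[Editor's note: the reworked fec_reset_phy() above distinguishes three outcomes of the GPIO lookup. Condensed to its decision ladder — same calls as the patch, function name hypothetical:]

	#include <linux/gpio.h>
	#include <linux/of_gpio.h>

	/* Sketch: classify the "phy-reset-gpios" lookup result */
	static int phy_reset_gpio_policy(struct device_node *np)
	{
		int gpio = of_get_named_gpio(np, "phy-reset-gpios", 0);

		if (gpio == -EPROBE_DEFER)
			return gpio;	/* GPIO controller not bound yet: retry probe */
		if (!gpio_is_valid(gpio))
			return 0;	/* no reset line described: not an error */
		return gpio;		/* usable GPIO number */
	}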
*/ + return 0; } #endif /* CONFIG_OF */ @@ -3400,6 +3406,7 @@ fec_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to enable phy regulator: %d\n", ret); + clk_disable_unprepare(fep->clk_ipg); goto failed_regulator; } } else { @@ -3412,7 +3419,9 @@ fec_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - fec_reset_phy(pdev); + ret = fec_reset_phy(pdev); + if (ret) + goto failed_reset; if (fep->bufdesc_ex) fec_ptp_init(pdev); @@ -3473,8 +3482,10 @@ fec_probe(struct platform_device *pdev) fec_ptp_stop(pdev); if (fep->reg_phy) regulator_disable(fep->reg_phy); +failed_reset: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); failed_regulator: - clk_disable_unprepare(fep->clk_ipg); failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index f60845f0c6cad060b193fecdf00dd3a93127fb9c..4aefe24389695457ee307a9eb3403715091d283c 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -59,6 +59,7 @@ #define DMA_OFFSET 0x000C2000 #define FPM_OFFSET 0x000C3000 #define IMEM_OFFSET 0x000C4000 +#define HWP_OFFSET 0x000C7000 #define CGP_OFFSET 0x000DB000 /* Exceptions bit map */ @@ -218,6 +219,9 @@ #define QMI_GS_HALT_NOT_BUSY 0x00000002 +/* HWP defines */ +#define HWP_RPIMAC_PEN 0x00000001 + /* IRAM defines */ #define IRAM_IADD_AIE 0x80000000 #define IRAM_READY 0x80000000 @@ -475,6 +479,12 @@ struct fman_dma_regs { u32 res00e0[0x400 - 56]; }; +struct fman_hwp_regs { + u32 res0000[0x844 / 4]; /* 0x000..0x843 */ + u32 fmprrpimac; /* FM Parser Internal memory access control */ + u32 res[(0x1000 - 0x848) / 4]; /* 0x848..0xFFF */ +}; + /* Structure that holds current FMan state. * Used for saving run time information. 
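[Editor's note: the reserved arrays in fman_hwp_regs above exist only to pin fmprrpimac at offset 0x844 and pad the block to 0x1000 bytes. A compile-time assertion can document that intent — a sketch, not part of the patch:]

	#include <linux/bug.h>
	#include <linux/stddef.h>

	/* Call once from an init path so the checks are actually emitted */
	static inline void fman_hwp_layout_check(void)
	{
		/* 0x844 / 4 leading u32s put fmprrpimac exactly at 0x844 */
		BUILD_BUG_ON(offsetof(struct fman_hwp_regs, fmprrpimac) != 0x844);
		/* the trailing pad brings the block to 0x1000 bytes total */
		BUILD_BUG_ON(sizeof(struct fman_hwp_regs) != 0x1000);
	}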
*/ @@ -606,6 +616,7 @@ struct fman { struct fman_bmi_regs __iomem *bmi_regs; struct fman_qmi_regs __iomem *qmi_regs; struct fman_dma_regs __iomem *dma_regs; + struct fman_hwp_regs __iomem *hwp_regs; fman_exceptions_cb *exception_cb; fman_bus_error_cb *bus_error_cb; /* Spinlock for FMan use */ @@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg, iowrite32be(tmp_reg, &qmi_rg->fmqm_ien); } +static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg) +{ + /* enable HW Parser */ + iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac); +} + static int enable(struct fman *fman, struct fman_cfg *cfg) { u32 cfg_reg = 0; @@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state) state->max_num_of_open_dmas = 32; state->fm_port_num_of_cg = 256; state->num_of_rx_ports = 6; - state->total_fifo_size = 122 * 1024; + state->total_fifo_size = 136 * 1024; break; case 2: @@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman) fman->bmi_regs = base_addr + BMI_OFFSET; fman->qmi_regs = base_addr + QMI_OFFSET; fman->dma_regs = base_addr + DMA_OFFSET; + fman->hwp_regs = base_addr + HWP_OFFSET; fman->base_addr = base_addr; spin_lock_init(&fman->spinlock); @@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman) /* Init QMI Registers */ qmi_init(fman->qmi_regs, fman->cfg); + /* Init HW Parser */ + hwp_init(fman->hwp_regs); + err = enable(fman, cfg); if (err != 0) return err; diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index 57aae8d17d7710a9392fb91f7dee67904189d1ba..f53e1473dbccd667bebf8701a9b7c4cfac871663 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -134,14 +134,14 @@ enum fman_exceptions { struct fman_prs_result { u8 lpid; /* Logical port id */ u8 shimr; /* Shim header result */ - u16 l2r; /* Layer 2 result */ - u16 l3r; /* Layer 3 result */ + __be16 l2r; /* Layer 2 result */ + __be16 l3r; /* Layer 3 result */ u8 l4r; /* Layer 4 result */ u8 cplan; /* Classification plan id */ - u16 nxthdr; /* Next Header */ - u16 cksum; /* Running-sum */ + __be16 nxthdr; /* Next Header */ + __be16 cksum; /* Running-sum */ /* Flags&fragment-offset field of the last IP-header */ - u16 flags_frag_off; + __be16 flags_frag_off; /* Routing type field of a IPV6 routing extension header */ u8 route_type; /* Routing Extension Header Present; last bit is IP valid */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 84ea130eed365b405655650e7999351135b533e5..98bba10fc38c1a5916108fc0ec4b1f6f136f0032 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -381,6 +381,9 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, /* check RGMII support */ if (iface == PHY_INTERFACE_MODE_RGMII || + iface == PHY_INTERFACE_MODE_RGMII_ID || + iface == PHY_INTERFACE_MODE_RGMII_RXID || + iface == PHY_INTERFACE_MODE_RGMII_TXID || iface == PHY_INTERFACE_MODE_RMII) if (tmp & DTSEC_ID2_INT_REDUCED_OFF) return -EINVAL; @@ -390,7 +393,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, if (tmp & DTSEC_ID2_INT_REDUCED_OFF) return -EINVAL; - is_rgmii = iface == PHY_INTERFACE_MODE_RGMII; + is_rgmii = iface == PHY_INTERFACE_MODE_RGMII || + iface == PHY_INTERFACE_MODE_RGMII_ID || + iface == PHY_INTERFACE_MODE_RGMII_RXID || + iface == PHY_INTERFACE_MODE_RGMII_TXID; is_sgmii = iface == 
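[Editor's note: dTSEC above — and mEMAC just below — must treat all four RGMII delay variants alike, since internal-delay placement is a PHY concern and does not change the MAC-side interface choice. A small predicate would keep the repeated four-way comparison in one place; a sketch (later kernels grew a phy_interface_mode_is_rgmii() helper for exactly this):]

	#include <linux/phy.h>

	/* Sketch: true for RGMII with any internal-delay variant */
	static inline bool iface_is_rgmii(phy_interface_t iface)
	{
		return iface == PHY_INTERFACE_MODE_RGMII ||
		       iface == PHY_INTERFACE_MODE_RGMII_ID ||
		       iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		       iface == PHY_INTERFACE_MODE_RGMII_TXID;
	}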
PHY_INTERFACE_MODE_SGMII; is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index cd6a53eaf1614f7ffbeaadc7bbf6b28d8163e4c0..c0296880feba7f1afa505d4f9c08003e496de429 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -443,7 +443,10 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, break; default: tmp |= IF_MODE_GMII; - if (phy_if == PHY_INTERFACE_MODE_RGMII) + if (phy_if == PHY_INTERFACE_MODE_RGMII || + phy_if == PHY_INTERFACE_MODE_RGMII_ID || + phy_if == PHY_INTERFACE_MODE_RGMII_RXID || + phy_if == PHY_INTERFACE_MODE_RGMII_TXID) tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO; } iowrite32be(tmp, &regs->if_mode); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index 173d8e0fd71668afe4292a506ef120d853f249f5..c4a66469a9074daab469f304751acc06df8f9839 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -36,6 +36,7 @@ #include "fman_mac.h" #include +#include struct fman_mac *memac_config(struct fman_mac_params *params); int memac_set_promiscuous(struct fman_mac *memac, bool new_val); diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 9f3bb50a23651a4fd9edf51ff2d2fc29d321cae5..57bf44fa16a10ad54e1f56fa5cb5304d3039ed2c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -62,6 +62,7 @@ #define BMI_PORT_REGS_OFFSET 0 #define QMI_PORT_REGS_OFFSET 0x400 +#define HWP_PORT_REGS_OFFSET 0x800 /* Default values */ #define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \ @@ -182,7 +183,7 @@ #define NIA_ENG_BMI 0x00500000 #define NIA_ENG_QMI_ENQ 0x00540000 #define NIA_ENG_QMI_DEQ 0x00580000 - +#define NIA_ENG_HWP 0x00440000 #define NIA_BMI_AC_ENQ_FRAME 0x00000002 #define NIA_BMI_AC_TX_RELEASE 0x000002C0 #define NIA_BMI_AC_RELEASE 0x000000C0 @@ -317,6 +318,19 @@ struct fman_port_qmi_regs { u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */ }; +#define HWP_HXS_COUNT 16 +#define HWP_HXS_PHE_REPORT 0x00000800 +#define HWP_HXS_PCAC_PSTAT 0x00000100 +#define HWP_HXS_PCAC_PSTOP 0x00000001 +struct fman_port_hwp_regs { + struct { + u32 ssa; /* Soft Sequence Attachment */ + u32 lcv; /* Line-up Enable Confirmation Mask */ + } pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */ + u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */ + u32 fmpr_pcac; /* Configuration Access Control */ +}; + /* QMI dequeue prefetch modes */ enum fman_port_deq_prefetch { FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */ @@ -436,6 +450,7 @@ struct fman_port { union fman_port_bmi_regs __iomem *bmi_regs; struct fman_port_qmi_regs __iomem *qmi_regs; + struct fman_port_hwp_regs __iomem *hwp_regs; struct fman_sp_buffer_offsets buffer_offsets; @@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port) /* NIA */ tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT; - tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME; + tmp |= NIA_ENG_HWP; iowrite32be(tmp, &regs->fmbm_rfne); + /* Parser Next Engine NIA */ + iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne); + /* Enqueue NIA */ iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene); @@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port) return 0; } +static void stop_port_hwp(struct fman_port *port)
+{ + struct fman_port_hwp_regs __iomem *regs = port->hwp_regs; + int cnt = 100; + + iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac); + + while (cnt-- > 0 && + (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT)) + udelay(10); + if (cnt < 0) + pr_err("Timeout stopping HW Parser\n"); +} + +static void start_port_hwp(struct fman_port *port) +{ + struct fman_port_hwp_regs __iomem *regs = port->hwp_regs; + int cnt = 100; + + iowrite32be(0, &regs->fmpr_pcac); + + while (cnt-- > 0 && + !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT)) + udelay(10); + if (cnt < 0) + pr_err("Timeout starting HW Parser\n"); +} + +static void init_hwp(struct fman_port *port) +{ + struct fman_port_hwp_regs __iomem *regs = port->hwp_regs; + int i; + + stop_port_hwp(port); + + for (i = 0; i < HWP_HXS_COUNT; i++) { + /* enable HXS error reporting into FD[STATUS] PHE */ + iowrite32be(0x00000000, &regs->pmda[i].ssa); + iowrite32be(0xffffffff, &regs->pmda[i].lcv); + } + + start_port_hwp(port); +} + static int init(struct fman_port *port) { int err; @@ -673,6 +735,8 @@ switch (port->port_type) { case FMAN_PORT_TYPE_RX: err = init_bmi_rx(port); + if (!err) + init_hwp(port); break; case FMAN_PORT_TYPE_TX: err = init_bmi_tx(port); @@ -686,7 +750,8 @@ /* Init QMI registers */ err = init_qmi(port); - return err; + if (err) + return err; return 0; } @@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) /* Allocate the FM driver's parameters structure */ port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL); if (!port->cfg) - goto err_params; + return -EINVAL; /* Initialize FM port parameters which will be kept by the driver */ port->port_type = port->dts_params.type; @@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) /* set memory map pointers */ port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET; port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET; + port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET; port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH; /* resource distribution.
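[Editor's note: stop_port_hwp()/start_port_hwp() above open-code a bounded poll on the PSTAT bit. The iopoll helpers express the same wait without a hand-rolled countdown — a sketch, assuming <linux/iopoll.h> and its readx_poll_timeout() macro (which takes the accessor as its first argument, needed here because this block is read big-endian) and a sleepable context:]

	#include <linux/iopoll.h>

	/* Sketch: poll every 10us, give up after 1ms; returns 0 or
	 * -ETIMEDOUT by itself, so no separate counter check is needed.
	 */
	static int wait_hwp_stopped(struct fman_port_hwp_regs __iomem *regs)
	{
		u32 val;

		return readx_poll_timeout(ioread32be, &regs->fmpr_pcac, val,
					  !(val & HWP_HXS_PCAC_PSTAT), 10, 1000);
	}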
*/ @@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) err_port_cfg: kfree(port->cfg); -err_params: - kfree(port); return -EINVAL; } EXPORT_SYMBOL(fman_port_config); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index db9c0bcf54cd9308cfac3533e2603f21955b99b5..1fc27c97e3b23205fe3466ddf9f5bb45f72faf09 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -38,12 +38,6 @@ #include #include -#ifdef CONFIG_8xx -#include -#include -#include -#endif - #include "fs_enet.h" #include "fec.h" diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 96d44cf44fe09f4e3d8b5ff0270869a92315ad59..64300ac13e0253452ef885fcd786336de61929c9 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -37,12 +37,6 @@ #include #include -#ifdef CONFIG_8xx -#include -#include -#include -#endif - #include "fs_enet.h" /*************************************************/ diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index b6ed818f78fffe21ee2b4c385c7c6222bc5df9f3..9d9b6e6dd9884fdb835e80043f2cdc83d5283bbf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -9,9 +9,9 @@ #include #include +#include #include #include - #include "hnae.h" #define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev) @@ -57,11 +57,15 @@ static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) { + if (unlikely(!cb->priv)) + return; + if (cb->type == DESC_TYPE_SKB) dev_kfree_skb_any((struct sk_buff *)cb->priv); else if (unlikely(is_rx_ring(ring))) put_page((struct page *)cb->priv); - memset(cb, 0, sizeof(*cb)); + + cb->priv = NULL; } static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) @@ -197,6 +201,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; + spin_lock_init(&ring->lock); assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); /* not matter for tx or rx ring, the ntc and ntc start from 0 */ diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 8016854796fb7fbe4eacd5799ccf40810b72b008..04211ac73b36a3152b6642a4c797f738076bd601 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -67,6 +67,8 @@ do { \ #define AE_IS_VER1(ver) ((ver) == AE_VERSION_1) #define AE_NAME_SIZE 16 +#define BD_SIZE_2048_MAX_MTU 6000 + /* some said the RX and TX RCB format should not be the same in the future. But * it is the same now... 
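[Editor's note: the hnae_free_buffer() change above — bail out when cb->priv is already NULL and clear only cb->priv instead of memsetting the whole cb — makes the release path safe against double invocation while preserving the rest of the descriptor state. The pattern in isolation:]

	/* Sketch: a release that is a no-op on the second call */
	struct buf_cb {
		void *priv;
	};

	static void release_once(struct buf_cb *cb)
	{
		if (!cb->priv)
			return;		/* already released */
		/* ...free cb->priv here (skb, page, ...)... */
		cb->priv = NULL;	/* mark released; keep other fields intact */
	}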
*/ @@ -101,7 +103,6 @@ enum hnae_led_state { #define HNS_RX_FLAG_L4ID_TCP 0x1 #define HNS_RX_FLAG_L4ID_SCTP 0x3 - #define HNS_TXD_ASID_S 0 #define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S) #define HNS_TXD_BUFNUM_S 8 @@ -273,6 +274,9 @@ struct hnae_ring { /* statistic */ struct ring_stats stats; + /* ring lock for poll one */ + spinlock_t lock; + dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ @@ -483,11 +487,11 @@ struct hnae_ae_ops { u32 auto_neg, u32 rx_en, u32 tx_en); void (*get_coalesce_usecs)(struct hnae_handle *handle, u32 *tx_usecs, u32 *rx_usecs); - void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle, - u32 *tx_frames, u32 *rx_frames); + void (*get_max_coalesced_frames)(struct hnae_handle *handle, + u32 *tx_frames, u32 *rx_frames); int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); int (*set_coalesce_frames)(struct hnae_handle *handle, - u32 coalesce_frames); + u32 tx_frames, u32 rx_frames); void (*get_coalesce_range)(struct hnae_handle *handle, u32 *tx_frames_low, u32 *rx_frames_low, u32 *tx_frames_high, u32 *rx_frames_high, @@ -646,6 +650,41 @@ static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i) ring->desc[i].rx.ipoff_bnum_pid_flag = 0; } +/* when the buffer size is reinitialized, the ring descriptors must be reinitialized too */ +static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h) +{ + int i, j; + struct hnae_ring *ring; + + for (i = 0; i < h->q_num; i++) { + ring = &h->qs[i]->rx_ring; + for (j = 0; j < ring->desc_num; j++) + ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma); + } + + wmb(); /* commit all data before submit */ +} + +/* when the buffer size is reinitialized, the rx page offsets must be reset too */ +static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h) +{ + int i, j; + struct hnae_ring *ring; + + for (i = 0; i < h->q_num; i++) { + ring = &h->qs[i]->rx_ring; + for (j = 0; j < ring->desc_num; j++) { + ring->desc_cb[j].page_offset = 0; + if (ring->desc[j].addr != + cpu_to_le64(ring->desc_cb[j].dma)) + ring->desc[j].addr = + cpu_to_le64(ring->desc_cb[j].dma); + } + } + + wmb(); /* commit all data before submit */ +} + #define hnae_set_field(origin, mask, shift, val) \ do { \ (origin) &= (~(mask)); \ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 0a9cdf00b31afa9608414a4ad3de3089e4f61d04..ff864a187d5a71fa277f2907659621b9f87ffdcf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -267,8 +267,32 @@ static int hns_ae_clr_multicast(struct hnae_handle *handle) static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct hnae_queue *q; + u32 rx_buf_size; + int i, ret; + + /* when buf_size is 2048, the max mtu is 6K, since the rx ring max bd num is 3.
*/ + if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) { + if (new_mtu <= BD_SIZE_2048_MAX_MTU) + rx_buf_size = 2048; + else + rx_buf_size = 4096; + } else { + rx_buf_size = mac_cb->dsaf_dev->buf_size; + } + + ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size); - return hns_mac_set_mtu(mac_cb, new_mtu); + if (!ret) { + /* reinit ring buf_size */ + for (i = 0; i < handle->q_num; i++) { + q = handle->qs[i]; + q->rx_ring.buf_size = rx_buf_size; + hns_rcb_set_rx_ring_bs(q, rx_buf_size); + } + } + + return ret; } static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable) @@ -463,15 +487,21 @@ static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, ring_pair->port_id_in_comm); } -static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, - u32 *tx_frames, u32 *rx_frames) +static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle, + u32 *tx_frames, u32 *rx_frames) { struct ring_pair_cb *ring_pair = container_of(handle->qs[0], struct ring_pair_cb, q); + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); - *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common, - ring_pair->port_id_in_comm); - *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common, + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + handle->port_type == HNAE_PORT_DEBUG) + *tx_frames = hns_rcb_get_rx_coalesced_frames( + ring_pair->rcb_common, ring_pair->port_id_in_comm); + else + *tx_frames = hns_rcb_get_tx_coalesced_frames( + ring_pair->rcb_common, ring_pair->port_id_in_comm); + *rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common, ring_pair->port_id_in_comm); } @@ -485,15 +515,34 @@ static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle, ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout); } -static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, - u32 coalesce_frames) +static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, + u32 tx_frames, u32 rx_frames) { + int ret; struct ring_pair_cb *ring_pair = container_of(handle->qs[0], struct ring_pair_cb, q); + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); - return hns_rcb_set_coalesced_frames( - ring_pair->rcb_common, - ring_pair->port_id_in_comm, coalesce_frames); + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + handle->port_type == HNAE_PORT_DEBUG) { + if (tx_frames != rx_frames) + return -EINVAL; + return hns_rcb_set_rx_coalesced_frames( + ring_pair->rcb_common, + ring_pair->port_id_in_comm, rx_frames); + } else { + if (tx_frames != 1) + return -EINVAL; + ret = hns_rcb_set_tx_coalesced_frames( + ring_pair->rcb_common, + ring_pair->port_id_in_comm, tx_frames); + if (ret) + return ret; + + return hns_rcb_set_rx_coalesced_frames( + ring_pair->rcb_common, + ring_pair->port_id_in_comm, rx_frames); + } } static void hns_ae_get_coalesce_range(struct hnae_handle *handle, @@ -504,20 +553,27 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle, { struct dsaf_device *dsaf_dev; + assert(handle); + dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); - *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES; - *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES; - *tx_frames_high = - (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ? - HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1; - *rx_frames_high = - (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ? 
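[Editor's note: the per-version split above means V1 and debug ports share one frame counter for both directions (so tx_frames must equal rx_frames), while V2 service ports fix Tx coalescing at one frame and leave only rx_frames tunable. A hypothetical ethtool-side caller would simply forward both values and let the op reject invalid combinations — a sketch, assuming the hnae_handle->dev->ops layout from hnae.h:]

	/* Hypothetical caller: the hnae op enforces the per-version rules,
	 * e.g. returning -EINVAL for tx_frames != 1 on a V2 service port.
	 */
	static int example_set_frames(struct hnae_handle *h,
				      u32 tx_frames, u32 rx_frames)
	{
		return h->dev->ops->set_coalesce_frames(h, tx_frames, rx_frames);
	}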
- HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1; - *tx_usecs_low = 0; - *rx_usecs_low = 0; - *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS; - *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS; + *tx_frames_low = HNS_RCB_TX_FRAMES_LOW; + *rx_frames_low = HNS_RCB_RX_FRAMES_LOW; + + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + handle->port_type == HNAE_PORT_DEBUG) + *tx_frames_high = + (dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ? + HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1; + else + *tx_frames_high = 1; + + *rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ? + HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1; + *tx_usecs_low = HNS_RCB_TX_USECS_LOW; + *rx_usecs_low = HNS_RCB_RX_USECS_LOW; + *tx_usecs_high = HNS_RCB_TX_USECS_HIGH; + *rx_usecs_high = HNS_RCB_RX_USECS_HIGH; } void hns_ae_update_stats(struct hnae_handle *handle, @@ -802,8 +858,9 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key, memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); /* update the current hash->queue mappings from the shadow RSS table */ - memcpy(indir, ppe_cb->rss_indir_table, - HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); + if (indir) + memcpy(indir, ppe_cb->rss_indir_table, + HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); return 0; } @@ -814,15 +871,19 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir, struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); /* set the RSS Hash Key if specififed by the user */ - if (key) - hns_ppe_set_rss_key(ppe_cb, (u32 *)key); + if (key) { + memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE); + hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key); + } - /* update the shadow RSS table with user specified qids */ - memcpy(ppe_cb->rss_indir_table, indir, - HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); + if (indir) { + /* update the shadow RSS table with user specified qids */ + memcpy(ppe_cb->rss_indir_table, indir, + HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir)); - /* now update the hardware */ - hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + /* now update the hardware */ + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + } return 0; } @@ -846,7 +907,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { .get_autoneg = hns_ae_get_autoneg, .set_pauseparam = hns_ae_set_pauseparam, .get_coalesce_usecs = hns_ae_get_coalesce_usecs, - .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames, + .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames, .set_coalesce_usecs = hns_ae_set_coalesce_usecs, .set_coalesce_frames = hns_ae_set_coalesce_frames, .get_coalesce_range = hns_ae_get_coalesce_range, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 3382441fe7b51e84bb5e815ffc1e4fa192b09a91..86944bc3b273fd97232a60e68f25046e68042882 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -86,12 +86,11 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode) dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0); } -/** -*hns_gmac_get_en - get port enable -*@mac_drv:mac device -*@rx:rx enable -*@tx:tx enable -*/ +/* hns_gmac_get_en - get port enable + * @mac_drv:mac device + * @rx:rx enable + * @tx:tx enable + */ static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx) { struct mac_driver *drv = (struct mac_driver *)mac_drv; @@ -148,6 +147,17 @@ static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval) 
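[Editor's note: the get/set_rss changes above implement the ethtool RXFH convention that either the key pointer or the indirection-table pointer may be NULL when the caller wants only the other half, with the driver's shadow copies (rss_key, rss_indir_table) as the source of truth. The read side reduced to a sketch — the sizes are illustrative stand-ins for HNS_PPEV2_RSS_KEY_SIZE and HNS_PPEV2_RSS_IND_TBL_SIZE:]

	#include <linux/string.h>

	/* Sketch: NULL-tolerant RXFH read over driver shadow state */
	static int example_get_rxfh(const u8 *shadow_key, const u32 *shadow_indir,
				    u8 *key, u32 *indir)
	{
		if (key)	/* caller asked for the hash key */
			memcpy(key, shadow_key, 40);	/* illustrative key size */
		if (indir)	/* caller asked for the indirection table */
			memcpy(indir, shadow_indir, 256 * sizeof(*indir));
		return 0;
	}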
GMAC_MAX_FRM_SIZE_S, newval); } +static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval) +{ + u32 tx_ctrl; + struct mac_driver *drv = (struct mac_driver *)mac_drv; + + tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG); + dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval); + dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval); + dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl); +} + static void hns_gmac_config_an_mode(void *mac_drv, u8 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; @@ -250,7 +260,6 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en, static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, u32 full_duplex) { - u32 tx_ctrl; struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG, @@ -279,14 +288,6 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, return -EINVAL; } - tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG); - dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1); - dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1); - dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl); - - dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG, - GMAC_MODE_CHANGE_EB_B, 1); - return 0; } @@ -325,6 +326,17 @@ static void hns_gmac_init(void *mac_drv) hns_gmac_tx_loop_pkt_dis(mac_drv); if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG) hns_gmac_set_uc_match(mac_drv, 0); + + hns_gmac_config_pad_and_crc(mac_drv, 1); + + dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG, + GMAC_MODE_CHANGE_EB_B, 1); + + /* reduce the gmac tx water line to avoid a gmac hang-up + * at 100M speed in half-duplex mode. + */ + dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK, + GMAC_TX_WATER_LINE_SHIFT, 8); } void hns_gmac_update_stats(void *mac_drv) @@ -453,24 +465,6 @@ static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode, return 0; } -static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval) -{ - u32 tx_ctrl; - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG); - dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval); - dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval); - dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl); -} - -static void hns_gmac_get_id(void *mac_drv, u8 *mac_id) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - *mac_id = drv->mac_id; -} - static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info) { enum hns_gmac_duplex_mdoe duplex; @@ -672,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data) static int hns_gmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_gmac_stats_string); return 0; @@ -712,7 +706,6 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc; mac_drv->config_half_duplex = hns_gmac_set_duplex_type; mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames; - mac_drv->mac_get_id = hns_gmac_get_id; mac_drv->get_info = hns_gmac_get_info; mac_drv->autoneg_stat = hns_gmac_autoneg_stat; mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index bdd8cdd732fb588930f2cc085b7a0fddd9f1a263..8b5cdf490850374baba82205264967c8f12a50e5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -335,44 +335,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, return 0; } -/** - *hns_mac_del_mac - delete mac address into dsaf table,can't delete the same - * address twice -*@net_dev: net device -*@vfn : vf lan -*@mac : mac address -*return status -*/ -int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac) -{ - struct mac_entry_idx *old_mac; - struct dsaf_device *dsaf_dev; - u32 ret; - - dsaf_dev = mac_cb->dsaf_dev; - - if (vfn < DSAF_MAX_VM_NUM) { - old_mac = &mac_cb->addr_entry_idx[vfn]; - } else { - dev_err(mac_cb->dev, - "vf queue is too large, %s mac%d queue = %#x!\n", - mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn); - return -EINVAL; - } - - if (dsaf_dev) { - ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id, - mac_cb->mac_id, old_mac->addr); - if (ret) - return ret; - - if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0) - old_mac->valid = 0; - } - - return 0; -} - int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) { struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -494,10 +456,9 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb) } } -int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu) +int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size) { struct mac_driver *drv = hns_mac_get_drv(mac_cb); - u32 buf_size = mac_cb->dsaf_dev->buf_size; u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ? MAC_MAX_MTU : MAC_MAX_MTU_V2; @@ -735,6 +696,8 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, rc = phy_device_register(phy); if (rc) { phy_device_free(phy); + dev_err(&mdio->dev, "failed to register phy at address %i\n", + addr); return -ENODEV; } @@ -745,7 +708,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, return 0; } -static void hns_mac_register_phy(struct hns_mac_cb *mac_cb) +static int hns_mac_register_phy(struct hns_mac_cb *mac_cb) { struct acpi_reference_args args; struct platform_device *pdev; @@ -755,24 +718,39 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb) /* Loop over the child nodes and register a phy_device for each one */ if (!to_acpi_device_node(mac_cb->fw_port)) - return; + return -ENODEV; rc = acpi_node_get_property_reference( mac_cb->fw_port, "mdio-node", 0, &args); if (rc) - return; + return rc; addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port); if (addr < 0) - return; + return addr; /* dev address in adev */ pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev)); + if (!pdev) { + dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n", + mac_cb->mac_id); + return -EINVAL; + } + mii_bus = platform_get_drvdata(pdev); + if (!mii_bus) { + dev_err(mac_cb->dev, + "mac%d mdio is NULL, dsaf will probe again later\n", + mac_cb->mac_id); + return -EPROBE_DEFER; + } + rc = hns_mac_register_phydev(mii_bus, mac_cb, addr); if (!rc) dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n", mac_cb->mac_id, addr); + + return rc; } #define MAC_MEDIA_TYPE_MAX_LEN 16 @@ -793,7 +771,7 @@ static const struct { *@np:device node * return: 0 --success, negative --fail */ -static int hns_mac_get_info(struct hns_mac_cb *mac_cb) +static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { struct device_node *np; struct regmap *syscon; @@ -903,7 +881,15 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) } } } else if (is_acpi_node(mac_cb->fw_port)) { - hns_mac_register_phy(mac_cb); + ret = hns_mac_register_phy(mac_cb); + /*
+ * The mac can work well whether there is a phy or not. If the port + * doesn't connect to a phy, the return value is ignored; the return + * value is only handled when there is a phy but the mdio bus cannot + * be found. + */ + if (ret == -EPROBE_DEFER) + return ret; } else { dev_err(mac_cb->dev, "mac%d cannot find phy node\n", mac_cb->mac_id); @@ -1065,6 +1051,7 @@ int hns_mac_init(struct dsaf_device *dsaf_dev) dsaf_dev->mac_cb[port_id] = mac_cb; } } + /* init mac_cb for all port */ for (port_id = 0; port_id < max_port_num; port_id++) { mac_cb = dsaf_dev->mac_cb[port_id]; @@ -1074,6 +1061,7 @@ int hns_mac_init(struct dsaf_device *dsaf_dev) ret = hns_mac_get_cfg(dsaf_dev, mac_cb); if (ret) return ret; + ret = hns_mac_init_ex(mac_cb); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 2bb3d1e93c64a315c92f0e493573add3e5f1e023..24dfba53a0f216c5ce55850b4a5703a1a02772e5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -373,8 +373,6 @@ struct mac_driver { void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable); /* config rx mode for promiscuous*/ void (*set_promiscuous)(void *mac_drv, u8 enable); - /* get mac id */ - void (*mac_get_id)(void *mac_drv, u8 *mac_id); void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en); void (*autoneg_stat)(void *mac_drv, u32 *enable); @@ -436,7 +434,6 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable); void hns_mac_start(struct hns_mac_cb *mac_cb); void hns_mac_stop(struct hns_mac_cb *mac_cb); -int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac); void hns_mac_uninit(struct dsaf_device *dsaf_dev); void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); void hns_mac_reset(struct hns_mac_cb *mac_cb); @@ -444,7 +441,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg); void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en); int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable); int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en); -int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu); +int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size); int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, u8 *auto_neg, u16 *speed, u8 *duplex); int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 403ea9db6dbd15a6384ed845480fe8cf18c52163..e0bc79ea3d88091c5723403c724a5d4460f93e99 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -510,10 +510,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48); + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80); + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ @@ -521,10 +521,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192); + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240); + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } @@ -1648,87 +1648,6 @@ int hns_dsaf_rm_mac_addr( mac_entry->addr); } -/** - * hns_dsaf_set_mac_mc_entry - set mac mc-entry - * @dsaf_dev: dsa fabric device struct pointer - * @mac_entry: mc-mac entry - */ -int hns_dsaf_set_mac_mc_entry( - struct dsaf_device *dsaf_dev, - struct dsaf_drv_mac_multi_dest_entry *mac_entry) -{ - u16 entry_index = DSAF_INVALID_ENTRY_IDX; - struct dsaf_drv_tbl_tcam_key mac_key; - struct dsaf_tbl_tcam_mcast_cfg mac_data; - struct dsaf_drv_priv *priv = - (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev); - struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl; - struct dsaf_drv_tbl_tcam_key tmp_mac_key; - struct dsaf_tbl_tcam_data tcam_data; - - /* mac addr check */ - if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n", - dsaf_dev->ae_dev.name, mac_entry->addr); - return -EINVAL; - } - - /*config key */ - hns_dsaf_set_mac_key(dsaf_dev, &mac_key, - mac_entry->in_vlan_id, - mac_entry->in_port_num, mac_entry->addr); - - /* entry ie exist? */ - entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); - if (entry_index == DSAF_INVALID_ENTRY_IDX) { - /*if hasnot, find enpty entry*/ - entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev); - if (entry_index == DSAF_INVALID_ENTRY_IDX) { - /*if hasnot empty, error*/ - dev_err(dsaf_dev->dev, - "set_uc_entry failed, %s Mac key(%#x:%#x)\n", - dsaf_dev->ae_dev.name, - mac_key.high.val, mac_key.low.val); - return -EINVAL; - } - - /* config hardware entry */ - memset(mac_data.tbl_mcast_port_msk, - 0, sizeof(mac_data.tbl_mcast_port_msk)); - } else { - /* config hardware entry */ - hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, - &mac_data); - - tmp_mac_key.high.val = - le32_to_cpu(tcam_data.tbl_tcam_data_high); - tmp_mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); - } - mac_data.tbl_mcast_old_en = 0; - mac_data.tbl_mcast_item_vld = 1; - dsaf_set_field(mac_data.tbl_mcast_port_msk[0], - 0x3F, 0, mac_entry->port_mask[0]); - - dev_dbg(dsaf_dev->dev, - "set_uc_entry, %s key(%#x:%#x) entry_index%d\n", - dsaf_dev->ae_dev.name, mac_key.high.val, - mac_key.low.val, entry_index); - - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); - - hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, NULL, - &mac_data); - - /* config software entry */ - soft_mac_entry += entry_index; - soft_mac_entry->index = entry_index; - soft_mac_entry->tcam_key.high.val = mac_key.high.val; - soft_mac_entry->tcam_key.low.val = mac_key.low.val; - - return 0; -} - static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) { u16 *a = (u16 *)dst; @@ -2090,166 +2009,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, return ret; } -/** - * hns_dsaf_get_mac_uc_entry - get mac uc entry - * @dsaf_dev: dsa fabric device struct pointer - * @mac_entry: mac entry - */ -int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev, - struct dsaf_drv_mac_single_dest_entry *mac_entry) -{ - u16 entry_index = DSAF_INVALID_ENTRY_IDX; - struct dsaf_drv_tbl_tcam_key mac_key; - - struct dsaf_tbl_tcam_ucast_cfg mac_data; - struct dsaf_tbl_tcam_data tcam_data; - - /* 
check macaddr */ - if (MAC_IS_ALL_ZEROS(mac_entry->addr) || - MAC_IS_BROADCAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", - mac_entry->addr); - return -EINVAL; - } - - /*config key */ - hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id, - mac_entry->in_port_num, mac_entry->addr); - - /*check exist? */ - entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); - if (entry_index == DSAF_INVALID_ENTRY_IDX) { - /*find none, error */ - dev_err(dsaf_dev->dev, - "get_uc_entry failed, %s Mac key(%#x:%#x)\n", - dsaf_dev->ae_dev.name, - mac_key.high.val, mac_key.low.val); - return -EINVAL; - } - dev_dbg(dsaf_dev->dev, - "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n", - dsaf_dev->ae_dev.name, mac_key.high.val, - mac_key.low.val, entry_index); - - /* read entry */ - hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, &mac_data); - - mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high); - mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); - - mac_entry->port_num = mac_data.tbl_ucast_out_port; - - return 0; -} - -/** - * hns_dsaf_get_mac_mc_entry - get mac mc entry - * @dsaf_dev: dsa fabric device struct pointer - * @mac_entry: mac entry - */ -int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev, - struct dsaf_drv_mac_multi_dest_entry *mac_entry) -{ - u16 entry_index = DSAF_INVALID_ENTRY_IDX; - struct dsaf_drv_tbl_tcam_key mac_key; - - struct dsaf_tbl_tcam_mcast_cfg mac_data; - struct dsaf_tbl_tcam_data tcam_data; - - /*check mac addr */ - if (MAC_IS_ALL_ZEROS(mac_entry->addr) || - MAC_IS_BROADCAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", - mac_entry->addr); - return -EINVAL; - } - - /*config key */ - hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id, - mac_entry->in_port_num, mac_entry->addr); - - /*check exist? 
*/ - entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key); - if (entry_index == DSAF_INVALID_ENTRY_IDX) { - /* find none, error */ - dev_err(dsaf_dev->dev, - "get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n", - dsaf_dev->ae_dev.name, mac_key.high.val, - mac_key.low.val); - return -EINVAL; - } - dev_dbg(dsaf_dev->dev, - "get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n", - dsaf_dev->ae_dev.name, mac_key.high.val, - mac_key.low.val, entry_index); - - /*read entry */ - hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data); - - mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high); - mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); - - mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F; - return 0; -} - -/** - * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index - * @dsaf_dev: dsa fabric device struct pointer - * @entry_index: tab entry index - * @mac_entry: mac entry - */ -int hns_dsaf_get_mac_entry_by_index( - struct dsaf_device *dsaf_dev, - u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry) -{ - struct dsaf_drv_tbl_tcam_key mac_key; - - struct dsaf_tbl_tcam_mcast_cfg mac_data; - struct dsaf_tbl_tcam_ucast_cfg mac_uc_data; - struct dsaf_tbl_tcam_data tcam_data; - char mac_addr[ETH_ALEN] = {0}; - - if (entry_index >= dsaf_dev->tcam_max_num) { - /* find none, del error */ - dev_err(dsaf_dev->dev, "get_uc_entry failed, %s\n", - dsaf_dev->ae_dev.name); - return -EINVAL; - } - - /* mc entry, do read opt */ - hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data); - - mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high); - mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); - - mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F; - - /***get mac addr*/ - mac_addr[0] = mac_key.high.bits.mac_0; - mac_addr[1] = mac_key.high.bits.mac_1; - mac_addr[2] = mac_key.high.bits.mac_2; - mac_addr[3] = mac_key.high.bits.mac_3; - mac_addr[4] = mac_key.low.bits.mac_4; - mac_addr[5] = mac_key.low.bits.mac_5; - /**is mc or uc*/ - if (MAC_IS_MULTICAST((u8 *)mac_addr) || - MAC_IS_L3_MULTICAST((u8 *)mac_addr)) { - /**mc donot do*/ - } else { - /*is not mc, just uc... 
*/ - hns_dsaf_tcam_uc_get(dsaf_dev, entry_index, &tcam_data, - &mac_uc_data); - - mac_key.high.val = le32_to_cpu(tcam_data.tbl_tcam_data_high); - mac_key.low.val = le32_to_cpu(tcam_data.tbl_tcam_data_low); - - mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port); - } - - return 0; -} - static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev, size_t sizeof_priv) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index cef6bf46ae9309bf84c9f5ff466982d59d8bed93..4507e8222683c112c05eeca4633e65990b789b8f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -68,7 +68,7 @@ enum dsaf_roce_qos_sl { }; #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) -#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) +#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP) enum hal_dsaf_mode { HRD_DSAF_NO_DSAF_MODE = 0x0, @@ -429,23 +429,12 @@ static inline struct hnae_vf_cb *hns_ae_get_vf_cb( int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry); -int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev, - struct dsaf_drv_mac_multi_dest_entry *mac_entry); int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry); int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, u8 in_port_num, u8 *addr); int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_mac_single_dest_entry *mac_entry); -int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev, - struct dsaf_drv_mac_single_dest_entry *mac_entry); -int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev, - struct dsaf_drv_mac_multi_dest_entry *mac_entry); -int hns_dsaf_get_mac_entry_by_index( - struct dsaf_device *dsaf_dev, - u16 entry_index, - struct dsaf_drv_mac_multi_dest_entry *mac_entry); - void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb); int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev); @@ -475,5 +464,4 @@ int hns_dsaf_rm_mac_addr( int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, u8 port_num); - #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 6ea872287307bd85b13436a42c1d8aacbad05f2d..b62816c1574eb840f74a334b904f9fd993733116 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb) int hns_ppe_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ETH_PPE_STATIC_NUM; return 0; } @@ -496,21 +496,23 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data) */ int hns_ppe_init(struct dsaf_device *dsaf_dev) { - int i, k; int ret; + int i; for (i = 0; i < HNS_PPE_COM_NUM; i++) { ret = hns_ppe_common_get_cfg(dsaf_dev, i); if (ret) - goto get_ppe_cfg_fail; + goto get_cfg_fail; ret = hns_rcb_common_get_cfg(dsaf_dev, i); if (ret) - goto get_rcb_cfg_fail; + goto get_cfg_fail; hns_ppe_get_cfg(dsaf_dev->ppe_common[i]); - hns_rcb_get_cfg(dsaf_dev->rcb_common[i]); + ret = hns_rcb_get_cfg(dsaf_dev->rcb_common[i]); + if (ret) + goto get_cfg_fail; } for (i = 0; i < HNS_PPE_COM_NUM; i++) @@ -518,13 +520,12 @@ int hns_ppe_init(struct dsaf_device *dsaf_dev) return 0; 
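[Editor's note: the hns_ppe_init() error path rewritten just below replaces two labels and an index-based partial unwind with a single label that frees every slot; this is only safe because the free routines tolerate slots that were never configured. The shape of the pattern, reduced to its essentials — alloc_slot()/free_slot() are hypothetical stand-ins:]

	/* Sketch: single-label unwind over a fixed-size table; free_slot()
	 * must be a no-op for slots that alloc_slot() never touched.
	 */
	#define NSLOTS 2

	static int alloc_slot(int i);	/* hypothetical */
	static void free_slot(int i);	/* hypothetical, idempotent */

	static int init_all(void)
	{
		int i, ret;

		for (i = 0; i < NSLOTS; i++) {
			ret = alloc_slot(i);
			if (ret)
				goto fail;
		}
		return 0;

	fail:
		for (i = 0; i < NSLOTS; i++)
			free_slot(i);
		return ret;
	}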
-get_rcb_cfg_fail:
-	hns_ppe_common_free_cfg(dsaf_dev, i);
-get_ppe_cfg_fail:
-	for (k = i - 1; k >= 0; k--) {
-		hns_rcb_common_free_cfg(dsaf_dev, k);
-		hns_ppe_common_free_cfg(dsaf_dev, k);
+get_cfg_fail:
+	for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+		hns_rcb_common_free_cfg(dsaf_dev, i);
+		hns_ppe_common_free_cfg(dsaf_dev, i);
	}
+
	return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index f0ed80d6ef9cd45a8408c987ab4315646098f438..6f3570cfb501604bea3f22d73374dd8dc28d756f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -32,6 +32,9 @@
#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10
+/* Because the default MTU is 1500, an RCB buffer size of 2048 is enough. */
+#define RCB_DEFAULT_BUFFER_SIZE 2048
+
/**
 *hns_rcb_wait_fbd_clean - clean fbd
 *@qs: ring struct pointer array
@@ -192,6 +195,30 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
	wmb(); /* Sync point after breakpoint */
}
+/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+	dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+		       bd_size_type);
+}
+
+/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+	u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+	dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+		       bd_size_type);
+}
+
/**
 *hns_rcb_ring_init - init rcb ring
 *@ring_pair: ring pair control block
@@ -200,8 +227,6 @@ void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
-	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
-	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring = (ring_type == RX_RING) ?
&q->rx_ring : &q->tx_ring; dma_addr_t dma = ring->desc_dma_addr; @@ -212,8 +237,8 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); - dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, - bd_size_type); + hns_rcb_set_rx_ring_bs(q, ring->buf_size); + dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, ring_pair->port_id_in_comm); dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG, @@ -224,12 +249,12 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); - dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, - bd_size_type); + hns_rcb_set_tx_ring_bs(q, ring->buf_size); + dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, ring_pair->port_id_in_comm); dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG, - ring_pair->port_id_in_comm); + ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET); } } @@ -259,13 +284,27 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, static void hns_rcb_set_port_timeout( struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout) { - if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) + if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout * HNS_RCB_CLK_FREQ_MHZ); - else + } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) { + if (timeout > HNS_RCB_DEF_GAP_TIME_USECS) + dsaf_write_dev(rcb_common, + RCB_PORT_INT_GAPTIME_REG + port_idx * 4, + HNS_RCB_DEF_GAP_TIME_USECS); + else + dsaf_write_dev(rcb_common, + RCB_PORT_INT_GAPTIME_REG + port_idx * 4, + timeout); + + dsaf_write_dev(rcb_common, + RCB_PORT_CFG_OVERTIME_REG + port_idx * 4, + timeout); + } else { dsaf_write_dev(rcb_common, RCB_PORT_CFG_OVERTIME_REG + port_idx * 4, timeout); + } } static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) @@ -327,8 +366,12 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) for (i = 0; i < port_num; i++) { hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); - (void)hns_rcb_set_coalesced_frames( - rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES); + hns_rcb_set_rx_coalesced_frames( + rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES); + if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) && + !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) + hns_rcb_set_tx_coalesced_frames( + rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES); hns_rcb_set_port_timeout( rcb_common, i, HNS_RCB_DEF_COALESCED_USECS); } @@ -380,7 +423,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) struct hnae_ring *ring; struct rcb_common_cb *rcb_common; struct ring_pair_cb *ring_pair_cb; - u32 buf_size; u16 desc_num, mdnum_ppkt; bool irq_idx, is_ver1; @@ -401,7 +443,6 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) } rcb_common = ring_pair_cb->rcb_common; - buf_size = rcb_common->dsaf_dev->buf_size; desc_num = rcb_common->dsaf_dev->desc_num; ring->desc = NULL; @@ -410,7 +451,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) ring->irq = ring_pair_cb->virq[irq_idx]; ring->desc_dma_addr = 0; - ring->buf_size = buf_size; + ring->buf_size = RCB_DEFAULT_BUFFER_SIZE; ring->desc_num = desc_num; ring->max_desc_num_per_pkt = mdnum_ppkt; ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE; @@ -430,7 +471,6 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) static int hns_rcb_get_port_in_comm( struct rcb_common_cb *rcb_common, int ring_idx) { - return 
ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
@@ -452,7 +492,7 @@ static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
 *hns_rcb_get_cfg - get rcb config
 *@rcb_common: rcb common device
 */
-void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
@@ -477,25 +517,47 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = is_ver1 ?
			platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
			platform_get_irq(pdev, base_irq_idx + i * 3);
+		if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
+		    (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
+			return -EPROBE_DEFER;
+
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
+
+	return 0;
}

/**
- *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
 *@rcb_common: rcb_common device
 *@port_idx:port id in comm
 *
 *Returns: coalesced_frames
 */
-u32 hns_rcb_get_coalesced_frames(
+u32 hns_rcb_get_rx_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

+/**
+ *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
+ */
+u32 hns_rcb_get_tx_coalesced_frames(
+	struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+	u64 reg;
+
+	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+	return dsaf_read_dev(rcb_common, reg);
+}
+
/**
 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 *@rcb_common: rcb_common device
@@ -538,33 +600,47 @@ int hns_rcb_set_coalesce_usecs(
			return -EINVAL;
		}
	}
-	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+	if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
		dev_err(rcb_common->dsaf_dev->dev,
-			"error: coalesce_usecs setting supports 0~1023us\n");
+			"error: coalesce_usecs setting supports 1~1023us\n");
		return -EINVAL;
	}
+	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+	return 0;
+}

-	if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
-		if (timeout == 0)
-			/* set timeout to 0, Disable gap time */
-			dsaf_set_reg_field(rcb_common->io_base,
-					   RCB_INT_GAP_TIME_REG + port_idx * 4,
-					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
-					   0);
-		else
-			/* set timeout non 0, restore gap time to 1 */
-			dsaf_set_reg_field(rcb_common->io_base,
-					   RCB_INT_GAP_TIME_REG + port_idx * 4,
-					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
-					   1);
+/**
+ *hns_rcb_set_tx_coalesced_frames - set rcb tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_tx_coalesced_frames(
+	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+	u32 old_waterline =
+		hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
+	u64 reg;
+
+	if (coalesced_frames == old_waterline)
+		return 0;
+
+	if (coalesced_frames != 1) {
+		dev_err(rcb_common->dsaf_dev->dev,
+			"error: setting tx coalesced frames is not supported!\n");
+		return -EINVAL;
	}
-	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+	reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+	dsaf_write_dev(rcb_common, reg, coalesced_frames);
	return 0;
}

/**
- 
*hns_rcb_set_coalesced_frames - set rcb coalesced frames + *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames *@rcb_common: rcb_common device *@port_idx:port id in comm *@coalesced_frames:tx/rx BD num for coalesced frames @@ -572,10 +648,11 @@ int hns_rcb_set_coalesce_usecs( * Returns: * Zero for success, or an error code in case of failure */ -int hns_rcb_set_coalesced_frames( +int hns_rcb_set_rx_coalesced_frames( struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames) { - u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx); + u32 old_waterline = + hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx); if (coalesced_frames == old_waterline) return 0; @@ -799,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) */ int hns_rcb_get_ring_sset_count(int stringset) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return HNS_RING_STATIC_REG_NUM; return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 99b4e1ba0a9411a9889cbf8343605a8e79616bb6..602816498c8dd0c4aecd20ae9f5df196d7944821 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -35,12 +35,23 @@ struct rcb_common_cb; #define HNS_RCB_REG_OFFSET 0x10000 +#define HNS_RCB_TX_FRAMES_LOW 1 +#define HNS_RCB_RX_FRAMES_LOW 1 +#define HNS_RCB_TX_FRAMES_HIGH 1023 +#define HNS_RCB_RX_FRAMES_HIGH 1023 +#define HNS_RCB_TX_USECS_LOW 1 +#define HNS_RCB_RX_USECS_LOW 1 +#define HNS_RCB_TX_USECS_HIGH 1023 +#define HNS_RCB_RX_USECS_HIGH 1023 #define HNS_RCB_MAX_COALESCED_FRAMES 1023 #define HNS_RCB_MIN_COALESCED_FRAMES 1 -#define HNS_RCB_DEF_COALESCED_FRAMES 50 +#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50 +#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1 #define HNS_RCB_CLK_FREQ_MHZ 350 #define HNS_RCB_MAX_COALESCED_USECS 0x3ff -#define HNS_RCB_DEF_COALESCED_USECS 50 +#define HNS_RCB_DEF_COALESCED_USECS 30 +#define HNS_RCB_DEF_GAP_TIME_USECS 20 +#define HNS_RCB_TX_PKTLINE_OFFSET 8 #define HNS_RCB_COMMON_ENDIAN 1 @@ -110,7 +121,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index); void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); -void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); +int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, u16 *max_q_per_vf); @@ -125,13 +136,17 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); void hns_rcb_init_hw(struct ring_pair_cb *ring); void hns_rcb_reset_ring_hw(struct hnae_queue *q); void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); -u32 hns_rcb_get_coalesced_frames( +u32 hns_rcb_get_rx_coalesced_frames( + struct rcb_common_cb *rcb_common, u32 port_idx); +u32 hns_rcb_get_tx_coalesced_frames( struct rcb_common_cb *rcb_common, u32 port_idx); u32 hns_rcb_get_coalesce_usecs( struct rcb_common_cb *rcb_common, u32 port_idx); int hns_rcb_set_coalesce_usecs( struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout); -int hns_rcb_set_coalesced_frames( +int hns_rcb_set_rx_coalesced_frames( + struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames); +int hns_rcb_set_tx_coalesced_frames( struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames); void hns_rcb_update_stats(struct hnae_queue 
*queue); @@ -146,4 +161,7 @@ int hns_rcb_get_ring_regs_count(void); void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data); void hns_rcb_get_strings(int stringset, u8 *data, int index); +void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size); +void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size); + #endif /* _HNS_DSAF_RCB_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 8fa18fc17cd2e25f2e3458e608abe6f5a96b60d9..46a52d9bb196326e5da7481f43616dab0afd12a2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -421,7 +421,7 @@ #define RCB_CFG_OVERTIME_REG 0x9300 #define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 #define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 -#define RCB_INT_GAP_TIME_REG 0x9400 +#define RCB_PORT_INT_GAPTIME_REG 0x9400 #define RCB_PORT_CFG_OVERTIME_REG 0x9430 #define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 @@ -466,6 +466,7 @@ #define GMAC_DUPLEX_TYPE_REG 0x0008UL #define GMAC_FD_FC_TYPE_REG 0x000CUL +#define GMAC_TX_WATER_LINE_REG 0x0010UL #define GMAC_FC_TX_TIMER_REG 0x001CUL #define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL #define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL @@ -912,6 +913,9 @@ #define GMAC_DUPLEX_TYPE_B 0 +#define GMAC_TX_WATER_LINE_MASK ((1UL << 8) - 1) +#define GMAC_TX_WATER_LINE_SHIFT 0 + #define GMAC_FC_TX_TIMER_S 0 #define GMAC_FC_TX_TIMER_M 0xffff diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index aae830a93050ad5f99ece2b6901dd30531852d87..51e7e9f5af4992b3a21f3cbfd4d27bed06d092e0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -299,18 +299,6 @@ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable) dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable); } -/** - *hns_xgmac_get_id - get xgmac port id - *@mac_drv: mac driver - *@newval:xgmac max frame length - */ -static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id) -{ - struct mac_driver *drv = (struct mac_driver *)mac_drv; - - *mac_id = drv->mac_id; -} - /** *hns_xgmac_config_max_frame_length - set xgmac max frame length *@mac_drv: mac driver @@ -793,7 +781,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data) */ static int hns_xgmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; @@ -833,7 +821,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->config_half_duplex = NULL; mac_drv->set_rx_ignore_pause_frames = hns_xgmac_set_rx_ignore_pause_frames; - mac_drv->mac_get_id = hns_xgmac_get_id; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index fca37e2c7f017d76aa537daede5f7af14cb8e152..c6700b91a2dfd3da02dc8a4b6728fbf92beff4c1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -512,7 +512,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, int last_offset; bool twobufs; - twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); + twobufs = ((PAGE_SIZE < 8192) && + hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); desc = 
&ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); @@ -859,7 +860,7 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, return recv_pkts; } -static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) +static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; int num = 0; @@ -873,22 +874,23 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) ring_data->ring->q->handle->dev->ops->toggle_ring_irq( ring_data->ring, 1); - napi_schedule(&ring_data->napi); + return false; + } else { + return true; } } -static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) +static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; - int num = 0; + int num; num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (num == 0) - ring_data->ring->q->handle->dev->ops->toggle_ring_irq( - ring, 0); + if (!num) + return true; else - napi_schedule(&ring_data->napi); + return false; } static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, @@ -921,12 +923,13 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h) /* netif_tx_lock will turn down the performance, set only when necessary */ #ifdef CONFIG_NET_POLL_CONTROLLER -#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev) -#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev) +#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock) +#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock) #else -#define NETIF_TX_LOCK(ndev) -#define NETIF_TX_UNLOCK(ndev) +#define NETIF_TX_LOCK(ring) +#define NETIF_TX_UNLOCK(ring) #endif + /* reclaim all desc in one budget * return error or number of desc left */ @@ -940,13 +943,13 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, int head; int bytes, pkts; - NETIF_TX_LOCK(ndev); + NETIF_TX_LOCK(ring); head = readl_relaxed(ring->io_base + RCB_REG_HEAD); rmb(); /* make sure head is ready before touch any data */ if (is_ring_empty(ring) || head == ring->next_to_clean) { - NETIF_TX_UNLOCK(ndev); + NETIF_TX_UNLOCK(ring); return 0; /* no data to poll */ } @@ -954,7 +957,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); ring->stats.io_err_cnt++; - NETIF_TX_UNLOCK(ndev); + NETIF_TX_UNLOCK(ring); return -EIO; } @@ -966,7 +969,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, prefetch(&ring->desc_cb[ring->next_to_clean]); } - NETIF_TX_UNLOCK(ndev); + NETIF_TX_UNLOCK(ring); dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); @@ -989,7 +992,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, return 0; } -static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) +static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; int head; @@ -1002,20 +1005,21 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) ring_data->ring->q->handle->dev->ops->toggle_ring_irq( ring_data->ring, 1); - napi_schedule(&ring_data->napi); + return false; + } else { + return true; } } -static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data) +static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); if (head == 
ring->next_to_clean)
-		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
-			ring, 0);
+		return true;
	else
-		napi_schedule(&ring_data->napi);
+		return false;
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
@@ -1026,7 +1030,7 @@
	int head;
	int bytes, pkts;

-	NETIF_TX_LOCK(ndev);
+	NETIF_TX_LOCK(ring);

	head = ring->next_to_use; /* ntu :soft setted ring position*/
	bytes = 0;
@@ -1034,7 +1038,7 @@
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

-	NETIF_TX_UNLOCK(ndev);
+	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
@@ -1042,15 +1046,23 @@
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
+	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
-	int clean_complete = ring_data->poll_one(
-		ring_data, budget, ring_data->ex_process);
+	struct hnae_ring *ring = ring_data->ring;

-	if (clean_complete >= 0 && clean_complete < budget) {
-		napi_complete(napi);
-		ring_data->fini_process(ring_data);
-		return 0;
+try_again:
+	clean_complete += ring_data->poll_one(
+		ring_data, budget - clean_complete,
+		ring_data->ex_process);
+
+	if (clean_complete < budget) {
+		if (ring_data->fini_process(ring_data)) {
+			napi_complete(napi);
+			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+		} else {
+			goto try_again;
+		}
	}

	return clean_complete;
@@ -1196,54 +1208,31 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx)
	napi_disable(&priv->ring_data[idx].napi);
}

-static void hns_set_irq_affinity(struct hns_nic_priv *priv)
+static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+				      struct hnae_ring *ring, cpumask_t *mask)
{
-	struct hnae_handle *h = priv->ae_handle;
-	struct hns_nic_ring_data *rd;
-	int i;
	int cpu;
-	cpumask_var_t mask;
-
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-		return;
-	/*diffrent irq banlance for 16core and 32core*/
-	if (h->q_num == num_possible_cpus()) {
-		for (i = 0; i < h->q_num * 2; i++) {
-			rd = &priv->ring_data[i];
-			if (cpu_online(rd->queue_index)) {
-				cpumask_clear(mask);
-				cpu = rd->queue_index;
-				cpumask_set_cpu(cpu, mask);
-				(void)irq_set_affinity_hint(rd->ring->irq,
-							    mask);
-			}
-		}
+	/* Different irq balance between 16-core and 32-core systems.
+	 * The cpu mask is set by ring index, according to the ring flag
+	 * that indicates whether the ring is tx or rx.
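+	 * (Worked example, from the mapping below: with q_num == 16 on a
+	 * 16-core system, tx ring 3 and rx ring 19 both pin to cpu 3; on
+	 * a 32-core system the same two rings pin to cpus 6 and 7.)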
+	 */
+	if (q_num == num_possible_cpus()) {
+		if (is_tx_ring(ring))
+			cpu = ring_idx;
+		else
+			cpu = ring_idx - q_num;
	} else {
-		for (i = 0; i < h->q_num; i++) {
-			rd = &priv->ring_data[i];
-			if (cpu_online(rd->queue_index * 2)) {
-				cpumask_clear(mask);
-				cpu = rd->queue_index * 2;
-				cpumask_set_cpu(cpu, mask);
-				(void)irq_set_affinity_hint(rd->ring->irq,
-							    mask);
-			}
-		}
-
-		for (i = h->q_num; i < h->q_num * 2; i++) {
-			rd = &priv->ring_data[i];
-			if (cpu_online(rd->queue_index * 2 + 1)) {
-				cpumask_clear(mask);
-				cpu = rd->queue_index * 2 + 1;
-				cpumask_set_cpu(cpu, mask);
-				(void)irq_set_affinity_hint(rd->ring->irq,
-							    mask);
-			}
-		}
+		if (is_tx_ring(ring))
+			cpu = ring_idx * 2;
+		else
+			cpu = (ring_idx - q_num) * 2 + 1;
	}

-	free_cpumask_var(mask);
+	cpumask_clear(mask);
+	cpumask_set_cpu(cpu, mask);
+
+	return cpu;
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
@@ -1252,6 +1241,7 @@
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
+	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
@@ -1261,7 +1251,7 @@
		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
-			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
@@ -1273,12 +1263,17 @@
			return ret;
		}
		disable_irq(rd->ring->irq);
+
+		cpu = hns_nic_init_affinity_mask(h->q_num, i,
+						 rd->ring, &rd->mask);
+
+		if (cpu_online(cpu))
+			irq_set_affinity_hint(rd->ring->irq,
+					      &rd->mask);
+
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

-	/*set cpu affinity*/
-	hns_set_irq_affinity(priv);
-
	return 0;
}
@@ -1487,32 +1482,259 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
	return (netdev_tx_t)ret;
}

+static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
+				  struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
+#define HNS_LB_TX_RING	0
+static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
+{
+	struct sk_buff *skb;
+	struct ethhdr *ethhdr;
+	int frame_len;
+
+	/* allocate test skb */
+	skb = alloc_skb(64, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	skb_put(skb, 64);
+	skb->dev = ndev;
+	memset(skb->data, 0xFF, skb->len);
+
+	/* must look like an IP packet */
+	ethhdr = (struct ethhdr *)skb->data;
+	ethhdr->h_proto = htons(ETH_P_IP);
+
+	frame_len = skb->len & (~1ul);
+	memset(&skb->data[frame_len / 2], 0xAA,
+	       frame_len / 2 - 1);
+
+	skb->queue_mapping = HNS_LB_TX_RING;
+
+	return skb;
+}
+
+static int hns_enable_serdes_lb(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct hnae_ae_ops *ops = h->dev->ops;
+	int speed, duplex;
+	int ret;
+
+	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
+	if (ret)
+		return ret;
+
+	ret = ops->start ? ops->start(h) : 0;
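+	/* (ops->start is an optional hook; the ternary above treats a
+	 * missing hook as success.) */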
+	if (ret)
+		return ret;
+
+	/* adjust the link: loopback speed, full duplex */
+	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+		speed = 1000;
+	else
+		speed = 10000;
+	duplex = 1;
+
+	ops->adjust_link(h, speed, duplex);
+
+	/* wait h/w ready */
+	mdelay(300);
+
+	return 0;
+}
+
+static void hns_disable_serdes_lb(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct hnae_ae_ops *ops = h->dev->ops;
+
+	ops->stop(h);
+	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
+}
+
+/**
+ *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has already
+ *fetched. It works as follows:
+ * 1. if an rx ring finds a page_offset that is not 0 between head and
+ *    tail, the chip fetched the wrong descs for that ring while its
+ *    buffer size was 4096.
+ * 2. we enable the chip serdes loopback and point the rss indirection
+ *    at that ring.
+ * 3. construct 64-byte IP broadcast packets and wait until the
+ *    associated rx ring has received them all, so it fetches new descs.
+ * 4. recover to the original state.
+ *
+ *@ndev: net device
+ */
+static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+	struct hnae_ae_ops *ops = h->dev->ops;
+	struct hns_nic_ring_data *rd;
+	struct hnae_ring *ring;
+	struct sk_buff *skb;
+	u32 *org_indir;
+	u32 *cur_indir;
+	int indir_size;
+	int head, tail;
+	int fetch_num;
+	int i, j;
+	bool found;
+	int retry_times;
+	int ret = 0;
+
+	/* alloc indir memory */
+	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
+	org_indir = kzalloc(indir_size, GFP_KERNEL);
+	if (!org_indir)
+		return -ENOMEM;
+
+	/* store the original indirection */
+	ops->get_rss(h, org_indir, NULL, NULL);
+
+	cur_indir = kzalloc(indir_size, GFP_KERNEL);
+	if (!cur_indir) {
+		ret = -ENOMEM;
+		goto cur_indir_alloc_err;
+	}
+
+	/* set loopback */
+	if (hns_enable_serdes_lb(ndev)) {
+		ret = -EINVAL;
+		goto enable_serdes_lb_err;
+	}
+
+	/* walk every rx ring and clear its fetched descs */
+	for (i = 0; i < h->q_num; i++) {
+		ring = &h->qs[i]->rx_ring;
+		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
+		found = false;
+		fetch_num = ring_dist(ring, head, tail);
+
+		while (head != tail) {
+			if (ring->desc_cb[head].page_offset != 0) {
+				found = true;
+				break;
+			}
+
+			head++;
+			if (head == ring->desc_num)
+				head = 0;
+		}
+
+		if (found) {
+			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
+				cur_indir[j] = i;
+			ops->set_rss(h, cur_indir, NULL, 0);
+
+			for (j = 0; j < fetch_num; j++) {
+				/* alloc one skb and init */
+				skb = hns_assemble_skb(ndev);
+				if (!skb)
+					goto out;
+				rd = &tx_ring_data(priv, skb->queue_mapping);
+				hns_nic_net_xmit_hw(ndev, skb, rd);
+
+				retry_times = 0;
+				while (retry_times++ < 10) {
+					mdelay(10);
+					/* clean rx */
+					rd = &rx_ring_data(priv, i);
+					if (rd->poll_one(rd, fetch_num,
+							 hns_nic_drop_rx_fetch))
+						break;
+				}
+
+				retry_times = 0;
+				while (retry_times++ < 10) {
+					mdelay(10);
+					/* reclaim the packets sent on tx ring 0 */
+					rd = &tx_ring_data(priv,
+							   HNS_LB_TX_RING);
+					if (rd->poll_one(rd, fetch_num, NULL))
+						break;
+				}
+			}
+		}
+	}
+
+out:
+	/* restore everything */
+	ops->set_rss(h, org_indir, NULL, 0);
+	hns_disable_serdes_lb(ndev);
+enable_serdes_lb_err:
+	kfree(cur_indir);
+cur_indir_alloc_err:
+	kfree(org_indir);
+
+	return ret;
+}
+
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
+	bool if_running = netif_running(ndev);
	int ret;

+	/* MTU < 68 is an error and causes problems on some kernels */
+	if (new_mtu < 68)
+		return -EINVAL;
+
+	/* MTU no change */
+	if (new_mtu == ndev->mtu)
+		return 0;
+
	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

-	if (netif_running(ndev)) {
+	if (if_running) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);
+	}

-		ret = h->dev->ops->set_mtu(h, new_mtu);
-		if (ret)
-			netdev_err(ndev, "set mtu fail, return value %d\n",
-				   ret);
+	if (priv->enet_ver != AE_VERSION_1 &&
+	    ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
+	    new_mtu > BD_SIZE_2048_MAX_MTU) {
+		/* update desc */
+		hnae_reinit_all_ring_desc(h);

-		if (hns_nic_net_open(ndev))
-			netdev_err(ndev, "hns net open fail\n");
-	} else {
-		ret = h->dev->ops->set_mtu(h, new_mtu);
+		/* clear the packets the chip has already fetched */
+		ret = hns_nic_clear_all_rx_fetch(ndev);
+
+		/* the page offset must be consistent with the descs */
+		hnae_reinit_all_ring_page_off(h);
+
+		if (ret) {
+			netdev_err(ndev, "clear the fetched desc fail\n");
+			goto out;
+		}
+	}
+
+	ret = h->dev->ops->set_mtu(h, new_mtu);
+	if (ret) {
+		netdev_err(ndev, "set mtu fail, return value %d\n",
+			   ret);
+		goto out;
	}

-	if (!ret)
-		ndev->mtu = new_mtu;
+	/* finally, set the new mtu on the netdevice */
+	ndev->mtu = new_mtu;
+
+out:
+	if (if_running) {
+		if (hns_nic_net_open(ndev)) {
+			netdev_err(ndev, "hns net open fail\n");
+			ret = -EINVAL;
+		}
+	}

	return ret;
}
@@ -1791,7 +2013,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
-
+	/* make sure all prior work is visible before the flag is cleared */
	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 5b412de350aa28e9099ee251e0824ed66f37b2b1..1b83232082b2a22244b77a05f4125a69cc4a5f32 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -37,10 +37,11 @@ enum hns_nic_state {
struct hns_nic_ring_data {
	struct hnae_ring *ring;
	struct napi_struct napi;
+	cpumask_t mask; /* affinity mask */
	int queue_index;
	int (*poll_one)(struct hns_nic_ring_data *, int, void *);
	void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
-	void (*fini_process)(struct hns_nic_ring_data *);
+	bool (*fini_process)(struct hns_nic_ring_data *);
};

/* compatible the difference between two versions */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3ac2183dbd2119e0746d35510fcfc1e390ab6d9b..b8fab149690f880394f0d973560aecca69d1171e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -146,7 +146,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
		/* When there is no phy, autoneg is off.
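		 * With no attached PHY there is nothing to negotiate with,
		 * so the fixed speed/duplex read back from the MAC is
		 * reported instead.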
*/ cmd->base.autoneg = false; - cmd->base.cmd = speed; + cmd->base.speed = speed; cmd->base.duplex = duplex; if (net_dev->phydev) @@ -764,14 +764,14 @@ static int hns_get_coalesce(struct net_device *net_dev, ec->use_adaptive_tx_coalesce = 1; if ((!ops->get_coalesce_usecs) || - (!ops->get_rx_max_coalesced_frames)) + (!ops->get_max_coalesced_frames)) return -ESRCH; ops->get_coalesce_usecs(priv->ae_handle, &ec->tx_coalesce_usecs, &ec->rx_coalesce_usecs); - ops->get_rx_max_coalesced_frames( + ops->get_max_coalesced_frames( priv->ae_handle, &ec->tx_max_coalesced_frames, &ec->rx_max_coalesced_frames); @@ -801,30 +801,28 @@ static int hns_set_coalesce(struct net_device *net_dev, { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; - int ret; + int rc1, rc2; ops = priv->ae_handle->dev->ops; if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) return -EINVAL; - if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames) - return -EINVAL; - if ((!ops->set_coalesce_usecs) || (!ops->set_coalesce_frames)) return -ESRCH; - ret = ops->set_coalesce_usecs(priv->ae_handle, + rc1 = ops->set_coalesce_usecs(priv->ae_handle, ec->rx_coalesce_usecs); - if (ret) - return ret; - ret = ops->set_coalesce_frames( - priv->ae_handle, - ec->rx_max_coalesced_frames); + rc2 = ops->set_coalesce_frames(priv->ae_handle, + ec->tx_max_coalesced_frames, + ec->rx_max_coalesced_frames); - return ret; + if (rc1 || rc2) + return -EINVAL; + + return 0; } /** @@ -1253,12 +1251,10 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key, ops = priv->ae_handle->dev->ops; - /* currently hfunc can only be Toeplitz hash */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { + netdev_err(netdev, "Invalid hfunc!\n"); return -EOPNOTSUPP; - if (!indir) - return 0; + } return ops->set_rss(priv->ae_handle, indir, key, hfunc); } diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 501eb2090ca62bcd118abc136e4c433bdaa38eb6..e5221d95afe195583b7496130cf1a544d1b1f97f 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -23,17 +23,9 @@ #include #include #include -#include #define MDIO_DRV_NAME "Hi-HNS_MDIO" #define MDIO_BUS_NAME "Hisilicon MII Bus" -#define MDIO_DRV_VERSION "1.3.0" -#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation." 
-#define MDIO_DRV_STRING MDIO_BUS_NAME -#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME - -#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) -#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) #define MDIO_TIMEOUT 1000000 @@ -64,9 +56,7 @@ struct hns_mdio_device { #define MDIO_CMD_DEVAD_S 0 #define MDIO_CMD_PRTAD_M 0x1f #define MDIO_CMD_PRTAD_S 5 -#define MDIO_CMD_OP_M 0x3 #define MDIO_CMD_OP_S 10 -#define MDIO_CMD_ST_M 0x3 #define MDIO_CMD_ST_S 12 #define MDIO_CMD_START_B 14 @@ -185,18 +175,20 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev, static int hns_mdio_wait_ready(struct mii_bus *bus) { struct hns_mdio_device *mdio_dev = bus->priv; + u32 cmd_reg_value; int i; - u32 cmd_reg_value = 1; /* waitting for MDIO_COMMAND_REG 's mdio_start==0 */ /* after that can do read or write*/ - for (i = 0; cmd_reg_value; i++) { + for (i = 0; i < MDIO_TIMEOUT; i++) { cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev, MDIO_COMMAND_REG, MDIO_CMD_START_B); - if (i == MDIO_TIMEOUT) - return -ETIMEDOUT; + if (!cmd_reg_value) + break; } + if ((i == MDIO_TIMEOUT) && cmd_reg_value) + return -ETIMEDOUT; return 0; } diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile index eba21835d90de8f88280ff82618fa9925c777c05..98768ba0955a1b3b45518b020f9eb6fda0770b5b 100644 --- a/drivers/net/ethernet/ibm/emac/Makefile +++ b/drivers/net/ethernet/ibm/emac/Makefile @@ -8,4 +8,3 @@ ibm_emac-y := mal.o core.o phy.o ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o -ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index c44036d5761a4cbec301afb63393d49b47bac867..508923f39ccfe0cbc39ed23305af46860eef32c9 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1929,7 +1929,7 @@ static struct net_device_stats *emac_stats(struct net_device *ndev) struct emac_instance *dev = netdev_priv(ndev); struct emac_stats *st = &dev->stats; struct emac_error_stats *est = &dev->estats; - struct net_device_stats *nst = &dev->nstats; + struct net_device_stats *nst = &ndev->stats; unsigned long flags; DBG2(dev, "stats" NL); @@ -3173,8 +3173,6 @@ static int emac_probe(struct platform_device *ofdev) printk("%s: found %s PHY (0x%02x)\n", ndev->name, dev->phy.def->name, dev->phy.address); - emac_dbg_register(dev); - /* Life is good */ return 0; @@ -3243,7 +3241,6 @@ static int emac_remove(struct platform_device *ofdev) mal_unregister_commac(dev->mal, &dev->commac); emac_put_deps(dev); - emac_dbg_unregister(dev); iounmap(dev->emacp); if (dev->wol_irq) @@ -3326,9 +3323,6 @@ static int __init emac_init(void) printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n"); - /* Init debug stuff */ - emac_init_debug(); - /* Build EMAC boot list */ emac_make_bootlist(); @@ -3373,7 +3367,6 @@ static void __exit emac_exit(void) rgmii_exit(); zmii_exit(); mal_exit(); - emac_fini_debug(); /* Destroy EMAC boot list */ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 0710a6685489355fdd74b89259be8b360b52c557..f10e156641d511d2d5d6a133dd3a8df18498cf69 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -265,7 +265,6 @@ struct emac_instance { /* Stats */ struct emac_error_stats estats; - struct net_device_stats nstats; struct emac_stats stats; /* Misc diff --git a/drivers/net/ethernet/ibm/emac/debug.c 
b/drivers/net/ethernet/ibm/emac/debug.c deleted file mode 100644 index a559f326bf631dcfcdde4cd4c341794f9589ad55..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ibm/emac/debug.c +++ /dev/null @@ -1,270 +0,0 @@ -/* - * drivers/net/ethernet/ibm/emac/debug.c - * - * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. - * - * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. - * - * - * Based on the arch/ppc version of the driver: - * - * Copyright (c) 2004, 2005 Zultys Technologies - * Eugene Surovegin or - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ -#include -#include -#include -#include -#include -#include - -#include "core.h" - -static DEFINE_SPINLOCK(emac_dbg_lock); - -static void emac_desc_dump(struct emac_instance *p) -{ - int i; - printk("** EMAC %s TX BDs **\n" - " tx_cnt = %d tx_slot = %d ack_slot = %d\n", - p->ofdev->dev.of_node->full_name, - p->tx_cnt, p->tx_slot, p->ack_slot); - for (i = 0; i < NUM_TX_BUFF / 2; ++i) - printk - ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", - i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ', - p->tx_desc[i].ctrl, p->tx_desc[i].data_len, - NUM_TX_BUFF / 2 + i, - p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr, - p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ', - p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl, - p->tx_desc[NUM_TX_BUFF / 2 + i].data_len); - - printk("** EMAC %s RX BDs **\n" - " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" - " rx_sg_skb = 0x%p\n", - p->ofdev->dev.of_node->full_name, - p->rx_slot, p->commac.flags, p->rx_skb_size, - p->rx_sync_size, p->rx_sg_skb); - for (i = 0; i < NUM_RX_BUFF / 2; ++i) - printk - ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", - i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ', - p->rx_desc[i].ctrl, p->rx_desc[i].data_len, - NUM_RX_BUFF / 2 + i, - p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr, - p->rx_skb[NUM_RX_BUFF / 2 + i] ? 
'V' : ' ', - p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl, - p->rx_desc[NUM_RX_BUFF / 2 + i].data_len); -} - -static void emac_mac_dump(struct emac_instance *dev) -{ - struct emac_regs __iomem *p = dev->emacp; - const int xaht_regs = EMAC_XAHT_REGS(dev); - u32 *gaht_base = emac_gaht_base(dev); - u32 *iaht_base = emac_iaht_base(dev); - int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC); - int n; - - printk("** EMAC %s registers **\n" - "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" - "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" - "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", - dev->ofdev->dev.of_node->full_name, - in_be32(&p->mr0), in_be32(&p->mr1), - in_be32(&p->tmr0), in_be32(&p->tmr1), - in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), - in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), - in_be32(&p->vtci) - ); - - if (emac4sync) - printk("MAR = %04x%08x MMAR = %04x%08x\n", - in_be32(&p->u0.emac4sync.mahr), - in_be32(&p->u0.emac4sync.malr), - in_be32(&p->u0.emac4sync.mmahr), - in_be32(&p->u0.emac4sync.mmalr) - ); - - for (n = 0; n < xaht_regs; n++) - printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n)); - - for (n = 0; n < xaht_regs; n++) - printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n)); - - printk("LSA = %04x%08x IPGVR = 0x%04x\n" - "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" - "OCTX = 0x%08x OCRX = 0x%08x\n", - in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), - in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), - in_be32(&p->octx), in_be32(&p->ocrx) - ); - - if (!emac4sync) { - printk("IPCR = 0x%08x\n", - in_be32(&p->u1.emac4.ipcr) - ); - } else { - printk("REVID = 0x%08x TPC = 0x%08x\n", - in_be32(&p->u1.emac4sync.revid), - in_be32(&p->u1.emac4sync.tpc) - ); - } - - emac_desc_dump(dev); -} - -static void emac_mal_dump(struct mal_instance *mal) -{ - int i; - - printk("** MAL %s Registers **\n" - "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" - "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" - "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", - mal->ofdev->dev.of_node->full_name, - get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), - get_mal_dcrn(mal, MAL_IER), - get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), - get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR), - get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR), - get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR) - ); - - printk("TX|"); - for (i = 0; i < mal->num_tx_chans; ++i) { - if (i && !(i % 4)) - printk("\n "); - printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i))); - } - printk("\nRX|"); - for (i = 0; i < mal->num_rx_chans; ++i) { - if (i && !(i % 4)) - printk("\n "); - printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i))); - } - printk("\n "); - for (i = 0; i < mal->num_rx_chans; ++i) { - u32 r = get_mal_dcrn(mal, MAL_RCBS(i)); - if (i && !(i % 3)) - printk("\n "); - printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16); - } - printk("\n"); -} - -static struct emac_instance *__emacs[4]; -static struct mal_instance *__mals[1]; - -void emac_dbg_register(struct emac_instance *dev) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&emac_dbg_lock, flags); - for (i = 0; i < ARRAY_SIZE(__emacs); i++) - if (__emacs[i] == NULL) { - __emacs[i] = dev; - break; - } - spin_unlock_irqrestore(&emac_dbg_lock, flags); -} - -void emac_dbg_unregister(struct emac_instance *dev) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&emac_dbg_lock, flags); - for (i = 0; i < 
ARRAY_SIZE(__emacs); i++) - if (__emacs[i] == dev) { - __emacs[i] = NULL; - break; - } - spin_unlock_irqrestore(&emac_dbg_lock, flags); -} - -void mal_dbg_register(struct mal_instance *mal) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&emac_dbg_lock, flags); - for (i = 0; i < ARRAY_SIZE(__mals); i++) - if (__mals[i] == NULL) { - __mals[i] = mal; - break; - } - spin_unlock_irqrestore(&emac_dbg_lock, flags); -} - -void mal_dbg_unregister(struct mal_instance *mal) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&emac_dbg_lock, flags); - for (i = 0; i < ARRAY_SIZE(__mals); i++) - if (__mals[i] == mal) { - __mals[i] = NULL; - break; - } - spin_unlock_irqrestore(&emac_dbg_lock, flags); -} - -void emac_dbg_dump_all(void) -{ - unsigned int i; - unsigned long flags; - - spin_lock_irqsave(&emac_dbg_lock, flags); - - for (i = 0; i < ARRAY_SIZE(__mals); ++i) - if (__mals[i]) - emac_mal_dump(__mals[i]); - - for (i = 0; i < ARRAY_SIZE(__emacs); ++i) - if (__emacs[i]) - emac_mac_dump(__emacs[i]); - - spin_unlock_irqrestore(&emac_dbg_lock, flags); -} - -#if defined(CONFIG_MAGIC_SYSRQ) -static void emac_sysrq_handler(int key) -{ - emac_dbg_dump_all(); -} - -static struct sysrq_key_op emac_sysrq_op = { - .handler = emac_sysrq_handler, - .help_msg = "emac(c)", - .action_msg = "Show EMAC(s) status", -}; - -int __init emac_init_debug(void) -{ - return register_sysrq_key('c', &emac_sysrq_op); -} - -void __exit emac_fini_debug(void) -{ - unregister_sysrq_key('c', &emac_sysrq_op); -} - -#else -int __init emac_init_debug(void) -{ - return 0; -} -void __exit emac_fini_debug(void) -{ -} -#endif /* CONFIG_MAGIC_SYSRQ */ diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h index 9c45efe4c8fecfdc0e16371ee67a622e4bfcbdbe..5bdfc174a07e2cb25b040dbb1d14a00f5abb2852 100644 --- a/drivers/net/ethernet/ibm/emac/debug.h +++ b/drivers/net/ethernet/ibm/emac/debug.h @@ -25,32 +25,9 @@ #include "core.h" #if defined(CONFIG_IBM_EMAC_DEBUG) - -struct emac_instance; -struct mal_instance; - -void emac_dbg_register(struct emac_instance *dev); -void emac_dbg_unregister(struct emac_instance *dev); -void mal_dbg_register(struct mal_instance *mal); -void mal_dbg_unregister(struct mal_instance *mal); -int emac_init_debug(void) __init; -void emac_fini_debug(void) __exit; -void emac_dbg_dump_all(void); - # define DBG_LEVEL 1 - #else - -# define emac_dbg_register(x) do { } while(0) -# define emac_dbg_unregister(x) do { } while(0) -# define mal_dbg_register(x) do { } while(0) -# define mal_dbg_unregister(x) do { } while(0) -# define emac_init_debug() do { } while(0) -# define emac_fini_debug() do { } while(0) -# define emac_dbg_dump_all() do { } while(0) - # define DBG_LEVEL 0 - #endif #define EMAC_DBG(d, name, fmt, arg...) 
\ diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index cd3227b088b73f51b68951401eefc7e2d1e11470..91b1a558f37d65d218404c802c8e932c8828f43b 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -695,8 +695,6 @@ static int mal_probe(struct platform_device *ofdev) wmb(); platform_set_drvdata(ofdev, mal); - mal_dbg_register(mal); - return 0; fail6: @@ -740,8 +738,6 @@ static int mal_remove(struct platform_device *ofdev) mal_reset(mal); - mal_dbg_unregister(mal); - dma_free_coherent(&ofdev->dev, sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 7acda04d034e909269bf7c75befe125313150870..ed8780cca982cd6935eadb8a39a2e80cdc00448e 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -146,7 +146,6 @@ struct ibmveth_adapter { struct vio_dev *vdev; struct net_device *netdev; struct napi_struct napi; - struct net_device_stats stats; unsigned int mcastFilterSize; void * buffer_list_addr; void * filter_list_addr; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b23d6545f83562b8d9d12b4a885514dfd5257743..4fcd2f0378bad26d91ee2f5c8840dff16cfe836f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -65,7 +65,6 @@ #include #include #include -#include #include #include #include @@ -75,6 +74,7 @@ #include #include #include +#include #include "ibmvnic.h" @@ -89,7 +89,6 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION); static int ibmvnic_version = IBMVNIC_INITIAL_VERSION; static int ibmvnic_remove(struct vio_dev *); static void release_sub_crqs(struct ibmvnic_adapter *); -static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *); static int ibmvnic_reset_crq(struct ibmvnic_adapter *); static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); @@ -110,6 +109,11 @@ static int ibmvnic_poll(struct napi_struct *napi, int data); static void send_map_query(struct ibmvnic_adapter *adapter); static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); static void send_request_unmap(struct ibmvnic_adapter *, u8); +static void send_login(struct ibmvnic_adapter *adapter); +static void send_cap_queries(struct ibmvnic_adapter *adapter); +static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); +static int ibmvnic_init(struct ibmvnic_adapter *); +static void release_crq_queue(struct ibmvnic_adapter *); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -159,21 +163,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, return rc; } -/* net_device_ops functions */ - -static void init_rx_pool(struct ibmvnic_adapter *adapter, - struct ibmvnic_rx_pool *rx_pool, int num, int index, - int buff_size, int active) -{ - netdev_dbg(adapter->netdev, - "Initializing rx_pool %d, %d buffs, %d bytes each\n", - index, num, buff_size); - rx_pool->size = num; - rx_pool->index = index; - rx_pool->buff_size = buff_size; - rx_pool->active = active; -} - static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { @@ -202,45 +191,12 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, { struct device *dev = &adapter->vdev->dev; - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + if (!ltb->buff) + return; + if (!adapter->failover) send_request_unmap(adapter, 
				   ltb->map_id);
-}
-
-static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
-			 struct ibmvnic_rx_pool *pool)
-{
-	struct device *dev = &adapter->vdev->dev;
-	int i;
-
-	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
-	if (!pool->free_map)
-		return -ENOMEM;
-
-	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
-				GFP_KERNEL);
-
-	if (!pool->rx_buff) {
-		dev_err(dev, "Couldn't alloc rx buffers\n");
-		kfree(pool->free_map);
-		return -ENOMEM;
-	}
-
-	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
-				 pool->size * pool->buff_size)) {
-		kfree(pool->free_map);
-		kfree(pool->rx_buff);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < pool->size; ++i)
-		pool->free_map[i] = i;
-
-	atomic_set(&pool->available, 0);
-	pool->next_alloc = 0;
-	pool->next_free = 0;
-
-	return 0;
+	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
@@ -347,93 +303,195 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
	}
}

-static void free_rx_pool(struct ibmvnic_adapter *adapter,
-			 struct ibmvnic_rx_pool *pool)
+static void release_stats_token(struct ibmvnic_adapter *adapter)
{
-	int i;
+	struct device *dev = &adapter->vdev->dev;
+
+	if (!adapter->stats_token)
+		return;
+
+	dma_unmap_single(dev, adapter->stats_token,
+			 sizeof(struct ibmvnic_statistics),
+			 DMA_FROM_DEVICE);
+	adapter->stats_token = 0;
+}
+
+static int init_stats_token(struct ibmvnic_adapter *adapter)
+{
+	struct device *dev = &adapter->vdev->dev;
+	dma_addr_t stok;
+
+	stok = dma_map_single(dev, &adapter->stats,
+			      sizeof(struct ibmvnic_statistics),
+			      DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, stok)) {
+		dev_err(dev, "Couldn't map stats buffer\n");
+		return -1;
+	}
+
+	adapter->stats_token = stok;
+	return 0;
+}

-	kfree(pool->free_map);
-	pool->free_map = NULL;
+static void release_rx_pools(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_rx_pool *rx_pool;
+	int rx_scrqs;
+	int i, j;

-	if (!pool->rx_buff)
+	if (!adapter->rx_pool)
		return;

-	for (i = 0; i < pool->size; i++) {
-		if (pool->rx_buff[i].skb) {
-			dev_kfree_skb_any(pool->rx_buff[i].skb);
-			pool->rx_buff[i].skb = NULL;
+	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+	for (i = 0; i < rx_scrqs; i++) {
+		rx_pool = &adapter->rx_pool[i];
+
+		kfree(rx_pool->free_map);
+		free_long_term_buff(adapter, &rx_pool->long_term_buff);
+
+		if (!rx_pool->rx_buff)
+			continue;
+
+		for (j = 0; j < rx_pool->size; j++) {
+			if (rx_pool->rx_buff[j].skb) {
+				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+				rx_pool->rx_buff[j].skb = NULL;
+			}
		}
+
+		kfree(rx_pool->rx_buff);
	}
-	kfree(pool->rx_buff);
-	pool->rx_buff = NULL;
+
+	kfree(adapter->rx_pool);
+	adapter->rx_pool = NULL;
}

-static int ibmvnic_open(struct net_device *netdev)
+static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
-	struct ibmvnic_tx_pool *tx_pool;
-	union ibmvnic_crq crq;
+	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
-	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
-	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-	tx_subcrqs =
-	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
-			     be32_to_cpu(adapter->login_rsp_buf->
-					 off_rxadd_buff_size));
-	adapter->map_id = 1;
-	adapter->napi = kcalloc(adapter->req_rx_queues,
-				sizeof(struct napi_struct), GFP_KERNEL);
-	if
(!adapter->napi) - goto alloc_napi_failed; - for (i = 0; i < adapter->req_rx_queues; i++) { - netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, - NAPI_POLL_WEIGHT); - napi_enable(&adapter->napi[i]); + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); + + adapter->rx_pool = kcalloc(rxadd_subcrqs, + sizeof(struct ibmvnic_rx_pool), + GFP_KERNEL); + if (!adapter->rx_pool) { + dev_err(dev, "Failed to allocate rx pools\n"); + return -1; } - adapter->rx_pool = - kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL); - if (!adapter->rx_pool) - goto rx_pool_arr_alloc_failed; - send_map_query(adapter); for (i = 0; i < rxadd_subcrqs; i++) { - init_rx_pool(adapter, &adapter->rx_pool[i], - adapter->req_rx_add_entries_per_subcrq, i, - be64_to_cpu(size_array[i]), 1); - if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) { - dev_err(dev, "Couldn't alloc rx pool\n"); - goto rx_pool_alloc_failed; + rx_pool = &adapter->rx_pool[i]; + + netdev_dbg(adapter->netdev, + "Initializing rx_pool %d, %lld buffs, %lld bytes each\n", + i, adapter->req_rx_add_entries_per_subcrq, + be64_to_cpu(size_array[i])); + + rx_pool->size = adapter->req_rx_add_entries_per_subcrq; + rx_pool->index = i; + rx_pool->buff_size = be64_to_cpu(size_array[i]); + rx_pool->active = 1; + + rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), + GFP_KERNEL); + if (!rx_pool->free_map) { + release_rx_pools(adapter); + return -1; + } + + rx_pool->rx_buff = kcalloc(rx_pool->size, + sizeof(struct ibmvnic_rx_buff), + GFP_KERNEL); + if (!rx_pool->rx_buff) { + dev_err(dev, "Couldn't alloc rx buffers\n"); + release_rx_pools(adapter); + return -1; + } + + if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, + rx_pool->size * rx_pool->buff_size)) { + release_rx_pools(adapter); + return -1; } + + for (j = 0; j < rx_pool->size; ++j) + rx_pool->free_map[j] = j; + + atomic_set(&rx_pool->available, 0); + rx_pool->next_alloc = 0; + rx_pool->next_free = 0; + } + + return 0; +} + +static void release_tx_pools(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_tx_pool *tx_pool; + int i, tx_scrqs; + + if (!adapter->tx_pool) + return; + + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + for (i = 0; i < tx_scrqs; i++) { + tx_pool = &adapter->tx_pool[i]; + kfree(tx_pool->tx_buff); + free_long_term_buff(adapter, &tx_pool->long_term_buff); + kfree(tx_pool->free_map); } - adapter->tx_pool = - kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + kfree(adapter->tx_pool); + adapter->tx_pool = NULL; +} + +static int init_tx_pools(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_pool *tx_pool; + int tx_subcrqs; + int i, j; + + tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + adapter->tx_pool = kcalloc(tx_subcrqs, + sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) - goto tx_pool_arr_alloc_failed; + return -1; + for (i = 0; i < tx_subcrqs; i++) { tx_pool = &adapter->tx_pool[i]; - tx_pool->tx_buff = - kcalloc(adapter->req_tx_entries_per_subcrq, - sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); - if (!tx_pool->tx_buff) - goto tx_pool_alloc_failed; + tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq, + sizeof(struct ibmvnic_tx_buff), + GFP_KERNEL); + if (!tx_pool->tx_buff) { + dev_err(dev, "tx pool buffer allocation failed\n"); + release_tx_pools(adapter); + return -1; + } if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, 
adapter->req_tx_entries_per_subcrq * - adapter->req_mtu)) - goto tx_ltb_alloc_failed; + adapter->req_mtu)) { + release_tx_pools(adapter); + return -1; + } - tx_pool->free_map = - kcalloc(adapter->req_tx_entries_per_subcrq, - sizeof(int), GFP_KERNEL); - if (!tx_pool->free_map) - goto tx_fm_alloc_failed; + tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq, + sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) { + release_tx_pools(adapter); + return -1; + } for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++) tx_pool->free_map[j] = j; @@ -441,20 +499,183 @@ static int ibmvnic_open(struct net_device *netdev) tx_pool->consumer_index = 0; tx_pool->producer_index = 0; } - adapter->bounce_buffer_size = - (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1; - adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size, - GFP_KERNEL); - if (!adapter->bounce_buffer) - goto bounce_alloc_failed; - adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer, - adapter->bounce_buffer_size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { - dev_err(dev, "Couldn't map tx bounce buffer\n"); - goto bounce_map_failed; + return 0; +} + +static void release_error_buffers(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_error_buff *error_buff, *tmp; + unsigned long flags; + + spin_lock_irqsave(&adapter->error_list_lock, flags); + list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) { + list_del(&error_buff->list); + dma_unmap_single(dev, error_buff->dma, error_buff->len, + DMA_FROM_DEVICE); + kfree(error_buff->buff); + kfree(error_buff); + } + spin_unlock_irqrestore(&adapter->error_list_lock, flags); +} + +static int ibmvnic_login(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + unsigned long timeout = msecs_to_jiffies(30000); + struct device *dev = &adapter->vdev->dev; + + do { + if (adapter->renegotiate) { + adapter->renegotiate = false; + release_sub_crqs(adapter); + + reinit_completion(&adapter->init_done); + send_cap_queries(adapter); + if (!wait_for_completion_timeout(&adapter->init_done, + timeout)) { + dev_err(dev, "Capabilities query timeout\n"); + return -1; + } + } + + reinit_completion(&adapter->init_done); + send_login(adapter); + if (!wait_for_completion_timeout(&adapter->init_done, + timeout)) { + dev_err(dev, "Login timeout\n"); + return -1; + } + } while (adapter->renegotiate); + + return 0; +} + +static void release_resources(struct ibmvnic_adapter *adapter) +{ + release_tx_pools(adapter); + release_rx_pools(adapter); + + release_stats_token(adapter); + release_error_buffers(adapter); +} + +static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) +{ + struct net_device *netdev = adapter->netdev; + unsigned long timeout = msecs_to_jiffies(30000); + union ibmvnic_crq crq; + bool resend; + int rc; + + if (adapter->logical_link_state == link_state) { + netdev_dbg(netdev, "Link state already %d\n", link_state); + return 0; + } + + netdev_err(netdev, "setting link state %d\n", link_state); + memset(&crq, 0, sizeof(crq)); + crq.logical_link_state.first = IBMVNIC_CRQ_CMD; + crq.logical_link_state.cmd = LOGICAL_LINK_STATE; + crq.logical_link_state.link_state = link_state; + + do { + resend = false; + + reinit_completion(&adapter->init_done); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) { + netdev_err(netdev, "Failed to set link state\n"); + return rc; + } + + if (!wait_for_completion_timeout(&adapter->init_done, + timeout)) { + 
netdev_err(netdev, "timeout setting link state\n"); + return -1; + } + + if (adapter->init_done_rc == 1) { + /* Partuial success, delay and re-send */ + mdelay(1000); + resend = true; + } + } while (resend); + + return 0; +} + +static int set_real_num_queues(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int rc; + + rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); + if (rc) { + netdev_err(netdev, "failed to set the number of tx queues\n"); + return rc; + } + + rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues); + if (rc) + netdev_err(netdev, "failed to set the number of rx queues\n"); + + return rc; +} + +static int ibmvnic_open(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->vdev->dev; + int rc = 0; + int i; + + if (adapter->is_closed) { + rc = ibmvnic_init(adapter); + if (rc) + return rc; + } + + rc = ibmvnic_login(netdev); + if (rc) + return rc; + + rc = set_real_num_queues(netdev); + if (rc) + return rc; + + rc = init_sub_crq_irqs(adapter); + if (rc) { + dev_err(dev, "failed to initialize sub crq irqs\n"); + return -1; + } + + rc = init_stats_token(adapter); + if (rc) + return rc; + + adapter->map_id = 1; + adapter->napi = kcalloc(adapter->req_rx_queues, + sizeof(struct napi_struct), GFP_KERNEL); + if (!adapter->napi) + goto ibmvnic_open_fail; + for (i = 0; i < adapter->req_rx_queues; i++) { + netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, + NAPI_POLL_WEIGHT); + napi_enable(&adapter->napi[i]); } + + send_map_query(adapter); + + rc = init_rx_pools(netdev); + if (rc) + goto ibmvnic_open_fail; + + rc = init_tx_pools(netdev); + if (rc) + goto ibmvnic_open_fail; + replenish_pools(adapter); /* We're ready to receive frames, enable the sub-crq interrupts and @@ -466,106 +687,63 @@ static int ibmvnic_open(struct net_device *netdev) for (i = 0; i < adapter->req_tx_queues; i++) enable_scrq_irq(adapter, adapter->tx_scrq[i]); - memset(&crq, 0, sizeof(crq)); - crq.logical_link_state.first = IBMVNIC_CRQ_CMD; - crq.logical_link_state.cmd = LOGICAL_LINK_STATE; - crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP; - ibmvnic_send_crq(adapter, &crq); + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); + if (rc) + goto ibmvnic_open_fail; netif_tx_start_all_queues(netdev); + adapter->is_closed = false; return 0; -bounce_map_failed: - kfree(adapter->bounce_buffer); -bounce_alloc_failed: - i = tx_subcrqs - 1; - kfree(adapter->tx_pool[i].free_map); -tx_fm_alloc_failed: - free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff); -tx_ltb_alloc_failed: - kfree(adapter->tx_pool[i].tx_buff); -tx_pool_alloc_failed: - for (j = 0; j < i; j++) { - kfree(adapter->tx_pool[j].tx_buff); - free_long_term_buff(adapter, - &adapter->tx_pool[j].long_term_buff); - kfree(adapter->tx_pool[j].free_map); - } - kfree(adapter->tx_pool); - adapter->tx_pool = NULL; -tx_pool_arr_alloc_failed: - i = rxadd_subcrqs; -rx_pool_alloc_failed: - for (j = 0; j < i; j++) { - free_rx_pool(adapter, &adapter->rx_pool[j]); - free_long_term_buff(adapter, - &adapter->rx_pool[j].long_term_buff); - } - kfree(adapter->rx_pool); - adapter->rx_pool = NULL; -rx_pool_arr_alloc_failed: +ibmvnic_open_fail: for (i = 0; i < adapter->req_rx_queues; i++) napi_disable(&adapter->napi[i]); -alloc_napi_failed: + release_resources(adapter); return -ENOMEM; } +static void disable_sub_crqs(struct ibmvnic_adapter *adapter) +{ + int i; + + if (adapter->tx_scrq) { + for (i = 0; i < 
adapter->req_tx_queues; i++) + if (adapter->tx_scrq[i]) + disable_irq(adapter->tx_scrq[i]->irq); + } + + if (adapter->rx_scrq) { + for (i = 0; i < adapter->req_rx_queues; i++) + if (adapter->rx_scrq[i]) + disable_irq(adapter->rx_scrq[i]->irq); + } +} + static int ibmvnic_close(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - struct device *dev = &adapter->vdev->dev; - union ibmvnic_crq crq; + int rc = 0; int i; adapter->closing = true; + disable_sub_crqs(adapter); - for (i = 0; i < adapter->req_rx_queues; i++) - napi_disable(&adapter->napi[i]); + if (adapter->napi) { + for (i = 0; i < adapter->req_rx_queues; i++) + napi_disable(&adapter->napi[i]); + } if (!adapter->failover) netif_tx_stop_all_queues(netdev); - if (adapter->bounce_buffer) { - if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { - dma_unmap_single(&adapter->vdev->dev, - adapter->bounce_buffer_dma, - adapter->bounce_buffer_size, - DMA_BIDIRECTIONAL); - adapter->bounce_buffer_dma = DMA_ERROR_CODE; - } - kfree(adapter->bounce_buffer); - adapter->bounce_buffer = NULL; - } + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); - memset(&crq, 0, sizeof(crq)); - crq.logical_link_state.first = IBMVNIC_CRQ_CMD; - crq.logical_link_state.cmd = LOGICAL_LINK_STATE; - crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN; - ibmvnic_send_crq(adapter, &crq); - - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); - i++) { - kfree(adapter->tx_pool[i].tx_buff); - free_long_term_buff(adapter, - &adapter->tx_pool[i].long_term_buff); - kfree(adapter->tx_pool[i].free_map); - } - kfree(adapter->tx_pool); - adapter->tx_pool = NULL; - - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); - i++) { - free_rx_pool(adapter, &adapter->rx_pool[i]); - free_long_term_buff(adapter, - &adapter->rx_pool[i].long_term_buff); - } - kfree(adapter->rx_pool); - adapter->rx_pool = NULL; + release_resources(adapter); + adapter->is_closed = true; adapter->closing = false; - - return 0; + return rc; } /** @@ -714,7 +892,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_bytes = 0; dma_addr_t data_dma_addr; struct netdev_queue *txq; - bool used_bounce = false; unsigned long lpar_rc; union sub_crq tx_crq; unsigned int offset; @@ -731,9 +908,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) be32_to_cpu(adapter->login_rsp_buf-> off_txsubm_subcrqs)); if (adapter->migrated) { + if (!netif_subqueue_stopped(netdev, skb)) + netif_stop_subqueue(netdev, queue_num); + dev_kfree_skb_any(skb); + tx_send_failed++; tx_dropped++; - ret = NETDEV_TX_BUSY; + ret = NETDEV_TX_OK; goto out; } @@ -755,7 +936,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff->index = index; tx_buff->pool_index = queue_num; tx_buff->last_frag = true; - tx_buff->used_bounce = used_bounce; memset(&tx_crq, 0, sizeof(tx_crq)); tx_crq.v1.first = IBMVNIC_CRQ_CMD; @@ -800,11 +980,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); if (dma_mapping_error(dev, tx_buff->indir_dma)) { + dev_kfree_skb_any(skb); + tx_buff->skb = NULL; if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(dev, "tx: unable to map descriptor array\n"); tx_map_failed++; tx_dropped++; - ret = NETDEV_TX_BUSY; + ret = NETDEV_TX_OK; goto out; } lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], @@ -823,15 +1005,20 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device 
*netdev) else tx_pool->consumer_index--; + dev_kfree_skb_any(skb); + tx_buff->skb = NULL; + + if (lpar_rc == H_CLOSED) + netif_stop_subqueue(netdev, queue_num); + tx_send_failed++; tx_dropped++; - ret = NETDEV_TX_BUSY; + ret = NETDEV_TX_OK; goto out; } - atomic_inc(&tx_scrq->used); - - if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { + if (atomic_inc_return(&tx_scrq->used) + >= adapter->req_tx_entries_per_subcrq) { netdev_info(netdev, "Stopping queue %d\n", queue_num); netif_stop_subqueue(netdev, queue_num); } @@ -975,7 +1162,15 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) skb = rx_buff->skb; skb_copy_to_linear_data(skb, rx_buff->data + offset, length); - skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci); + + /* VLAN Header has been stripped by the system firmware and + * needs to be inserted by the driver + */ + if (adapter->rx_vlan_header_insertion && + (flags & IBMVNIC_VLAN_STRIPPED)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + ntohs(next->rx_comp.vlan_tci)); + /* free the entry */ next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); @@ -1176,6 +1371,12 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, scrq->crq_num); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + if (rc) { + netdev_err(adapter->netdev, + "Failed to release sub-CRQ %16lx, rc = %ld\n", + scrq->crq_num, rc); + } + dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, DMA_BIDIRECTIONAL); free_pages((unsigned long)scrq->msgs, 2); @@ -1189,12 +1390,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter struct ibmvnic_sub_crq_queue *scrq; int rc; - scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC); + scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); if (!scrq) return NULL; - scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2); - memset(scrq->msgs, 0, 4 * PAGE_SIZE); + scrq->msgs = + (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); if (!scrq->msgs) { dev_warn(dev, "Couldn't allocate crq queue messages page\n"); goto zero_page_failed; @@ -1222,9 +1423,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter scrq->adapter = adapter; scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); - scrq->cur = 0; - atomic_set(&scrq->used, 0); - scrq->rx_skb_top = NULL; spin_lock_init(&scrq->lock); netdev_dbg(adapter->netdev, @@ -1249,49 +1447,40 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) int i; if (adapter->tx_scrq) { - for (i = 0; i < adapter->req_tx_queues; i++) - if (adapter->tx_scrq[i]) { + for (i = 0; i < adapter->req_tx_queues; i++) { + if (!adapter->tx_scrq[i]) + continue; + + if (adapter->tx_scrq[i]->irq) { free_irq(adapter->tx_scrq[i]->irq, adapter->tx_scrq[i]); irq_dispose_mapping(adapter->tx_scrq[i]->irq); - release_sub_crq_queue(adapter, - adapter->tx_scrq[i]); + adapter->tx_scrq[i]->irq = 0; } + + release_sub_crq_queue(adapter, adapter->tx_scrq[i]); + } + kfree(adapter->tx_scrq); adapter->tx_scrq = NULL; } if (adapter->rx_scrq) { - for (i = 0; i < adapter->req_rx_queues; i++) - if (adapter->rx_scrq[i]) { + for (i = 0; i < adapter->req_rx_queues; i++) { + if (!adapter->rx_scrq[i]) + continue; + + if (adapter->rx_scrq[i]->irq) { free_irq(adapter->rx_scrq[i]->irq, adapter->rx_scrq[i]); irq_dispose_mapping(adapter->rx_scrq[i]->irq); - release_sub_crq_queue(adapter, - adapter->rx_scrq[i]); + adapter->rx_scrq[i]->irq = 0; } - kfree(adapter->rx_scrq); - adapter->rx_scrq = NULL; - } -} - -static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) -{ - int 
i; - if (adapter->tx_scrq) { - for (i = 0; i < adapter->req_tx_queues; i++) - if (adapter->tx_scrq[i]) - release_sub_crq_queue(adapter, - adapter->tx_scrq[i]); - adapter->tx_scrq = NULL; - } + release_sub_crq_queue(adapter, adapter->rx_scrq[i]); + } - if (adapter->rx_scrq) { - for (i = 0; i < adapter->req_rx_queues; i++) - if (adapter->rx_scrq[i]) - release_sub_crq_queue(adapter, - adapter->rx_scrq[i]); + kfree(adapter->rx_scrq); adapter->rx_scrq = NULL; } } @@ -1358,7 +1547,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, continue; txbuff->data_dma[j] = 0; - txbuff->used_bounce = false; } /* if sub_crq was sent indirectly */ first = txbuff->indir_arr[0].generic.first; @@ -1369,9 +1557,8 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, } if (txbuff->last_frag) { - atomic_dec(&scrq->used); - - if (atomic_read(&scrq->used) <= + if (atomic_sub_return(next->tx_comp.num_comps, + &scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && netif_subqueue_stopped(adapter->netdev, txbuff->skb)) { @@ -1487,52 +1674,24 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); } - release_sub_crqs_no_irqs(adapter); + release_sub_crqs(adapter); return rc; } -static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) +static int init_sub_crqs(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_sub_crq_queue **allqueues; int registered_queues = 0; - union ibmvnic_crq crq; int total_queues; int more = 0; int i; - if (!retry) { - /* Sub-CRQ entries are 32 byte long */ - int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); - - if (adapter->min_tx_entries_per_subcrq > entries_page || - adapter->min_rx_add_entries_per_subcrq > entries_page) { - dev_err(dev, "Fatal, invalid entries per sub-crq\n"); - goto allqueues_failed; - } - - /* Get the minimum between the queried max and the entries - * that fit in our PAGE_SIZE - */ - adapter->req_tx_entries_per_subcrq = - adapter->max_tx_entries_per_subcrq > entries_page ? - entries_page : adapter->max_tx_entries_per_subcrq; - adapter->req_rx_add_entries_per_subcrq = - adapter->max_rx_add_entries_per_subcrq > entries_page ? 
- entries_page : adapter->max_rx_add_entries_per_subcrq; - - adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; - adapter->req_rx_queues = adapter->opt_rx_comp_queues; - adapter->req_rx_add_queues = adapter->max_rx_add_queues; - - adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; - } - total_queues = adapter->req_tx_queues + adapter->req_rx_queues; - allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC); + allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); if (!allqueues) - goto allqueues_failed; + return -1; for (i = 0; i < total_queues; i++) { allqueues[i] = init_sub_crq_queue(adapter); @@ -1570,7 +1729,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) } adapter->tx_scrq = kcalloc(adapter->req_tx_queues, - sizeof(*adapter->tx_scrq), GFP_ATOMIC); + sizeof(*adapter->tx_scrq), GFP_KERNEL); if (!adapter->tx_scrq) goto tx_failed; @@ -1580,7 +1739,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) } adapter->rx_scrq = kcalloc(adapter->req_rx_queues, - sizeof(*adapter->rx_scrq), GFP_ATOMIC); + sizeof(*adapter->rx_scrq), GFP_KERNEL); if (!adapter->rx_scrq) goto rx_failed; @@ -1589,6 +1748,51 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) adapter->rx_scrq[i]->scrq_num = i; } + kfree(allqueues); + return 0; + +rx_failed: + kfree(adapter->tx_scrq); + adapter->tx_scrq = NULL; +tx_failed: + for (i = 0; i < registered_queues; i++) + release_sub_crq_queue(adapter, allqueues[i]); + kfree(allqueues); + return -1; +} + +static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) +{ + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + + if (!retry) { + /* Sub-CRQ entries are 32 byte long */ + int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); + + if (adapter->min_tx_entries_per_subcrq > entries_page || + adapter->min_rx_add_entries_per_subcrq > entries_page) { + dev_err(dev, "Fatal, invalid entries per sub-crq\n"); + return; + } + + /* Get the minimum between the queried max and the entries + * that fit in our PAGE_SIZE + */ + adapter->req_tx_entries_per_subcrq = + adapter->max_tx_entries_per_subcrq > entries_page ? + entries_page : adapter->max_tx_entries_per_subcrq; + adapter->req_rx_add_entries_per_subcrq = + adapter->max_rx_add_entries_per_subcrq > entries_page ? 
+ entries_page : adapter->max_rx_add_entries_per_subcrq; + + adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; + adapter->req_rx_queues = adapter->opt_rx_comp_queues; + adapter->req_rx_add_queues = adapter->max_rx_add_queues; + + adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; + } + memset(&crq, 0, sizeof(crq)); crq.request_capability.first = IBMVNIC_CRQ_CMD; crq.request_capability.cmd = REQUEST_CAPABILITY; @@ -1642,20 +1846,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } - - kfree(allqueues); - - return; - -rx_failed: - kfree(adapter->tx_scrq); - adapter->tx_scrq = NULL; -tx_failed: - for (i = 0; i < registered_queues; i++) - release_sub_crq_queue(adapter, allqueues[i]); - kfree(allqueues); -allqueues_failed: - ibmvnic_remove(adapter->vdev); } static int pending_scrq(struct ibmvnic_adapter *adapter, @@ -1829,13 +2019,11 @@ static void send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; struct ibmvnic_login_buffer *login_buffer; - struct ibmvnic_inflight_cmd *inflight_cmd; struct device *dev = &adapter->vdev->dev; dma_addr_t rsp_buffer_token; dma_addr_t buffer_token; size_t rsp_buffer_size; union ibmvnic_crq crq; - unsigned long flags; size_t buffer_size; __be64 *tx_list_p; __be64 *rx_list_p; @@ -1872,11 +2060,7 @@ static void send_login(struct ibmvnic_adapter *adapter) dev_err(dev, "Couldn't map login rsp buffer\n"); goto buf_rsp_map_failed; } - inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); - if (!inflight_cmd) { - dev_err(dev, "Couldn't allocate inflight_cmd\n"); - goto inflight_alloc_failed; - } + adapter->login_buf = login_buffer; adapter->login_buf_token = buffer_token; adapter->login_buf_sz = buffer_size; @@ -1927,20 +2111,10 @@ static void send_login(struct ibmvnic_adapter *adapter) crq.login.cmd = LOGIN; crq.login.ioba = cpu_to_be32(buffer_token); crq.login.len = cpu_to_be32(buffer_size); - - memcpy(&inflight_cmd->crq, &crq, sizeof(crq)); - - spin_lock_irqsave(&adapter->inflight_lock, flags); - list_add_tail(&inflight_cmd->list, &adapter->inflight); - spin_unlock_irqrestore(&adapter->inflight_lock, flags); - ibmvnic_send_crq(adapter, &crq); return; -inflight_alloc_failed: - dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, - DMA_FROM_DEVICE); buf_rsp_map_failed: kfree(login_rsp_buffer); buf_rsp_alloc_failed: @@ -2064,6 +2238,10 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter) atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); + atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); + crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); @@ -2242,77 +2420,22 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq, kfree(error_buff); } -static void handle_dump_size_rsp(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) -{ - int len = be32_to_cpu(crq->request_dump_size_rsp.len); - struct ibmvnic_inflight_cmd *inflight_cmd; - struct device *dev = &adapter->vdev->dev; - union ibmvnic_crq newcrq; - unsigned long flags; - - /* allocate and map buffer */ - adapter->dump_data = kmalloc(len, GFP_KERNEL); - if (!adapter->dump_data) { - complete(&adapter->fw_done); - return; - } - - adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len, - DMA_FROM_DEVICE); - - if 
(dma_mapping_error(dev, adapter->dump_data_token)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(dev, "Couldn't map dump data\n"); - kfree(adapter->dump_data); - complete(&adapter->fw_done); - return; - } - - inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); - if (!inflight_cmd) { - dma_unmap_single(dev, adapter->dump_data_token, len, - DMA_FROM_DEVICE); - kfree(adapter->dump_data); - complete(&adapter->fw_done); - return; - } - - memset(&newcrq, 0, sizeof(newcrq)); - newcrq.request_dump.first = IBMVNIC_CRQ_CMD; - newcrq.request_dump.cmd = REQUEST_DUMP; - newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token); - newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size); - - memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq)); - - spin_lock_irqsave(&adapter->inflight_lock, flags); - list_add_tail(&inflight_cmd->list, &adapter->inflight); - spin_unlock_irqrestore(&adapter->inflight_lock, flags); - - ibmvnic_send_crq(adapter, &newcrq); -} - -static void handle_error_indication(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) +static void request_error_information(struct ibmvnic_adapter *adapter, + union ibmvnic_crq *err_crq) { - int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz); - struct ibmvnic_inflight_cmd *inflight_cmd; struct device *dev = &adapter->vdev->dev; + struct net_device *netdev = adapter->netdev; struct ibmvnic_error_buff *error_buff; - union ibmvnic_crq new_crq; + unsigned long timeout = msecs_to_jiffies(30000); + union ibmvnic_crq crq; unsigned long flags; - - dev_err(dev, "Firmware reports %serror id %x, cause %d\n", - crq->error_indication. - flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", - be32_to_cpu(crq->error_indication.error_id), - be16_to_cpu(crq->error_indication.error_cause)); + int rc, detail_len; error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); if (!error_buff) return; + detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz); error_buff->buff = kmalloc(detail_len, GFP_ATOMIC); if (!error_buff->buff) { kfree(error_buff); @@ -2322,43 +2445,61 @@ static void handle_error_indication(union ibmvnic_crq *crq, error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, error_buff->dma)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(dev, "Couldn't map error buffer\n"); - kfree(error_buff->buff); - kfree(error_buff); - return; - } - - inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC); - if (!inflight_cmd) { - dma_unmap_single(dev, error_buff->dma, detail_len, - DMA_FROM_DEVICE); + netdev_err(netdev, "Couldn't map error buffer\n"); kfree(error_buff->buff); kfree(error_buff); return; } error_buff->len = detail_len; - error_buff->error_id = crq->error_indication.error_id; + error_buff->error_id = err_crq->error_indication.error_id; spin_lock_irqsave(&adapter->error_list_lock, flags); list_add_tail(&error_buff->list, &adapter->errors); spin_unlock_irqrestore(&adapter->error_list_lock, flags); - memset(&new_crq, 0, sizeof(new_crq)); - new_crq.request_error_info.first = IBMVNIC_CRQ_CMD; - new_crq.request_error_info.cmd = REQUEST_ERROR_INFO; - new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); - new_crq.request_error_info.len = cpu_to_be32(detail_len); - new_crq.request_error_info.error_id = crq->error_indication.error_id; + memset(&crq, 0, sizeof(crq)); + crq.request_error_info.first = IBMVNIC_CRQ_CMD; + crq.request_error_info.cmd = REQUEST_ERROR_INFO; + crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); + 
crq.request_error_info.len = cpu_to_be32(detail_len); + crq.request_error_info.error_id = err_crq->error_indication.error_id; + + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) { + netdev_err(netdev, "failed to request error information\n"); + goto err_info_fail; + } - memcpy(&inflight_cmd->crq, &crq, sizeof(crq)); + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { + netdev_err(netdev, "timeout waiting for error information\n"); + goto err_info_fail; + } + + return; - spin_lock_irqsave(&adapter->inflight_lock, flags); - list_add_tail(&inflight_cmd->list, &adapter->inflight); - spin_unlock_irqrestore(&adapter->inflight_lock, flags); +err_info_fail: + spin_lock_irqsave(&adapter->error_list_lock, flags); + list_del(&error_buff->list); + spin_unlock_irqrestore(&adapter->error_list_lock, flags); - ibmvnic_send_crq(adapter, &new_crq); + kfree(error_buff->buff); + kfree(error_buff); +} + +static void handle_error_indication(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + + dev_err(dev, "Firmware reports %serror id %x, cause %d\n", + crq->error_indication.flags + & IBMVNIC_FATAL_ERROR ? "FATAL " : "", + be32_to_cpu(crq->error_indication.error_id), + be16_to_cpu(crq->error_indication.error_cause)); + + if (be32_to_cpu(crq->error_indication.error_id)) + request_error_information(adapter, crq); } static void handle_change_mac_rsp(union ibmvnic_crq *crq, @@ -2428,9 +2569,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, *req_value, (long int)be64_to_cpu(crq->request_capability_rsp. number), name); - release_sub_crqs_no_irqs(adapter); + release_sub_crqs(adapter); *req_value = be64_to_cpu(crq->request_capability_rsp.number); - init_sub_crqs(adapter, 1); + ibmvnic_send_req_caps(adapter, 1); return; default: dev_err(dev, "Error %d in request cap rsp\n", @@ -2473,7 +2614,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, struct device *dev = &adapter->vdev->dev; struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; struct ibmvnic_login_buffer *login = adapter->login_buf; - union ibmvnic_crq crq; int i; dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, @@ -2508,11 +2648,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, } complete(&adapter->init_done); - memset(&crq, 0, sizeof(crq)); - crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD; - crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM; - ibmvnic_send_crq(adapter, &crq); - return 0; } @@ -2619,648 +2754,138 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, adapter->max_rx_queues = be64_to_cpu(crq->query_capability.number); netdev_dbg(netdev, "max_rx_queues = %lld\n", - adapter->max_rx_queues); - break; - case MAX_RX_ADD_QUEUES: - adapter->max_rx_add_queues = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "max_rx_add_queues = %lld\n", - adapter->max_rx_add_queues); - break; - case MIN_TX_ENTRIES_PER_SUBCRQ: - adapter->min_tx_entries_per_subcrq = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", - adapter->min_tx_entries_per_subcrq); - break; - case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: - adapter->min_rx_add_entries_per_subcrq = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", - adapter->min_rx_add_entries_per_subcrq); - break; - case MAX_TX_ENTRIES_PER_SUBCRQ: - adapter->max_tx_entries_per_subcrq = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, 
"max_tx_entries_per_subcrq = %lld\n", - adapter->max_tx_entries_per_subcrq); - break; - case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: - adapter->max_rx_add_entries_per_subcrq = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", - adapter->max_rx_add_entries_per_subcrq); - break; - case TCP_IP_OFFLOAD: - adapter->tcp_ip_offload = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "tcp_ip_offload = %lld\n", - adapter->tcp_ip_offload); - break; - case PROMISC_SUPPORTED: - adapter->promisc_supported = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "promisc_supported = %lld\n", - adapter->promisc_supported); - break; - case MIN_MTU: - adapter->min_mtu = be64_to_cpu(crq->query_capability.number); - netdev->min_mtu = adapter->min_mtu - ETH_HLEN; - netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); - break; - case MAX_MTU: - adapter->max_mtu = be64_to_cpu(crq->query_capability.number); - netdev->max_mtu = adapter->max_mtu - ETH_HLEN; - netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); - break; - case MAX_MULTICAST_FILTERS: - adapter->max_multicast_filters = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "max_multicast_filters = %lld\n", - adapter->max_multicast_filters); - break; - case VLAN_HEADER_INSERTION: - adapter->vlan_header_insertion = - be64_to_cpu(crq->query_capability.number); - if (adapter->vlan_header_insertion) - netdev->features |= NETIF_F_HW_VLAN_STAG_TX; - netdev_dbg(netdev, "vlan_header_insertion = %lld\n", - adapter->vlan_header_insertion); - break; - case MAX_TX_SG_ENTRIES: - adapter->max_tx_sg_entries = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", - adapter->max_tx_sg_entries); - break; - case RX_SG_SUPPORTED: - adapter->rx_sg_supported = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "rx_sg_supported = %lld\n", - adapter->rx_sg_supported); - break; - case OPT_TX_COMP_SUB_QUEUES: - adapter->opt_tx_comp_sub_queues = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", - adapter->opt_tx_comp_sub_queues); - break; - case OPT_RX_COMP_QUEUES: - adapter->opt_rx_comp_queues = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", - adapter->opt_rx_comp_queues); - break; - case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: - adapter->opt_rx_bufadd_q_per_rx_comp_q = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", - adapter->opt_rx_bufadd_q_per_rx_comp_q); - break; - case OPT_TX_ENTRIES_PER_SUBCRQ: - adapter->opt_tx_entries_per_subcrq = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", - adapter->opt_tx_entries_per_subcrq); - break; - case OPT_RXBA_ENTRIES_PER_SUBCRQ: - adapter->opt_rxba_entries_per_subcrq = - be64_to_cpu(crq->query_capability.number); - netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", - adapter->opt_rxba_entries_per_subcrq); - break; - case TX_RX_DESC_REQ: - adapter->tx_rx_desc_req = crq->query_capability.number; - netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", - adapter->tx_rx_desc_req); - break; - - default: - netdev_err(netdev, "Got invalid cap rsp %d\n", - crq->query_capability.capability); - } - -out: - if (atomic_read(&adapter->running_cap_crqs) == 0) { - adapter->wait_capability = false; - init_sub_crqs(adapter, 0); - /* We're done querying the capabilities, initialize sub-crqs */ - } -} 
- -static void handle_control_ras_rsp(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) -{ - u8 correlator = crq->control_ras_rsp.correlator; - struct device *dev = &adapter->vdev->dev; - bool found = false; - int i; - - if (crq->control_ras_rsp.rc.code) { - dev_warn(dev, "Control ras failed rc=%d\n", - crq->control_ras_rsp.rc.code); - return; - } - - for (i = 0; i < adapter->ras_comp_num; i++) { - if (adapter->ras_comps[i].correlator == correlator) { - found = true; - break; - } - } - - if (!found) { - dev_warn(dev, "Correlator not found on control_ras_rsp\n"); - return; - } - - switch (crq->control_ras_rsp.op) { - case IBMVNIC_TRACE_LEVEL: - adapter->ras_comps[i].trace_level = crq->control_ras.level; - break; - case IBMVNIC_ERROR_LEVEL: - adapter->ras_comps[i].error_check_level = - crq->control_ras.level; - break; - case IBMVNIC_TRACE_PAUSE: - adapter->ras_comp_int[i].paused = 1; - break; - case IBMVNIC_TRACE_RESUME: - adapter->ras_comp_int[i].paused = 0; - break; - case IBMVNIC_TRACE_ON: - adapter->ras_comps[i].trace_on = 1; - break; - case IBMVNIC_TRACE_OFF: - adapter->ras_comps[i].trace_on = 0; - break; - case IBMVNIC_CHG_TRACE_BUFF_SZ: - /* trace_buff_sz is 3 bytes, stuff it into an int */ - ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0; - ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] = - crq->control_ras_rsp.trace_buff_sz[0]; - ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] = - crq->control_ras_rsp.trace_buff_sz[1]; - ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] = - crq->control_ras_rsp.trace_buff_sz[2]; - break; - default: - dev_err(dev, "invalid op %d on control_ras_rsp", - crq->control_ras_rsp.op); - } -} - -static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, - loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - struct device *dev = &adapter->vdev->dev; - struct ibmvnic_fw_trace_entry *trace; - int num = ras_comp_int->num; - union ibmvnic_crq crq; - dma_addr_t trace_tok; - - if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) - return 0; - - trace = - dma_alloc_coherent(dev, - be32_to_cpu(adapter->ras_comps[num]. 
- trace_buff_size), &trace_tok, - GFP_KERNEL); - if (!trace) { - dev_err(dev, "Couldn't alloc trace buffer\n"); - return 0; - } - - memset(&crq, 0, sizeof(crq)); - crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD; - crq.collect_fw_trace.cmd = COLLECT_FW_TRACE; - crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; - crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); - crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; - - init_completion(&adapter->fw_done); - ibmvnic_send_crq(adapter, &crq); - wait_for_completion(&adapter->fw_done); - - if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) - len = - be32_to_cpu(adapter->ras_comps[num].trace_buff_size) - - *ppos; - - copy_to_user(user_buf, &((u8 *)trace)[*ppos], len); - - dma_free_coherent(dev, - be32_to_cpu(adapter->ras_comps[num].trace_buff_size), - trace, trace_tok); - *ppos += len; - return len; -} - -static const struct file_operations trace_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = trace_read, -}; - -static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len, - loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - char buff[5]; /* 1 or 0 plus \n and \0 */ - int size; - - size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused); - - if (*ppos >= size) - return 0; - - copy_to_user(user_buf, buff, size); - *ppos += size; - return size; -} - -static ssize_t paused_write(struct file *file, const char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - union ibmvnic_crq crq; - unsigned long val; - char buff[9]; /* decimal max int plus \n and \0 */ - - copy_from_user(buff, user_buf, sizeof(buff)); - val = kstrtoul(buff, 10, NULL); - - adapter->ras_comp_int[num].paused = val ? 1 : 0; - - memset(&crq, 0, sizeof(crq)); - crq.control_ras.first = IBMVNIC_CRQ_CMD; - crq.control_ras.cmd = CONTROL_RAS; - crq.control_ras.correlator = adapter->ras_comps[num].correlator; - crq.control_ras.op = val ? 
IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME; - ibmvnic_send_crq(adapter, &crq); - - return len; -} - -static const struct file_operations paused_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = paused_read, - .write = paused_write, -}; - -static ssize_t tracing_read(struct file *file, char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - char buff[5]; /* 1 or 0 plus \n and \0 */ - int size; - - size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on); - - if (*ppos >= size) - return 0; - - copy_to_user(user_buf, buff, size); - *ppos += size; - return size; -} - -static ssize_t tracing_write(struct file *file, const char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - union ibmvnic_crq crq; - unsigned long val; - char buff[9]; /* decimal max int plus \n and \0 */ - - copy_from_user(buff, user_buf, sizeof(buff)); - val = kstrtoul(buff, 10, NULL); - - memset(&crq, 0, sizeof(crq)); - crq.control_ras.first = IBMVNIC_CRQ_CMD; - crq.control_ras.cmd = CONTROL_RAS; - crq.control_ras.correlator = adapter->ras_comps[num].correlator; - crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF; - - return len; -} - -static const struct file_operations tracing_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = tracing_read, - .write = tracing_write, -}; - -static ssize_t error_level_read(struct file *file, char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - char buff[5]; /* decimal max char plus \n and \0 */ - int size; - - size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level); - - if (*ppos >= size) - return 0; - - copy_to_user(user_buf, buff, size); - *ppos += size; - return size; -} - -static ssize_t error_level_write(struct file *file, const char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - union ibmvnic_crq crq; - unsigned long val; - char buff[9]; /* decimal max int plus \n and \0 */ - - copy_from_user(buff, user_buf, sizeof(buff)); - val = kstrtoul(buff, 10, NULL); - - if (val > 9) - val = 9; - - memset(&crq, 0, sizeof(crq)); - crq.control_ras.first = IBMVNIC_CRQ_CMD; - crq.control_ras.cmd = CONTROL_RAS; - crq.control_ras.correlator = adapter->ras_comps[num].correlator; - crq.control_ras.op = IBMVNIC_ERROR_LEVEL; - crq.control_ras.level = val; - ibmvnic_send_crq(adapter, &crq); - - return len; -} - -static const struct file_operations error_level_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = error_level_read, - .write = error_level_write, -}; - -static ssize_t trace_level_read(struct file *file, char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - char buff[5]; /* decimal max char plus \n and \0 */ - int size; - - size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level); - if (*ppos >= size) - return 0; - - 
copy_to_user(user_buf, buff, size); - *ppos += size; - return size; -} - -static ssize_t trace_level_write(struct file *file, const char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - union ibmvnic_crq crq; - unsigned long val; - char buff[9]; /* decimal max int plus \n and \0 */ - - copy_from_user(buff, user_buf, sizeof(buff)); - val = kstrtoul(buff, 10, NULL); - if (val > 9) - val = 9; - - memset(&crq, 0, sizeof(crq)); - crq.control_ras.first = IBMVNIC_CRQ_CMD; - crq.control_ras.cmd = CONTROL_RAS; - crq.control_ras.correlator = - adapter->ras_comps[ras_comp_int->num].correlator; - crq.control_ras.op = IBMVNIC_TRACE_LEVEL; - crq.control_ras.level = val; - ibmvnic_send_crq(adapter, &crq); - - return len; -} - -static const struct file_operations trace_level_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = trace_level_read, - .write = trace_level_write, -}; - -static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf, - size_t len, loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - int num = ras_comp_int->num; - char buff[9]; /* decimal max int plus \n and \0 */ - int size; - - size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size); - if (*ppos >= size) - return 0; - - copy_to_user(user_buf, buff, size); - *ppos += size; - return size; -} - -static ssize_t trace_buff_size_write(struct file *file, - const char __user *user_buf, size_t len, - loff_t *ppos) -{ - struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data; - struct ibmvnic_adapter *adapter = ras_comp_int->adapter; - union ibmvnic_crq crq; - unsigned long val; - char buff[9]; /* decimal max int plus \n and \0 */ - - copy_from_user(buff, user_buf, sizeof(buff)); - val = kstrtoul(buff, 10, NULL); - - memset(&crq, 0, sizeof(crq)); - crq.control_ras.first = IBMVNIC_CRQ_CMD; - crq.control_ras.cmd = CONTROL_RAS; - crq.control_ras.correlator = - adapter->ras_comps[ras_comp_int->num].correlator; - crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ; - /* trace_buff_sz is 3 bytes, stuff an int into it */ - crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5]; - crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6]; - crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7]; - ibmvnic_send_crq(adapter, &crq); - - return len; -} - -static const struct file_operations trace_size_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = trace_buff_size_read, - .write = trace_buff_size_write, -}; - -static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) -{ - struct device *dev = &adapter->vdev->dev; - struct dentry *dir_ent; - struct dentry *ent; - int i; - - debugfs_remove_recursive(adapter->ras_comps_ent); - - adapter->ras_comps_ent = debugfs_create_dir("ras_comps", - adapter->debugfs_dir); - if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) { - dev_info(dev, "debugfs create ras_comps dir failed\n"); - return; - } - - for (i = 0; i < adapter->ras_comp_num; i++) { - dir_ent = debugfs_create_dir(adapter->ras_comps[i].name, - adapter->ras_comps_ent); - if (!dir_ent || IS_ERR(dir_ent)) { - dev_info(dev, "debugfs create %s dir failed\n", - adapter->ras_comps[i].name); - continue; - } - - adapter->ras_comp_int[i].adapter = adapter; - adapter->ras_comp_int[i].num = i; - adapter->ras_comp_int[i].desc_blob.data = 
- &adapter->ras_comps[i].description; - adapter->ras_comp_int[i].desc_blob.size = - sizeof(adapter->ras_comps[i].description); + adapter->max_rx_queues); + break; + case MAX_RX_ADD_QUEUES: + adapter->max_rx_add_queues = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_add_queues = %lld\n", + adapter->max_rx_add_queues); + break; + case MIN_TX_ENTRIES_PER_SUBCRQ: + adapter->min_tx_entries_per_subcrq = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", + adapter->min_tx_entries_per_subcrq); + break; + case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: + adapter->min_rx_add_entries_per_subcrq = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", + adapter->min_rx_add_entries_per_subcrq); + break; + case MAX_TX_ENTRIES_PER_SUBCRQ: + adapter->max_tx_entries_per_subcrq = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", + adapter->max_tx_entries_per_subcrq); + break; + case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: + adapter->max_rx_add_entries_per_subcrq = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", + adapter->max_rx_add_entries_per_subcrq); + break; + case TCP_IP_OFFLOAD: + adapter->tcp_ip_offload = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "tcp_ip_offload = %lld\n", + adapter->tcp_ip_offload); + break; + case PROMISC_SUPPORTED: + adapter->promisc_supported = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "promisc_supported = %lld\n", + adapter->promisc_supported); + break; + case MIN_MTU: + adapter->min_mtu = be64_to_cpu(crq->query_capability.number); + netdev->min_mtu = adapter->min_mtu - ETH_HLEN; + netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); + break; + case MAX_MTU: + adapter->max_mtu = be64_to_cpu(crq->query_capability.number); + netdev->max_mtu = adapter->max_mtu - ETH_HLEN; + netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); + break; + case MAX_MULTICAST_FILTERS: + adapter->max_multicast_filters = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_multicast_filters = %lld\n", + adapter->max_multicast_filters); + break; + case VLAN_HEADER_INSERTION: + adapter->vlan_header_insertion = + be64_to_cpu(crq->query_capability.number); + if (adapter->vlan_header_insertion) + netdev->features |= NETIF_F_HW_VLAN_STAG_TX; + netdev_dbg(netdev, "vlan_header_insertion = %lld\n", + adapter->vlan_header_insertion); + break; + case RX_VLAN_HEADER_INSERTION: + adapter->rx_vlan_header_insertion = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", + adapter->rx_vlan_header_insertion); + break; + case MAX_TX_SG_ENTRIES: + adapter->max_tx_sg_entries = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", + adapter->max_tx_sg_entries); + break; + case RX_SG_SUPPORTED: + adapter->rx_sg_supported = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "rx_sg_supported = %lld\n", + adapter->rx_sg_supported); + break; + case OPT_TX_COMP_SUB_QUEUES: + adapter->opt_tx_comp_sub_queues = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", + adapter->opt_tx_comp_sub_queues); + break; + case OPT_RX_COMP_QUEUES: + adapter->opt_rx_comp_queues = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", + 
adapter->opt_rx_comp_queues); + break; + case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: + adapter->opt_rx_bufadd_q_per_rx_comp_q = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", + adapter->opt_rx_bufadd_q_per_rx_comp_q); + break; + case OPT_TX_ENTRIES_PER_SUBCRQ: + adapter->opt_tx_entries_per_subcrq = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", + adapter->opt_tx_entries_per_subcrq); + break; + case OPT_RXBA_ENTRIES_PER_SUBCRQ: + adapter->opt_rxba_entries_per_subcrq = + be64_to_cpu(crq->query_capability.number); + netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", + adapter->opt_rxba_entries_per_subcrq); + break; + case TX_RX_DESC_REQ: + adapter->tx_rx_desc_req = crq->query_capability.number; + netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", + adapter->tx_rx_desc_req); + break; - /* Don't need to remember the dentry's because the debugfs dir - * gets removed recursively - */ - ent = debugfs_create_blob("description", S_IRUGO, dir_ent, - &adapter->ras_comp_int[i].desc_blob); - ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR, - dir_ent, &adapter->ras_comp_int[i], - &trace_size_ops); - ent = debugfs_create_file("trace_level", - S_IRUGO | - (adapter->ras_comps[i].trace_level != - 0xFF ? S_IWUSR : 0), - dir_ent, &adapter->ras_comp_int[i], - &trace_level_ops); - ent = debugfs_create_file("error_level", - S_IRUGO | - (adapter-> - ras_comps[i].error_check_level != - 0xFF ? S_IWUSR : 0), - dir_ent, &adapter->ras_comp_int[i], - &trace_level_ops); - ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR, - dir_ent, &adapter->ras_comp_int[i], - &tracing_ops); - ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR, - dir_ent, &adapter->ras_comp_int[i], - &paused_ops); - ent = debugfs_create_file("trace", S_IRUGO, dir_ent, - &adapter->ras_comp_int[i], - &trace_ops); - } -} - -static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq, - struct ibmvnic_adapter *adapter) -{ - int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component); - struct device *dev = &adapter->vdev->dev; - union ibmvnic_crq newcrq; - - adapter->ras_comps = dma_alloc_coherent(dev, len, - &adapter->ras_comps_tok, - GFP_KERNEL); - if (!adapter->ras_comps) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(dev, "Couldn't alloc fw comps buffer\n"); - return; + default: + netdev_err(netdev, "Got invalid cap rsp %d\n", + crq->query_capability.capability); } - adapter->ras_comp_int = kmalloc(adapter->ras_comp_num * - sizeof(struct ibmvnic_fw_comp_internal), - GFP_KERNEL); - if (!adapter->ras_comp_int) - dma_free_coherent(dev, len, adapter->ras_comps, - adapter->ras_comps_tok); - - memset(&newcrq, 0, sizeof(newcrq)); - newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD; - newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS; - newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok); - newcrq.request_ras_comps.len = cpu_to_be32(len); - ibmvnic_send_crq(adapter, &newcrq); -} - -static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) -{ - struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1; - struct device *dev = &adapter->vdev->dev; - struct ibmvnic_error_buff *error_buff, *tmp2; - unsigned long flags; - unsigned long flags2; - - spin_lock_irqsave(&adapter->inflight_lock, flags); - list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) { - switch (inflight_cmd->crq.generic.cmd) { - case LOGIN: - dma_unmap_single(dev, adapter->login_buf_token, - 
adapter->login_buf_sz, - DMA_BIDIRECTIONAL); - dma_unmap_single(dev, adapter->login_rsp_buf_token, - adapter->login_rsp_buf_sz, - DMA_BIDIRECTIONAL); - kfree(adapter->login_rsp_buf); - kfree(adapter->login_buf); - break; - case REQUEST_DUMP: - complete(&adapter->fw_done); - break; - case REQUEST_ERROR_INFO: - spin_lock_irqsave(&adapter->error_list_lock, flags2); - list_for_each_entry_safe(error_buff, tmp2, - &adapter->errors, list) { - dma_unmap_single(dev, error_buff->dma, - error_buff->len, - DMA_FROM_DEVICE); - kfree(error_buff->buff); - list_del(&error_buff->list); - kfree(error_buff); - } - spin_unlock_irqrestore(&adapter->error_list_lock, - flags2); - break; - } - list_del(&inflight_cmd->list); - kfree(inflight_cmd); +out: + if (atomic_read(&adapter->running_cap_crqs) == 0) { + adapter->wait_capability = false; + ibmvnic_send_req_caps(adapter, 0); } - spin_unlock_irqrestore(&adapter->inflight_lock, flags); } static void ibmvnic_xport_event(struct work_struct *work) @@ -3271,7 +2896,6 @@ static void ibmvnic_xport_event(struct work_struct *work) struct device *dev = &adapter->vdev->dev; long rc; - ibmvnic_free_inflight(adapter); release_sub_crqs(adapter); if (adapter->migrated) { rc = ibmvnic_reenable_crq_queue(adapter); @@ -3290,11 +2914,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, struct ibmvnic_generic_crq *gen_crq = &crq->generic; struct net_device *netdev = adapter->netdev; struct device *dev = &adapter->vdev->dev; + u64 *u64_crq = (u64 *)crq; long rc; netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", - ((unsigned long int *)crq)[0], - ((unsigned long int *)crq)[1]); + (unsigned long int)cpu_to_be64(u64_crq[0]), + (unsigned long int)cpu_to_be64(u64_crq[1])); switch (gen_crq->first) { case IBMVNIC_CRQ_INIT_RSP: switch (gen_crq->cmd) { @@ -3374,9 +2999,14 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, handle_login_rsp(crq, adapter); break; case LOGICAL_LINK_STATE_RSP: - netdev_dbg(netdev, "Got Logical Link State Response\n"); + netdev_dbg(netdev, + "Got Logical Link State Response, state: %d rc: %d\n", + crq->logical_link_state_rsp.link_state, + crq->logical_link_state_rsp.rc.code); adapter->logical_link_state = crq->logical_link_state_rsp.link_state; + adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; + complete(&adapter->init_done); break; case LINK_STATE_INDICATION: netdev_dbg(netdev, "Got Logical Link State Indication\n"); @@ -3401,14 +3031,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, netdev_dbg(netdev, "Got Statistics Response\n"); complete(&adapter->stats_done); break; - case REQUEST_DUMP_SIZE_RSP: - netdev_dbg(netdev, "Got Request Dump Size Response\n"); - handle_dump_size_rsp(crq, adapter); - break; - case REQUEST_DUMP_RSP: - netdev_dbg(netdev, "Got Request Dump Response\n"); - complete(&adapter->fw_done); - break; case QUERY_IP_OFFLOAD_RSP: netdev_dbg(netdev, "Got Query IP offload Response\n"); handle_query_ip_offload_rsp(adapter); @@ -3421,26 +3043,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); - /* We're done with the queries, perform the login */ - send_login(adapter); - break; - case REQUEST_RAS_COMP_NUM_RSP: - netdev_dbg(netdev, "Got Request RAS Comp Num Response\n"); - if (crq->request_ras_comp_num_rsp.rc.code == 10) { - netdev_dbg(netdev, "Request RAS Comp Num not supported\n"); - break; - } - adapter->ras_comp_num = - be32_to_cpu(crq->request_ras_comp_num_rsp.num_components); - 
handle_request_ras_comp_num_rsp(crq, adapter); - break; - case REQUEST_RAS_COMPS_RSP: - netdev_dbg(netdev, "Got Request RAS Comps Response\n"); - handle_request_ras_comps_rsp(crq, adapter); - break; - case CONTROL_RAS_RSP: - netdev_dbg(netdev, "Got Control RAS Response\n"); - handle_control_ras_rsp(crq, adapter); + complete(&adapter->init_done); break; case COLLECT_FW_TRACE_RSP: netdev_dbg(netdev, "Got Collect firmware trace Response\n"); @@ -3455,12 +3058,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, static irqreturn_t ibmvnic_interrupt(int irq, void *instance) { struct ibmvnic_adapter *adapter = instance; - unsigned long flags; - spin_lock_irqsave(&adapter->crq.lock, flags); - vio_disable_interrupts(adapter->vdev); tasklet_schedule(&adapter->tasklet); - spin_unlock_irqrestore(&adapter->crq.lock, flags); return IRQ_HANDLED; } @@ -3468,32 +3067,23 @@ static void ibmvnic_tasklet(void *data) { struct ibmvnic_adapter *adapter = data; struct ibmvnic_crq_queue *queue = &adapter->crq; - struct vio_dev *vdev = adapter->vdev; union ibmvnic_crq *crq; unsigned long flags; bool done = false; spin_lock_irqsave(&queue->lock, flags); - vio_disable_interrupts(vdev); while (!done) { /* Pull all the valid messages off the CRQ */ while ((crq = ibmvnic_next_crq(adapter)) != NULL) { ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } - vio_enable_interrupts(vdev); - crq = ibmvnic_next_crq(adapter); - if (crq) { - vio_disable_interrupts(vdev); - ibmvnic_handle_crq(crq, adapter); - crq->generic.first = 0; - } else { - /* remain in tasklet until all - * capabilities responses are received - */ - if (!adapter->wait_capability) - done = true; - } + + /* remain in tasklet until all + * capabilities responses are received + */ + if (!adapter->wait_capability) + done = true; } /* if capabilities CRQ's were sent in this tasklet, the following * tasklet must wait until all responses are received @@ -3547,12 +3137,15 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) return rc; } -static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) +static void release_crq_queue(struct ibmvnic_adapter *adapter) { struct ibmvnic_crq_queue *crq = &adapter->crq; struct vio_dev *vdev = adapter->vdev; long rc; + if (!crq->msgs) + return; + netdev_dbg(adapter->netdev, "Releasing CRQ\n"); free_irq(vdev->irq, adapter); tasklet_kill(&adapter->tasklet); @@ -3563,15 +3156,19 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); + crq->msgs = NULL; } -static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) +static int init_crq_queue(struct ibmvnic_adapter *adapter) { struct ibmvnic_crq_queue *crq = &adapter->crq; struct device *dev = &adapter->vdev->dev; struct vio_dev *vdev = adapter->vdev; int rc, retrc = -ENOMEM; + if (crq->msgs) + return 0; + crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); /* Should we allocate more than one page? 
*/ @@ -3633,48 +3230,10 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); map_failed: free_page((unsigned long)crq->msgs); + crq->msgs = NULL; return retrc; } -/* debugfs for dump */ -static int ibmvnic_dump_show(struct seq_file *seq, void *v) -{ - struct net_device *netdev = seq->private; - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - struct device *dev = &adapter->vdev->dev; - union ibmvnic_crq crq; - - memset(&crq, 0, sizeof(crq)); - crq.request_dump_size.first = IBMVNIC_CRQ_CMD; - crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; - - init_completion(&adapter->fw_done); - ibmvnic_send_crq(adapter, &crq); - wait_for_completion(&adapter->fw_done); - - seq_write(seq, adapter->dump_data, adapter->dump_data_size); - - dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size, - DMA_BIDIRECTIONAL); - - kfree(adapter->dump_data); - - return 0; -} - -static int ibmvnic_dump_open(struct inode *inode, struct file *file) -{ - return single_open(file, ibmvnic_dump_show, inode->i_private); -} - -static const struct file_operations ibmvnic_dump_ops = { - .owner = THIS_MODULE, - .open = ibmvnic_dump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static void handle_crq_init_rsp(struct work_struct *work) { struct ibmvnic_adapter *adapter = container_of(work, @@ -3702,26 +3261,6 @@ static void handle_crq_init_rsp(struct work_struct *work) goto task_failed; } - do { - if (adapter->renegotiate) { - adapter->renegotiate = false; - release_sub_crqs_no_irqs(adapter); - - reinit_completion(&adapter->init_done); - send_cap_queries(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, - timeout)) { - dev_err(dev, "Passive init timeout\n"); - goto task_failed; - } - } - } while (adapter->renegotiate); - rc = init_sub_crq_irqs(adapter); - - if (rc) - goto task_failed; - - netdev->real_num_tx_queues = adapter->req_tx_queues; netdev->mtu = adapter->req_mtu - ETH_HLEN; if (adapter->failover) { @@ -3753,14 +3292,40 @@ static void handle_crq_init_rsp(struct work_struct *work) dev_err(dev, "Passive initialization was not successful\n"); } -static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) +static int ibmvnic_init(struct ibmvnic_adapter *adapter) { + struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); + int rc; + + rc = init_crq_queue(adapter); + if (rc) { + dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); + return rc; + } + + init_completion(&adapter->init_done); + ibmvnic_send_crq_init(adapter); + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { + dev_err(dev, "Initialization sequence timed out\n"); + release_crq_queue(adapter); + return -1; + } + + rc = init_sub_crqs(adapter); + if (rc) { + dev_err(dev, "Initialization of sub crqs failed\n"); + release_crq_queue(adapter); + } + + return rc; +} + +static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) +{ struct ibmvnic_adapter *adapter; struct net_device *netdev; unsigned char *mac_addr_p; - struct dentry *ent; - char buf[17]; /* debugfs name buf */ int rc; dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", @@ -3798,91 +3363,27 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) spin_lock_init(&adapter->stats_lock); - rc = ibmvnic_init_crq_queue(adapter); - if (rc) { - dev_err(&dev->dev, "Couldn't initialize crq. 
rc=%d\n", rc); - goto free_netdev; - } - INIT_LIST_HEAD(&adapter->errors); - INIT_LIST_HEAD(&adapter->inflight); spin_lock_init(&adapter->error_list_lock); - spin_lock_init(&adapter->inflight_lock); - - adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats, - sizeof(struct ibmvnic_statistics), - DMA_FROM_DEVICE); - if (dma_mapping_error(&dev->dev, adapter->stats_token)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(&dev->dev, "Couldn't map stats buffer\n"); - rc = -ENOMEM; - goto free_crq; - } - - snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address); - ent = debugfs_create_dir(buf, NULL); - if (!ent || IS_ERR(ent)) { - dev_info(&dev->dev, "debugfs create directory failed\n"); - adapter->debugfs_dir = NULL; - } else { - adapter->debugfs_dir = ent; - ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir, - netdev, &ibmvnic_dump_ops); - if (!ent || IS_ERR(ent)) { - dev_info(&dev->dev, - "debugfs create dump file failed\n"); - adapter->debugfs_dump = NULL; - } else { - adapter->debugfs_dump = ent; - } - } - - init_completion(&adapter->init_done); - ibmvnic_send_crq_init(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) - return 0; - - do { - if (adapter->renegotiate) { - adapter->renegotiate = false; - release_sub_crqs_no_irqs(adapter); - reinit_completion(&adapter->init_done); - send_cap_queries(adapter); - if (!wait_for_completion_timeout(&adapter->init_done, - timeout)) - return 0; - } - } while (adapter->renegotiate); - - rc = init_sub_crq_irqs(adapter); + rc = ibmvnic_init(adapter); if (rc) { - dev_err(&dev->dev, "failed to initialize sub crq irqs\n"); - goto free_debugfs; + free_netdev(netdev); + return rc; } - netdev->real_num_tx_queues = adapter->req_tx_queues; netdev->mtu = adapter->req_mtu - ETH_HLEN; + adapter->is_closed = false; rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - goto free_sub_crqs; + free_netdev(netdev); + return rc; } dev_info(&dev->dev, "ibmvnic registered\n"); return 0; - -free_sub_crqs: - release_sub_crqs(adapter); -free_debugfs: - if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) - debugfs_remove_recursive(adapter->debugfs_dir); -free_crq: - ibmvnic_release_crq_queue(adapter); -free_netdev: - free_netdev(netdev); - return rc; } static int ibmvnic_remove(struct vio_dev *dev) @@ -3892,23 +3393,9 @@ static int ibmvnic_remove(struct vio_dev *dev) unregister_netdev(netdev); + release_resources(adapter); release_sub_crqs(adapter); - - ibmvnic_release_crq_queue(adapter); - - if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) - debugfs_remove_recursive(adapter->debugfs_dir); - - dma_unmap_single(&dev->dev, adapter->stats_token, - sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); - - if (adapter->ras_comps) - dma_free_coherent(&dev->dev, - adapter->ras_comp_num * - sizeof(struct ibmvnic_fw_component), - adapter->ras_comps, adapter->ras_comps_tok); - - kfree(adapter->ras_comp_int); + release_crq_queue(adapter); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); @@ -3933,7 +3420,6 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) adapter = netdev_priv(netdev); ret += PAGE_SIZE; /* the crq message queue */ - ret += adapter->bounce_buffer_size; ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 
1993b42666f73d659773b6b88bcd8e8552ac97b7..a69979f6f19d14043585036afad0564321711ac2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -518,8 +518,8 @@ struct ibmvnic_change_mac_addr { u8 first; u8 cmd; u8 mac_addr[6]; - struct ibmvnic_rc rc; u8 reserved[4]; + struct ibmvnic_rc rc; } __packed __aligned(8); struct ibmvnic_multicast_ctrl { @@ -733,6 +733,7 @@ enum ibmvnic_capabilities { REQ_MTU = 21, MAX_MULTICAST_FILTERS = 22, VLAN_HEADER_INSERTION = 23, + RX_VLAN_HEADER_INSERTION = 24, MAX_TX_SG_ENTRIES = 25, RX_SG_SUPPORTED = 26, RX_SG_REQUESTED = 27, @@ -772,20 +773,10 @@ enum ibmvnic_commands { ERROR_INDICATION = 0x08, REQUEST_ERROR_INFO = 0x09, REQUEST_ERROR_RSP = 0x89, - REQUEST_DUMP_SIZE = 0x0A, - REQUEST_DUMP_SIZE_RSP = 0x8A, - REQUEST_DUMP = 0x0B, - REQUEST_DUMP_RSP = 0x8B, LOGICAL_LINK_STATE = 0x0C, LOGICAL_LINK_STATE_RSP = 0x8C, REQUEST_STATISTICS = 0x0D, REQUEST_STATISTICS_RSP = 0x8D, - REQUEST_RAS_COMP_NUM = 0x0E, - REQUEST_RAS_COMP_NUM_RSP = 0x8E, - REQUEST_RAS_COMPS = 0x0F, - REQUEST_RAS_COMPS_RSP = 0x8F, - CONTROL_RAS = 0x10, - CONTROL_RAS_RSP = 0x90, COLLECT_FW_TRACE = 0x11, COLLECT_FW_TRACE_RSP = 0x91, LINK_STATE_INDICATION = 0x12, @@ -806,8 +797,6 @@ enum ibmvnic_commands { ACL_CHANGE_INDICATION = 0x1A, ACL_QUERY = 0x1B, ACL_QUERY_RSP = 0x9B, - REQUEST_DEBUG_STATS = 0x1C, - REQUEST_DEBUG_STATS_RSP = 0x9C, QUERY_MAP = 0x1D, QUERY_MAP_RSP = 0x9D, REQUEST_MAP = 0x1E, @@ -880,7 +869,6 @@ struct ibmvnic_tx_buff { int index; int pool_index; bool last_frag; - bool used_bounce; union sub_crq indir_arr[6]; u8 hdr_data[140]; dma_addr_t indir_dma; @@ -925,18 +913,6 @@ struct ibmvnic_error_buff { __be32 error_id; }; -struct ibmvnic_fw_comp_internal { - struct ibmvnic_adapter *adapter; - int num; - struct debugfs_blob_wrapper desc_blob; - int paused; -}; - -struct ibmvnic_inflight_cmd { - union ibmvnic_crq crq; - struct list_head list; -}; - struct ibmvnic_adapter { struct vio_dev *vdev; struct net_device *netdev; @@ -948,12 +924,8 @@ struct ibmvnic_adapter { dma_addr_t ip_offload_ctrl_tok; bool migrated; u32 msg_enable; - void *bounce_buffer; - int bounce_buffer_size; - dma_addr_t bounce_buffer_dma; /* Statistics */ - struct net_device_stats net_stats; struct ibmvnic_statistics stats; dma_addr_t stats_token; struct completion stats_done; @@ -992,26 +964,12 @@ struct ibmvnic_adapter { struct ibmvnic_tx_pool *tx_pool; bool closing; struct completion init_done; + int init_done_rc; struct list_head errors; spinlock_t error_list_lock; - /* debugfs */ - struct dentry *debugfs_dir; - struct dentry *debugfs_dump; struct completion fw_done; - char *dump_data; - dma_addr_t dump_data_token; - int dump_data_size; - int ras_comp_num; - struct ibmvnic_fw_component *ras_comps; - struct ibmvnic_fw_comp_internal *ras_comp_int; - dma_addr_t ras_comps_tok; - struct dentry *ras_comps_ent; - - /* in-flight commands that allocate and/or map memory*/ - struct list_head inflight; - spinlock_t inflight_lock; /* partner capabilities */ u64 min_tx_queues; @@ -1037,6 +995,7 @@ struct ibmvnic_adapter { u64 req_mtu; u64 max_multicast_filters; u64 vlan_header_insertion; + u64 rx_vlan_header_insertion; u64 max_tx_sg_entries; u64 rx_sg_supported; u64 rx_sg_requested; @@ -1052,4 +1011,5 @@ struct ibmvnic_adapter { struct work_struct ibmvnic_xport; struct tasklet_struct tasklet; bool failover; + bool is_closed; }; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 
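The header hunk above swaps rc and reserved[4] in ibmvnic_change_mac_addr so the struct matches the byte layout the firmware actually uses; with __packed wire structs like these, a compile-time layout check catches this class of bug before it ships. A hedged sketch — the field sizes and offsets below are assumptions chosen to fill a 16-byte CRQ entry, not the driver's real ibmvnic_rc definition:

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

struct change_mac_addr {
	uint8_t first;
	uint8_t cmd;
	uint8_t mac_addr[6];
	uint8_t reserved[4];
	uint8_t rc[4];		/* return code must sit in the last bytes */
} __attribute__((packed, aligned(8)));

/* A reordered field changes an offset at build time, so the compile
 * fails instead of the firmware misparsing the command at runtime.
 */
static_assert(sizeof(struct change_mac_addr) == 16, "wire size");
static_assert(offsetof(struct change_mac_addr, rc) == 12, "rc placement");

int main(void) { return 0; }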
1349b45f014dd1d5ce29fa9a9f38189bca2cbe3d..1542a2158e962d66915d6330e6d94fb215735b5c 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -235,17 +235,6 @@ config I40E_DCB If unsure, say N. -config I40E_FCOE - bool "Fibre Channel over Ethernet (FCoE)" - default n - depends on I40E && DCB && FCOE - ---help--- - Say Y here if you want to use Fibre Channel over Ethernet (FCoE) - in the driver. This will create new netdev for exclusive FCoE - use with XL710 FCoE offloads enabled. - - If unsure, say N. - config I40EVF tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" depends on PCI_MSI diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 975eeb885ca2b52bb6a3f28e2b95d140cdf47777..ec8aa4562cc90a90dff844872278722b24daec3c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -103,104 +103,104 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) -static int e1000_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int e1000_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 supported, advertising; if (hw->media_type == e1000_media_type_copper) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full| - SUPPORTED_Autoneg | - SUPPORTED_TP); - ecmd->advertising = ADVERTISED_TP; + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; if (hw->autoneg == 1) { - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; /* the e1000 autoneg seems to match ethtool nicely */ - ecmd->advertising |= hw->autoneg_advertised; + advertising |= hw->autoneg_advertised; } - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy_addr; - - if (hw->mac_type == e1000_82543) - ecmd->transceiver = XCVR_EXTERNAL; - else - ecmd->transceiver = XCVR_INTERNAL; - + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg); + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg); + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); - ecmd->port = PORT_FIBRE; - - if (hw->mac_type >= e1000_82545) - ecmd->transceiver = XCVR_INTERNAL; - else - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.port = PORT_FIBRE; } if (er32(STATUS) & E1000_STATUS_LU) { e1000_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); - ethtool_cmd_speed_set(ecmd, adapter->link_speed); + cmd->base.speed = adapter->link_speed; /* unfortunately FULL_DUPLEX != DUPLEX_FULL * and HALF_DUPLEX != DUPLEX_HALF */ if (adapter->link_duplex == FULL_DUPLEX) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + 
cmd->base.duplex = DUPLEX_UNKNOWN; } - ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; /* MDI-X => 1; MDI => 0 */ if ((hw->media_type == e1000_media_type_copper) && netif_carrier_ok(netdev)) - ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? ETH_TP_MDI_X : ETH_TP_MDI); else - ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; if (hw->mdix == AUTO_ALL_MODES) - ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; else - ecmd->eth_tp_mdix_ctrl = hw->mdix; + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int e1000_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); /* MDI setting is only allowed when autoneg enabled because * some hardware doesn't allow MDI setting when speed or * duplex is forced. */ - if (ecmd->eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl) { if (hw->media_type != e1000_media_type_copper) return -EOPNOTSUPP; - if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && - (ecmd->autoneg != AUTONEG_ENABLE)) { + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); return -EINVAL; } @@ -209,32 +209,31 @@ static int e1000_set_settings(struct net_device *netdev, while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->autoneg = 1; if (hw->media_type == e1000_media_type_fiber) hw->autoneg_advertised = ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg; + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; else - hw->autoneg_advertised = ecmd->advertising | + hw->autoneg_advertised = advertising | ADVERTISED_TP | ADVERTISED_Autoneg; - ecmd->advertising = hw->autoneg_advertised; } else { - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; /* calling this overrides forced MDI setting */ - if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { clear_bit(__E1000_RESETTING, &adapter->flags); return -EINVAL; } } /* MDI-X => 2; MDI => 1; Auto => 3 */ - if (ecmd->eth_tp_mdix_ctrl) { - if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) hw->mdix = AUTO_ALL_MODES; else - hw->mdix = ecmd->eth_tp_mdix_ctrl; + hw->mdix = cmd->base.eth_tp_mdix_ctrl; } /* reset the link */ @@ -1875,8 +1874,6 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, } static const struct ethtool_ops e1000_ethtool_ops = { - .get_settings = e1000_get_settings, - .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, .get_regs_len = e1000_get_regs_len, .get_regs = e1000_get_regs, @@ -1901,6 +1898,8 @@ static 
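The e1000 conversion above follows the standard recipe for moving a driver off the deprecated get_settings/set_settings pair: compute the legacy SUPPORTED_*/ADVERTISED_* masks exactly as before, move the scalar fields to cmd->base, then translate the masks into link-mode bitmaps with the ethtool helpers. A condensed sketch of the get side — the conversion helpers are the real kernel API; the hardware values are placeholders:

static int foo_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	/* 1. Build legacy u32 masks as the old get_settings did. */
	supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE |
		    SUPPORTED_Autoneg;
	advertising = ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE |
		      ADVERTISED_Autoneg;

	/* 2. Scalars move from ethtool_cmd fields to cmd->base. */
	cmd->base.port = PORT_FIBRE;
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* 3. Convert the legacy masks into link-mode bitmaps. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

The set side runs the conversion in reverse with ethtool_convert_link_mode_to_legacy_u32(), and the old .get_settings/.set_settings pointers are dropped from ethtool_ops in favor of .get_link_ksettings/.set_link_ksettings, as the e1000 hunks below do.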
const struct ethtool_ops e1000_ethtool_ops = { .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, }; void e1000_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 93fc6c67306bf507dc65579f45ec44917df1d096..bd8b05fe82581f7af25bf2d39a88d6f5e56089f8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -131,7 +131,6 @@ static void e1000_watchdog(struct work_struct *work); static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -static struct net_device_stats *e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); static irqreturn_t e1000_intr(int irq, void *data); @@ -846,7 +845,6 @@ static const struct net_device_ops e1000_netdev_ops = { .ndo_open = e1000_open, .ndo_stop = e1000_close, .ndo_start_xmit = e1000_xmit_frame, - .ndo_get_stats = e1000_get_stats, .ndo_set_rx_mode = e1000_set_rx_mode, .ndo_set_mac_address = e1000_set_mac, .ndo_tx_timeout = e1000_tx_timeout, @@ -3529,19 +3527,6 @@ static void e1000_reset_task(struct work_struct *work) e1000_reinit_locked(adapter); } -/** - * e1000_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the watchdog. - **/ -static struct net_device_stats *e1000_get_stats(struct net_device *netdev) -{ - /* only return the current stats */ - return &netdev->stats; -} - /** * e1000_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index a29b12e80855b1f13b7f6c86af460dc2e661c5e6..c7c994eb410e6a00e91095e83c9762451d78430d 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -135,7 +135,8 @@ enum e1000_boards { board_pchlan, board_pch2lan, board_pch_lpt, - board_pch_spt + board_pch_spt, + board_pch_cnp }; struct e1000_ps_page { @@ -378,18 +379,22 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) * bits to count nanoseconds leaving the rest for fractional nonseconds. 
*/ -#define INCVALUE_96MHz 125 -#define INCVALUE_SHIFT_96MHz 17 -#define INCPERIOD_SHIFT_96MHz 2 -#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz) +#define INCVALUE_96MHZ 125 +#define INCVALUE_SHIFT_96MHZ 17 +#define INCPERIOD_SHIFT_96MHZ 2 +#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ) -#define INCVALUE_25MHz 40 -#define INCVALUE_SHIFT_25MHz 18 -#define INCPERIOD_25MHz 1 +#define INCVALUE_25MHZ 40 +#define INCVALUE_SHIFT_25MHZ 18 +#define INCPERIOD_25MHZ 1 -#define INCVALUE_24MHz 125 -#define INCVALUE_SHIFT_24MHz 14 -#define INCPERIOD_24MHz 3 +#define INCVALUE_24MHZ 125 +#define INCVALUE_SHIFT_24MHZ 14 +#define INCPERIOD_24MHZ 3 + +#define INCVALUE_38400KHZ 26 +#define INCVALUE_SHIFT_38400KHZ 19 +#define INCPERIOD_38400KHZ 1 /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. This is dealt with @@ -515,6 +520,7 @@ extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_pch_spt_info; +extern const struct e1000_info e1000_pch_cnp_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 7aff68a4a4df527d26c50da69d0cad2dd91c2767..e23dbd9190d6b4ff47cd71bd30849676b2689acc 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -117,55 +117,52 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) -static int e1000_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int e1000_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 speed; + u32 speed, supported, advertising; if (hw->phy.media_type == e1000_media_type_copper) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_TP); + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); if (hw->phy.type == e1000_phy_ife) - ecmd->supported &= ~SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_TP; + supported &= ~SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_TP; if (hw->mac.autoneg == 1) { - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; /* the e1000 autoneg seems to match ethtool nicely */ - ecmd->advertising |= hw->phy.autoneg_advertised; + advertising |= hw->phy.autoneg_advertised; } - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy.addr; - ecmd->transceiver = XCVR_INTERNAL; - + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg); + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg); + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.port = PORT_FIBRE; } speed = 
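The renames above only change case (MHz becomes MHZ), but the new 38400KHZ set is easiest to sanity-check with the arithmetic the surrounding comment describes. A worked check using the defines above and the cc.shift assignments in the timinca switch later in this patch:

/* 96 MHz clock: one cycle = 1000/96 ns, and INCPERIOD_96MHZ = 12 >> 2 = 3,
 * so SYSTIM is bumped once per 3 cycles = 31.25 ns.  Each bump adds
 * INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ = 125 << 17, and with
 * cc.shift = INCVALUE_SHIFT_96MHZ + INCPERIOD_SHIFT_96MHZ = 19:
 *
 *	(125 << 17) >> 19 = 125 / 4 = 31.25
 *
 * so the scaled counter reads in nanoseconds exactly.
 *
 * 38.4 MHz clock: one cycle = 1000/38.4 ~= 26.042 ns, INCPERIOD = 1,
 * and (26 << 19) >> 19 = 26 ns per bump -- about 0.16% slow, which is
 * well within what the PTP layer's frequency adjustment can absorb.
 */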
SPEED_UNKNOWN; - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; if (netif_running(netdev)) { if (netif_carrier_ok(netdev)) { speed = adapter->link_speed; - ecmd->duplex = adapter->link_duplex - 1; + cmd->base.duplex = adapter->link_duplex - 1; } } else if (!pm_runtime_suspended(netdev->dev.parent)) { u32 status = er32(STATUS); @@ -179,30 +176,36 @@ static int e1000_get_settings(struct net_device *netdev, speed = SPEED_10; if (status & E1000_STATUS_FD) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; } } - ethtool_cmd_speed_set(ecmd, speed); - ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + cmd->base.speed = speed; + cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; /* MDI-X => 2; MDI =>1; Invalid =>0 */ if ((hw->phy.media_type == e1000_media_type_copper) && netif_carrier_ok(netdev)) - ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI; + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? + ETH_TP_MDI_X : ETH_TP_MDI; else - ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; if (hw->phy.mdix == AUTO_ALL_MODES) - ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; else - ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; if (hw->phy.media_type != e1000_media_type_copper) - ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } @@ -262,12 +265,16 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) return -EINVAL; } -static int e1000_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int ret_val = 0; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); pm_runtime_get_sync(netdev->dev.parent); @@ -285,14 +292,14 @@ static int e1000_set_settings(struct net_device *netdev, * some hardware doesn't allow MDI setting when speed or * duplex is forced. 
*/ - if (ecmd->eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl) { if (hw->phy.media_type != e1000_media_type_copper) { ret_val = -EOPNOTSUPP; goto out; } - if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && - (ecmd->autoneg != AUTONEG_ENABLE)) { + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); ret_val = -EINVAL; goto out; @@ -302,35 +309,35 @@ static int e1000_set_settings(struct net_device *netdev, while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; if (hw->phy.media_type == e1000_media_type_fiber) hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE | ADVERTISED_Autoneg; else - hw->phy.autoneg_advertised = ecmd->advertising | + hw->phy.autoneg_advertised = advertising | ADVERTISED_TP | ADVERTISED_Autoneg; - ecmd->advertising = hw->phy.autoneg_advertised; + advertising = hw->phy.autoneg_advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = e1000_fc_default; } else { - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; /* calling this overrides forced MDI setting */ - if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { ret_val = -EINVAL; goto out; } } /* MDI-X => 2; MDI => 1; Auto => 3 */ - if (ecmd->eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl) { /* fix up the value for auto (3 => 0) as zero is mapped * internally to auto */ - if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) hw->phy.mdix = AUTO_ALL_MODES; else - hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; } /* reset the link */ @@ -904,19 +911,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + /* fall through */ + case e1000_pch_cnp: mask |= BIT(18); break; default: break; } - if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) + if (mac->type >= e1000_pch_lpt) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { - if ((mac->type == e1000_pch_lpt) || - (mac->type == e1000_pch_spt)) { + if (mac->type >= e1000_pch_lpt) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) continue; @@ -1525,7 +1533,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rctl, fext_nvm11, tarc0; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { fext_nvm11 = er32(FEXTNVM11); fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); @@ -1569,6 +1577,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch_spt: + case e1000_pch_cnp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); @@ -2313,8 +2322,6 @@ static int e1000e_get_ts_info(struct net_device *netdev, } static const struct ethtool_ops e1000_ethtool_ops = { - .get_settings = e1000_get_settings, - .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, .get_regs_len = e1000_get_regs_len, .get_regs = e1000_get_regs, @@ -2342,6 +2349,8 @@ static const struct 
ethtool_ops e1000_ethtool_ops = { .get_ts_info = e1000e_get_ts_info, .get_eee = e1000e_get_eee, .set_eee = e1000e_set_eee, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, }; void e1000e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 4e733bf1a38e3d24953ed430946129546e3785ff..66bd5060a65b2bd9c6453583a0db67efcb6080f1 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -96,6 +96,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8 #define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3 #define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6 +#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD +#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE +#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB +#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC #define E1000_REVISION_4 4 @@ -118,6 +122,7 @@ enum e1000_mac_type { e1000_pch2lan, e1000_pch_lpt, e1000_pch_spt, + e1000_pch_cnp, }; enum e1000_media_type { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index f3aaca743ea3ff3a2250547e47fb8d1c275f0cea..68ea8b4555ab479e43cee9381769d7f8a59ba5e9 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -237,7 +237,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { /* Only unforce SMBus if ME is not active */ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Unforce SMBus mode in PHY */ @@ -333,6 +333,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -474,6 +475,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -607,7 +609,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) nvm->type = e1000_nvm_flash_sw; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { /* in SPT, gfpreg doesn't exist. NVM size is taken from the * STRAP register. This is because in SPT the GbE Flash region * is no longer accessed through the flash registers. Instead, @@ -715,6 +717,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) /* fall-through */ case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -732,7 +735,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } - if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { + if (mac->type >= e1000_pch_lpt) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = @@ -1399,9 +1402,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. 
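Most of the ich8lan churn that follows is mechanical: every "type == e1000_pch_lpt || type == e1000_pch_spt" chain becomes "type >= e1000_pch_lpt". That only works because enum e1000_mac_type is kept in hardware-generation order — which is also why e1000_pch_cnp is appended at the tail of the enum rather than inserted. A toy illustration of the invariant:

/* Generations must stay in chronological order for >= tests to mean
 * "this part or anything newer".  Appending a new entry preserves
 * every existing comparison; inserting it earlier would silently
 * change the meaning of all of them.
 */
enum mac_type {
	mac_pch2lan,
	mac_pch_lpt,
	mac_pch_spt,
	mac_pch_cnp,	/* new entries go here, at the tail */
};

static int has_ecc(enum mac_type t)
{
	return t >= mac_pch_lpt;	/* LPT, SPT, CNP, and whatever follows */
}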
*/ - if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) && link) { + if ((hw->mac.type >= e1000_pch2lan) && link) { u16 speed, duplex; e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); @@ -1412,7 +1413,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) tipg_reg |= 0xFF; /* Reduce Rx latency in analog PHY */ emi_val = 0; - } else if (hw->mac.type == e1000_pch_spt && + } else if (hw->mac.type >= e1000_pch_spt && duplex == FULL_DUPLEX && speed != SPEED_1000) { tipg_reg |= 0xC; emi_val = 1; @@ -1435,8 +1436,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) emi_addr = I217_RX_CONFIG; ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); - if (hw->mac.type == e1000_pch_lpt || - hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_lpt) { u16 phy_reg; e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg); @@ -1452,7 +1452,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { u16 data; u16 ptr_gap; @@ -1502,7 +1502,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * on power up. * Set the Beacon Duration for I217 to 8 usec */ - if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { u32 mac_reg; mac_reg = er32(FEXTNVM4); @@ -1520,8 +1520,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; } - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ @@ -1533,15 +1532,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; - /* FEXTNVM6 K1-off workaround */ - if (hw->mac.type == e1000_pch_spt) { - u32 pcieanacfg = er32(PCIEANACFG); + if (hw->mac.type >= e1000_pch_lpt) { u32 fextnvm6 = er32(FEXTNVM6); - if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) - fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; - else - fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + if (hw->mac.type == e1000_pch_spt) { + /* FEXTNVM6 K1-off workaround - for SPT only */ + u32 pcieanacfg = er32(PCIEANACFG); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + } ew32(FEXTNVM6, fextnvm6); } @@ -1640,6 +1642,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -2091,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -3125,6 +3129,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) switch (hw->mac.type) { case e1000_pch_spt: + case e1000_pch_cnp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; @@ -3380,7 +3385,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= 
e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); @@ -3399,7 +3404,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); @@ -3423,7 +3428,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); else @@ -3450,13 +3455,13 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -3527,7 +3532,7 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, /* In SPT, only 32 bits access is supported, * so this function should not be called. */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) return -E1000_ERR_NVM; else ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); @@ -3634,8 +3639,7 @@ static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val = -E1000_ERR_NVM; u8 count = 0; - if (offset > ICH_FLASH_LINEAR_ADDR_MASK || - hw->mac.type != e1000_pch_spt) + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); @@ -4068,6 +4072,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -4153,7 +4158,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } else { @@ -4173,7 +4178,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); @@ -4185,7 +4190,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * not flash. 
Therefore, only 32 bit access is * supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -4243,7 +4248,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (hw->mac.type == e1000_pch_spt) { + if (hw->mac.type >= e1000_pch_spt) { if (offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; } @@ -4259,7 +4264,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, /* In SPT, This register is in Lan memory space, not * flash. Therefore, only 32 bit access is supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else @@ -4272,7 +4277,7 @@ static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, * not flash. Therefore, only 32 bit access is * supported */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -4464,14 +4469,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; else hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); else @@ -4894,8 +4899,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); @@ -5299,7 +5303,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || (device_id == E1000_DEV_ID_PCH_I218_V3) || - (hw->mac.type == e1000_pch_spt)) { + (hw->mac.type >= e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); @@ -5865,7 +5869,8 @@ const struct e1000_info e1000_pch2_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS - | FLAG2_HAS_EEE, + | FLAG2_HAS_EEE + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, @@ -5914,3 +5919,23 @@ const struct e1000_info e1000_pch_spt_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; + +const struct e1000_info e1000_pch_cnp_info = { + .mac = e1000_pch_cnp, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e9af89ad039c6f0e227878b9de85ea7819cd19d9..b3679728caac451354873e60e5114429af0a7fdd 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ 
b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -71,6 +71,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, [board_pch_spt] = &e1000_pch_spt_info, + [board_pch_cnp] = &e1000_pch_cnp_info, }; struct e1000_reg_info { @@ -1791,8 +1792,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt))) { + if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -1872,8 +1872,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt))) { + if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -2241,8 +2240,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); - } else if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + } else if (hw->mac.type >= e1000_pch_lpt) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); @@ -3000,8 +2998,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) hw->mac.ops.config_collision_dist(hw); - /* SPT Si errata workaround to avoid data corruption */ - if (hw->mac.type == e1000_pch_spt) { + /* SPT and CNP Si errata workaround to avoid data corruption */ + if (hw->mac.type >= e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); @@ -3497,8 +3495,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) /* Make sure clock is enabled on I217/I218/I219 before checking * the frequency */ - if (((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) && + if ((hw->mac.type >= e1000_pch_lpt) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); @@ -3511,37 +3508,58 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHZ; + incvalue = INCVALUE_96MHZ; + shift = INCVALUE_SHIFT_96MHZ; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; + break; case e1000_pch_lpt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ - incperiod = INCPERIOD_96MHz; - incvalue = INCVALUE_96MHz; - shift = INCVALUE_SHIFT_96MHz; - adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + incperiod = INCPERIOD_96MHZ; + incvalue = INCVALUE_96MHZ; + shift = INCVALUE_SHIFT_96MHZ; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; } else { /* Stable 25MHz frequency */ - incperiod = INCPERIOD_25MHz; - incvalue = INCVALUE_25MHz; - shift = INCVALUE_SHIFT_25MHz; + incperiod = INCPERIOD_25MHZ; + incvalue = INCVALUE_25MHZ; + shift = INCVALUE_SHIFT_25MHZ; adapter->cc.shift = shift; } break; case e1000_pch_spt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHz; - incvalue = INCVALUE_24MHz; - shift = INCVALUE_SHIFT_24MHz; + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; adapter->cc.shift = shift; break; } return 
-EINVAL; + case e1000_pch_cnp: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + } else { + /* Stable 38400KHz frequency */ + incperiod = INCPERIOD_38400KHZ; + incvalue = INCVALUE_38400KHZ; + shift = INCVALUE_SHIFT_38400KHZ; + adapter->cc.shift = shift; + } + break; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ - incperiod = INCPERIOD_25MHz; - incvalue = INCVALUE_25MHz; - shift = INCVALUE_SHIFT_25MHz; + incperiod = INCPERIOD_25MHZ; + incvalue = INCVALUE_25MHZ; + shift = INCVALUE_SHIFT_25MHZ; adapter->cc.shift = shift; break; default: @@ -4032,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_pch_cnp: fc->refresh_time = 0x0400; if (adapter->netdev->mtu <= ETH_DATA_LEN) { @@ -4076,7 +4095,7 @@ void e1000e_reset(struct e1000_adapter *adapter) } } - if (hw->mac.type == e1000_pch_spt) + if (hw->mac.type >= e1000_pch_spt) e1000_flush_desc_rings(adapter); /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); @@ -4151,7 +4170,7 @@ void e1000e_reset(struct e1000_adapter *adapter) phy_data &= ~IGP02E1000_PM_SPD; e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); } - if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) { + if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { u32 reg; /* Fextnvm7 @ 0xe4[2] = 1 */ @@ -4285,7 +4304,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) if (!pci_channel_offline(adapter->pdev)) { if (reset) e1000e_reset(adapter); - else if (hw->mac.type == e1000_pch_spt) + else if (hw->mac.type >= e1000_pch_spt) e1000_flush_desc_rings(adapter); } e1000_clean_tx_ring(adapter->tx_ring); @@ -4973,8 +4992,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + if (hw->mac.type >= e1000_pch_lpt) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -6348,8 +6366,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - } else if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { + } else if (hw->mac.type >= e1000_pch_lpt) { if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. 
@@ -7508,6 +7525,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 34cc3be0df8ed7a44e56d4187aef2747dd2620bd..b366885487a8cbf884997042edf1303533c79869 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -301,8 +301,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: - if (((hw->mac.type != e1000_pch_lpt) && - (hw->mac.type != e1000_pch_spt)) || + case e1000_pch_cnp: + if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 52b979443cdecd702177ff40bf71c1162a0d0783..689c413b7782f353aa7a261ecd8800b9f7b24ee9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -65,14 +65,16 @@ enum fm10k_ring_state_t { __FM10K_TX_DETECT_HANG, __FM10K_HANG_CHECK_ARMED, __FM10K_TX_XPS_INIT_DONE, + /* This must be last and is used to calculate BITMAP size */ + __FM10K_TX_STATE_SIZE__, }; #define check_for_tx_hang(ring) \ - test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) + test_bit(__FM10K_TX_DETECT_HANG, (ring)->state) #define set_check_for_tx_hang(ring) \ - set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) + set_bit(__FM10K_TX_DETECT_HANG, (ring)->state) #define clear_check_for_tx_hang(ring) \ - clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) + clear_bit(__FM10K_TX_DETECT_HANG, (ring)->state) struct fm10k_tx_buffer { struct fm10k_tx_desc *next_to_watch; @@ -126,7 +128,7 @@ struct fm10k_ring { struct fm10k_rx_buffer *rx_buffer; }; u32 __iomem *tail; - unsigned long state; + DECLARE_BITMAP(state, __FM10K_TX_STATE_SIZE__); dma_addr_t dma; /* phys. address of descriptor ring */ unsigned int size; /* length in bytes */ @@ -249,18 +251,46 @@ struct fm10k_udp_port { /* one work queue for entire driver */ extern struct workqueue_struct *fm10k_workqueue; +/* The following enumeration contains flags which indicate or enable modified + * driver behaviors. To avoid race conditions, the flags are stored in + * a BITMAP in the fm10k_intfc structure. The BITMAP should be accessed using + * atomic *_bit() operations. + */ +enum fm10k_flags_t { + FM10K_FLAG_RESET_REQUESTED, + FM10K_FLAG_RSS_FIELD_IPV4_UDP, + FM10K_FLAG_RSS_FIELD_IPV6_UDP, + FM10K_FLAG_SWPRI_CONFIG, + /* __FM10K_FLAGS_SIZE__ is used to calculate the size of + * interface->flags and must be the last value in this + * enumeration. 
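Enabling the CNP parts touches four places in e1000e that must agree: the device IDs in hw.h, the e1000_pch_cnp mac type, the board_pch_cnp index with its e1000_info instance, and the PCI table entries above that map IDs to that index. A condensed sketch of how the pieces meet at probe time — the table lookup mirrors what e1000_probe does with e1000_info_tbl, but the probe body here is abridged and illustrative:

static const struct pci_device_id example_pci_tbl[] = {
	/* driver_data carries the board index for the info table */
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
	{ 0 }	/* terminator */
};

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	/* One array lookup turns the matched PCI ID into the per-board
	 * ops/flags bundle -- which is why a new generation needs the
	 * enum entry, a board_* index, and an e1000_info instance.
	 */
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];

	return ei ? 0 : -ENODEV;
}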
+ */ + __FM10K_FLAGS_SIZE__ +}; + +enum fm10k_state_t { + __FM10K_RESETTING, + __FM10K_DOWN, + __FM10K_SERVICE_SCHED, + __FM10K_SERVICE_REQUEST, + __FM10K_SERVICE_DISABLE, + __FM10K_MBX_LOCK, + __FM10K_LINK_DOWN, + __FM10K_UPDATING_STATS, + /* This value must be last and determines the BITMAP size */ + __FM10K_STATE_SIZE__, +}; + struct fm10k_intfc { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */ struct pci_dev *pdev; - unsigned long state; + DECLARE_BITMAP(state, __FM10K_STATE_SIZE__); + + /* Access flag values using atomic *_bit() operations */ + DECLARE_BITMAP(flags, __FM10K_FLAGS_SIZE__); - u32 flags; -#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0)) -#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) -#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) -#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(3)) int xcast_mode; /* Tx fast path data */ @@ -352,22 +382,12 @@ struct fm10k_intfc { u16 vid; }; -enum fm10k_state_t { - __FM10K_RESETTING, - __FM10K_DOWN, - __FM10K_SERVICE_SCHED, - __FM10K_SERVICE_DISABLE, - __FM10K_MBX_LOCK, - __FM10K_LINK_DOWN, - __FM10K_UPDATING_STATS, -}; - static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) { /* busy loop if we cannot obtain the lock as some calls * such as ndo_set_rx_mode may be made in atomic context */ - while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state)) + while (test_and_set_bit(__FM10K_MBX_LOCK, interface->state)) udelay(20); } @@ -375,12 +395,12 @@ static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface) { /* flush memory to make sure state is correct */ smp_mb__before_atomic(); - clear_bit(__FM10K_MBX_LOCK, &interface->state); + clear_bit(__FM10K_MBX_LOCK, interface->state); } static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface) { - return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state); + return !test_and_set_bit(__FM10K_MBX_LOCK, interface->state); } /* fm10k_test_staterr - test bits in Rx descriptor status and error fields */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 0c84fef750f43a2c9230a580003bbd9b9a5d4ef9..c7234f35f8ff3462dc667a01dafd10208dcc9d82 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. 
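The fm10k changes here retire the u32 flags field and its BIT() defines in favor of an enum of bit indices plus DECLARE_BITMAP(), with a sentinel enumerator sizing the map. Flags are then flipped with atomic set_bit()/clear_bit() instead of racy read-modify-write on a shared word. A minimal sketch of the idiom, using the kernel bitmap API as the hunks do but with hypothetical flag names:

/* Bit indices, not masks: the sentinel keeps the bitmap sized
 * automatically as flags are added.
 */
enum example_flags {
	EXAMPLE_FLAG_RESET_REQUESTED,
	EXAMPLE_FLAG_SWPRI_CONFIG,
	__EXAMPLE_FLAGS_SIZE__,		/* must stay last */
};

struct example_intfc {
	DECLARE_BITMAP(flags, __EXAMPLE_FLAGS_SIZE__);
};

static void request_reset(struct example_intfc *intfc)
{
	/* set_bit() is atomic, so concurrent callers cannot lose each
	 * other's updates the way "intfc->flags |= FLAG" could.
	 */
	set_bit(EXAMPLE_FLAG_RESET_REQUESTED, intfc->flags);
}

static bool reset_requested(struct example_intfc *intfc)
{
	return test_bit(EXAMPLE_FLAG_RESET_REQUESTED, intfc->flags);
}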
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -562,7 +562,7 @@ static int fm10k_set_ringparam(struct net_device *netdev, return 0; } - while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) + while (test_and_set_bit(__FM10K_RESETTING, interface->state)) usleep_range(1000, 2000); if (!netif_running(interface->netdev)) { @@ -648,7 +648,7 @@ static int fm10k_set_ringparam(struct net_device *netdev, fm10k_up(interface); vfree(temp_ring); clear_reset: - clear_bit(__FM10K_RESETTING, &interface->state); + clear_bit(__FM10K_RESETTING, interface->state); return err; } @@ -716,7 +716,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case UDP_V4_FLOW: - if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, + interface->flags)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case SCTP_V4_FLOW: @@ -732,7 +733,8 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; case UDP_V6_FLOW: - if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, + interface->flags)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; @@ -764,12 +766,13 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \ - FM10K_FLAG_RSS_FIELD_IPV6_UDP) static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, struct ethtool_rxnfc *nfc) { - u32 flags = interface->flags; + int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, + interface->flags); + int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, + interface->flags); /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports @@ -793,10 +796,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP; + clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, + interface->flags); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP; + set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, + interface->flags); break; default: return -EINVAL; @@ -808,10 +813,12 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, return -EINVAL; switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP; + clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, + interface->flags); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP; + set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, + interface->flags); break; default: return -EINVAL; @@ -835,28 +842,41 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, return -EINVAL; } - /* if we changed something we need to update flags */ - if (flags != interface->flags) { + /* If something changed we need to update the MRQC register. Note that + * test_bit() is guaranteed to return strictly 0 or 1, so testing for + * equality is safe. 
+ */ + if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, + interface->flags)) || + (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, + interface->flags))) { struct fm10k_hw *hw = &interface->hw; + bool warn = false; u32 mrqc; - if ((flags & UDP_RSS_FLAGS) && - !(interface->flags & UDP_RSS_FLAGS)) - netif_warn(interface, drv, interface->netdev, - "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); - - interface->flags = flags; - /* Perform hash on these packet types */ mrqc = FM10K_MRQC_IPV4 | FM10K_MRQC_TCP_IPV4 | FM10K_MRQC_IPV6 | FM10K_MRQC_TCP_IPV6; - if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, + interface->flags)) { mrqc |= FM10K_MRQC_UDP_IPV4; - if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + warn = true; + } + if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, + interface->flags)) { mrqc |= FM10K_MRQC_UDP_IPV6; + warn = true; + } + + /* If we enable UDP RSS display a warning that this may cause + * fragmented UDP packets to arrive out of order. + */ + if (warn) + netif_warn(interface, drv, interface->netdev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); } @@ -939,7 +959,7 @@ static void fm10k_self_test(struct net_device *dev, memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); - if (FM10K_REMOVED(hw)) { + if (FM10K_REMOVED(hw->hw_addr)) { netif_err(interface, drv, dev, "Interface removed - test blocked\n"); eth_test->flags |= ETH_TEST_FL_FAILED; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5bb233a9614c1cc172d87a7f40bc68003f4af673..9dffaba85ae6bbd92fd608824ef480f802c452b6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. 
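The rewritten hunk above snapshots each UDP RSS bit with test_bit() before the switch and compares the snapshots afterwards, since the old whole-word "flags != interface->flags" comparison no longer exists once flags live in a bitmap. A self-contained sketch of that change-detection idiom, using plain non-atomic helpers for brevity:

#include <stdbool.h>
#include <stdio.h>

#define RSS_IPV4_UDP 0
#define RSS_IPV6_UDP 1

static bool test_flag(const unsigned long *map, unsigned int nr)
{
	return (map[0] >> nr) & 1UL;
}

int main(void)
{
	unsigned long flags[1] = { 0 };
	bool old_v4 = test_flag(flags, RSS_IPV4_UDP);
	bool old_v6 = test_flag(flags, RSS_IPV6_UDP);

	flags[0] |= 1UL << RSS_IPV4_UDP;	/* user enabled UDP4 hashing */

	/* compare saved snapshots instead of two scalar flag words */
	if (old_v4 != test_flag(flags, RSS_IPV4_UDP) ||
	    old_v6 != test_flag(flags, RSS_IPV6_UDP))
		printf("reprogram MRQC\n");
	return 0;
}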
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -34,7 +34,7 @@ const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright (c) 2013 - 2016 Intel Corporation."; + "Copyright(c) 2013 - 2017 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); @@ -1175,13 +1175,13 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) /* update completed stats and continue */ tx_ring->tx_stats.tx_done_old = tx_done; /* reset the countdown */ - clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); + clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); return false; } /* make sure it is true for two checks in a row */ - return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); + return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state); } /** @@ -1191,9 +1191,9 @@ bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) { /* Do the reset outside of interrupt context */ - if (!test_bit(__FM10K_DOWN, &interface->state)) { + if (!test_bit(__FM10K_DOWN, interface->state)) { interface->tx_timeout_count++; - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); fm10k_service_event_schedule(interface); } } @@ -1214,7 +1214,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; - if (test_bit(__FM10K_DOWN, &interface->state)) + if (test_bit(__FM10K_DOWN, interface->state)) return true; tx_buffer = &tx_ring->tx_buffer[i]; @@ -1344,7 +1344,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__FM10K_DOWN, &interface->state)) { + !test_bit(__FM10K_DOWN, interface->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 01db688cf5398d434e81c2d4016e5764bbd49820..24f2f6f86f5a3a50929eb52b5dbe6eb08e0e8635 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -737,6 +737,23 @@ static void fm10k_tx_timeout(struct net_device *netdev) } } +/** + * fm10k_host_mbx_ready - Check PF interface's mailbox readiness + * @interface: board private structure + * + * This function checks if the PF interface's mailbox is ready before queueing + * mailbox messages for transmission. This will prevent filling the TX mailbox + * queue when the receiver is not ready. VF interfaces are exempt from this + * check since it will block all PF-VF mailbox messages from being sent from + * the VF to the PF at initialization. 
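fm10k_host_mbx_ready() turns each mailbox call site into a guarded fast-fail: queue the message only when the peer is ready, otherwise return -EHOSTDOWN rather than filling the Tx mailbox queue. A simplified sketch of the pattern, with illustrative types rather than the driver's own:

#include <errno.h>
#include <stdbool.h>

enum mac_type { MAC_PF, MAC_VF };

struct intfc {
	enum mac_type type;
	bool host_ready;
};

static bool host_mbx_ready(const struct intfc *i)
{
	/* VFs are exempt: blocking them would stall VF->PF init traffic */
	return i->type == MAC_VF || i->host_ready;
}

static int update_uc_addr(struct intfc *i)
{
	if (!host_mbx_ready(i))
		return -EHOSTDOWN;	/* fail fast, don't queue */
	/* ...queue the mailbox message here... */
	return 0;
}

int main(void)
{
	struct intfc vf = { MAC_VF, false };
	struct intfc pf = { MAC_PF, false };

	/* VF side always proceeds; PF side fails until host_ready is set */
	return (update_uc_addr(&vf) == 0 &&
		update_uc_addr(&pf) == -EHOSTDOWN) ? 0 : 1;
}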
+ **/ +static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + return (hw->mac.type == fm10k_mac_vf || interface->host_ready); +} + static int fm10k_uc_vlan_unsync(struct net_device *netdev, const unsigned char *uc_addr) { @@ -745,12 +762,15 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err; + int err = -EHOSTDOWN; /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; - err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0); + if (fm10k_host_mbx_ready(interface)) + err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, + vid, set, 0); + if (err) return err; @@ -766,12 +786,14 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); - int err; + int err = -EHOSTDOWN; /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; - err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); + if (fm10k_host_mbx_ready(interface)) + err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); + if (err) return err; @@ -822,7 +844,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) /* Do not throw an error if the interface is down. We will sync once * we come up */ - if (test_bit(__FM10K_DOWN, &interface->state)) + if (test_bit(__FM10K_DOWN, interface->state)) return 0; fm10k_mbx_lock(interface); @@ -834,9 +856,13 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) goto err_out; } - /* update our base MAC address */ - err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, - vid, set, 0); + /* update our base MAC address if host's mailbox is ready */ + if (fm10k_host_mbx_ready(interface)) + err = hw->mac.ops.update_uc_addr(hw, interface->glort, + hw->mac.addr, vid, set, 0); + else + err = -EHOSTDOWN; + if (err) goto err_out; @@ -907,12 +933,15 @@ static int __fm10k_uc_sync(struct net_device *dev, if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; - /* update table with current entries */ + /* update table with current entries if host's mailbox is ready */ + if (!fm10k_host_mbx_ready(interface)) + return -EHOSTDOWN; + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { err = hw->mac.ops.update_uc_addr(hw, glort, addr, - vid, sync, 0); + vid, sync, 0); if (err) return err; } @@ -970,7 +999,10 @@ static int __fm10k_mc_sync(struct net_device *dev, struct fm10k_hw *hw = &interface->hw; u16 vid, glort = interface->glort; - /* update table with current entries */ + /* update table with current entries if host's mailbox is ready */ + if (!fm10k_host_mbx_ready(interface)) + return 0; + for (vid = hw->mac.default_vid ? 
fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { @@ -1018,8 +1050,10 @@ static void fm10k_set_rx_mode(struct net_device *dev) if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) fm10k_clear_unused_vlans(interface); - /* update xcast mode */ - hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode); + /* update xcast mode if host's mailbox is ready */ + if (fm10k_host_mbx_ready(interface)) + hw->mac.ops.update_xcast_mode(hw, interface->glort, + xcast_mode); /* record updated xcast mode state */ interface->xcast_mode = xcast_mode; @@ -1054,8 +1088,10 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) fm10k_mbx_lock(interface); - /* Enable logical port */ - hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true); + /* Enable logical port if host's mailbox is ready */ + if (fm10k_host_mbx_ready(interface)) + hw->mac.ops.update_lport_state(hw, glort, + interface->glort_count, true); /* update VLAN table */ hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, @@ -1069,12 +1105,18 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { hw->mac.ops.update_vlan(hw, vid, 0, true); - hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, - vid, true, 0); + + /* Update unicast entries if host's mailbox is ready */ + if (fm10k_host_mbx_ready(interface)) + hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, + vid, true, 0); } - /* update xcast mode before synchronizing addresses */ - hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); + /* update xcast mode before synchronizing addresses if host's mailbox + * is ready + */ + if (fm10k_host_mbx_ready(interface)) + hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); /* synchronize all of the addresses */ __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); @@ -1096,9 +1138,12 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface) fm10k_mbx_lock(interface); - /* clear the logical port state on lower device */ - hw->mac.ops.update_lport_state(hw, interface->glort, - interface->glort_count, false); + /* clear the logical port state on lower device if host's mailbox is + * ready + */ + if (fm10k_host_mbx_ready(interface)) + hw->mac.ops.update_lport_state(hw, interface->glort, + interface->glort_count, false); fm10k_mbx_unlock(interface); @@ -1115,8 +1160,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface) * @netdev: network interface device structure * @stats: storage space for 64bit statistics * - * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This - * function replaces fm10k_get_stats for kernels which support it. + * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit + * architectures. 
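On 32-bit machines a 64-bit counter is read in two halves, so "safe for both 32bit and 64bit architectures" implies a seqlock-style retry on the reader side, as in the kernel's u64_stats_fetch_begin()/u64_stats_fetch_retry() helpers. A user-space sketch of that reader loop; this is an assumption about the underlying mechanism, as the hunk above only updates the kernel-doc:

#include <stdint.h>
#include <stdio.h>

struct stat64 {
	unsigned int seq;	/* even = stable, odd = writer active */
	uint64_t packets;
};

static uint64_t read_packets(const struct stat64 *s)
{
	unsigned int start;
	uint64_t val;

	do {
		start = __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE);
		val = s->packets;	/* may be torn on 32-bit CPUs */
	} while ((start & 1) ||
		 __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE) != start);

	return val;	/* only returned if no writer interleaved */
}

int main(void)
{
	struct stat64 s = { 0, 123456789ULL };

	printf("%llu\n", (unsigned long long)read_packets(&s));
	return 0;
}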
*/ static void fm10k_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) @@ -1207,7 +1252,7 @@ int fm10k_setup_tc(struct net_device *dev, u8 tc) goto err_open; /* flag to indicate SWPRI has yet to be updated */ - interface->flags |= FM10K_FLAG_SWPRI_CONFIG; + set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags); return 0; err_open: @@ -1226,7 +1271,9 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return fm10k_setup_tc(dev, tc->tc); + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return fm10k_setup_tc(dev, tc->mqprio->num_tc); } static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, @@ -1317,8 +1364,13 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, fm10k_mbx_lock(interface); glort = l2_accel->dglort + 1 + i; - hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); - hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0); + + if (fm10k_host_mbx_ready(interface)) { + hw->mac.ops.update_xcast_mode(hw, glort, + FM10K_XCAST_MODE_MULTI); + hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, + 0, true, 0); + } fm10k_mbx_unlock(interface); @@ -1352,8 +1404,13 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) fm10k_mbx_lock(interface); glort = l2_accel->dglort + 1 + i; - hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); - hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0); + + if (fm10k_host_mbx_ready(interface)) { + hw->mac.ops.update_xcast_mode(hw, glort, + FM10K_XCAST_MODE_NONE); + hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, + 0, false, 0); + } fm10k_mbx_unlock(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e372a582348015355e5406eae522fbda148558bd..3e26d27ad213362bcc473262435740ccf8d2c697 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -19,6 +19,7 @@ */ #include +#include #include "fm10k.h" @@ -92,18 +93,29 @@ static int fm10k_hw_ready(struct fm10k_intfc *interface) void fm10k_service_event_schedule(struct fm10k_intfc *interface) { - if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) && - !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state)) + if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) && + !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) { + clear_bit(__FM10K_SERVICE_REQUEST, interface->state); queue_work(fm10k_workqueue, &interface->service_task); + } else { + set_bit(__FM10K_SERVICE_REQUEST, interface->state); + } } static void fm10k_service_event_complete(struct fm10k_intfc *interface) { - WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); + WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state)); /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); - clear_bit(__FM10K_SERVICE_SCHED, &interface->state); + clear_bit(__FM10K_SERVICE_SCHED, interface->state); + + /* If a service event was requested since we started, immediately + * re-schedule now. This ensures we don't drop a request until the + * next timer event.
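The new __FM10K_SERVICE_REQUEST bit latches requests that arrive while the service task is already scheduled, and fm10k_service_event_complete() re-schedules whenever the latch is set. A non-atomic user-space sketch of that latch (the driver uses atomic bitops):

#include <stdbool.h>
#include <stdio.h>

struct svc {
	bool disabled, sched, request;
};

static void queue_work_stub(void)
{
	printf("service task queued\n");
}

static void service_schedule(struct svc *s)
{
	if (!s->disabled && !s->sched) {
		s->sched = true;
		s->request = false;	/* this request is being handled */
		queue_work_stub();
	} else {
		s->request = true;	/* latch: handled at completion */
	}
}

static void service_complete(struct svc *s)
{
	s->sched = false;
	if (s->request)			/* a request arrived mid-run */
		service_schedule(s);
}

int main(void)
{
	struct svc s = { false, false, false };

	service_schedule(&s);	/* queued */
	service_schedule(&s);	/* latched */
	service_complete(&s);	/* re-queued immediately, request not lost */
	return 0;
}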
+ */ + if (test_bit(__FM10K_SERVICE_REQUEST, interface->state)) + fm10k_service_event_schedule(interface); } /** @@ -136,7 +148,7 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) if (~value) { interface->hw.hw_addr = interface->uc_addr; netif_device_attach(netdev); - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); netdev_warn(netdev, "PCIe link restored, device now attached\n"); return; } @@ -158,7 +170,7 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) /* put off any impending NetWatchDogTimeout */ netif_trans_update(netdev); - while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) + while (test_and_set_bit(__FM10K_RESETTING, interface->state)) usleep_range(1000, 2000); rtnl_lock(); @@ -241,7 +253,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface) rtnl_unlock(); - clear_bit(__FM10K_RESETTING, &interface->state); + clear_bit(__FM10K_RESETTING, interface->state); return err; err_open: @@ -253,7 +265,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface) rtnl_unlock(); - clear_bit(__FM10K_RESETTING, &interface->state); + clear_bit(__FM10K_RESETTING, interface->state); return err; } @@ -272,11 +284,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface) static void fm10k_reset_subtask(struct fm10k_intfc *interface) { - if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED)) + if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED, + interface->flags)) return; - interface->flags &= ~FM10K_FLAG_RESET_REQUESTED; - netdev_err(interface->netdev, "Reset interface\n"); fm10k_reinit(interface); @@ -295,7 +306,7 @@ static void fm10k_configure_swpri_map(struct fm10k_intfc *interface) int i; /* clear flag indicating update is needed */ - interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG; + clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags); /* these registers are only available on the PF */ if (hw->mac.type != fm10k_mac_pf) @@ -316,14 +327,14 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) struct fm10k_hw *hw = &interface->hw; s32 err; - if (test_bit(__FM10K_LINK_DOWN, &interface->state)) { + if (test_bit(__FM10K_LINK_DOWN, interface->state)) { interface->host_ready = false; if (time_is_after_jiffies(interface->link_down_event)) return; - clear_bit(__FM10K_LINK_DOWN, &interface->state); + clear_bit(__FM10K_LINK_DOWN, interface->state); } - if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) { + if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) { if (rtnl_trylock()) { fm10k_configure_swpri_map(interface); rtnl_unlock(); @@ -335,7 +346,7 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) err = hw->mac.ops.get_host_state(hw, &interface->host_ready); if (err && time_is_before_jiffies(interface->last_reset)) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); /* free the lock */ fm10k_mbx_unlock(interface); @@ -411,7 +422,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) int i; /* ensure only one thread updates stats at a time */ - if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) + if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state)) return; /* do not allow stats update via service task for next second */ @@ -492,7 +503,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) net_stats->rx_errors = rx_errors; net_stats->rx_dropped = interface->stats.nodesc_drop.count; - clear_bit(__FM10K_UPDATING_STATS, &interface->state); + 
clear_bit(__FM10K_UPDATING_STATS, interface->state); } /** @@ -522,7 +533,7 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) * controller to flush Tx. */ if (some_tx_pending) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); } /** @@ -532,8 +543,8 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) { /* if interface is down do nothing */ - if (test_bit(__FM10K_DOWN, &interface->state) || - test_bit(__FM10K_RESETTING, &interface->state)) + if (test_bit(__FM10K_DOWN, interface->state) || + test_bit(__FM10K_RESETTING, interface->state)) return; if (interface->host_ready) @@ -563,8 +574,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) int i; /* If we're down or resetting, just bail */ - if (test_bit(__FM10K_DOWN, &interface->state) || - test_bit(__FM10K_RESETTING, &interface->state)) + if (test_bit(__FM10K_DOWN, interface->state) || + test_bit(__FM10K_RESETTING, interface->state)) return; /* rate limit tx hang checks to only once every 2 seconds */ @@ -663,7 +674,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, FM10K_PFVTCTL_FTAG_DESC_ENABLE); /* Initialize XPS */ - if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) && + if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) && ring->q_vector) netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, @@ -743,6 +754,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* disable queue to avoid issues while updating state */ rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx)); rxqctl &= ~FM10K_RXQCTL_ENABLE; + fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl); fm10k_write_flush(hw); /* possible poll here to verify ring resources have been cleaned */ @@ -863,9 +875,9 @@ static void fm10k_configure_dglort(struct fm10k_intfc *interface) FM10K_MRQC_IPV6 | FM10K_MRQC_TCP_IPV6; - if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags)) mrqc |= FM10K_MRQC_UDP_IPV4; - if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags)) mrqc |= FM10K_MRQC_UDP_IPV6; fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); @@ -980,7 +992,7 @@ void fm10k_netpoll(struct net_device *netdev) int i; /* if interface is down do nothing */ - if (test_bit(__FM10K_DOWN, &interface->state)) + if (test_bit(__FM10K_DOWN, interface->state)) return; for (i = 0; i < interface->num_q_vectors; i++) @@ -1167,13 +1179,13 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) } if (err == FM10K_ERR_RESET_REQUESTED) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); /* if switch toggled state we should reset GLORTs */ if (eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */ interface->link_down_event = jiffies + (4 * HZ); - set_bit(__FM10K_LINK_DOWN, &interface->state); + set_bit(__FM10K_LINK_DOWN, interface->state); /* reset dglort_map back to no config */ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; @@ -1246,12 +1258,12 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, /* MAC was changed so we need reset */ if (is_valid_ether_addr(hw->mac.perm_addr) && !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr)) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); /* 
VLAN override was changed, or default VLAN changed */ if ((vlan_override != hw->mac.vlan_override) || (default_vid != hw->mac.default_vid)) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); return 0; } @@ -1325,7 +1337,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, if (!err && hw->swapi.status) { /* force link down for a reasonable delay */ interface->link_down_event = jiffies + (2 * HZ); - set_bit(__FM10K_LINK_DOWN, &interface->state); + set_bit(__FM10K_LINK_DOWN, interface->state); /* reset dglort_map back to no config */ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; @@ -1356,7 +1368,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, /* we need to reset if port count was just updated */ if (dglort_map != hw->mac.dglort_map) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); return 0; } @@ -1395,7 +1407,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, /* we need to reset if default VLAN was just updated */ if (pvid != hw->mac.default_vid) - interface->flags |= FM10K_FLAG_RESET_REQUESTED; + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); hw->mac.default_vid = pvid; @@ -1623,10 +1635,10 @@ void fm10k_up(struct fm10k_intfc *interface) hw->mac.ops.update_int_moderator(hw); /* enable statistics capture again */ - clear_bit(__FM10K_UPDATING_STATS, &interface->state); + clear_bit(__FM10K_UPDATING_STATS, interface->state); /* clear down bit to indicate we are ready to go */ - clear_bit(__FM10K_DOWN, &interface->state); + clear_bit(__FM10K_DOWN, interface->state); /* enable polling cleanups */ fm10k_napi_enable_all(interface); @@ -1660,7 +1672,7 @@ void fm10k_down(struct fm10k_intfc *interface) int err, i = 0, count = 0; /* signal that we are down to the interrupt handler and service task */ - if (test_and_set_bit(__FM10K_DOWN, &interface->state)) + if (test_and_set_bit(__FM10K_DOWN, interface->state)) return; /* call carrier off first to avoid false dev_watchdog timeouts */ @@ -1680,7 +1692,7 @@ void fm10k_down(struct fm10k_intfc *interface) fm10k_update_stats(interface); /* prevent updating statistics while we're down */ - while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) + while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state)) usleep_range(1000, 2000); /* skip waiting for TX DMA if we lost PCIe link */ @@ -1849,8 +1861,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, memcpy(interface->rssrk, rss_key, sizeof(rss_key)); /* Start off interface as being down */ - set_bit(__FM10K_DOWN, &interface->state); - set_bit(__FM10K_UPDATING_STATS, &interface->state); + set_bit(__FM10K_DOWN, interface->state); + set_bit(__FM10K_UPDATING_STATS, interface->state); return 0; } @@ -2027,7 +2039,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * must ensure it is disabled since we haven't yet requested the timer * or work item. 
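Producers such as fm10k_lport_map() and fm10k_update_pvid() set FM10K_FLAG_RESET_REQUESTED from several contexts, and fm10k_reset_subtask() now consumes it with a single test_and_clear_bit(), so no request can slip in between a separate test and clear. A sketch of the consume side, with a GCC builtin standing in for the kernel's test_and_clear_bit():

#include <stdio.h>

static unsigned long flags;
#define RESET_REQUESTED 0

static int test_and_clear(unsigned long *map, unsigned int nr)
{
	/* atomically fetch the old word while clearing the bit */
	unsigned long old = __atomic_fetch_and(map, ~(1UL << nr),
					       __ATOMIC_SEQ_CST);
	return (old >> nr) & 1UL;
}

static void reset_subtask(void)
{
	if (!test_and_clear(&flags, RESET_REQUESTED))
		return;				/* no pending request */
	printf("Reset interface\n");		/* runs exactly once */
}

int main(void)
{
	__atomic_fetch_or(&flags, 1UL << RESET_REQUESTED, __ATOMIC_SEQ_CST);
	reset_subtask();	/* consumes the request */
	reset_subtask();	/* no-op */
	return 0;
}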
*/ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + set_bit(__FM10K_SERVICE_DISABLE, interface->state); err = fm10k_mbx_request_irq(interface); if (err) @@ -2068,7 +2080,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fm10k_iov_configure(pdev, 0); /* clear the service task disable bit to allow service task to start */ - clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + clear_bit(__FM10K_SERVICE_DISABLE, interface->state); return 0; @@ -2106,7 +2118,7 @@ static void fm10k_remove(struct pci_dev *pdev) del_timer_sync(&interface->service_timer); - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + set_bit(__FM10K_SERVICE_DISABLE, interface->state); cancel_work_sync(&interface->service_task); /* free netdev, this may bounce the interrupts due to setup_tc */ @@ -2145,7 +2157,7 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface) * stopped. We stop the watchdog task until after we resume software * activity. */ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + set_bit(__FM10K_SERVICE_DISABLE, interface->state); cancel_work_sync(&interface->service_task); fm10k_prepare_for_reset(interface); @@ -2171,10 +2183,10 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) /* force link to stay down for a second to prevent link flutter */ interface->link_down_event = jiffies + (HZ); - set_bit(__FM10K_LINK_DOWN, &interface->state); + set_bit(__FM10K_LINK_DOWN, interface->state); /* clear the service task disable bit to allow service task to start */ - clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + clear_bit(__FM10K_SERVICE_DISABLE, interface->state); fm10k_service_event_schedule(interface); return err; diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 3b3c63e54ed638f03278506b91c0eaa854a6c575..3da482c3d68db4bcf32688dca09a8d1132ad157f 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -28,6 +28,9 @@ # Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver # +ccflags-y += -I$(src) +subdir-ccflags-y += -I$(src) + obj-$(CONFIG_I40E) += i40e.o i40e-objs := i40e_main.o \ @@ -45,4 +48,3 @@ i40e-objs := i40e_main.o \ i40e_virtchnl_pf.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o -i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 82d8040fa418a3cf905d3f27335d1ec177f00b99..cdde3cc28fb59f16234fe5aaf501f569abb62dc4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -56,9 +56,6 @@ #include #include "i40e_type.h" #include "i40e_prototype.h" -#ifdef I40E_FCOE -#include "i40e_fcoe.h" -#endif #include "i40e_client.h" #include "i40e_virtchnl.h" #include "i40e_virtchnl_pf.h" @@ -85,10 +82,6 @@ (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 
128 : 64) #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 -#ifdef I40E_FCOE -#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ -#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ -#endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ @@ -98,14 +91,6 @@ #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) -/* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) -#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) -#define I40E_PRIV_FLAGS_FD_ATR BIT(2) -#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) -#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4) -#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5) - #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 @@ -140,7 +125,6 @@ enum i40e_state_t { __I40E_CONFIG_BUSY, __I40E_CONFIG_DONE, __I40E_DOWN, - __I40E_NEEDS_RESTART, __I40E_SERVICE_SCHED, __I40E_ADMINQ_EVENT_PENDING, __I40E_MDD_EVENT_PENDING, @@ -153,15 +137,28 @@ enum i40e_state_t { __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, __I40E_EMP_RESET_INTR_RECEIVED, - __I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, __I40E_BAD_EEPROM, __I40E_DOWN_REQUESTED, __I40E_FD_FLUSH_REQUESTED, __I40E_RESET_FAILED, - __I40E_PORT_TX_SUSPENDED, + __I40E_PORT_SUSPENDED, __I40E_VF_DISABLE, + /* This must be last as it determines the size of the BITMAP */ + __I40E_STATE_SIZE__, +}; + +/* VSI state flags */ +enum i40e_vsi_state_t { + __I40E_VSI_DOWN, + __I40E_VSI_NEEDS_RESTART, + __I40E_VSI_SYNCING_FILTERS, + __I40E_VSI_OVERFLOW_PROMISC, + __I40E_VSI_REINIT_REQUESTED, + __I40E_VSI_DOWN_REQUESTED, + /* This must be last as it determines the size of the BITMAP */ + __I40E_VSI_STATE_SIZE__, }; enum i40e_interrupt_policy { @@ -202,17 +199,32 @@ enum i40e_fd_stat_idx { #define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL) +/* The following structure contains the data parsed from the user-defined + * field of the ethtool_rx_flow_spec structure. 
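The userdef structure above is filled by decoding ethtool's 64-bit user-def field into a flex match word plus a payload offset. The bit layout used in this sketch (match word in bits 15:0, offset in bits 31:16) is an illustrative assumption, not necessarily the driver's exact encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_flow_userdef {
	bool flex_filter;
	uint16_t flex_word;
	uint16_t flex_offset;
};

static int parse_userdef(uint64_t value, struct rx_flow_userdef *d)
{
	d->flex_word = value & 0xFFFF;		/* assumed field layout */
	d->flex_offset = (value >> 16) & 0xFFFF;
	if (d->flex_offset & 1)
		return -1;	/* hardware matches on 2-byte words */
	d->flex_filter = true;
	return 0;
}

int main(void)
{
	struct rx_flow_userdef d = { false, 0, 0 };

	if (!parse_userdef(0x0006ABCDULL, &d))
		printf("match 0x%04x at payload offset %u\n",
		       d.flex_word, (unsigned)d.flex_offset);
	return 0;
}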
+ */ +struct i40e_rx_flow_userdef { + bool flex_filter; + u16 flex_word; + u16 flex_offset; +}; + struct i40e_fdir_filter { struct hlist_node fdir_node; /* filter ipnut set */ u8 flow_type; u8 ip4_proto; /* TX packet view of src and dst */ - __be32 dst_ip[4]; - __be32 src_ip[4]; + __be32 dst_ip; + __be32 src_ip; __be16 src_port; __be16 dst_port; __be32 sctp_v_tag; + + /* Flexible data to match within the packet payload */ + __be16 flex_word; + u16 flex_offset; + bool flex_filter; + /* filter control */ u16 q_index; u8 flex_off; @@ -244,15 +256,85 @@ struct i40e_tc_configuration { }; struct i40e_udp_port_config { - __be16 index; + /* AdminQ command interface expects port number in Host byte order */ + u16 port; u8 type; }; +/* macros related to FLX_PIT */ +#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \ + I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ + I40E_PRTQF_FLX_PIT_FSIZE_MASK) +#define I40E_FLEX_SET_DST_WORD(dst) (((dst) << \ + I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \ + I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) +#define I40E_FLEX_SET_SRC_WORD(src) (((src) << \ + I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \ + I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) +#define I40E_FLEX_PREP_VAL(dst, fsize, src) (I40E_FLEX_SET_DST_WORD(dst) | \ + I40E_FLEX_SET_FSIZE(fsize) | \ + I40E_FLEX_SET_SRC_WORD(src)) + +#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \ + I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \ + I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \ + I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \ + I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \ + I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \ + I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) + +#define I40E_MAX_FLEX_SRC_OFFSET 0x1F + +/* macros related to GLQF_ORT */ +#define I40E_ORT_SET_IDX(idx) (((idx) << \ + I40E_GLQF_ORT_PIT_INDX_SHIFT) & \ + I40E_GLQF_ORT_PIT_INDX_MASK) + +#define I40E_ORT_SET_COUNT(count) (((count) << \ + I40E_GLQF_ORT_FIELD_CNT_SHIFT) & \ + I40E_GLQF_ORT_FIELD_CNT_MASK) + +#define I40E_ORT_SET_PAYLOAD(payload) (((payload) << \ + I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) & \ + I40E_GLQF_ORT_FLX_PAYLOAD_MASK) + +#define I40E_ORT_PREP_VAL(idx, count, payload) (I40E_ORT_SET_IDX(idx) | \ + I40E_ORT_SET_COUNT(count) | \ + I40E_ORT_SET_PAYLOAD(payload)) + +#define I40E_L3_GLQF_ORT_IDX 34 +#define I40E_L4_GLQF_ORT_IDX 35 + +/* Flex PIT register index */ +#define I40E_FLEX_PIT_IDX_START_L2 0 +#define I40E_FLEX_PIT_IDX_START_L3 3 +#define I40E_FLEX_PIT_IDX_START_L4 6 + +#define I40E_FLEX_PIT_TABLE_SIZE 3 + +#define I40E_FLEX_DEST_UNUSED 63 + +#define I40E_FLEX_INDEX_ENTRIES 8 + +/* Flex MASK to disable all flexible entries */ +#define I40E_FLEX_INPUT_MASK (I40E_FLEX_50_MASK | I40E_FLEX_51_MASK | \ + I40E_FLEX_52_MASK | I40E_FLEX_53_MASK | \ + I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \ + I40E_FLEX_56_MASK | I40E_FLEX_57_MASK) + +struct i40e_flex_pit { + struct list_head list; + u16 src_offset; + u8 pit_index; +}; + /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; struct i40e_hw hw; - unsigned long state; + DECLARE_BITMAP(state, __I40E_STATE_SIZE__); struct msix_entry *msix_entries; bool fc_autoneg_status; @@ -262,10 +344,6 @@ struct i40e_pf { u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ u16 num_req_vfs; /* num VFs requested for this VF */ u16 num_vf_qps; /* num queue pairs per VF */ -#ifdef I40E_FCOE - u16 num_fcoe_qps; /* num fcoe queues this PF has set up */ - u16 num_fcoe_msix; /* num queue vectors per fcoe pool */ -#endif /* I40E_FCOE */ u16 num_lan_qps; /* num lan queues this PF 
has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */ @@ -285,7 +363,23 @@ struct i40e_pf { u32 fd_flush_cnt; u32 fd_add_err; u32 fd_atr_cnt; - u32 fd_tcp_rule; + + /* Book-keeping of side-band filter count per flow-type. + * This is used to detect and handle input set changes for + * respective flow-type. + */ + u16 fd_tcp4_filter_cnt; + u16 fd_udp4_filter_cnt; + u16 fd_sctp4_filter_cnt; + u16 fd_ip4_filter_cnt; + + /* Flexible filter table values that need to be programmed into + * hardware, which expects L3 and L4 to be programmed separately. We + * need to ensure that the values are in ascended order and don't have + * duplicates, so we track each L3 and L4 values in separate lists. + */ + struct list_head l3_flex_pit_list; + struct list_head l4_flex_pit_list; struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; u16 pending_udp_bitmap; @@ -307,21 +401,15 @@ struct i40e_pf { #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) -#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) -#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9) #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) -#ifdef I40E_FCOE -#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) -#endif /* I40E_FCOE */ -#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15) #define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) -#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17) -#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18) #define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) #define I40E_FLAG_DCB_ENABLED BIT_ULL(20) #define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21) #define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) +#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23) +#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24) #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) @@ -348,16 +436,13 @@ struct i40e_pf { #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) #define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) #define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) -#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54) +#define I40E_FLAG_CLIENT_RESET BIT_ULL(54) #define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) +#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56) +#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57) +#define I40E_FLAG_LEGACY_RX BIT_ULL(58) - /* tracks features that get auto disabled by errors */ - u64 auto_disable_flags; - -#ifdef I40E_FCOE - struct i40e_fcoe fcoe; - -#endif /* I40E_FCOE */ + struct i40e_client_instance *cinst; bool stat_offsets_loaded; struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; @@ -412,8 +497,6 @@ struct i40e_pf { */ u16 dcbx_cap; - u32 fcoe_hmc_filt_num; - u32 fcoe_hmc_cntx_num; struct i40e_filter_control_settings filter_settings; struct ptp_clock *ptp_clock; @@ -517,7 +600,7 @@ struct i40e_vsi { bool stat_offsets_loaded; u32 current_netdev_flags; - unsigned long state; + DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); #define I40E_VSI_FLAG_FILTER_CHANGED BIT(0) #define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; @@ -533,16 +616,10 @@ struct i40e_vsi { struct rtnl_link_stats64 net_stats_offsets; struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; -#ifdef I40E_FCOE - struct i40e_fcoe_stats fcoe_stats; - struct i40e_fcoe_stats fcoe_stats_offsets; - bool fcoe_stat_offsets_loaded; -#endif u32 tx_restart; u32 tx_busy; u64 tx_linearize; u64 
tx_force_wb; - u64 tx_lost_interrupt; u32 rx_buf_failed; u32 rx_page_failed; @@ -628,9 +705,6 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ -#define I40E_Q_VECTOR_HUNG_DETECT 0 /* Bit Index for hung detection logic */ - unsigned long hung_detected; /* Set/Reset for hung_detection logic */ - cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; @@ -696,27 +770,49 @@ static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi, } /** - * i40e_rx_is_programming_status - check for programming status descriptor - * @qw: the first quad word of the program status descriptor + * i40e_get_fd_cnt_all - get the total FD filter space available + * @pf: pointer to the PF struct + **/ +static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) +{ + return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; +} + +/** + * i40e_read_fd_input_set - reads value of flow director input set register + * @pf: pointer to the PF struct + * @addr: register addr * - * The value of in the descriptor length field indicate if this - * is a programming status descriptor for flow director or FCoE - * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise - * it is a packet descriptor. + * This function reads value of flow director input set register + * specified by 'addr' (which is specific to flow-type) **/ -static inline bool i40e_rx_is_programming_status(u64 qw) +static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr) { - return I40E_RX_PROG_STATUS_DESC_LENGTH == - (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT); + u64 val; + + val = i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1)); + val <<= 32; + val += i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0)); + + return val; } /** - * i40e_get_fd_cnt_all - get the total FD filter space available + * i40e_write_fd_input_set - writes value into flow director input set register * @pf: pointer to the PF struct + * @addr: register addr + * @val: value to be written + * + * This function writes specified value to the register specified by 'addr'. + * This register is input set register based on flow-type. 
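The input-set helpers funnel one 64-bit value through a pair of 32-bit register slots, high word at index 1 and low word at index 0. The same split/join as a self-contained sketch, with a plain array standing in for the PRTQF_FD_INSET register pair:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* [0] = low half, [1] = high half */

static void write_input_set(uint64_t val)
{
	regs[1] = (uint32_t)(val >> 32);
	regs[0] = (uint32_t)(val & 0xFFFFFFFFULL);
}

static uint64_t read_input_set(void)
{
	uint64_t val = regs[1];

	val <<= 32;
	val += regs[0];
	return val;
}

int main(void)
{
	write_input_set(0x123456789ABCDEF0ULL);
	printf("0x%" PRIx64 "\n", read_input_set());	/* round-trips */
	return 0;
}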
**/ -static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) +static inline void i40e_write_fd_input_set(struct i40e_pf *pf, + u16 addr, u64 val) { - return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; + i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1), + (u32)(val >> 32)); + i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0), + (u32)(val & 0xFFFFFFFFULL)); } /* needed by i40e_ethtool.c */ @@ -725,7 +821,7 @@ void i40e_down(struct i40e_vsi *vsi); extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); -void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired); int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, @@ -773,17 +869,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink, u32 param1); int i40e_vsi_release(struct i40e_vsi *vsi); -#ifdef I40E_FCOE -void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, - struct i40e_vsi_context *ctxt, - u8 enabled_tc, bool is_add); -#endif void i40e_service_event_schedule(struct i40e_pf *pf); void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len); int i40e_vsi_start_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings(struct i40e_vsi *vsi); +void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); +int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi); int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc); @@ -813,8 +906,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); -int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, - enum i40e_client_type type); +int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id); /** * i40e_irq_dynamic_enable - Enable default interrupt generation settings * @vsi: pointer to a vsi @@ -838,20 +930,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); -#ifdef I40E_FCOE -void i40e_get_netdev_stats_struct(struct net_device *netdev, - struct rtnl_link_stats64 *storage); -int i40e_set_mac(struct net_device *netdev, void *p); -void i40e_set_rx_mode(struct net_device *netdev); -#endif int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); -#ifdef I40E_FCOE -void i40e_tx_timeout(struct net_device *netdev); -int i40e_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid); -int i40e_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid); -#endif int i40e_open(struct net_device *netdev); int i40e_close(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); @@ -865,25 +944,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter 
*i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); -#ifdef I40E_FCOE -int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, - struct tc_to_netdev *tc); -void i40e_netpoll(struct net_device *netdev); -int i40e_fcoe_enable(struct net_device *netdev); -int i40e_fcoe_disable(struct net_device *netdev); -int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt); -u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf); -void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi); -void i40e_fcoe_vsi_setup(struct i40e_pf *pf); -void i40e_init_pf_fcoe(struct i40e_pf *pf); -int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi); -void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi); -int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - struct sk_buff *skb); -void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id); -#endif /* I40E_FCOE */ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); #ifdef CONFIG_I40E_DCB void i40e_dcbnl_flush_apps(struct i40e_pf *pf, diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 56fb272989369c22be618049e523ac9f24ca850b..ba04988e0598091695ca119f543f20cf44d2dac0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -850,8 +850,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - usleep_range(1000, 2000); - total_delay++; + udelay(50); + total_delay += 50; } while (total_delay < hw->aq.asq_cmd_timeout); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index d92aad38afdc5fb0669e3f9d58329685e2af4644..2349fbe04bd251322a93b6c968afac519ad0564b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ +#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 451f48b7540aa0360615599b6681e2d31f2b6554..5eb04114e13f6ec2178c7a71fe3a84f4cf16fbb3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -132,6 +132,10 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, + /* Proxy commands */ + i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + /* LAA */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -139,6 +143,10 @@ enum i40e_admin_queue_opc { /* PXE */ i40e_aqc_opc_clear_pxe_mode = 0x0110, + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + /* internal switch commands */ i40e_aqc_opc_get_switch_config = 0x0200, i40e_aqc_opc_add_statistics = 0x0201, @@ -177,10 +185,15 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_control_packet_filter = 0x025B, i40e_aqc_opc_add_cloud_filters = 0x025C, i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 
0x0261, + /* Pipeline Personalization Profile */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + /* DCB commands */ i40e_aqc_opc_dcb_ignore_pfc = 0x0301, i40e_aqc_opc_dcb_updated = 0x0302, @@ -563,6 +576,56 @@ struct i40e_aqc_clear_pxe { I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); +/* Set WoL Filter (0x0120) */ + +struct i40e_aqc_set_wol_filter { + __le16 filter_index; +#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ + I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) + +#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 +#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ + I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) + __le16 cmd_flags; +#define I40E_AQC_SET_WOL_FILTER 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 +#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 +#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 + __le16 valid_flags; +#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 + u8 reserved[2]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); + +struct i40e_aqc_set_wol_filter_data { + u8 filter[128]; + u8 mask[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); + +/* Get Wake Reason (0x0121) */ + +struct i40e_aqc_get_wake_reason_completion { + u8 reserved_1[2]; + __le16 wake_reason; +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) + u8 reserved_2[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); + /* Switch configuration commands (0x02xx) */ /* Used by many indirect commands that only pass an seid and a buffer in the @@ -645,6 +708,8 @@ struct i40e_aqc_set_port_parameters { #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; @@ -696,6 +761,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); /* Set Switch Configuration (direct 0x0205) */ struct i40e_aqc_set_switch_config { __le16 flags; +/* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; @@ -1369,6 +1435,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); +/* Pipeline Personalization Profile */ +struct i40e_aqc_write_personalization_profile { + u8 flags; + u8 reserved[3]; + __le32 profile_track_id; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); + +struct i40e_aqc_write_ppp_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +struct i40e_aqc_get_applied_profiles { + u8 flags; +#define I40E_AQC_GET_PPP_GET_CONF 0x1 +#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2 + u8 rsv[3]; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); + /* DCB 0x03xx*/ /* PFC Ignore (direct 0x0301) @@ -1844,11 +1940,12 @@ struct i40e_aqc_get_link_status { #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 external_power_ability; + u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 #define I40E_AQ_LINK_POWER_CLASS_4 0x03 +#define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index d570219efd9f33b8934fdfe8ad3e256fb78a2e97..c3b81a97558e07c7729a0556a9bb87c01444ca37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -32,16 +32,10 @@ #include "i40e_client.h" static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; - +static struct i40e_client *registered_client; static LIST_HEAD(i40e_devices); static DEFINE_MUTEX(i40e_device_mutex); -static LIST_HEAD(i40e_clients); -static DEFINE_MUTEX(i40e_client_mutex); - -static LIST_HEAD(i40e_client_instances); -static DEFINE_MUTEX(i40e_client_instance_mutex); - static int i40e_client_virtchnl_send(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id, u8 *msg, u16 len); @@ -66,28 +60,6 @@ static struct i40e_ops i40e_lan_ops = { .update_vsi_ctxt = i40e_client_update_vsi_ctxt, }; -/** - * i40e_client_type_to_vsi_type - convert client type to vsi type - * @client_type: the i40e_client type - * - * returns the related vsi type value - **/ -static -enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type) -{ - switch (type) { - case I40E_CLIENT_IWARP: - return I40E_VSI_IWARP; - - case I40E_CLIENT_VMDQ2: - return I40E_VSI_VMDQ2; - - default: - pr_err("i40e: Client type unknown\n"); - return I40E_VSI_TYPE_UNKNOWN; - } -} - /** * i40e_client_get_params - Get the params that can change at runtime * @vsi: the VSI with the message @@ -134,31 +106,22 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 
*msg, u16 len) { - struct i40e_client_instance *cdev; + struct i40e_pf *pf = vsi->back; + struct i40e_client_instance *cdev = pf->cinst; - if (!vsi) + if (!cdev || !cdev->client) + return; + if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) { + dev_dbg(&pf->pdev->dev, + "Cannot locate client instance virtual channel receive routine\n"); return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.pf == vsi->back) { - if (!cdev->client || - !cdev->client->ops || - !cdev->client->ops->virtchnl_receive) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance virtual channel receive routine\n"); - continue; - } - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state)) { - dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n"); - continue; - } - cdev->client->ops->virtchnl_receive(&cdev->lan_info, - cdev->client, - vf_id, msg, len); - } } - mutex_unlock(&i40e_client_instance_mutex); + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n"); + return; + } + cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client, + vf_id, msg, len); } /** @@ -169,39 +132,30 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) **/ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) { - struct i40e_client_instance *cdev; + struct i40e_pf *pf = vsi->back; + struct i40e_client_instance *cdev = pf->cinst; struct i40e_params params; - if (!vsi) + if (!cdev || !cdev->client) return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.pf == vsi->back) { - if (!cdev->client || - !cdev->client->ops || - !cdev->client->ops->l2_param_change) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance l2_param_change routine\n"); - continue; - } + if (!cdev->client->ops || !cdev->client->ops->l2_param_change) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance l2_param_change routine\n"); + return; + } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); + return; + } memset(¶ms, 0, sizeof(params)); i40e_client_get_params(vsi, ¶ms); - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state)) { - dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); - continue; - } - cdev->lan_info.params = params; - cdev->client->ops->l2_param_change(&cdev->lan_info, - cdev->client, - ¶ms); - } - } - mutex_unlock(&i40e_client_instance_mutex); + memcpy(&cdev->lan_info.params, ¶ms, sizeof(struct i40e_params)); + cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client, + ¶ms); } /** - * i40e_client_release_qvlist + * i40e_client_release_qvlist - release MSI-X vector mapping for client * @ldev: pointer to L2 context. 
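The pattern repeated throughout this file: each notifier drops the global instance-list walk and its mutex in favour of dereferencing the single pf->cinst pointer, then bails early on a short series of guard checks. A condensed sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct client_ops {
	void (*l2_param_change)(void);
};

struct client {
	const struct client_ops *ops;
};

struct client_instance {
	struct client *client;
	bool opened;
};

struct pf {
	struct client_instance *cinst;	/* one instance per PF, or NULL */
};

static void notify_l2_params(struct pf *pf)
{
	struct client_instance *cdev = pf->cinst;

	if (!cdev || !cdev->client)
		return;		/* no client bound to this PF */
	if (!cdev->client->ops || !cdev->client->ops->l2_param_change)
		return;		/* client has no matching callback */
	if (!cdev->opened)
		return;		/* client not open yet */
	cdev->client->ops->l2_param_change();
}

static void on_l2_change(void)
{
	printf("client saw L2 parameter change\n");
}

int main(void)
{
	struct client_ops ops = { on_l2_change };
	struct client cl = { &ops };
	struct client_instance inst = { &cl, true };
	struct pf pf = { &inst };

	notify_l2_params(&pf);
	return 0;
}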
* **/ @@ -237,26 +191,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev) **/ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) { - struct i40e_client_instance *cdev; + struct i40e_pf *pf = vsi->back; + struct i40e_client_instance *cdev = pf->cinst; - if (!vsi) + if (!cdev || !cdev->client) + return; + if (!cdev->client->ops || !cdev->client->ops->close) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance close routine\n"); return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.netdev == vsi->netdev) { - if (!cdev->client || - !cdev->client->ops || !cdev->client->ops->close) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance close routine\n"); - continue; - } - cdev->client->ops->close(&cdev->lan_info, cdev->client, - reset); - clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); - i40e_client_release_qvlist(&cdev->lan_info); - } } - mutex_unlock(&i40e_client_instance_mutex); + cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + i40e_client_release_qvlist(&cdev->lan_info); } /** @@ -268,30 +215,20 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) **/ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) { - struct i40e_client_instance *cdev; + struct i40e_client_instance *cdev = pf->cinst; - if (!pf) + if (!cdev || !cdev->client) + return; + if (!cdev->client->ops || !cdev->client->ops->vf_reset) { + dev_dbg(&pf->pdev->dev, + "Cannot locate client instance VF reset routine\n"); + return; + } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n"); return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.pf == pf) { - if (!cdev->client || - !cdev->client->ops || - !cdev->client->ops->vf_reset) { - dev_dbg(&pf->pdev->dev, - "Cannot locate client instance VF reset routine\n"); - continue; - } - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state)) { - dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n"); - continue; - } - cdev->client->ops->vf_reset(&cdev->lan_info, - cdev->client, vf_id); - } } - mutex_unlock(&i40e_client_instance_mutex); + cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id); } /** @@ -303,30 +240,21 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) **/ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) { - struct i40e_client_instance *cdev; + struct i40e_client_instance *cdev = pf->cinst; - if (!pf) + if (!cdev || !cdev->client) + return; + if (!cdev->client->ops || !cdev->client->ops->vf_enable) { + dev_dbg(&pf->pdev->dev, + "Cannot locate client instance VF enable routine\n"); + return; + } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n"); return; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.pf == pf) { - if (!cdev->client || - !cdev->client->ops || - !cdev->client->ops->vf_enable) { - dev_dbg(&pf->pdev->dev, - "Cannot locate client instance VF enable routine\n"); - continue; - } - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state)) { - dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n"); - continue; - } - 
cdev->client->ops->vf_enable(&cdev->lan_info, - cdev->client, num_vfs); - } } - mutex_unlock(&i40e_client_instance_mutex); + cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs); } /** @@ -337,37 +265,25 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) * If there is a client of the specified type attached to this PF, call * its vf_capable routine **/ -int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, - enum i40e_client_type type) +int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id) { - struct i40e_client_instance *cdev; + struct i40e_client_instance *cdev = pf->cinst; int capable = false; - if (!pf) - return false; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if (cdev->lan_info.pf == pf) { - if (!cdev->client || - !cdev->client->ops || - !cdev->client->ops->vf_capable || - !(cdev->client->type == type)) { - dev_dbg(&pf->pdev->dev, - "Cannot locate client instance VF capability routine\n"); - continue; - } - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state)) { - dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n"); - continue; - } - capable = cdev->client->ops->vf_capable(&cdev->lan_info, - cdev->client, - vf_id); - break; - } + if (!cdev || !cdev->client) + goto out; + if (!cdev->client->ops || !cdev->client->ops->vf_capable) { + dev_info(&pf->pdev->dev, + "Cannot locate client instance VF capability routine\n"); + goto out; } - mutex_unlock(&i40e_client_instance_mutex); + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) + goto out; + + capable = cdev->client->ops->vf_capable(&cdev->lan_info, + cdev->client, + vf_id); +out: return capable; } @@ -377,27 +293,19 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, * @client: pointer to a client struct in the client list. 
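The i40e_vf_client_capable() rewrite above drops the client-type argument because at most one client can now be registered per PF. A hedged sketch of the intended consumer, a virtchnl resource-request handler that only advertises iWARP support to a VF while the client instance is open (the structure field and capability flag names here are illustrative, not taken from this patch):

	/* illustrative caller: gate the VF's iWARP capability on the client */
	if (i40e_vf_client_capable(pf, vf->vf_id))
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_CAP_IWARP;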
* @existing: if there was already an existing instance * - * Returns cdev ptr on success or if already exists, NULL on failure **/ -static -struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, - struct i40e_client *client, - bool *existing) +static void i40e_client_add_instance(struct i40e_pf *pf) { - struct i40e_client_instance *cdev; + struct i40e_client_instance *cdev = NULL; struct netdev_hw_addr *mac = NULL; struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry(cdev, &i40e_client_instances, list) { - if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { - *existing = true; - goto out; - } - } + if (!registered_client || pf->cinst) + return; + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) - goto out; + return; cdev->lan_info.pf = (void *)pf; cdev->lan_info.netdev = vsi->netdev; @@ -417,7 +325,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, if (i40e_client_get_params(vsi, &cdev->lan_info.params)) { kfree(cdev); cdev = NULL; - goto out; + return; } cdev->lan_info.msix_count = pf->num_iwarp_msix; @@ -430,41 +338,20 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, else dev_err(&pf->pdev->dev, "MAC address list is empty!\n"); - cdev->client = client; - INIT_LIST_HEAD(&cdev->list); - list_add(&cdev->list, &i40e_client_instances); -out: - mutex_unlock(&i40e_client_instance_mutex); - return cdev; + cdev->client = registered_client; + pf->cinst = cdev; } /** * i40e_client_del_instance - removes a client instance from the list * @pf: pointer to the board struct * - * Returns 0 on success or non-0 on error **/ static -int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client) +void i40e_client_del_instance(struct i40e_pf *pf) { - struct i40e_client_instance *cdev, *tmp; - int ret = -ENODEV; - - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) { - if ((cdev->lan_info.pf != pf) || (cdev->client != client)) - continue; - - dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n", - client->name, pf->hw.pf_id, - pf->hw.bus.device, pf->hw.bus.func); - list_del(&cdev->list); - kfree(cdev); - ret = 0; - break; - } - mutex_unlock(&i40e_client_instance_mutex); - return ret; + kfree(pf->cinst); + pf->cinst = NULL; } /** @@ -473,67 +360,50 @@ int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client) **/ void i40e_client_subtask(struct i40e_pf *pf) { + struct i40e_client *client = registered_client; struct i40e_client_instance *cdev; - struct i40e_client *client; - bool existing = false; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int ret = 0; if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) return; pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED; + cdev = pf->cinst; /* If we're down or resetting, just bail */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_DOWN, pf->state) || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return; - /* Check client state and instantiate client if client registered */ - mutex_lock(&i40e_client_mutex); - list_for_each_entry(client, &i40e_clients, list) { - /* first check client is registered */ - if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state)) - continue; - - /* Do we also need the LAN VSI to be up, to create instance */ - if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) { - /* check if L2 VSI is up, if not we are 
not ready */ - if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) - continue; - } else { - dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n", - client->name); - } - - /* Add the client instance to the instance list */ - cdev = i40e_client_add_instance(pf, client, &existing); - if (!cdev) - continue; - - if (!existing) { - dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n", - client->name, pf->hw.pf_id, - pf->hw.bus.bus_id, pf->hw.bus.device, - pf->hw.bus.func); - } + if (!client || !cdev) + return; - mutex_lock(&i40e_client_instance_mutex); - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state)) { - /* Send an Open request to the client */ - if (client->ops && client->ops->open) - ret = client->ops->open(&cdev->lan_info, - client); - if (!ret) { - set_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state); - } else { - /* remove client instance */ - i40e_client_del_instance(pf, client); + /* Here we handle client opens. If the client is down, but + * the netdev is up, then open the client. + */ + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + if (!test_bit(__I40E_VSI_DOWN, vsi->state) && + client->ops && client->ops->open) { + set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + ret = client->ops->open(&cdev->lan_info, client); + if (ret) { + /* Remove failed client instance */ + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state); + i40e_client_del_instance(pf); } } - mutex_unlock(&i40e_client_instance_mutex); + } else { + /* Likewise for client close. If the client is up, but the netdev + * is down, then close the client. + */ + if (test_bit(__I40E_VSI_DOWN, vsi->state) && + client->ops && client->ops->close) { + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); + client->ops->close(&cdev->lan_info, client, false); + i40e_client_release_qvlist(&cdev->lan_info); + } } - mutex_unlock(&i40e_client_mutex); } /** @@ -566,6 +436,12 @@ int i40e_lan_add_device(struct i40e_pf *pf) pf->hw.pf_id, pf->hw.bus.bus_id, pf->hw.bus.device, pf->hw.bus.func); + /* If a client has already been registered, we need to add an instance + * of it to our new LAN device. + */ + if (registered_client) + i40e_client_add_instance(pf); + /* Since in some cases register may have happened before a device gets * added, we can schedule a subtask to go initiate the clients if * they can be launched at probe time. @@ -589,6 +465,9 @@ int i40e_lan_del_device(struct i40e_pf *pf) struct i40e_device *ldev, *tmp; int ret = -ENODEV; + /* First, remove any client instance. 
*/ + i40e_client_del_instance(pf); + mutex_lock(&i40e_device_mutex); list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) { if (ldev->pf == pf) { @@ -601,7 +480,6 @@ int i40e_lan_del_device(struct i40e_pf *pf) break; } } - mutex_unlock(&i40e_device_mutex); return ret; } @@ -610,22 +488,24 @@ int i40e_lan_del_device(struct i40e_pf *pf) * i40e_client_release - release client specific resources * @client: pointer to the registered client * - * Return 0 on success or < 0 on error **/ -static int i40e_client_release(struct i40e_client *client) +static void i40e_client_release(struct i40e_client *client) { - struct i40e_client_instance *cdev, *tmp; + struct i40e_client_instance *cdev; + struct i40e_device *ldev; struct i40e_pf *pf; - int ret = 0; - LIST_HEAD(cdevs_tmp); - - mutex_lock(&i40e_client_instance_mutex); - list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) { - if (strncmp(cdev->client->name, client->name, - I40E_CLIENT_STR_LENGTH)) + mutex_lock(&i40e_device_mutex); + list_for_each_entry(ldev, &i40e_devices, list) { + pf = ldev->pf; + cdev = pf->cinst; + if (!cdev) continue; - pf = (struct i40e_pf *)cdev->lan_info.pf; + + while (test_and_set_bit(__I40E_SERVICE_SCHED, + pf->state)) + usleep_range(500, 1000); + if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { if (client->ops && client->ops->close) client->ops->close(&cdev->lan_info, client, @@ -637,18 +517,13 @@ static int i40e_client_release(struct i40e_client *client) "Client %s instance for PF id %d closed\n", client->name, pf->hw.pf_id); } - /* delete the client instance from the list */ - list_move(&cdev->list, &cdevs_tmp); + /* delete the client instance */ + i40e_client_del_instance(pf); dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", client->name); + clear_bit(__I40E_SERVICE_SCHED, pf->state); } - mutex_unlock(&i40e_client_instance_mutex); - - /* free the client device and release its vsi */ - list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) { - kfree(cdev); - } - return ret; + mutex_unlock(&i40e_device_mutex); } /** @@ -664,6 +539,7 @@ static void i40e_client_prepare(struct i40e_client *client) mutex_lock(&i40e_device_mutex); list_for_each_entry(ldev, &i40e_devices, list) { pf = ldev->pf; + i40e_client_add_instance(pf); /* Start the client subtask */ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; i40e_service_event_schedule(pf); @@ -785,15 +661,15 @@ static void i40e_client_request_reset(struct i40e_info *ldev, switch (reset_level) { case I40E_CLIENT_RESET_LEVEL_PF: - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case I40E_CLIENT_RESET_LEVEL_CORE: - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; default: dev_warn(&pf->pdev->dev, - "Client %s instance for PF id %d request an unsupported reset: %d.\n", - client->name, pf->hw.pf_id, reset_level); + "Client for PF id %d requested an unsupported reset: %d.\n", + pf->hw.pf_id, reset_level); break; } @@ -852,8 +728,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, } else { update = false; dev_warn(&pf->pdev->dev, - "Client %s instance for PF id %d request an unsupported Config: %x.\n", - client->name, pf->hw.pf_id, flag); + "Client for PF id %d requested an unsupported Config: %x.\n", + pf->hw.pf_id, flag); } if (update) { @@ -878,7 +754,6 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, int i40e_register_client(struct i40e_client *client) { int ret = 0; - enum i40e_vsi_type vsi_type;
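A pattern worth noting in the hunks above and throughout the rest of this series: set_bit()/test_bit() calls lose the address-of operator on pf->state and vsi->state. Both fields are being converted from a single unsigned long into a real bitmap, so the array name already decays to the unsigned long * that the bitop helpers expect. A before/after sketch (the PF-side size macro name is an assumption here, mirroring the __I40E_VSI_STATE_SIZE__ macro visible in the debugfs hunk further down):

	/* before: at most BITS_PER_LONG state flags */
	unsigned long state;
	set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);

	/* after: an arbitrarily sized bitmap; no & needed */
	DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
	set_bit(__I40E_PF_RESET_REQUESTED, pf->state);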
if (!client) { ret = -EIO; @@ -891,11 +766,9 @@ int i40e_register_client(struct i40e_client *client) goto out; } - mutex_lock(&i40e_client_mutex); - if (i40e_client_is_registered(client)) { + if (registered_client) { pr_info("i40e: Client %s has already been registered!\n", client->name); - mutex_unlock(&i40e_client_mutex); ret = -EEXIST; goto out; } @@ -908,22 +781,11 @@ int i40e_register_client(struct i40e_client *client) client->version.major, client->version.minor, client->version.build, i40e_client_interface_version_str); - mutex_unlock(&i40e_client_mutex); ret = -EIO; goto out; } - vsi_type = i40e_client_type_to_vsi_type(client->type); - if (vsi_type == I40E_VSI_TYPE_UNKNOWN) { - pr_info("i40e: Failed to register client %s due to unknown client type %d\n", - client->name, client->type); - mutex_unlock(&i40e_client_mutex); - ret = -EIO; - goto out; - } - list_add(&client->list, &i40e_clients); - set_bit(__I40E_CLIENT_REGISTERED, &client->state); - mutex_unlock(&i40e_client_mutex); + registered_client = client; i40e_client_prepare(client); @@ -943,29 +805,21 @@ int i40e_register_client(struct i40e_client *client) { int ret = 0; - /* When a unregister request comes through we would have to send - * a close for each of the client instances that were opened. - * client_release function is called to handle this. - */ - mutex_lock(&i40e_client_mutex); - if (!client || i40e_client_release(client)) { - ret = -EIO; - goto out; - } - - /* TODO: check if device is in reset, or if that matters? */ - if (!i40e_client_is_registered(client)) { + if (registered_client != client) { pr_info("i40e: Client %s has not been registered\n", client->name); ret = -ENODEV; goto out; } - clear_bit(__I40E_CLIENT_REGISTERED, &client->state); - list_del(&client->list); - pr_info("i40e: Unregistered client %s with return code %d\n", - client->name, ret); + registered_client = NULL; + /* When an unregister request comes through we would have to send + * a close for each of the client instances that were opened. + * client_release function is called to handle this.
+ */ + i40e_client_release(client); + + pr_info("i40e: Unregistered client %s\n", client->name); out: - mutex_unlock(&i40e_client_mutex); return ret; } EXPORT_SYMBOL(i40e_unregister_client); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index 528bd79b05fecc68d981ea08b144d9898c6aaaa0..15b21a5315b597a0ee4cd933fc10bfa5cec0a510 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -57,11 +57,6 @@ enum i40e_client_instance_state { __I40E_CLIENT_INSTANCE_OPENED, }; -enum i40e_client_type { - I40E_CLIENT_IWARP, - I40E_CLIENT_VMDQ2 -}; - struct i40e_ops; struct i40e_client; @@ -214,7 +209,8 @@ struct i40e_client { u32 flags; #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) - enum i40e_client_type type; + u8 type; +#define I40E_CLIENT_IWARP 0 const struct i40e_client_ops *ops; /* client ops provided by the client */ }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ece57d6a6e232f93ca28f493f91e27428b7cb41e..24f020655291510b7c798904927f66f9e731fa45 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); } -#ifdef I40E_FCOE - -/** - * i40e_get_san_mac_addr - get SAN MAC address - * @hw: pointer to the HW structure - * @mac_addr: pointer to SAN MAC address - * - * Reads the adapter's SAN MAC address from NVM - **/ -i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr) -{ - struct i40e_aqc_mac_address_read_data addrs; - i40e_status status; - u16 flags = 0; - - status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); - if (status) - return status; - - if (flags & I40E_AQC_SAN_ADDR_VALID) - ether_addr_copy(mac_addr, addrs.pf_san_mac); - else - status = I40E_ERR_INVALID_MAC_ADDR; - - return status; -} -#endif /** * i40e_read_pba_string - Reads part number string from EEPROM @@ -4990,7 +4963,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) int retry = 5; u32 val = 0; - use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); @@ -5049,7 +5024,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) bool use_register; int retry = 5; - use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40e_aq_rx_ctl_write_register(hw, reg_addr, @@ -5065,3 +5042,215 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) if (status || use_register) wr32(hw, reg_addr, reg_val); } + +/** + * i40e_aq_write_ppp - Write pipeline personalization profile (ppp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @track_id: package tracking id + * @error_offset: returns error offset + * @error_info: returns error information + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, + u16 
buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_write_personalization_profile *cmd = + (struct i40e_aqc_write_personalization_profile *) + &desc.params.raw; + struct i40e_aqc_write_ppp_resp *resp; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_write_personalization_profile); + + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(buff_size); + + cmd->profile_track_id = cpu_to_le32(track_id); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_applied_profiles *cmd = + (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_personalization_profile_list); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(buff_size); + + cmd->flags = flags; + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40e_find_segment_in_package + * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + **/ +struct i40e_generic_seg_header * +i40e_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_hdr) +{ + struct i40e_generic_seg_header *segment; + u32 i; + + /* Search all package segments for the requested segment type */ + for (i = 0; i < pkg_hdr->segment_count; i++) { + segment = + (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + + pkg_hdr->segment_offset[i]); + + if (segment->type == segment_type) + return segment; + } + + return NULL; +} + +/** + * i40e_write_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be downloaded + * @track_id: package tracking id + * + * Handles the download of a complete package. 
+ */ +enum i40e_status_code +i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id) +{ + i40e_status status = 0; + struct i40e_section_table *sec_tbl; + struct i40e_profile_section_header *sec = NULL; + u32 dev_cnt; + u32 vendor_dev_id; + u32 *nvm; + u32 section_size = 0; + u32 offset = 0, info = 0; + u32 i; + + if (!track_id) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0."); + return I40E_NOT_SUPPORTED; + } + + dev_cnt = profile->device_table_count; + + for (i = 0; i < dev_cnt; i++) { + vendor_dev_id = profile->device_table[i].vendor_dev_id; + if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) + if (hw->device_id == (vendor_dev_id & 0xFFFF)) + break; + } + if (i == dev_cnt) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + nvm = (u32 *)&profile->device_table[dev_cnt]; + sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; + + for (i = 0; i < sec_tbl->section_count; i++) { + sec = (struct i40e_profile_section_header *)((u8 *)profile + + sec_tbl->section_offset[i]); + + /* Skip 'AQ', 'note' and 'name' sections */ + if (sec->section.type != SECTION_TYPE_MMIO) + continue; + + section_size = sec->section.size + + sizeof(struct i40e_profile_section_header); + + /* Write profile */ + status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size, + track_id, &offset, &info, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to write profile: offset %d, info %d", + offset, info); + break; + } + } + return status; +} + +/** + * i40e_add_pinfo_to_list + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package + * @profile_info_sec: buffer for information section + * @track_id: package tracking id + * + * Register a profile to the list of loaded profiles. 
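Taken together, the new helpers cover the whole pipeline-personalization (PPP) download flow: locate the i40e segment inside the package image, push its MMIO sections to the device, then record the profile so it appears in the applied-profile list. A condensed caller sketch under those assumptions (a hypothetical loader, not part of this patch; buffer validation and error reporting trimmed, and the I40E_ERR_CONFIG return chosen only for illustration):

	static enum i40e_status_code
	i40e_load_ppp_package(struct i40e_hw *hw,
			      struct i40e_package_header *pkg_hdr,
			      u32 track_id)
	{
		u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
			     sizeof(struct i40e_profile_info)];
		struct i40e_generic_seg_header *seg;
		struct i40e_profile_segment *profile;
		enum i40e_status_code status;

		seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
		if (!seg)
			return I40E_ERR_CONFIG;

		profile = (struct i40e_profile_segment *)seg;
		status = i40e_write_profile(hw, profile, track_id);
		if (status)
			return status;

		/* record the profile in the applied-profile list */
		return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
	}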
+ */ +enum i40e_status_code +i40e_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id) +{ + i40e_status status = 0; + struct i40e_profile_section_header *sec = NULL; + struct i40e_profile_info *pinfo; + u32 offset = 0, info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + pinfo->version = profile->version; + pinfo->op = I40E_PPP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE); + + status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 267ad2588255deeb196340da7788b10bc89d1f3e..8f326f87a815bf8fd4ea1fe2c464fa9df19b4660 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -158,9 +158,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, - " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", - vsi->state, vsi->flags, - vsi->netdev_registered, vsi->current_netdev_flags); + " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", + vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); + for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++) + dev_info(&pf->pdev->dev, + " state[%d] = %08lx\n", + i, vsi->state[i]); if (vsi == pf->vsi[pf->lan_vsi]) dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", pf->hw.mac.addr, @@ -174,7 +177,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) } dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n", vsi->active_filters, vsi->promisc_threshold, - (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ? + (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ? 
"ON" : "OFF")); nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, @@ -384,6 +387,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n", vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc); dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); + if (vsi->type == I40E_VSI_SRIOV) + dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id); dev_info(&pf->pdev->dev, " info: valid_sections = 0x%04x, switch_id = 0x%04x\n", vsi->info.valid_sections, vsi->info.switch_id); @@ -484,25 +489,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->bw_ets_limit_credits[i], vsi->bw_ets_max_quanta[i]); } -#ifdef I40E_FCOE - if (vsi->type == I40E_VSI_FCOE) { - dev_info(&pf->pdev->dev, - " fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n", - vsi->fcoe_stats.rx_fcoe_packets, - vsi->fcoe_stats.rx_fcoe_dwords, - vsi->fcoe_stats.rx_fcoe_dropped); - dev_info(&pf->pdev->dev, - " fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n", - vsi->fcoe_stats.tx_fcoe_packets, - vsi->fcoe_stats.tx_fcoe_dwords); - dev_info(&pf->pdev->dev, - " fcoe_stats: bad_crc = %llu, last_error = %llu\n", - vsi->fcoe_stats.fcoe_bad_fccrc, - vsi->fcoe_stats.fcoe_last_error); - dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n", - vsi->fcoe_stats.fcoe_ddp_count); - } -#endif } /** @@ -713,6 +699,47 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) } } +/** + * i40e_dbg_dump_vf - dump VF info + * @pf: the i40e_pf created in command write + * @vf_id: the vf_id from the user + **/ +static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) +{ + struct i40e_vf *vf; + struct i40e_vsi *vsi; + + if (!pf->num_alloc_vfs) { + dev_info(&pf->pdev->dev, "no VFs allocated\n"); + } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) { + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", + vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); + dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", + vf->num_mdd_events, + vf->num_invalid_msgs, + vf->num_valid_msgs); + } else { + dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); + } +} + +/** + * i40e_dbg_dump_vf_all - dump VF info for all VFs + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) +{ + int i; + + if (!pf->num_alloc_vfs) + dev_info(&pf->pdev->dev, "no VFs enabled!\n"); + else + for (i = 0; i < pf->num_alloc_vfs; i++) + i40e_dbg_dump_vf(pf, i); +} + #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum @@ -731,6 +758,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, struct i40e_vsi *vsi; int vsi_seid; int veb_seid; + int vf_id; int cnt; /* don't allow partial writes */ @@ -933,6 +961,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp, i40e_dbg_dump_veb_seid(pf, vsi_seid); else i40e_dbg_dump_veb_all(pf); + } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) { + cnt = sscanf(&cmd_buf[7], "%i", &vf_id); + if (cnt > 0) + i40e_dbg_dump_vf(pf, vf_id); + else + i40e_dbg_dump_vf_all(pf); } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) { int ring_id, desc_n; if (strncmp(&cmd_buf[10], "rx", 2) == 0) { @@ -1128,6 +1162,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); + dev_info(&pf->pdev->dev, "dump vf 
[vf_id]\n"); dev_info(&pf->pdev->dev, "dump debug fwdata \n"); } @@ -1674,7 +1709,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, } else if (!vsi->netdev) { dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", vsi_seid); - } else if (test_bit(__I40E_DOWN, &vsi->state)) { + } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) { dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", vsi_seid); } else if (rtnl_trylock()) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a22e26200bccb1a7e79716a7429e0eb7fe700ecc..7a8eb486b9ea554421fc7f62929a973b9c6efad4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -89,7 +89,6 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), - I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; @@ -162,19 +161,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), }; -#ifdef I40E_FCOE -static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { - I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc), - I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped), - I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets), - I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords), - I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count), - I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error), - I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets), - I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords), -}; - -#endif /* I40E_FCOE */ #define I40E_QUEUE_STATS_LEN(n) \ (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ * 2 /* Tx and Rx together */ \ @@ -182,17 +168,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) -#ifdef I40E_FCOE -#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats) -#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ - I40E_FCOE_STATS_LEN + \ - I40E_MISC_STATS_LEN + \ - I40E_QUEUE_STATS_LEN((n))) -#else #define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ I40E_MISC_STATS_LEN + \ I40E_QUEUE_STATS_LEN((n))) -#endif /* I40E_FCOE */ #define I40E_PFC_STATS_LEN ( \ (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \ @@ -228,22 +206,37 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) -static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { - "MFP", - "LinkPolling", - "flow-director-atr", - "veb-stats", - "hw-atr-eviction", +struct i40e_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u64 flag; + bool read_only; }; -#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) +#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { + /* NOTE: MFP setting cannot be changed */ + 
I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1), + I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), + I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), + I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), + I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), + I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), +}; + +#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) /* Private flags with a global effect, restricted to PF 0 */ -static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = { - "vf-true-promisc-support", +static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = { + I40E_PRIV_FLAG("vf-true-promisc-support", + I40E_FLAG_TRUE_PROMISC_SUPPORT, 0), }; -#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings) +#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags) /** * i40e_partition_setting_complaint - generic complaint for MFP restriction @@ -387,7 +380,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, * **/ static void i40e_get_settings_link_up(struct i40e_hw *hw, - struct ethtool_cmd *ecmd, + struct ethtool_link_ksettings *cmd, struct net_device *netdev, struct i40e_pf *pf) { @@ -395,90 +388,96 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, u32 link_speed = hw_link_info->link_speed; u32 e_advertising = 0x0; u32 e_supported = 0x0; + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); /* Initialize supported and advertised settings based on phy settings */ switch (hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_40000baseCR4_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_40000baseCR4_Full; + supported = SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + advertising = ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; break; case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: - ecmd->supported = SUPPORTED_40000baseCR4_Full; + supported = SUPPORTED_40000baseCR4_Full; break; case I40E_PHY_TYPE_40GBASE_SR4: - ecmd->supported = SUPPORTED_40000baseSR4_Full; + supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: - ecmd->supported = SUPPORTED_40000baseLR4_Full; + supported = SUPPORTED_40000baseLR4_Full; break; case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; if (hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_SX || hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_LX) { - ecmd->supported |= SUPPORTED_1000baseT_Full; + supported |= SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; } if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; + advertising |= ADVERTISED_10000baseT_Full; break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = 
ADVERTISED_Autoneg; + supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + advertising = ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - ecmd->advertising |= ADVERTISED_10000baseT_Full; + advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; + supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_SFI: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_10GBASE_AOC: - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = SUPPORTED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; + advertising = SUPPORTED_10000baseT_Full; break; case I40E_PHY_TYPE_SGMII: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; + supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { - ecmd->supported |= SUPPORTED_100baseT_Full; + supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; } break; case I40E_PHY_TYPE_40GBASE_KR4: @@ -486,25 +485,25 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_1000BASE_KX: - ecmd->supported |= SUPPORTED_40000baseKR4_Full | - SUPPORTED_20000baseKR2_Full | - SUPPORTED_10000baseKR_Full | - SUPPORTED_10000baseKX4_Full | - SUPPORTED_1000baseKX_Full | - SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_40000baseKR4_Full | - ADVERTISED_20000baseKR2_Full | - ADVERTISED_10000baseKR_Full | - ADVERTISED_10000baseKX4_Full | - ADVERTISED_1000baseKX_Full | - ADVERTISED_Autoneg; + supported |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_10000baseKX4_Full | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg; + advertising |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_20000baseKR2_Full | + ADVERTISED_10000baseKR_Full | + ADVERTISED_10000baseKX4_Full | + ADVERTISED_1000baseKX_Full | + ADVERTISED_Autoneg; break; case I40E_PHY_TYPE_25GBASE_KR: case I40E_PHY_TYPE_25GBASE_CR: case I40E_PHY_TYPE_25GBASE_SR: case I40E_PHY_TYPE_25GBASE_LR: - ecmd->supported = SUPPORTED_Autoneg; - ecmd->advertising = ADVERTISED_Autoneg; + supported = SUPPORTED_Autoneg; + advertising = ADVERTISED_Autoneg; /* TODO: add speeds when ethtool is ready 
to support*/ break; default: @@ -520,38 +519,43 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, i40e_phy_type_to_ethtool(pf, &e_supported, &e_advertising); - ecmd->supported = ecmd->supported & e_supported; - ecmd->advertising = ecmd->advertising & e_advertising; + supported = supported & e_supported; + advertising = advertising & e_advertising; /* Set speed and duplex */ switch (link_speed) { case I40E_LINK_SPEED_40GB: - ethtool_cmd_speed_set(ecmd, SPEED_40000); + cmd->base.speed = SPEED_40000; break; case I40E_LINK_SPEED_25GB: #ifdef SPEED_25000 - ethtool_cmd_speed_set(ecmd, SPEED_25000); + cmd->base.speed = SPEED_25000; #else netdev_info(netdev, "Speed is 25G, display not supported by this version of ethtool.\n"); #endif break; case I40E_LINK_SPEED_20GB: - ethtool_cmd_speed_set(ecmd, SPEED_20000); + cmd->base.speed = SPEED_20000; break; case I40E_LINK_SPEED_10GB: - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; break; case I40E_LINK_SPEED_1GB: - ethtool_cmd_speed_set(ecmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; case I40E_LINK_SPEED_100MB: - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; break; default: break; } - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); } /** @@ -562,18 +566,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, * Reports link settings that can be determined when link is down **/ static void i40e_get_settings_link_down(struct i40e_hw *hw, - struct ethtool_cmd *ecmd, + struct ethtool_link_ksettings *cmd, struct i40e_pf *pf) { + u32 supported, advertising; + /* link is down and the driver needs to fall back on * supported phy types to figure out what info to display */ - i40e_phy_type_to_ethtool(pf, &ecmd->supported, - &ecmd->advertising); + i40e_phy_type_to_ethtool(pf, &supported, &advertising); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); /* With no link speed and duplex are unknown */ - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } /** @@ -583,74 +593,85 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, * * Reports speed/duplex settings based on media_type **/ -static int i40e_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int i40e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + u32 advertising; if (link_up) - i40e_get_settings_link_up(hw, ecmd, netdev, pf); + i40e_get_settings_link_up(hw, cmd, netdev, pf); else - i40e_get_settings_link_down(hw, ecmd, pf); + i40e_get_settings_link_down(hw, cmd, pf); /* Now set the settings that don't rely on link being up/down */ /* Set autoneg settings */ - ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); switch (hw->phy.media_type) { case I40E_MEDIA_TYPE_BACKPLANE: - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_Backplane; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_Backplane; - ecmd->port = PORT_NONE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, + Backplane); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Backplane); + cmd->base.port = PORT_NONE; break; case I40E_MEDIA_TYPE_BASET: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + cmd->base.port = PORT_TP; break; case I40E_MEDIA_TYPE_DA: case I40E_MEDIA_TYPE_CX4: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_DA; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_DA; break; case I40E_MEDIA_TYPE_FIBER: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + cmd->base.port = PORT_FIBRE; break; case I40E_MEDIA_TYPE_UNKNOWN: default: - ecmd->port = PORT_OTHER; + cmd->base.port = PORT_OTHER; break; } - /* Set transceiver */ - ecmd->transceiver = XCVR_EXTERNAL; - /* Set flow control settings */ - ecmd->supported |= SUPPORTED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); switch (hw->fc.requested_mode) { case I40E_FC_FULL: - ecmd->advertising |= ADVERTISED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Pause); break; case I40E_FC_TX_PAUSE: - ecmd->advertising |= ADVERTISED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; case I40E_FC_RX_PAUSE: - ecmd->advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; default: - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + ethtool_convert_link_mode_to_legacy_u32( + &advertising, cmd->link_modes.advertising); + + advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.advertising, advertising); break; } @@ -664,8 +685,8 @@ static int i40e_get_settings(struct net_device *netdev, * * Set speed/duplex per media_types advertised/forced **/ -static int i40e_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int i40e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; @@ -673,12 +694,15 @@ static int i40e_set_settings(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; - struct ethtool_cmd safe_ecmd; + struct ethtool_link_ksettings safe_cmd; + struct ethtool_link_ksettings copy_cmd; i40e_status status = 0; bool change = false; + int timeout = 50; int err = 0; - u8 autoneg; + u32 autoneg; u32 advertise; + u32 tmp; /* Changing port settings is not supported if this isn't the * port's controlling PF @@ -706,33 +730,47 @@ static int i40e_set_settings(struct net_device 
*netdev, return -EOPNOTSUPP; } + /* copy the cmd to copy_cmd to avoid modifying the origin */ + memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings)); + /* get our own copy of the bits to check against */ - memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); - i40e_get_settings(netdev, &safe_ecmd); + memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings)); + i40e_get_link_ksettings(netdev, &safe_cmd); - /* save autoneg and speed out of ecmd */ - autoneg = ecmd->autoneg; - advertise = ecmd->advertising; + /* save autoneg and speed out of cmd */ + autoneg = cmd->base.autoneg; + ethtool_convert_link_mode_to_legacy_u32(&advertise, + cmd->link_modes.advertising); /* set autoneg and speed back to what they currently are */ - ecmd->autoneg = safe_ecmd.autoneg; - ecmd->advertising = safe_ecmd.advertising; + copy_cmd.base.autoneg = safe_cmd.base.autoneg; + ethtool_convert_link_mode_to_legacy_u32( + &tmp, safe_cmd.link_modes.advertising); + ethtool_convert_legacy_u32_to_link_mode( + copy_cmd.link_modes.advertising, tmp); + + copy_cmd.base.cmd = safe_cmd.base.cmd; - ecmd->cmd = safe_ecmd.cmd; - /* If ecmd and safe_ecmd are not the same now, then they are + /* If copy_cmd and safe_cmd are not the same now, then they are * trying to set something that we do not support */ - if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd))) + if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings))) return -EOPNOTSUPP; - while (test_bit(__I40E_CONFIG_BUSY, &vsi->state)) + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; usleep_range(1000, 2000); + } /* Get the current phy config */ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); - if (status) - return -EAGAIN; + if (status) { + err = -EAGAIN; + goto done; + } /* Copy abilities to config in case autoneg is not * set below @@ -745,9 +783,11 @@ static int i40e_set_settings(struct net_device *netdev, /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { /* If autoneg is not supported, return error */ - if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + if (!ethtool_link_ksettings_test_link_mode( + &safe_cmd, supported, Autoneg)) { netdev_info(netdev, "Autoneg not supported on this phy\n"); - return -EINVAL; + err = -EINVAL; + goto done; } /* Autoneg is allowed to change */ config.abilities = abilities.abilities | @@ -760,11 +800,13 @@ static int i40e_set_settings(struct net_device *netdev, /* If autoneg is supported 10GBASE_T is the only PHY * that can disable it, so otherwise return error */ - if (safe_ecmd.supported & SUPPORTED_Autoneg && + if (ethtool_link_ksettings_test_link_mode( + &safe_cmd, supported, Autoneg) && hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); - return -EINVAL; + err = -EINVAL; + goto done; } /* Autoneg is allowed to change */ config.abilities = abilities.abilities & @@ -773,8 +815,12 @@ static int i40e_set_settings(struct net_device *netdev, } } - if (advertise & ~safe_ecmd.supported) - return -EINVAL; + ethtool_convert_link_mode_to_legacy_u32(&tmp, + safe_cmd.link_modes.supported); + if (advertise & ~tmp) { + err = -EINVAL; + goto done; + } if (advertise & ADVERTISED_100baseT_Full) config.link_speed |= I40E_LINK_SPEED_100MB; @@ -830,7 +876,8 @@ static int i40e_set_settings(struct net_device *netdev, netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw,
hw->aq.asq_last_status)); - return -EAGAIN; + err = -EAGAIN; + goto done; } status = i40e_update_link_info(hw); @@ -843,6 +890,9 @@ static int i40e_set_settings(struct net_device *netdev, netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); } +done: + clear_bit(__I40E_CONFIG_BUSY, pf->state); + return err; } @@ -937,7 +987,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, } /* If we have link and don't have autoneg */ - if (!test_bit(__I40E_DOWN, &pf->state) && + if (!test_bit(__I40E_DOWN, pf->state) && !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) { /* Send message that it might not necessarily work*/ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); @@ -989,10 +1039,10 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } - if (!test_bit(__I40E_DOWN, &pf->state)) { + if (!test_bit(__I40E_DOWN, pf->state)) { /* Give it a little more time to try to come back */ msleep(75); - if (!test_bit(__I40E_DOWN, &pf->state)) + if (!test_bit(__I40E_DOWN, pf->state)) return i40e_nway_reset(netdev); } @@ -1089,8 +1139,8 @@ static int i40e_get_eeprom(struct net_device *netdev, /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) errno = -EINVAL; - else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) errno = -EBUSY; else ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); @@ -1165,6 +1215,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev) struct i40e_hw *hw = &np->vsi->back->hw; u32 val; +#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF + if (hw->mac.type == I40E_MAC_X722) { + val = X722_EEPROM_SCOPE_LIMIT + 1; + return val; + } val = (rd32(hw, I40E_GLPCI_LBARCTRL) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; @@ -1191,8 +1246,8 @@ static int i40e_set_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) errno = -EINVAL; - else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) errno = -EBUSY; else ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); @@ -1252,6 +1307,7 @@ static int i40e_set_ringparam(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u32 new_rx_count, new_tx_count; + int timeout = 50; int i, err = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) @@ -1276,8 +1332,12 @@ static int i40e_set_ringparam(struct net_device *netdev, (new_rx_count == vsi->rx_rings[0]->count)) return 0; - while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; usleep_range(1000, 2000); + } if (!netif_running(vsi->netdev)) { /* simple case - set for the next time the netdev is started */ @@ -1425,7 +1485,7 @@ static int i40e_set_ringparam(struct net_device *netdev, } done: - clear_bit(__I40E_CONFIG_BUSY, &pf->state); + clear_bit(__I40E_CONFIG_BUSY, pf->state); return err; } @@ -1483,13 +1543,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } -#ifdef I40E_FCOE - for (j = 0; j < I40E_FCOE_STATS_LEN; j++) { - p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset; - data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } -#endif rcu_read_lock(); for (j = 0; j < vsi->num_queue_pairs; j++) { tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); @@ -1577,13 +1630,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, i40e_gstrings_misc_stats[i].stat_string); p += ETH_GSTRING_LEN; } -#ifdef I40E_FCOE - for (i = 0; i < I40E_FCOE_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_fcoe_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } -#endif for (i = 0; i < vsi->num_queue_pairs; i++) { snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); p += ETH_GSTRING_LEN; @@ -1648,12 +1694,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; case ETH_SS_PRIV_FLAGS: - memcpy(data, i40e_priv_flags_strings, - I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); - data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN; - if (pf->hw.pf_id == 0) - memcpy(data, i40e_gl_priv_flags_strings, - I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } + if (pf->hw.pf_id != 0) + break; + for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gl_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } break; default: break; @@ -1774,7 +1826,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf) int i; for (i = 0; i < pf->num_alloc_vfs; i++) - if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states)) + if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states)) return true; return false; } @@ -1795,7 +1847,7 @@ static void i40e_diag_test(struct net_device *netdev, /* Offline tests */ netif_info(pf, drv, netdev, "offline testing starting\n"); - set_bit(__I40E_TESTING, &pf->state); + set_bit(__I40E_TESTING, pf->state); if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, @@ -1805,7 +1857,7 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_INTR] = 1; data[I40E_ETH_TEST_LINK] = 1; eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, &pf->state); + clear_bit(__I40E_TESTING, pf->state); goto skip_ol_tests; } @@ -1819,7 +1871,7 @@ static void i40e_diag_test(struct net_device *netdev, * link then the following link test would have * to be moved to before the reset */ - i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1834,8 +1886,8 @@ static void i40e_diag_test(struct net_device *netdev, if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, &pf->state); - i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); + clear_bit(__I40E_TESTING, pf->state); + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); if (if_running) i40e_open(netdev); @@ -2284,6 +2336,102 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) return 0; } +/** + * i40e_check_mask - Check whether a mask field is set + * @mask: the full mask value + * @field: mask of the field to check + * + * If the given mask is fully set,
return a positive value. If the mask for the + field is fully unset, return zero. Otherwise return a negative error code. + **/ +static int i40e_check_mask(u64 mask, u64 field) +{ + u64 value = mask & field; + + if (value == field) + return 1; + else if (!value) + return 0; + else + return -1; +} + +/** + * i40e_parse_rx_flow_user_data - Deconstruct user-defined data + * @fsp: pointer to rx flow specification + * @data: pointer to userdef data structure for storage + * + * Read the user-defined data and deconstruct the value into a structure. No + * other code should read the user-defined data, so as to ensure that every + * place consistently reads the value correctly. + * + * The user-defined field is a 64-bit big-endian value, which we + * deconstruct by reading bits or bit fields from it. Single bit flags shall + * be defined starting from the highest bits, while small bit field values + * shall be defined starting from the lowest bits. + * + * Returns 0 if the data is valid, and non-zero if the userdef data is invalid + * and the filter should be rejected. The data structure will always be + * modified even if FLOW_EXT is not set. + * + **/ +static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, + struct i40e_rx_flow_userdef *data) +{ + u64 value, mask; + int valid; + + /* Zero memory first so it's always consistent. */ + memset(data, 0, sizeof(*data)); + + if (!(fsp->flow_type & FLOW_EXT)) + return 0; + + value = be64_to_cpu(*((__be64 *)fsp->h_ext.data)); + mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data)); + +#define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0) +#define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16) +#define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0) + + valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER); + if (valid < 0) { + return -EINVAL; + } else if (valid) { + data->flex_word = value & I40E_USERDEF_FLEX_WORD; + data->flex_offset = + (value & I40E_USERDEF_FLEX_OFFSET) >> 16; + data->flex_filter = true; + } + + return 0; +} + +/** + * i40e_fill_rx_flow_user_data - Fill in user-defined data field + * @fsp: pointer to rx_flow specification + * @data: pointer to userdef data structure + * + * Reads the userdef data structure and properly fills in the user-defined + * fields of the rx_flow_spec.
+ **/ +static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, + struct i40e_rx_flow_userdef *data) +{ + u64 value = 0, mask = 0; + + if (data->flex_filter) { + value |= data->flex_word; + value |= (u64)data->flex_offset << 16; + mask |= I40E_USERDEF_FLEX_FILTER; + } + + if (value || mask) + fsp->flow_type |= FLOW_EXT; + + *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value); + *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask); +} + /** * i40e_get_ethtool_fdir_all - Populates the rule count of a command * @pf: Pointer to the physical function struct @@ -2335,8 +2483,11 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_rx_flow_userdef userdef = {0}; struct i40e_fdir_filter *rule = NULL; struct hlist_node *node2; + u64 input_set; + u16 index; hlist_for_each_entry_safe(rule, node2, &pf->fdir_filter_list, fdir_node) { @@ -2359,8 +2510,48 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, */ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; - fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0]; - fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip; + + switch (rule->flow_type) { + case SCTP_V4_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + break; + case TCP_V4_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + break; + case UDP_V4_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + break; + case IP_USER_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + break; + default: + /* If we have stored a filter with a flow type not listed here + * it is almost certainly a driver bug. WARN(), and then + * assign the input_set as if all fields are enabled to avoid + * reading unassigned memory. 
+ */ + WARN(1, "Missing input set index for flow_type %d\n", + rule->flow_type); + input_set = 0xFFFFFFFFFFFFFFFFULL; + goto no_input_set; + } + + input_set = i40e_read_fd_input_set(pf, index); + +no_input_set: + if (input_set & I40E_L3_SRC_MASK) + fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF); + + if (input_set & I40E_L3_DST_MASK) + fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF); + + if (input_set & I40E_L4_SRC_MASK) + fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF); + + if (input_set & I40E_L4_DST_MASK) + fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF); if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) fsp->ring_cookie = RX_CLS_FLOW_DISC; @@ -2372,11 +2563,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); if (vsi && vsi->type == I40E_VSI_SRIOV) { - fsp->h_ext.data[1] = htonl(vsi->vf_id); - fsp->m_ext.data[1] = htonl(0x1); + /* VFs are zero-indexed by the driver, but ethtool + * expects them to be one-indexed, so add one here + */ + u64 ring_vf = vsi->vf_id + 1; + + ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fsp->ring_cookie |= ring_vf; } } + if (rule->flex_filter) { + userdef.flex_filter = true; + userdef.flex_word = be16_to_cpu(rule->flex_word); + userdef.flex_offset = rule->flex_offset; + } + + i40e_fill_rx_flow_user_data(fsp, &userdef); + return 0; } @@ -2573,24 +2777,6 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return 0; } -/** - * i40e_match_fdir_input_set - Match a new filter against an existing one - * @rule: The filter already added - * @input: The new filter to comapre against - * - * Returns true if the two input set match - **/ -static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, - struct i40e_fdir_filter *input) -{ - if ((rule->dst_ip[0] != input->dst_ip[0]) || - (rule->src_ip[0] != input->src_ip[0]) || - (rule->dst_port != input->dst_port) || - (rule->src_port != input->src_port)) - return false; - return true; -} - /** * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry * @vsi: Pointer to the targeted VSI @@ -2626,22 +2812,22 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, /* if there is an old rule occupying our place remove it */ if (rule && (rule->fd_id == sw_idx)) { - if (input && !i40e_match_fdir_input_set(rule, input)) - err = i40e_add_del_fdir(vsi, rule, false); - else if (!input) - err = i40e_add_del_fdir(vsi, rule, false); + /* Remove this rule, since we're either deleting it, or + * replacing it. + */ + err = i40e_add_del_fdir(vsi, rule, false); hlist_del(&rule->fdir_node); kfree(rule); pf->fdir_pf_active_filters--; } - /* If no input this was a delete, err should be 0 if a rule was - * successfully found and removed from the list else -EINVAL + /* If we weren't given an input, this is a delete, so just return the + * error code indicating if there was an entry at the requested slot */ if (!input) return err; - /* initialize node and set software index */ + /* Otherwise, install the new rule as requested */ INIT_HLIST_NODE(&input->fdir_node); /* add filter to the list */ @@ -2657,6 +2843,69 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, return 0; } +/** + * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table + * @pf: pointer to PF structure + * + * This function searches the list of filters and determines which FLX_PIT + * entries are still required. It will prune any entries which are no longer + * in use after the deletion.
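The ring_cookie packing used in the SRIOV branch above is the generic ethtool convention, and include/linux/ethtool.h already provides helpers for it. A short sketch of both directions, using a hypothetical VF 0 and queue 3 (the example_ name is ours):

static void example_ring_cookie(void)
{
        /* encode: driver VF 0 becomes cookie VF 1 (one-indexed) */
        u64 cookie = 3 | ((u64)(0 + 1) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);

        /* decode, as i40e_add_fdir_ethtool() does later in this patch */
        u32 ring = ethtool_get_flow_spec_ring(cookie);  /* 3 */
        u8 vf = ethtool_get_flow_spec_ring_vf(cookie);  /* 1; driver uses vf - 1 */
}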
+ **/ +static void i40e_prune_flex_pit_list(struct i40e_pf *pf) +{ + struct i40e_flex_pit *entry, *tmp; + struct i40e_fdir_filter *rule; + + /* First, we'll check the l3 table */ + list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) { + bool found = false; + + hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { + if (rule->flow_type != IP_USER_FLOW) + continue; + if (rule->flex_filter && + rule->flex_offset == entry->src_offset) { + found = true; + break; + } + } + + /* If we didn't find the filter, then we can prune this entry + * from the list. + */ + if (!found) { + list_del(&entry->list); + kfree(entry); + } + } + + /* Followed by the L4 table */ + list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) { + bool found = false; + + hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { + /* Skip this filter if it's L3, since we already + * checked those in the above loop + */ + if (rule->flow_type == IP_USER_FLOW) + continue; + if (rule->flex_filter && + rule->flex_offset == entry->src_offset) { + found = true; + break; + } + } + + /* If we didn't find the filter, then we can prune this entry + * from the list. + */ + if (!found) { + list_del(&entry->list); + kfree(entry); + } + } +} + /** * i40e_del_fdir_entry - Deletes a Flow Director filter entry * @vsi: Pointer to the targeted VSI @@ -2675,19 +2924,699 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; int ret = 0; - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); + i40e_prune_flex_pit_list(pf); + i40e_fdir_check_and_reenable(pf); return ret; } +/** + * i40e_unused_pit_index - Find an unused PIT index for given list + * @pf: the PF data structure + * + * Find the first unused flexible PIT index entry. We search both the L3 and + * L4 flexible PIT lists so that the returned index is unique and unused by + * either currently programmed L3 or L4 filters. We use a bit field as storage + * to track which indexes are already used. + **/ +static u8 i40e_unused_pit_index(struct i40e_pf *pf) +{ + unsigned long available_index = 0xFF; + struct i40e_flex_pit *entry; + + /* We need to make sure that the new index isn't in use by either L3 + * or L4 filters so that IP_USER_FLOW filters can program both L3 and + * L4 to use the same index. + */ + + list_for_each_entry(entry, &pf->l4_flex_pit_list, list) + clear_bit(entry->pit_index, &available_index); + + list_for_each_entry(entry, &pf->l3_flex_pit_list, list) + clear_bit(entry->pit_index, &available_index); + + return find_first_bit(&available_index, 8); +} + +/** + * i40e_find_flex_offset - Find an existing flex src_offset + * @flex_pit_list: L3 or L4 flex PIT list + * @src_offset: new src_offset to find + * + * Searches the flex_pit_list for an existing offset. If no offset is + * currently programmed, then this will return an ERR_PTR if there is no space + * to add a new offset, otherwise it returns NULL. + **/ +static +struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list, + u16 src_offset) +{ + struct i40e_flex_pit *entry; + int size = 0; + + /* Search for the src_offset first. 
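The allocator in i40e_unused_pit_index() above is a compact bitmap idiom worth spelling out: seed a word with all eight candidate indexes, clear every index found on either flex PIT list, then take the lowest survivor. A reduced, self-contained version of the same idiom (the helper name is ours):

#include <linux/bitops.h>

static u8 example_lowest_free_index(const u8 *used, int n)
{
        unsigned long available = 0xFF; /* indexes 0-7 all free */
        int i;

        for (i = 0; i < n; i++)
                clear_bit(used[i], &available);

        /* find_first_bit() returns the size (8) if every index was claimed */
        return find_first_bit(&available, 8);
}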
If we find a matching entry + * already programmed, we can simply re-use it. + */ + list_for_each_entry(entry, flex_pit_list, list) { + size++; + if (entry->src_offset == src_offset) + return entry; + } + + /* If we haven't found an entry yet, then the provided src offset has + * not yet been programmed. We will program the src offset later on, + * but we need to indicate whether there is enough space to do so + * here. We'll make use of ERR_PTR for this purpose. + */ + if (size >= I40E_FLEX_PIT_TABLE_SIZE) + return ERR_PTR(-ENOSPC); + + return NULL; +} + +/** + * i40e_add_flex_offset - Add src_offset to flex PIT table list + * @flex_pit_list: L3 or L4 flex PIT list + * @src_offset: new src_offset to add + * @pit_index: the PIT index to program + * + * This function programs the new src_offset to the list. It is expected that + * i40e_find_flex_offset has already been tried and returned NULL, indicating + * that this offset is not programmed, and that the list has enough space to + * store another offset. + * + * Returns 0 on success, and negative value on error. + **/ +static int i40e_add_flex_offset(struct list_head *flex_pit_list, + u16 src_offset, + u8 pit_index) +{ + struct i40e_flex_pit *new_pit, *entry; + + new_pit = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!new_pit) + return -ENOMEM; + + new_pit->src_offset = src_offset; + new_pit->pit_index = pit_index; + + /* We need to insert this item such that the list is sorted by + * src_offset in ascending order. + */ + list_for_each_entry(entry, flex_pit_list, list) { + if (new_pit->src_offset < entry->src_offset) { + list_add_tail(&new_pit->list, &entry->list); + return 0; + } + + /* If we found an entry with our offset already programmed we + * can simply return here, after freeing the memory. However, + * if the pit_index does not match we need to report an error. + */ + if (new_pit->src_offset == entry->src_offset) { + int err = 0; + + /* If the PIT index is not the same we can't re-use + * the entry, so we must report an error. + */ + if (new_pit->pit_index != entry->pit_index) + err = -EINVAL; + + kfree(new_pit); + return err; + } + } + + /* If we reached here, then we haven't yet added the item. This means + * that we should add the item at the end of the list. + */ + list_add_tail(&new_pit->list, flex_pit_list); + return 0; +} + +/** + * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table + * @pf: Pointer to the PF structure + * @flex_pit_list: list of flexible src offsets in use + * @flex_pit_start: index to first entry for this section of the table + * + * In order to handle flexible data, the hardware uses a table of values + * called the FLX_PIT table. This table is used to indicate which sections of + * the input correspond to what PIT index values. Unfortunately, hardware is + * very restrictive about programming this table. Entries must be ordered by + * src_offset in ascending order, without duplicates. Additionally, unused + * entries must be set to the unused index value, and must have valid size and + * length according to the src_offset ordering. + * + * This function will reprogram the FLX_PIT register from a book-keeping + * structure that we guarantee is already ordered correctly, and has no more + * than 3 entries. + * + * To make things easier, we only support flexible values of one word length, + * rather than allowing variable length flexible values.
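The ordered insert in i40e_add_flex_offset() above hinges on a list.h subtlety: calling list_add_tail() with an existing entry's list head links the new node immediately before that entry. A stripped-down sketch of just the sorting logic, with a stand-in struct:

#include <linux/list.h>

struct flex_entry {             /* stand-in for struct i40e_flex_pit */
        struct list_head list;
        u16 src_offset;
};

static void example_sorted_insert(struct list_head *head,
                                  struct flex_entry *new)
{
        struct flex_entry *entry;

        list_for_each_entry(entry, head, list) {
                if (new->src_offset < entry->src_offset) {
                        /* insert just before the first larger entry */
                        list_add_tail(&new->list, &entry->list);
                        return;
                }
        }
        list_add_tail(&new->list, head);        /* largest so far: append */
}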
+ **/ +static void __i40e_reprogram_flex_pit(struct i40e_pf *pf, + struct list_head *flex_pit_list, + int flex_pit_start) +{ + struct i40e_flex_pit *entry = NULL; + u16 last_offset = 0; + int i = 0, j = 0; + + /* First, loop over the list of flex PIT entries, and reprogram the + * registers. + */ + list_for_each_entry(entry, flex_pit_list, list) { + /* We have to be careful when programming values for the + * largest SRC_OFFSET value. It is possible that adding + * additional empty values at the end would overflow the space + * for the SRC_OFFSET in the FLX_PIT register. To avoid this, + * we check here and add the empty values prior to adding the + * largest value. + * + * To determine this, we will use a loop from i+1 to 3, which + * will determine whether the unused entries would have valid + * SRC_OFFSET. Note that there cannot be extra entries past + * this value, because the only valid values would have been + * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not + * have been added to the list in the first place. + */ + for (j = i + 1; j < 3; j++) { + u16 offset = entry->src_offset + j; + int index = flex_pit_start + i; + u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, + 1, + offset - 3); + + if (offset > I40E_MAX_FLEX_SRC_OFFSET) { + i40e_write_rx_ctl(&pf->hw, + I40E_PRTQF_FLX_PIT(index), + value); + i++; + } + } + + /* Now, we can program the actual value into the table */ + i40e_write_rx_ctl(&pf->hw, + I40E_PRTQF_FLX_PIT(flex_pit_start + i), + I40E_FLEX_PREP_VAL(entry->pit_index + 50, + 1, + entry->src_offset)); + i++; + } + + /* In order to program the last entries in the table, we need to + * determine the valid offset. If the list is empty, we'll just start + * with 0. Otherwise, we'll start with the last item offset and add 1. + * This ensures that all entries have valid sizes. If we don't do this + * correctly, the hardware will disable flexible field parsing. + */ + if (!list_empty(flex_pit_list)) + last_offset = list_prev_entry(entry, list)->src_offset + 1; + + for (; i < 3; i++, last_offset++) { + i40e_write_rx_ctl(&pf->hw, + I40E_PRTQF_FLX_PIT(flex_pit_start + i), + I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, + 1, + last_offset)); + } +} + +/** + * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change + * @pf: pointer to the PF structure + * + * This function reprograms both the L3 and L4 FLX_PIT tables. See the + * internal helper function for implementation details. + **/ +static void i40e_reprogram_flex_pit(struct i40e_pf *pf) +{ + __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list, + I40E_FLEX_PIT_IDX_START_L3); + + __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list, + I40E_FLEX_PIT_IDX_START_L4); + + /* We also need to program the L3 and L4 GLQF ORT register */ + i40e_write_rx_ctl(&pf->hw, + I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX), + I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3, + 3, 1)); + + i40e_write_rx_ctl(&pf->hw, + I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX), + I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4, + 3, 1)); +} + +/** + * i40e_flow_str - Converts a flow_type into a human readable string + * @flow_type: the flow type from a flow specification + * + * Currently only flow types we support are included here, and the string + * value attempts to match what ethtool would use to configure this flow type. 
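Assuming I40E_FLEX_PREP_VAL(dest, size, offset) packs the destination register, size and source offset fields in the order its use above suggests, a table holding a single programmed offset at payload word 4 with PIT index 0 would be written out by the loop roughly as follows. This is a sketch of the expected register sequence, not new driver code; start stands for I40E_FLEX_PIT_IDX_START_L3 or _L4:

/* entry: src_offset = 4, pit_index = 0 -> flexible register 50 */
i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(start + 0),
                  I40E_FLEX_PREP_VAL(0 + 50, 1, 4));
/* the two leftover slots still need valid, strictly increasing
 * offsets, so they are parked on the unused destination index:
 */
i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(start + 1),
                  I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 1, 5));
i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(start + 2),
                  I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 1, 6));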
+ **/ +static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + return "tcp4"; + case UDP_V4_FLOW: + return "udp4"; + case SCTP_V4_FLOW: + return "sctp4"; + case IP_USER_FLOW: + return "ip4"; + default: + return "unknown"; + } +} + +/** + * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index + * @pit_index: PIT index to convert + * + * Returns the mask for a given PIT index. Will return 0 if the pit_index is + * out of range. + **/ +static u64 i40e_pit_index_to_mask(int pit_index) +{ + switch (pit_index) { + case 0: + return I40E_FLEX_50_MASK; + case 1: + return I40E_FLEX_51_MASK; + case 2: + return I40E_FLEX_52_MASK; + case 3: + return I40E_FLEX_53_MASK; + case 4: + return I40E_FLEX_54_MASK; + case 5: + return I40E_FLEX_55_MASK; + case 6: + return I40E_FLEX_56_MASK; + case 7: + return I40E_FLEX_57_MASK; + default: + return 0; + } +} + +/** + * i40e_print_input_set - Show changes between two input sets + * @vsi: the vsi being configured + * @old: the old input set + * @new: the new input set + * + * Print the difference between old and new input sets by showing which series + * of words are toggled on or off. Only displays the bits we actually support + * changing. + **/ +static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new) +{ + struct i40e_pf *pf = vsi->back; + bool old_value, new_value; + int i; + + old_value = !!(old & I40E_L3_SRC_MASK); + new_value = !!(new & I40E_L3_SRC_MASK); + if (old_value != new_value) + netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n", + old_value ? "ON" : "OFF", + new_value ? "ON" : "OFF"); + + old_value = !!(old & I40E_L3_DST_MASK); + new_value = !!(new & I40E_L3_DST_MASK); + if (old_value != new_value) + netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n", + old_value ? "ON" : "OFF", + new_value ? "ON" : "OFF"); + + old_value = !!(old & I40E_L4_SRC_MASK); + new_value = !!(new & I40E_L4_SRC_MASK); + if (old_value != new_value) + netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n", + old_value ? "ON" : "OFF", + new_value ? "ON" : "OFF"); + + old_value = !!(old & I40E_L4_DST_MASK); + new_value = !!(new & I40E_L4_DST_MASK); + if (old_value != new_value) + netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n", + old_value ? "ON" : "OFF", + new_value ? "ON" : "OFF"); + + old_value = !!(old & I40E_VERIFY_TAG_MASK); + new_value = !!(new & I40E_VERIFY_TAG_MASK); + if (old_value != new_value) + netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n", + old_value ? "ON" : "OFF", + new_value ? "ON" : "OFF"); + + /* Show change of flexible filter entries */ + for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) { + u64 flex_mask = i40e_pit_index_to_mask(i); + + old_value = !!(old & flex_mask); + new_value = !!(new & flex_mask); + if (old_value != new_value) + netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n", + i, + old_value ? "ON" : "OFF", + new_value ? "ON" : "OFF"); + } + + netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n", + old); + netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n", + new); +} + +/** + * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid + * @vsi: pointer to the targeted VSI + * @fsp: pointer to Rx flow specification + * @userdef: userdefined data from flow specification + * + * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support + * for partial matches exists with a few limitations.
First, hardware only + * supports masking by word boundary (2 bytes) and not per individual bit. + * Second, hardware is limited to using one mask for a flow type and cannot + * use a separate mask for each filter. + * + * To support these limitations, if we already have a configured filter for + * the specified type, this function enforces that new filters of the type + * match the configured input set. Otherwise, if we do not have a filter of + * the specified type, we allow the input set to be updated to match the + * desired filter. + * + * To help ensure that administrators understand why filters weren't displayed + * as supported, we print a diagnostic message displaying how the input set + * would change and warning to delete the preexisting filters if required. + * + * Returns 0 on successful input set match, and a negative return code on + * failure. + **/ +static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, + struct ethtool_rx_flow_spec *fsp, + struct i40e_rx_flow_userdef *userdef) +{ + struct i40e_pf *pf = vsi->back; + struct ethtool_tcpip4_spec *tcp_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_spec; + u64 current_mask, new_mask; + bool new_flex_offset = false; + bool flex_l3 = false; + u16 *fdir_filter_count; + u16 index, src_offset = 0; + u8 pit_index = 0; + int err; + + switch (fsp->flow_type & ~FLOW_EXT) { + case SCTP_V4_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + fdir_filter_count = &pf->fd_sctp4_filter_cnt; + break; + case TCP_V4_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + fdir_filter_count = &pf->fd_tcp4_filter_cnt; + break; + case UDP_V4_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + fdir_filter_count = &pf->fd_udp4_filter_cnt; + break; + case IP_USER_FLOW: + index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + fdir_filter_count = &pf->fd_ip4_filter_cnt; + flex_l3 = true; + break; + default: + return -EOPNOTSUPP; + } + + /* Read the current input set from register memory. */ + current_mask = i40e_read_fd_input_set(pf, index); + new_mask = current_mask; + + /* Determine, if any, the required changes to the input set in order + * to support the provided mask. + * + * Hardware only supports masking at word (2 byte) granularity and does + * not support full bitwise masking. This implementation simplifies + * even further and only supports fully enabled or fully disabled + * masks for each field, even though we could split the ip4src and + * ip4dst fields. + */ + switch (fsp->flow_type & ~FLOW_EXT) { + case SCTP_V4_FLOW: + new_mask &= ~I40E_VERIFY_TAG_MASK; + /* Fall through */ + case TCP_V4_FLOW: + case UDP_V4_FLOW: + tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; + + /* IPv4 source address */ + if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) + new_mask |= I40E_L3_SRC_MASK; + else if (!tcp_ip4_spec->ip4src) + new_mask &= ~I40E_L3_SRC_MASK; + else + return -EOPNOTSUPP; + + /* IPv4 destination address */ + if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) + new_mask |= I40E_L3_DST_MASK; + else if (!tcp_ip4_spec->ip4dst) + new_mask &= ~I40E_L3_DST_MASK; + else + return -EOPNOTSUPP; + + /* L4 source port */ + if (tcp_ip4_spec->psrc == htons(0xFFFF)) + new_mask |= I40E_L4_SRC_MASK; + else if (!tcp_ip4_spec->psrc) + new_mask &= ~I40E_L4_SRC_MASK; + else + return -EOPNOTSUPP; + + /* L4 destination port */ + if (tcp_ip4_spec->pdst == htons(0xFFFF)) + new_mask |= I40E_L4_DST_MASK; + else if (!tcp_ip4_spec->pdst) + new_mask &= ~I40E_L4_DST_MASK; + else + return -EOPNOTSUPP; + + /* Filtering on Type of Service is not supported. 
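As a worked example of the derivation above: a TCP/IPv4 filter that should match only an exact destination port would carry the following masks, toggling exactly one input-set bit, while a partial mask such as htons(0xFF00) falls through to -EOPNOTSUPP:

/* in fsp->m_u.tcp_ip4_spec, 1-bits mean "compare this bit" */
tcp_ip4_spec->ip4src = 0;               /* new_mask &= ~I40E_L3_SRC_MASK */
tcp_ip4_spec->ip4dst = 0;               /* new_mask &= ~I40E_L3_DST_MASK */
tcp_ip4_spec->psrc   = 0;               /* new_mask &= ~I40E_L4_SRC_MASK */
tcp_ip4_spec->pdst   = htons(0xFFFF);   /* new_mask |=  I40E_L4_DST_MASK */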
*/ + if (tcp_ip4_spec->tos) + return -EOPNOTSUPP; + + break; + case IP_USER_FLOW: + usr_ip4_spec = &fsp->m_u.usr_ip4_spec; + + /* IPv4 source address */ + if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) + new_mask |= I40E_L3_SRC_MASK; + else if (!usr_ip4_spec->ip4src) + new_mask &= ~I40E_L3_SRC_MASK; + else + return -EOPNOTSUPP; + + /* IPv4 destination address */ + if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) + new_mask |= I40E_L3_DST_MASK; + else if (!usr_ip4_spec->ip4dst) + new_mask &= ~I40E_L3_DST_MASK; + else + return -EOPNOTSUPP; + + /* First 4 bytes of L4 header */ + if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF)) + new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; + else if (!usr_ip4_spec->l4_4_bytes) + new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); + else + return -EOPNOTSUPP; + + /* Filtering on Type of Service is not supported. */ + if (usr_ip4_spec->tos) + return -EOPNOTSUPP; + + /* Filtering on IP version is not supported */ + if (usr_ip4_spec->ip_ver) + return -EINVAL; + + /* Filtering on L4 protocol is not supported */ + if (usr_ip4_spec->proto) + return -EINVAL; + + break; + default: + return -EOPNOTSUPP; + } + + /* First, clear all flexible filter entries */ + new_mask &= ~I40E_FLEX_INPUT_MASK; + + /* If we have a flexible filter, try to add this offset to the correct + * flexible filter PIT list. Once finished, we can update the mask. + * If the src_offset changed, we will get a new mask value which will + * trigger an input set change. + */ + if (userdef->flex_filter) { + struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL; + + /* Flexible offset must be even, since the flexible payload + * must be aligned on 2-byte boundary. + */ + if (userdef->flex_offset & 0x1) { + dev_warn(&pf->pdev->dev, + "Flexible data offset must be 2-byte aligned\n"); + return -EINVAL; + } + + src_offset = userdef->flex_offset >> 1; + + /* FLX_PIT source offset value is only so large */ + if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) { + dev_warn(&pf->pdev->dev, + "Flexible data must reside within first 64 bytes of the packet payload\n"); + return -EINVAL; + } + + /* See if this offset has already been programmed. If we get + * an ERR_PTR, then the filter is not safe to add. Otherwise, + * if we get a NULL pointer, this means we will need to add + * the offset. + */ + flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list, + src_offset); + if (IS_ERR(flex_pit)) + return PTR_ERR(flex_pit); + + /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown) + * packet types, and thus we need to program both L3 and L4 + * flexible values. These must have identical flexible index, + * as otherwise we can't correctly program the input set. So + * we'll find both an L3 and L4 index and make sure they are + * the same. + */ + if (flex_l3) { + l3_flex_pit = + i40e_find_flex_offset(&pf->l3_flex_pit_list, + src_offset); + if (IS_ERR(l3_flex_pit)) + return PTR_ERR(l3_flex_pit); + + if (flex_pit) { + /* If we already had a matching L4 entry, we + * need to make sure that the L3 entry we + * obtained uses the same index. + */ + if (l3_flex_pit) { + if (l3_flex_pit->pit_index != + flex_pit->pit_index) { + return -EINVAL; + } + } else { + new_flex_offset = true; + } + } else { + flex_pit = l3_flex_pit; + } + } + + /* If we didn't find an existing flex offset, we need to + * program a new one. However, we don't immediately program it + * here because we will wait to program until after we check + * that it is safe to change the input set. 
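The flex-offset lookups that follow use a three-way return convention that is easy to misread, so here is the calling pattern in isolation: an ERR_PTR() means the FLX_PIT table is full, NULL means the offset still needs programming, and a valid pointer means an existing entry can be reused.

struct i40e_flex_pit *pit;

pit = i40e_find_flex_offset(&pf->l4_flex_pit_list, src_offset);
if (IS_ERR(pit))
        return PTR_ERR(pit);            /* no room left in the table */
if (!pit)
        new_flex_offset = true;         /* program a fresh index later */
else
        pit_index = pit->pit_index;     /* reuse the programmed index */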
+ */ + if (!flex_pit) { + new_flex_offset = true; + pit_index = i40e_unused_pit_index(pf); + } else { + pit_index = flex_pit->pit_index; + } + + /* Update the mask with the new offset */ + new_mask |= i40e_pit_index_to_mask(pit_index); + } + + /* If the mask and flexible filter offsets for this filter match the + * currently programmed values we don't need any input set change, so + * this filter is safe to install. + */ + if (new_mask == current_mask && !new_flex_offset) + return 0; + + netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n", + i40e_flow_str(fsp)); + i40e_print_input_set(vsi, current_mask, new_mask); + if (new_flex_offset) { + netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d", + pit_index, src_offset); + } + + /* Hardware input sets are global across multiple ports, so even the + * main port cannot change them when in MFP mode as this would impact + * any filters on the other ports. + */ + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); + return -EOPNOTSUPP; + } + + /* This filter requires us to update the input set. However, hardware + * only supports one input set per flow type, and does not support + * separate masks for each filter. This means that we can only support + * a single mask for all filters of a specific type. + * + * If we have preexisting filters, they obviously depend on the + * current programmed input set. Display a diagnostic message in this + * case explaining why the filter could not be accepted. + */ + if (*fdir_filter_count) { + netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n", + i40e_flow_str(fsp), + *fdir_filter_count); + return -EOPNOTSUPP; + } + + i40e_write_fd_input_set(pf, index, new_mask); + + /* Add the new offset and update table, if necessary */ + if (new_flex_offset) { + err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, + pit_index); + if (err) + return err; + + if (flex_l3) { + err = i40e_add_flex_offset(&pf->l3_flex_pit_list, + src_offset, + pit_index); + if (err) + return err; + } + + i40e_reprogram_flex_pit(pf); + } + + return 0; +} + /** * i40e_add_fdir_ethtool - Add/Remove Flow Director filters * @vsi: pointer to the targeted VSI @@ -2699,11 +3628,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, struct ethtool_rxnfc *cmd) { + struct i40e_rx_flow_userdef userdef; struct ethtool_rx_flow_spec *fsp; struct i40e_fdir_filter *input; + u16 dest_vsi = 0, q_index = 0; struct i40e_pf *pf; int ret = -EINVAL; - u16 vf_id; + u8 dest_ctl; if (!vsi) return -EINVAL; @@ -2712,26 +3643,61 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return -EOPNOTSUPP; - if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) + if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) return -ENOSPC; - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return -EBUSY; fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + /* Parse the user-defined field */ + if (i40e_parse_rx_flow_user_data(fsp, &userdef)) + return -EINVAL; + + /* Extended MAC field is not supported */ + if 
(fsp->flow_type & FLOW_MAC_EXT) + return -EINVAL; + + ret = i40e_check_fdir_input_set(vsi, fsp, &userdef); + if (ret) + return ret; + if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + pf->hw.func_caps.fd_filters_guaranteed)) { return -EINVAL; } - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= vsi->num_queue_pairs)) - return -EINVAL; + /* ring_cookie is either the drop index, or is a mask of the queue + * index and VF id we wish to target. + */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf) { + if (ring >= vsi->num_queue_pairs) + return -EINVAL; + dest_vsi = vsi->id; + } else { + /* VFs are zero-indexed, so we subtract one here */ + vf--; + + if (vf >= pf->num_alloc_vfs) + return -EINVAL; + if (ring >= pf->vf[vf].num_queue_pairs) + return -EINVAL; + dest_vsi = pf->vf[vf].lan_vsi_id; + } + dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + q_index = ring; + } input = kzalloc(sizeof(*input), GFP_KERNEL); @@ -2739,20 +3705,14 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, return -ENOMEM; input->fd_id = fsp->location; - - if (fsp->ring_cookie == RX_CLS_FLOW_DISC) - input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; - else - input->dest_ctl = - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; - - input->q_index = fsp->ring_cookie; - input->flex_off = 0; - input->pctype = 0; - input->dest_vsi = vsi->id; + input->q_index = q_index; + input->dest_vsi = dest_vsi; + input->dest_ctl = dest_ctl; input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); - input->flow_type = fsp->flow_type; + input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; + input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + input->flow_type = fsp->flow_type & ~FLOW_EXT; input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; /* Reverse the src and dest notion, since the HW expects them to be from @@ -2760,33 +3720,29 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, */ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; input->src_port = fsp->h_u.tcp_ip4_spec.pdst; - input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; - input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; - - if (ntohl(fsp->m_ext.data[1])) { - vf_id = ntohl(fsp->h_ext.data[1]); - if (vf_id >= pf->num_alloc_vfs) { - netif_info(pf, drv, vsi->netdev, - "Invalid VF id %d\n", vf_id); - goto free_input; - } - /* Find vsi id from vf id and override dest vsi */ - input->dest_vsi = pf->vf[vf_id].lan_vsi_id; - if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { - netif_info(pf, drv, vsi->netdev, - "Invalid queue id %d for VF %d\n", - input->q_index, vf_id); - goto free_input; - } + input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; + input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + + if (userdef.flex_filter) { + input->flex_filter = true; + input->flex_word = cpu_to_be16(userdef.flex_word); + input->flex_offset = userdef.flex_offset; } ret = i40e_add_del_fdir(vsi, input, true); -free_input: if (ret) - kfree(input); - else - i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); + goto free_input; + + /* Add the input filter to the fdir_input_list, possibly replacing + * a previous filter. Do not free the input structure after adding it + * to the list as this would cause a use-after-free bug. 
+ */ + i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); + return 0; + +free_input: + kfree(input); return ret; } @@ -3036,7 +3992,7 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, * @dev: network interface device structure * * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags * array. * * Returns a u32 bitmap of flags. @@ -3046,19 +4002,27 @@ static u32 i40e_get_priv_flags(struct net_device *dev) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u32 ret_flags = 0; + u32 i, j, ret_flags = 0; + + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + const struct i40e_priv_flags *priv_flags; + + priv_flags = &i40e_gstrings_priv_flags[i]; + + if (priv_flags->flag & pf->flags) + ret_flags |= BIT(i); + } + + if (pf->hw.pf_id != 0) + return ret_flags; + + for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { + const struct i40e_priv_flags *priv_flags; - ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? - I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; - ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ? - I40E_PRIV_FLAGS_FD_ATR : 0; - ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? - I40E_PRIV_FLAGS_VEB_STATS : 0; - ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? - 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; - if (pf->hw.pf_id == 0) { - ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ? - I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0; + priv_flags = &i40e_gl_gstrings_priv_flags[j]; + + if (priv_flags->flag & pf->flags) + ret_flags |= BIT(i + j); } return ret_flags; @@ -3074,54 +4038,66 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u16 sw_flags = 0, valid_flags = 0; - bool reset_required = false; - bool promisc_change = false; - int ret; + u64 changed_flags; + u32 i, j; - /* NOTE: MFP is not settable */ + changed_flags = pf->flags; - if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) - pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; - else - pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED; + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + const struct i40e_priv_flags *priv_flags; - /* allow the user to control the state of the Flow - * Director ATR (Application Targeted Routing) feature - * of the driver + priv_flags = &i40e_gstrings_priv_flags[i]; + + if (priv_flags->read_only) + continue; + + if (flags & BIT(i)) + pf->flags |= priv_flags->flag; + else + pf->flags &= ~(priv_flags->flag); + } + + if (pf->hw.pf_id != 0) + goto flags_complete; + + for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { + const struct i40e_priv_flags *priv_flags; + + priv_flags = &i40e_gl_gstrings_priv_flags[j]; + + if (priv_flags->read_only) + continue; + + if (flags & BIT(i + j)) + pf->flags |= priv_flags->flag; + else + pf->flags &= ~(priv_flags->flag); + } + +flags_complete: + /* check for flags that changed */ + changed_flags ^= pf->flags; + + /* Process any additional changes needed as a result of flag changes. + * The changed_flags value reflects the list of bits that were + * changed in the code above. 
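The rewritten i40e_set_priv_flags() below relies on a snapshot-and-XOR idiom to discover which flag bits actually flipped, rather than tracking each flag by hand as the old code did. A compact sketch of just that mechanism:

u64 changed_flags = pf->flags;  /* snapshot before the update */

/* ... the table-driven loops set or clear the requested bits ... */

changed_flags ^= pf->flags;     /* XOR leaves exactly the flipped bits */

if (changed_flags & I40E_FLAG_VEB_STATS_ENABLED)
        ;       /* only a real transition warrants scheduling a reset */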
*/ - if (flags & I40E_PRIV_FLAGS_FD_ATR) { - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - } else { - pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; - - /* flush current ATR settings */ - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); - } - - if ((flags & I40E_PRIV_FLAGS_VEB_STATS) && - !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; - reset_required = true; - } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { - pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; - reset_required = true; - } - - if (pf->hw.pf_id == 0) { - if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && - !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { - pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT; - promisc_change = true; - } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && - (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { - pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT; - promisc_change = true; - } + + /* Flush current ATR settings if ATR was disabled */ + if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && + !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) { + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } - if (promisc_change) { + + /* Only allow ATR evict on hardware that is capable of handling it */ + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + + if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { + u16 sw_flags = 0, valid_flags = 0; + int ret; + if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; @@ -3137,22 +4113,17 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) } } - if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && - (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) - pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; - else - pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; - - /* if needed, issue reset to cause things to take effect */ - if (reset_required) - i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); + /* Issue reset to cause things to take effect, as additional bits + * are added we will need to create a mask of bits requiring reset + */ + if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) || + ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev))) + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); return 0; } static const struct ethtool_ops i40e_ethtool_ops = { - .get_settings = i40e_get_settings, - .set_settings = i40e_set_settings, .get_drvinfo = i40e_get_drvinfo, .get_regs_len = i40e_get_regs_len, .get_regs = i40e_get_regs, @@ -3189,6 +4160,8 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_priv_flags = i40e_set_priv_flags, .get_per_queue_coalesce = i40e_get_per_queue_coalesce, .set_per_queue_coalesce = i40e_set_per_queue_coalesce, + .get_link_ksettings = i40e_get_link_ksettings, + .set_link_ksettings = i40e_set_link_ksettings, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 82a95cc2c8ee386c725dfd01e5367bc95e26ca0a..d5c9c9e06ff57e21c1e28d09b9eea18af6e22f1a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -32,6 +32,12 @@ #include "i40e.h" #include "i40e_diag.h" #include +/* All i40e tracepoints are defined by the include below, which + * must be 
included exactly once across the whole kernel with + * CREATE_TRACE_POINTS defined + */ +#define CREATE_TRACE_POINTS +#include "i40e_trace.h" const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -39,9 +45,9 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" -#define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 27 +#define DRV_VERSION_MAJOR 2 +#define DRV_VERSION_MINOR 1 +#define DRV_VERSION_BUILD 14 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -50,13 +56,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); -static void i40e_handle_reset_warning(struct i40e_pf *pf); +static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired); static int i40e_add_vsi(struct i40e_vsi *vsi); static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired); +static int i40e_reset(struct i40e_pf *pf); +static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); @@ -286,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) **/ void i40e_service_event_schedule(struct i40e_pf *pf) { - if (!test_bit(__I40E_DOWN, &pf->state) && - !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + if (!test_bit(__I40E_VSI_DOWN, pf->state) && + !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) queue_work(i40e_wq, &pf->service_task); } @@ -299,11 +308,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf) * device is munged, not just the one netdev port, so go for the full * reset. **/ -#ifdef I40E_FCOE -void i40e_tx_timeout(struct net_device *netdev) -#else static void i40e_tx_timeout(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -372,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev) switch (pf->tx_timeout_recovery_level) { case 1: - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); break; case 2: - set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); break; case 3: - set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); @@ -408,10 +413,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. 
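The CREATE_TRACE_POINTS addition at the top of i40e_main.c follows the standard kernel tracepoint pattern the comment describes: the trace header is written against the tracepoint macros, and exactly one translation unit must expand it with CREATE_TRACE_POINTS defined so the tracepoint bodies are emitted once. In outline (the i40e_trace.h internals are not shown in this patch):

/* i40e_main.c -- the single instantiation site: */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

/* every other i40e .c file -- declaration only: */
#include "i40e_trace.h"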
**/ -#ifndef I40E_FCOE -static -#endif -void i40e_get_netdev_stats_struct(struct net_device *netdev, +static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -420,7 +422,7 @@ void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; - if (test_bit(__I40E_DOWN, &vsi->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (!vsi->tx_rings) @@ -723,55 +725,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb) veb->stat_offsets_loaded = true; } -#ifdef I40E_FCOE -/** - * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters. - * @vsi: the VSI that is capable of doing FCoE - **/ -static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - struct i40e_fcoe_stats *ofs; - struct i40e_fcoe_stats *fs; /* device's eth stats */ - int idx; - - if (vsi->type != I40E_VSI_FCOE) - return; - - idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET; - fs = &vsi->fcoe_stats; - ofs = &vsi->fcoe_stats_offsets; - - i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets); - i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords); - i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped); - i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets); - i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords); - i40e_stat_update32(hw, I40E_GL_FCOECRC(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc); - i40e_stat_update32(hw, I40E_GL_FCOELAST(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->fcoe_last_error, &fs->fcoe_last_error); - i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx), - vsi->fcoe_stat_offsets_loaded, - &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count); - - vsi->fcoe_stat_offsets_loaded = true; -} - -#endif /** * i40e_update_vsi_stats - Update the vsi statistics counters. 
* @vsi: the VSI to be updated @@ -790,7 +743,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; - u64 tx_lost_interrupt; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; @@ -801,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) u64 tx_p, tx_b; u16 q; - if (test_bit(__I40E_DOWN, &vsi->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->state) || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return; ns = i40e_get_vsi_stats_struct(vsi); @@ -816,7 +768,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; - tx_lost_interrupt = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -835,7 +786,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; - tx_lost_interrupt += p->tx_stats.tx_lost_interrupt; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -854,7 +804,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; - vsi->tx_lost_interrupt = tx_lost_interrupt; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -1101,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_lpi_count, &nsd->rx_lpi_count); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && - !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) nsd->fd_sb_status = true; else nsd->fd_sb_status = false; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && - !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) nsd->fd_atr_status = true; else nsd->fd_atr_status = false; @@ -1129,9 +1078,6 @@ void i40e_update_stats(struct i40e_vsi *vsi) i40e_update_pf_stats(pf); i40e_update_vsi_stats(vsi); -#ifdef I40E_FCOE - i40e_update_fcoe_stats(vsi); -#endif } /** @@ -1400,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, * to failed, so we don't bother to try sending the filter * to the hardware. */ - if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) + if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state)) f->state = I40E_FILTER_FAILED; else f->state = I40E_FILTER_NEW; @@ -1562,11 +1508,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) * * Returns 0 on success, negative on failure **/ -#ifdef I40E_FCOE -int i40e_set_mac(struct net_device *netdev, void *p) -#else static int i40e_set_mac(struct net_device *netdev, void *p) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -1583,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p) return 0; } - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) @@ -1626,17 +1568,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) * * Setup VSI queue mapping for enabled traffic classes. 
**/ -#ifdef I40E_FCOE -void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, - struct i40e_vsi_context *ctxt, - u8 enabled_tc, - bool is_add) -#else static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add) -#endif { struct i40e_pf *pf = vsi->back; u16 sections = 0; @@ -1686,11 +1621,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, qcount = min_t(int, pf->alloc_rss_size, num_tc_qps); break; -#ifdef I40E_FCOE - case I40E_VSI_FCOE: - qcount = num_tc_qps; - break; -#endif case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: @@ -1800,11 +1730,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) * i40e_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ -#ifdef I40E_FCOE -void i40e_set_rx_mode(struct net_device *netdev) -#else static void i40e_set_rx_mode(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -1883,19 +1809,12 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, static struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) { - while (next) { - next = hlist_entry(next->hlist.next, - typeof(struct i40e_new_mac_filter), - hlist); - - /* keep going if we found a broadcast filter */ - if (next && is_broadcast_ether_addr(next->f->macaddr)) - continue; - - break; + hlist_for_each_entry_continue(next, hlist) { + if (!is_broadcast_ether_addr(next->f->macaddr)) + return next; } - return next; + return NULL; } /** @@ -2001,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, if (fcnt != num_add) { *promisc_changed = true; - set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); dev_warn(&vsi->back->pdev->dev, "Error %s adding RX filters on %s, promiscuous mode forced on\n", i40e_aq_str(hw, aq_err), @@ -2084,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_remove_macvlan_element_data *del_list; - while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) + while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state)) usleep_range(1000, 2000); pf = vsi->back; @@ -2220,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) num_add = 0; hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { - if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state)) { + if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, + vsi->state)) { new->state = I40E_FILTER_FAILED; continue; } @@ -2308,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) * safely exit if we didn't just enter, we no longer have any failed * filters, and we have reduced filters below the threshold value. 
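The i40e_next_filter() rewrite above leans on hlist_for_each_entry_continue(), which resumes iteration at the node after its cursor argument, so the manual hlist_entry() chasing of the old version disappears. A toy example of the idiom, independent of the driver types (struct and helper names are ours):

#include <linux/list.h>

struct item {
        struct hlist_node node;
        int val;
};

/* return the first item after 'pos' with val > 0, or NULL */
static struct item *example_next_positive(struct item *pos)
{
        hlist_for_each_entry_continue(pos, node) {
                if (pos->val > 0)
                        return pos;
        }
        return NULL;
}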
*/ - if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && + if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) && !promisc_changed && !failed_filters && (vsi->active_filters < vsi->promisc_threshold)) { dev_info(&pf->pdev->dev, "filter logjam cleared on %s, leaving overflow promiscuous mode\n", vsi_name); - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); promisc_changed = true; vsi->promisc_threshold = 0; } /* if the VF is not trusted do not do promisc */ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } @@ -2346,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if ((changed_flags & IFF_PROMISC) || (promisc_changed && - test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) { + test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || - test_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state)); + test_bit(__I40E_VSI_OVERFLOW_PROMISC, + vsi->state)); if ((vsi->type == I40E_VSI_MAIN) && (pf->lan_veb != I40E_NO_VEB) && !(pf->flags & I40E_FLAG_MFP_ENABLED)) { @@ -2434,7 +2353,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (retval) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - clear_bit(__I40E_CONFIG_BUSY, &vsi->state); + clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return retval; err_no_memory: @@ -2446,7 +2365,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) spin_unlock_bh(&vsi->mac_filter_hash_lock); vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - clear_bit(__I40E_CONFIG_BUSY, &vsi->state); + clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); return -ENOMEM; } @@ -2487,13 +2406,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); - i40e_notify_client_of_l2_param_changes(vsi); + pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | + I40E_FLAG_CLIENT_L2_CHANGE); return 0; } @@ -2707,13 +2628,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) * * net_device_ops implementation for adding vlan ids **/ -#ifdef I40E_FCOE -int i40e_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -#else static int i40e_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -2744,13 +2660,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, * * net_device_ops implementation for removing vlan ids **/ -#ifdef I40E_FCOE -int i40e_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -#else static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -2920,9 +2831,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); -#ifdef I40E_FCOE - i40e_fcoe_setup_ddp_resources(vsi); -#endif return err; } @@ -2942,9 +2850,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi 
*vsi) for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) i40e_free_rx_resources(vsi->rx_rings[i]); -#ifdef I40E_FCOE - i40e_fcoe_free_ddp_resources(vsi); -#endif } /** @@ -3015,9 +2920,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) tx_ctx.qlen = ring->count; tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); -#ifdef I40E_FCOE - tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); -#endif tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) @@ -3098,7 +3000,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->rx_buf_len = vsi->rx_buf_len; - rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, + BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; @@ -3120,9 +3023,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.l2tsel = 1; /* this controls whether VLAN is stripped from inner headers */ rx_ctx.showiv = 0; -#ifdef I40E_FCOE - rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); -#endif /* set the prefena field to 1 because the manual says to */ rx_ctx.prefena = 1; @@ -3144,6 +3044,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) return -ENOMEM; } + /* configure Rx buffer alignment */ + if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) + clear_ring_build_skb_enabled(ring); + else + set_ring_build_skb_enabled(ring); + /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); @@ -3181,27 +3087,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) int err = 0; u16 i; - if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) - vsi->max_frame = vsi->netdev->mtu + ETH_HLEN - + ETH_FCS_LEN + VLAN_HLEN; - else - vsi->max_frame = I40E_RXBUFFER_2048; - - vsi->rx_buf_len = I40E_RXBUFFER_2048; - -#ifdef I40E_FCOE - /* setup rx buffer for FCoE */ - if ((vsi->type == I40E_VSI_FCOE) && - (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { - vsi->rx_buf_len = I40E_RXBUFFER_3072; - vsi->max_frame = I40E_RXBUFFER_3072; + if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { + vsi->max_frame = I40E_MAX_RXBUFFER; + vsi->rx_buf_len = I40E_RXBUFFER_2048; +#if (PAGE_SIZE < 8192) + } else if (!I40E_2K_TOO_SMALL_WITH_PADDING && + (vsi->netdev->mtu <= ETH_DATA_LEN)) { + vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; +#endif + } else { + vsi->max_frame = I40E_MAX_RXBUFFER; + vsi->rx_buf_len = (PAGE_SIZE < 8192) ? 
I40E_RXBUFFER_3072 : + I40E_RXBUFFER_2048; } -#endif /* I40E_FCOE */ - /* round up for the chip's needs */ - vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, - BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); - /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_configure_rx_ring(vsi->rx_rings[i]); @@ -3281,6 +3181,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; + /* Reset FDir counters as we're replaying all existing filters */ + pf->fd_tcp4_filter_cnt = 0; + pf->fd_udp4_filter_cnt = 0; + pf->fd_sctp4_filter_cnt = 0; + pf->fd_ip4_filter_cnt = 0; + hlist_for_each_entry_safe(filter, node, &pf->fdir_filter_list, fdir_node) { i40e_add_del_fdir(vsi, filter, true); @@ -3705,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data) * this is not a performance path and napi_schedule() * can deal with rescheduling. */ - if (!test_bit(__I40E_DOWN, &pf->state)) + if (!test_bit(__I40E_VSI_DOWN, pf->state)) napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; - set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; - set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + set_bit(__I40E_MDD_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; - set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { - if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) - set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) @@ -3738,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data) pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; - set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); } } @@ -3771,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { dev_info(&pf->pdev->dev, "device will be reset\n"); - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } ena_mask &= ~icr0_remaining; @@ -3781,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data) enable_intr: /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); - if (!test_bit(__I40E_DOWN, &pf->state)) { + if (!test_bit(__I40E_VSI_DOWN, pf->state)) { i40e_service_event_schedule(pf); i40e_irq_dynamic_enable_icr0(pf, false); } @@ -3993,11 +3899,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) * This is used by netconsole to send skbs without having to re-enable * interrupts. It's not called while the normal interrupt routine is executing. 
**/ -#ifdef I40E_FCOE -void i40e_netpoll(struct net_device *netdev) -#else static void i40e_netpoll(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -4005,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev) int i; /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &vsi->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { @@ -4017,6 +3919,8 @@ static void i40e_netpoll(struct net_device *netdev) } #endif +#define I40E_QTX_ENA_WAIT_COUNT 50 + /** * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled * @pf: the PF being configured @@ -4046,6 +3950,50 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) return 0; } +/** + * i40e_control_tx_q - Start or stop a particular Tx queue + * @pf: the PF structure + * @pf_q: the PF queue to configure + * @enable: start or stop the queue + * + * This function enables or disables a single queue. Note that any delay + * required after the operation is expected to be handled by the caller of + * this function. + **/ +static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) +{ + struct i40e_hw *hw = &pf->hw; + u32 tx_reg; + int i; + + /* warn the TX unit of coming changes */ + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); + if (!enable) + usleep_range(10, 20); + + for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { + tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); + if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == + ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + return; + + /* turn on/off the queue */ + if (enable) { + wr32(hw, I40E_QTX_HEAD(pf_q), 0); + tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; + } else { + tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } + + wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); +} + /** * i40e_vsi_control_tx - Start or stop a VSI's rings * @vsi: the VSI being configured @@ -4054,41 +4002,11 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) { struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - int i, j, pf_q, ret = 0; - u32 tx_reg; + int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - - /* warn the TX unit of coming changes */ - i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); - if (!enable) - usleep_range(10, 20); - - for (j = 0; j < 50; j++) { - tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); - if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == - ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) - break; - usleep_range(1000, 2000); - } - /* Skip if the queue is already in the requested state */ - if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) - continue; - - /* turn on/off the queue */ - if (enable) { - wr32(hw, I40E_QTX_HEAD(pf_q), 0); - tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; - } else { - tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; - } - - wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); - /* No waiting for the Tx queue to disable */ - if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) - continue; + i40e_control_tx_q(pf, pf_q, enable); /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); @@ -4100,8 +4018,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) } } - if (hw->revision_id == 0) - mdelay(50); return ret; }
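
Both new single-queue helpers, i40e_control_tx_q() above and i40e_control_rx_q() below, rely on the same request/acknowledge handshake: software toggles QENA_REQ and hardware confirms by mirroring it into QENA_STAT. A minimal sketch of the shared pattern, with the Tx-only steps (i40e_pre_tx_queue_cfg() and the head reset) omitted; i40e_control_q() is a hypothetical merged helper, not a function in the driver:

/* Sketch of the QENA_REQ/QENA_STAT handshake. reg is I40E_QTX_ENA(pf_q)
 * or I40E_QRX_ENA(pf_q); the Rx path uses the I40E_QRX_* equivalents of
 * the masks shown here.
 */
static void i40e_control_q(struct i40e_hw *hw, u32 reg, bool enable)
{
	u32 val;
	int i;

	/* let any in-flight request settle: REQ must match STAT */
	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		val = rd32(hw, reg);
		if (((val >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((val >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* already in the requested state, nothing to do */
	if (enable == !!(val & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* flip the request bit; the caller polls STAT for completion */
	if (enable)
		val |= I40E_QTX_ENA_QENA_REQ_MASK;
	else
		val &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, reg, val);
}
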
@@ -4134,6 +4050,43 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) return 0; } +/** + * i40e_control_rx_q - Start or stop a particular Rx queue + * @pf: the PF structure + * @pf_q: the PF queue to configure + * @enable: start or stop the queue + * + * This function enables or disables a single queue. Note that any delay + * required after the operation is expected to be handled by the caller of + * this function. + **/ +static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) +{ + struct i40e_hw *hw = &pf->hw; + u32 rx_reg; + int i; + + for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) { + rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); + if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == + ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + return; + + /* turn on/off the queue */ + if (enable) + rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; + else + rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; + + wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); +} + /** * i40e_vsi_control_rx - Start or stop a VSI's rings * @vsi: the VSI being configured @@ -4142,33 +4095,11 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) { struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - int i, j, pf_q, ret = 0; - u32 rx_reg; + int i, pf_q, ret = 0; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - for (j = 0; j < 50; j++) { - rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); - if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == - ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) - break; - usleep_range(1000, 2000); - } - - /* Skip if the queue is already in the requested state */ - if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) - continue; - - /* turn on/off the queue */ - if (enable) - rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; - else - rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; - wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); - /* No waiting for the Tx queue to disable */ - if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) - continue; + i40e_control_rx_q(pf, pf_q, enable); /* wait for the change to finish */ ret = i40e_pf_rxq_wait(pf, pf_q, enable); @@ -4180,6 +4111,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) } } + /* Due to HW errata, on Rx disable only, the register can indicate done + * before it really is. Needs 50ms to be sure + */ + if (!enable) + mdelay(50); + return ret; } @@ -4206,6 +4143,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) **/ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { + /* When port TX is suspended, don't wait */ + if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) + return i40e_vsi_stop_rings_no_wait(vsi); + /* do rx first for enable and last for disable * Ignore return value, we need to shutdown whatever we can */ @@ -4213,6 +4154,29 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) i40e_vsi_control_rx(vsi, false); } +/** + * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay + * @vsi: the VSI being shutdown + * + * This function stops all the rings for a VSI but does not delay to verify + * that rings have been disabled. It is expected that the caller is shutting + * down multiple VSIs at once and will delay together for all the VSIs after + * initiating the shutdown.
This is particularly useful for shutting down lots + * of VFs together. Otherwise, a large delay can be incurred while configuring + * each VSI in serial. + **/ +void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i, pf_q; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + i40e_control_tx_q(pf, pf_q, false); + i40e_control_rx_q(pf, pf_q, false); + } +} + /** * i40e_vsi_free_irq - Free the irq association with the OS * @vsi: the VSI being configured @@ -4471,17 +4435,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) **/ static void i40e_vsi_close(struct i40e_vsi *vsi) { - bool reset = false; - - if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) + struct i40e_pf *pf = vsi->back; + if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) i40e_down(vsi); i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) - reset = true; - i40e_notify_client_of_netdev_close(vsi, reset); + pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + pf->flags |= I40E_FLAG_CLIENT_RESET; } /** @@ -4490,18 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi) **/ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) { - if (test_bit(__I40E_DOWN, &vsi->state)) - return; - - /* No need to disable FCoE VSI when Tx suspended */ - if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && - vsi->type == I40E_VSI_FCOE) { - dev_dbg(&vsi->back->pdev->dev, - "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return; - } - set_bit(__I40E_NEEDS_RESTART, &vsi->state); + set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); else @@ -4514,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) **/ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) { - if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) + if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) return; - clear_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_open(vsi->netdev); else @@ -4552,21 +4506,20 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) } } -#ifdef CONFIG_I40E_DCB /** * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled * @vsi: the VSI being configured * - * This function waits for the given VSI's queues to be disabled. + * Wait until all queues on a given VSI have been disabled. 
**/ -static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) +int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - /* Check and wait for the disable status of the queue */ + /* Check and wait for the Tx queue */ ret = i40e_pf_txq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, @@ -4574,11 +4527,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) vsi->seid, pf_q); return ret; } - } - - pf_q = vsi->base_queue; - for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - /* Check and wait for the disable status of the queue */ + /* Check and wait for the Rx queue */ ret = i40e_pf_rxq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, @@ -4591,6 +4540,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) return 0; } +#ifdef CONFIG_I40E_DCB /** * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF @@ -4603,8 +4553,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) int v, ret = 0; for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { - /* No need to wait for FCoE VSI queues */ - if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { + if (pf->vsi[v]) { ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) break; @@ -4622,16 +4571,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) * @vsi: Pointer to VSI struct * * This function checks specified queue for given VSI. Detects hung condition. - * Sets hung bit since it is two step process. Before next run of service task - * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not, - * hung condition remain unchanged and during subsequent run, this function - * issues SW interrupt to recover from hung condition. + * We proactively detect hung TX queues by checking if interrupts are disabled + * but there are pending descriptors. If it appears hung, attempt to recover + * by triggering a SW interrupt. **/ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) { struct i40e_ring *tx_ring = NULL; struct i40e_pf *pf; - u32 head, val, tx_pending_hw; + u32 val, tx_pending; int i; pf = vsi->back; @@ -4657,47 +4605,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); - head = i40e_get_head(tx_ring); - - tx_pending_hw = i40e_get_tx_pending(tx_ring, false); - - /* HW is done executing descriptors, updated HEAD write back, - * but SW hasn't processed those descriptors. If interrupt is - * not generated from this point ON, it could result into - * dev_watchdog detecting timeout on those netdev_queue, - * hence proactively trigger SW interrupt. - */
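
The simplified detector above reduces to one condition: descriptors still pending while the vector's interrupt is masked. Restated as a sketch (the helper name i40e_check_tx_hang() is hypothetical; in the driver the check is inline in i40e_detect_recover_hung_queue()):

/* If descriptors are pending but the vector's interrupt is disabled,
 * no NAPI poll is coming to clean the ring, so force a software
 * writeback/interrupt. dyn_ctl is the already-read
 * I40E_PFINT_DYN_CTLN/CTL0 value.
 */
static void i40e_check_tx_hang(struct i40e_vsi *vsi,
			       struct i40e_ring *tx_ring, u32 dyn_ctl)
{
	if (i40e_get_tx_pending(tx_ring) &&
	    !(dyn_ctl & I40E_PFINT_DYN_CTLN_INTENA_MASK))
		i40e_force_wb(vsi, tx_ring->q_vector);
}
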
- if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { - /* NAPI Poll didn't run and clear since it was set */ - if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, - &tx_ring->q_vector->hung_detected)) { - netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", - vsi->seid, q_idx, tx_pending_hw, - tx_ring->next_to_clean, head, - tx_ring->next_to_use, - readl(tx_ring->tail)); - netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n", - vsi->seid, q_idx, val); - i40e_force_wb(vsi, tx_ring->q_vector); - } else { - /* First Chance - detected possible hung */ - set_bit(I40E_Q_VECTOR_HUNG_DETECT, - &tx_ring->q_vector->hung_detected); - } - } + tx_pending = i40e_get_tx_pending(tx_ring); - /* This is the case where we have interrupts missing, - * so the tx_pending in HW will most likely be 0, but we - * will have tx_pending in SW since the WB happened but the - * interrupt got lost. + /* Interrupts are disabled and TX pending is non-zero, + * trigger the SW interrupt (don't wait). Worst case + * there will be one extra interrupt, which may result + * in no queues being cleaned because they were already clean. */ - if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && - (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { - local_bh_disable(); - if (napi_reschedule(&tx_ring->q_vector->napi)) - tx_ring->tx_stats.tx_lost_interrupt++; - local_bh_enable(); - } + if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) + i40e_force_wb(vsi, tx_ring->q_vector); } /** @@ -4721,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) return; /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */ - if (test_bit(__I40E_DOWN, &vsi->back->state) || - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state)) return; /* Make sure type is MAIN VSI */ @@ -5228,20 +5144,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) continue; /* - Enable all TCs for the LAN VSI -#ifdef I40E_FCOE - * - For FCoE VSI only enable the TC configured - * as per the APP TLV -#endif * - For all others keep them at TC0 for now */ if (v == pf->lan_vsi) tc_map = i40e_pf_get_tc_map(pf); else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; -#ifdef I40E_FCOE - if (pf->vsi[v]->type == I40E_VSI_FCOE) - tc_map = i40e_get_fcoe_tc_map(pf); -#endif /* #ifdef I40E_FCOE */ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); if (ret) { @@ -5277,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } @@ -5308,10 +5216,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { dev_info(&pf->pdev->dev, "DCBX offload is not supported or is disabled for this PF.\n"); - - if (pf->flags & I40E_FLAG_MFP_ENABLED) - goto out; - } else { /* When status is not DISABLED then DCBX in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | @@ -5449,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) if (err) return err; - clear_bit(__I40E_DOWN, &vsi->state); + clear_bit(__I40E_VSI_DOWN, vsi->state); i40e_napi_enable_all(vsi); i40e_vsi_enable_irq(vsi);
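
The hunk that follows replays sideband Flow Director rules whenever the FDIR VSI comes up: the error and ATR counters are cleared, and every saved rule is re-added through the normal programming path, which re-increments the per-protocol counters zeroed by the i40e_fdir_filter_restore() hunk earlier in this patch. A condensed sketch of that replay (the helper name i40e_fdir_replay() is hypothetical):

/* Sketch of the replay performed by i40e_fdir_filter_restore():
 * zero the per-protocol counters, then reprogram every rule still
 * held in the PF's fdir_filter_list.
 */
static void i40e_fdir_replay(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node)
		i40e_add_del_fdir(vsi, filter, true);
}
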
@@ -5472,13 +5376,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi) /* replay FDIR SB filters */ if (vsi->type == I40E_VSI_FDIR) { /* reset fd counters */ - pf->fd_add_err = pf->fd_atr_cnt = 0; - if (pf->fd_tcp_rule > 0) { - pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); - pf->fd_tcp_rule = 0; - } + pf->fd_add_err = 0; + pf->fd_atr_cnt = 0; i40e_fdir_filter_restore(vsi); } @@ -5503,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) struct i40e_pf *pf = vsi->back; WARN_ON(in_interrupt()); - while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) usleep_range(1000, 2000); i40e_down(vsi); i40e_up(vsi); - clear_bit(__I40E_CONFIG_BUSY, &pf->state); + clear_bit(__I40E_CONFIG_BUSY, pf->state); } /** @@ -5535,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi) int i; /* It is assumed that the caller of this function - * sets the vsi->state __I40E_DOWN bit. + * sets the vsi->state __I40E_VSI_DOWN bit. */ if (vsi->netdev) { netif_carrier_off(vsi->netdev); @@ -5550,8 +5449,6 @@ void i40e_down(struct i40e_vsi *vsi) i40e_clean_rx_ring(vsi->rx_rings[i]); } - i40e_notify_client_of_netdev_close(vsi, false); - } /** @@ -5612,17 +5509,15 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) return ret; } -#ifdef I40E_FCOE -int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, - struct tc_to_netdev *tc) -#else static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct tc_to_netdev *tc) -#endif { - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; + + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return i40e_setup_tc(netdev, tc->mqprio->num_tc); } /** @@ -5645,8 +5540,8 @@ int i40e_open(struct net_device *netdev) int err; /* disallow open during test or if eeprom is broken */ - if (test_bit(__I40E_TESTING, &pf->state) || - test_bit(__I40E_BAD_EEPROM, &pf->state)) + if (test_bit(__I40E_TESTING, pf->state) || + test_bit(__I40E_BAD_EEPROM, pf->state)) return -EBUSY; netif_carrier_off(netdev); @@ -5675,6 +5570,8 @@ int i40e_open(struct net_device *netdev) * Finish initialization of the VSI.
* * Returns 0 on success, negative value on failure + * + * Note: expects to be called while under rtnl_lock() **/ int i40e_vsi_open(struct i40e_vsi *vsi) { @@ -5738,7 +5635,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); return err; } @@ -5753,6 +5650,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) static void i40e_fdir_filter_exit(struct i40e_pf *pf) { struct i40e_fdir_filter *filter; + struct i40e_flex_pit *pit_entry, *tmp; struct hlist_node *node2; hlist_for_each_entry_safe(filter, node2, @@ -5760,7 +5658,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) hlist_del(&filter->fdir_node); kfree(filter); } + + list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) { + list_del(&pit_entry->list); + kfree(pit_entry); + } + INIT_LIST_HEAD(&pf->l3_flex_pit_list); + + list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) { + list_del(&pit_entry->list); + kfree(pit_entry); + } + INIT_LIST_HEAD(&pf->l4_flex_pit_list); + pf->fdir_pf_active_filters = 0; + pf->fd_tcp4_filter_cnt = 0; + pf->fd_udp4_filter_cnt = 0; + pf->fd_sctp4_filter_cnt = 0; + pf->fd_ip4_filter_cnt = 0; + + /* Reprogram the default input set for TCP/IPv4 */ + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK | + I40E_L4_SRC_MASK | I40E_L4_DST_MASK); + + /* Reprogram the default input set for UDP/IPv4 */ + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK | + I40E_L4_SRC_MASK | I40E_L4_DST_MASK); + + /* Reprogram the default input set for SCTP/IPv4 */ + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK | + I40E_L4_SRC_MASK | I40E_L4_DST_MASK); + + /* Reprogram the default input set for Other/IPv4 */ + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } /** @@ -5787,12 +5721,14 @@ int i40e_close(struct net_device *netdev) * i40e_do_reset - Start a PF or Core Reset sequence * @pf: board private structure * @reset_flags: which reset is requested + * @lock_acquired: indicates whether or not the lock has been acquired + * before this function was called. * * The essential difference in resets is that the PF Reset * doesn't clear the packet buffers, doesn't reset the PE * firmware, and doesn't bother the other PFs on the chip. **/ -void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) +void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) { u32 val; @@ -5838,7 +5774,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) * for the Core Reset. 
*/ dev_dbg(&pf->pdev->dev, "PFR requested\n"); - i40e_handle_reset_warning(pf); + i40e_handle_reset_warning(pf, lock_acquired); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; @@ -5850,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && - test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { + test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, + vsi->state)) i40e_vsi_reinit_locked(pf->vsi[v]); - clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); - } } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; @@ -5864,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && - test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { - set_bit(__I40E_DOWN, &vsi->state); + test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, + vsi->state)) { + set_bit(__I40E_VSI_DOWN, vsi->state); i40e_down(vsi); - clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); } } } else { @@ -6007,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, else pf->flags &= ~I40E_FLAG_DCB_ENABLED; - set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + set_bit(__I40E_PORT_SUSPENDED, pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); @@ -6016,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ret = i40e_resume_port_tx(pf); - clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + clear_bit(__I40E_PORT_SUSPENDED, pf->state); /* In case of error no point in resuming VSIs */ if (ret) goto exit; @@ -6025,12 +5960,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); - /* Notify the client for the DCB changes */ - i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); + pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED | + I40E_FLAG_CLIENT_L2_CHANGE); } exit: @@ -6047,7 +5982,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) { rtnl_lock(); - i40e_do_reset(pf, reset_flags); + i40e_do_reset(pf, reset_flags, true); rtnl_unlock(); } @@ -6140,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) u32 fcnt_prog, fcnt_avail; struct hlist_node *node; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return; - /* Check if, FD SB or ATR was auto disabled and if there is enough room - * to re-enable - */ + /* Check if we have enough room to re-enable FDir SB capability. */ fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { - if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { - pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } } - /* Wait for some more space to be available to turn on ATR. 
We also - * must check that no existing ntuple rules for TCP are in effect + /* We should wait for even more space before re-enabling ATR. + * Additionally, we cannot enable ATR as long as we still have TCP SB + * rules active. */ - if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->fd_tcp_rule == 0)) { - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) && + (pf->fd_tcp4_filter_cnt == 0)) { + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n"); } } @@ -6218,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } pf->fd_flush_timestamp = jiffies; - pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); @@ -6237,9 +6171,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); - if (!disable_atr) - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + if (!disable_atr && !pf->fd_tcp4_filter_cnt) + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } @@ -6269,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, pf->state)) return; - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); @@ -6286,14 +6220,11 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { - if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) + if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state)) return; switch (vsi->type) { case I40E_VSI_MAIN: -#ifdef I40E_FCOE - case I40E_VSI_FCOE: -#endif if (!vsi->netdev || !vsi->netdev_registered) break; @@ -6382,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf) if (new_link == old_link && new_link_speed == old_link_speed && - (test_bit(__I40E_DOWN, &vsi->state) || + (test_bit(__I40E_VSI_DOWN, vsi->state) || new_link == netif_carrier_ok(vsi->netdev))) return; - if (!test_bit(__I40E_DOWN, &vsi->state)) + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to @@ -6413,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) int i; /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &pf->state) || - test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, pf->state) || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return; /* make sure we don't do these things too often */ @@ -6452,44 +6383,44 @@ static void i40e_reset_subtask(struct i40e_pf *pf) { u32 reset_flags = 0; - rtnl_lock(); - if 
(test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { + if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_REINIT_REQUESTED); - clear_bit(__I40E_REINIT_REQUESTED, &pf->state); + clear_bit(__I40E_REINIT_REQUESTED, pf->state); } - if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { + if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); - clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + clear_bit(__I40E_PF_RESET_REQUESTED, pf->state); } - if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { + if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); - clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state); } - if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { + if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) { reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); - clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); } - if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= BIT(__I40E_DOWN_REQUESTED); - clear_bit(__I40E_DOWN_REQUESTED, &pf->state); + if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { + reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); + clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); } /* If there's a recovery already waiting, it takes * precedence before starting a new reset sequence. */ - if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { - i40e_handle_reset_warning(pf); - goto unlock; + if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { + i40e_prep_for_reset(pf, false); + i40e_reset(pf); + i40e_rebuild(pf, false, false); } /* If we're already down or resetting, just bail */ if (reset_flags && - !test_bit(__I40E_DOWN, &pf->state) && - !test_bit(__I40E_CONFIG_BUSY, &pf->state)) - i40e_do_reset(pf, reset_flags); - -unlock: - rtnl_unlock(); + !test_bit(__I40E_VSI_DOWN, pf->state) && + !test_bit(__I40E_CONFIG_BUSY, pf->state)) { + rtnl_lock(); + i40e_do_reset(pf, reset_flags, true); + rtnl_unlock(); + } } /** @@ -6534,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) u32 val; /* Do not run clean AQ when PF reset fails */ - if (test_bit(__I40E_RESET_FAILED, &pf->state)) + if (test_bit(__I40E_RESET_FAILED, pf->state)) return; /* check for error indications */ @@ -6635,9 +6566,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) opcode); break; } - } while (pending && (i++ < pf->adminq_work_limit)); + } while (i++ < pf->adminq_work_limit); + + if (i < pf->adminq_work_limit) + clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); - clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); /* re-enable Admin queue interrupt cause */ val = rd32(hw, I40E_PFINT_ICR0_ENA); val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; @@ -6662,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) if (err) { dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", err); - set_bit(__I40E_BAD_EEPROM, &pf->state); + set_bit(__I40E_BAD_EEPROM, pf->state); } } - if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) { dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); - clear_bit(__I40E_BAD_EEPROM, &pf->state); + clear_bit(__I40E_BAD_EEPROM, pf->state); } } @@ -6975,17 +6908,19 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure + 
* @lock_acquired: indicates whether or not the lock has been acquired + * before this function was called. * * Close up the VFs and other things in prep for PF Reset. **/ -static void i40e_prep_for_reset(struct i40e_pf *pf) +static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) { struct i40e_hw *hw = &pf->hw; i40e_status ret = 0; u32 v; - clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); - if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); + if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; if (i40e_check_asq_alive(&pf->hw)) i40e_vc_notify_reset(pf); @@ -6993,7 +6928,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf) dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); /* quiesce the VSIs and their queues that are not already DOWN */ + /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */ + if (!lock_acquired) + rtnl_lock(); i40e_pf_quiesce_all_vsi(pf); + if (!lock_acquired) + rtnl_unlock(); for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) @@ -7028,31 +6968,41 @@ static void i40e_send_version(struct i40e_pf *pf) } /** - * i40e_reset_and_rebuild - reset and rebuild using a saved config + * i40e_reset - wait for a core reset to finish; if no core reset was seen, issue a PF reset * @pf: board private structure - * @reinit: if the Main VSI needs to re-initialized. **/ -static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) +static int i40e_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - u8 set_fc_aq_fail = 0; i40e_status ret; - u32 val; - u32 v; - /* Now we wait for GRST to settle out. - * We don't have to delete the VEBs or VSIs from the hw switch - * because the reset will make them disappear. - */ ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); - set_bit(__I40E_RESET_FAILED, &pf->state); - goto clear_recovery; + set_bit(__I40E_RESET_FAILED, pf->state); + clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); + } else { + pf->pfr_count++; } - pf->pfr_count++; + return ret; +}
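
The reset path is now composed of three pieces: i40e_prep_for_reset() quiesces and tears down, i40e_reset() above performs the actual hardware reset, and i40e_rebuild() below brings everything back. A sketch of how they compose (i40e_do_full_reset() is a hypothetical wrapper used only for illustration; the real compositions are i40e_reset_and_rebuild() and i40e_handle_reset_warning() further down):

/* Hypothetical composition of the split reset path. lock_acquired
 * tells the rtnl-sensitive steps whether the caller already holds
 * rtnl (as ndo callbacks do) or whether they must take it themselves
 * (as on the service task path).
 */
static void i40e_do_full_reset(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);	/* quiesce VSIs, stop HMC */
	if (i40e_reset(pf))			/* settle GRST, PF reset */
		return;				/* reset failed, bail */
	i40e_rebuild(pf, false, lock_acquired);	/* bring the world back */
}
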
+ +/** + * i40e_rebuild - rebuild using a saved config + * @pf: board private structure + * @reinit: if the Main VSI needs to be re-initialized. + * @lock_acquired: indicates whether or not the lock has been acquired + * before this function was called. + **/ +static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) +{ + struct i40e_hw *hw = &pf->hw; + u8 set_fc_aq_fail = 0; + i40e_status ret; + u32 val; + int v; - if (test_bit(__I40E_DOWN, &pf->state)) + if (test_bit(__I40E_VSI_DOWN, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); @@ -7066,7 +7016,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); i40e_clear_pxe_mode(hw); @@ -7075,8 +7025,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) goto end_core_reset; ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, - hw->func_caps.num_rx_qp, - pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + hw->func_caps.num_rx_qp, 0, 0); if (ret) { dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); goto end_core_reset; } @@ -7095,14 +7044,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ -#ifdef I40E_FCOE - i40e_init_pf_fcoe(pf); - -#endif /* do basic switch setup */ + if (!lock_acquired) + rtnl_lock(); ret = i40e_setup_pf_switch(pf, reinit); if (ret) - goto end_core_reset; + goto end_unlock; /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. @@ -7173,7 +7120,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) { dev_info(&pf->pdev->dev, "rebuild of Main VSI failed: %d\n", ret); - goto end_core_reset; + goto end_unlock; } } @@ -7216,18 +7163,45 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); - if (pf->num_alloc_vfs) { - for (v = 0; v < pf->num_alloc_vfs; v++) - i40e_reset_vf(&pf->vf[v], true); - } + /* Release the RTNL lock before we start resetting VFs */ + if (!lock_acquired) + rtnl_unlock(); + + i40e_reset_all_vfs(pf, true); /* tell the firmware that we're starting */ i40e_send_version(pf); + /* We've already released the lock, so don't do it again */ + goto end_core_reset; + +end_unlock: + if (!lock_acquired) + rtnl_unlock(); end_core_reset: - clear_bit(__I40E_RESET_FAILED, &pf->state); + clear_bit(__I40E_RESET_FAILED, pf->state); clear_recovery: - clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); + clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); +} + +/** + * i40e_reset_and_rebuild - reset and rebuild using a saved config + * @pf: board private structure + * @reinit: if the Main VSI needs to be re-initialized. + * @lock_acquired: indicates whether or not the lock has been acquired + * before this function was called. + **/ +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, + bool lock_acquired) +{ + int ret; + /* Now we wait for GRST to settle out. + * We don't have to delete the VEBs or VSIs from the hw switch + * because the reset will make them disappear. + */ + ret = i40e_reset(pf); + if (!ret) + i40e_rebuild(pf, reinit, lock_acquired); } /** @@ -7236,11 +7210,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) * * Close up the VFs and other things in prep for a Core Reset, * then get ready to rebuild the world. + * @lock_acquired: indicates whether or not the lock has been acquired + * before this function was called.
**/ -static void i40e_handle_reset_warning(struct i40e_pf *pf) +static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) { - i40e_prep_for_reset(pf); - i40e_reset_and_rebuild(pf, false); + i40e_prep_for_reset(pf, lock_acquired); + i40e_reset_and_rebuild(pf, false, lock_acquired); } /** @@ -7258,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u32 reg; int i; - if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) + if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) return; /* find what triggered the MDD event */ @@ -7310,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } /* Queue belongs to the PF, initiate a reset */ if (pf_mdd_detected) { - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } } @@ -7339,18 +7315,35 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) "Too many MDD events on VF %d, disabled\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); - set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } /* re-enable mdd interrupt cause */ - clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); } +/** + * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters + * @pf: board private structure + **/ +static void i40e_sync_udp_filters(struct i40e_pf *pf) +{ + int i; + + /* loop through and set pending bit for all active UDP filters */ + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->udp_ports[i].port) + pf->pending_udp_bitmap |= BIT_ULL(i); + } + + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; +} + /** * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure @@ -7359,7 +7352,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret; - __be16 port; + u16 port; int i; if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) @@ -7370,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { pf->pending_udp_bitmap &= ~BIT_ULL(i); - port = pf->udp_ports[i].index; + port = pf->udp_ports[i].port; if (port) ret = i40e_aq_add_udp_tunnel(hw, port, pf->udp_ports[i].type, @@ -7383,11 +7376,11 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) "%s %s port %d, index %d failed, err %s aq_err %s\n", pf->udp_ports[i].type ? "vxlan" : "geneve", port ? 
"add" : "delete", - ntohs(port), i, + port, i, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->udp_ports[i].index = 0; + pf->udp_ports[i].port = 0; } } } @@ -7405,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work) unsigned long start_time = jiffies; /* don't bother with service tasks if a reset is in progress */ - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; - } - if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) + if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; i40e_detect_recover_hung(pf); @@ -7419,23 +7411,34 @@ static void i40e_service_task(struct work_struct *work) i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); - i40e_client_subtask(pf); + if (pf->flags & I40E_FLAG_CLIENT_RESET) { + /* Client subtask will reopen next time through. */ + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); + pf->flags &= ~I40E_FLAG_CLIENT_RESET; + } else { + i40e_client_subtask(pf); + if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) { + i40e_notify_client_of_l2_param_changes( + pf->vsi[pf->lan_vsi]); + pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE; + } + } i40e_sync_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf); i40e_clean_adminq_subtask(pf); /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); + clear_bit(__I40E_SERVICE_SCHED, pf->state); /* If the tasks have taken longer than one timer cycle or there * is more work to be done, reschedule the service task now * rather than wait for the timer to tick again. */ if (time_after(jiffies, (start_time + pf->service_timer_period)) || - test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || - test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || - test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || + test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || + test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) i40e_service_event_schedule(pf); } @@ -7492,15 +7495,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) I40E_REQ_DESCRIPTOR_MULTIPLE); break; -#ifdef I40E_FCOE - case I40E_VSI_FCOE: - vsi->alloc_queue_pairs = pf->num_fcoe_qps; - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, - I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = pf->num_fcoe_msix; - break; - -#endif /* I40E_FCOE */ default: WARN_ON(1); return -ENODATA; @@ -7593,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) } vsi->type = type; vsi->back = pf; - set_bit(__I40E_DOWN, &vsi->state); + set_bit(__I40E_VSI_DOWN, vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; vsi->int_rate_limit = 0; @@ -7817,6 +7811,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) static int i40e_init_msix(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; + int cpus, extra_vectors; int vectors_left; int v_budget, i; int v_actual; @@ -7835,9 +7830,6 @@ static int i40e_init_msix(struct i40e_pf *pf) * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs * - The CPU count within the NUMA node if iWARP is enabled -#ifdef I40E_FCOE - * - The number of FCOE qps. -#endif * Once we count this up, try the request. 
* * If we can't get what we want, we'll simplify to nearly nothing @@ -7852,10 +7844,16 @@ static int i40e_init_msix(struct i40e_pf *pf) vectors_left--; } - /* reserve vectors for the main PF traffic queues */ - pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); + /* reserve some vectors for the main PF traffic queues. Initially we + * only reserve at most 50% of the available vectors, in the case that + * the number of online CPUs is large. This ensures that we can enable + * extra features as well. Once we've enabled the other features, we + * will use any remaining vectors to reach as close as we can to the + * number of online CPUs. + */ + cpus = num_online_cpus(); + pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); vectors_left -= pf->num_lan_msix; - v_budget += pf->num_lan_msix; /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@ -7868,20 +7866,6 @@ static int i40e_init_msix(struct i40e_pf *pf) } } -#ifdef I40E_FCOE - /* can we reserve enough for FCoE? */ - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - if (!vectors_left) - pf->num_fcoe_msix = 0; - else if (vectors_left >= pf->num_fcoe_qps) - pf->num_fcoe_msix = pf->num_fcoe_qps; - else - pf->num_fcoe_msix = 1; - v_budget += pf->num_fcoe_msix; - vectors_left -= pf->num_fcoe_msix; - } - -#endif /* can we reserve enough for iWARP? */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { iwarp_requested = pf->num_iwarp_msix; @@ -7918,6 +7902,23 @@ static int i40e_init_msix(struct i40e_pf *pf) } } + /* On systems with a large number of SMP cores, we previously limited + * the number of vectors for num_lan_msix to be at most 50% of the + * available vectors, to allow for other features. Now, we add back + * the remaining vectors. However, we ensure that the total + * num_lan_msix will not exceed num_online_cpus(). To do this, we + * calculate the number of vectors we can add without going over the + * cap of CPUs. For systems with a small number of CPUs this will be + * zero. + */ + extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); + pf->num_lan_msix += extra_vectors; + vectors_left -= extra_vectors; + + WARN(vectors_left < 0, + "Calculation of remaining vectors underflowed. 
This is an accounting bug when determining total MSI-X vectors.\n"); + + v_budget += pf->num_lan_msix; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!pf->msix_entries) @@ -7958,10 +7959,6 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; pf->num_vmdq_qps = 1; -#ifdef I40E_FCOE - pf->num_fcoe_qps = 0; - pf->num_fcoe_msix = 0; -#endif /* partition out the remaining vectors */ switch (vec) { @@ -7975,13 +7972,6 @@ static int i40e_init_msix(struct i40e_pf *pf) } else { pf->num_lan_msix = 2; } -#ifdef I40E_FCOE - /* give one vector to FCoE */ - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_lan_msix = 1; - pf->num_fcoe_msix = 1; - } -#endif break; default: if (pf->flags & I40E_FLAG_IWARP_ENABLED) { @@ -8001,13 +7991,6 @@ static int i40e_init_msix(struct i40e_pf *pf) (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), pf->num_lan_msix); pf->num_lan_qps = pf->num_lan_msix; -#ifdef I40E_FCOE - /* give one vector to FCoE */ - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_fcoe_msix = 1; - vec--; - } -#endif break; } } @@ -8028,13 +8011,6 @@ static int i40e_init_msix(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } -#ifdef I40E_FCOE - - if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { - dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_FCOE_ENABLED; - } -#endif i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", pf->num_lan_msix, @@ -8133,9 +8109,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_IWARP_ENABLED | -#ifdef I40E_FCOE - I40E_FLAG_FCOE_ENABLED | -#endif I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED | @@ -8196,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) /* Only request the irq if this is the first time through, and * not when we're rebuilding after a Reset */ - if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { @@ -8368,13 +8341,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), - seed_dw[i]); + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } else if (vsi->type == I40E_VSI_SRIOV) { for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, - I40E_VFQF_HKEY1(i, vf_id), - seed_dw[i]); + wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); } @@ -8392,9 +8362,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, - I40E_VFQF_HLUT1(i, vf_id), - lut_dw[i]); + wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); } @@ -8522,9 +8490,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ - if (!vsi->rss_size) - vsi->rss_size = min_t(int, pf->alloc_rss_size, - 
vsi->num_queue_pairs); + if (!vsi->rss_size) { + u16 qcount; + + qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; + vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); + } if (!vsi->rss_size) return -EINVAL; @@ -8558,6 +8529,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) * * returns 0 if rss is not enabled, if enabled returns the final rss queue * count which may be different from the requested queue count. + * Note: expects to be called while under rtnl_lock() **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { @@ -8570,12 +8542,14 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) new_rss_size = min_t(int, queue_count, pf->rss_size_max); if (queue_count != vsi->num_queue_pairs) { + u16 qcount; + vsi->req_queue_pairs = queue_count; - i40e_prep_for_reset(pf); + i40e_prep_for_reset(pf, true); pf->alloc_rss_size = new_rss_size; - i40e_reset_and_rebuild(pf, true); + i40e_reset_and_rebuild(pf, true, true); /* Discard the user configured hash keys and lut, if less * queues are enabled. @@ -8587,8 +8561,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) } /* Reset vsi->rss_size, as number of enabled queues changed */ - vsi->rss_size = min_t(int, pf->alloc_rss_size, - vsi->num_queue_pairs); + qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; + vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); i40e_pf_config_rss(pf); } @@ -8821,10 +8795,6 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->num_iwarp_msix = (int)num_online_cpus() + 1; } -#ifdef I40E_FCOE - i40e_init_pf_fcoe(pf); - -#endif /* I40E_FCOE */ #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; @@ -8851,9 +8821,9 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; } else { - pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; } pf->eeprom_version = 0xDEAD; @@ -8913,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) need_reset = true; i40e_fdir_filter_exit(pf); } - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_SB_AUTO_DISABLED); /* reset fd counters */ - pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; - pf->fdir_pf_active_filters = 0; + pf->fd_add_err = 0; + pf->fd_atr_cnt = 0; /* if ATR was auto disabled it can be re-enabled.
*/ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } } @@ -8955,6 +8925,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi) * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() **/ static int i40e_set_features(struct net_device *netdev, netdev_features_t features) @@ -8978,7 +8949,7 @@ static int i40e_set_features(struct net_device *netdev, need_reset = i40e_set_ntuple(pf, features); if (need_reset) - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); return 0; } @@ -8990,12 +8961,12 @@ static int i40e_set_features(struct net_device *netdev, * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ -static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) +static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->udp_ports[i].index == port) + if (pf->udp_ports[i].port == port) return i; } @@ -9013,7 +8984,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - __be16 port = ti->port; + u16 port = ntohs(ti->port); u8 next_idx; u8 idx; @@ -9021,8 +8992,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "port %d already offloaded\n", - ntohs(port)); + netdev_info(netdev, "port %d already offloaded\n", port); return; } @@ -9031,7 +9001,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", - ntohs(port)); + port); return; } @@ -9049,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, } /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].port = port; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } @@ -9065,7 +9035,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - __be16 port = ti->port; + u16 port = ntohs(ti->port); u8 idx; idx = i40e_get_udp_port_idx(pf, port); @@ -9090,14 +9060,14 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, /* if port exists, set it to 0 (mark for deletion) * and make it pending */ - pf->udp_ports[idx].index = 0; + pf->udp_ports[idx].port = 0; pf->pending_udp_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; return; not_found: netdev_warn(netdev, "UDP port %d was not found, not deleting\n", - ntohs(port)); + port); } static int i40e_get_phys_port_id(struct net_device *netdev, @@ -9174,6 +9144,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * is to change the mode then that 
requires a PF reset to * allow rebuild of the components with required hardware * bridge mode enabled. + * + * Note: expects to be called while under rtnl_lock() **/ static int i40e_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, @@ -9229,7 +9201,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; else pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), + true); break; } } @@ -9352,10 +9325,6 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_poll_controller = i40e_netpoll, #endif .ndo_setup_tc = __i40e_setup_tc, -#ifdef I40E_FCOE - .ndo_fcoe_enable = i40e_fcoe_enable, - .ndo_fcoe_disable = i40e_fcoe_disable, -#endif .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, @@ -9388,6 +9357,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) u8 broadcast[ETH_ALEN]; u8 mac_addr[ETH_ALEN]; int etherdev_size; + netdev_features_t hw_enc_features; + netdev_features_t hw_features; etherdev_size = sizeof(struct i40e_netdev_priv); netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); @@ -9398,52 +9369,57 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HIGHDMA | - NETIF_F_SOFT_FEATURES | - NETIF_F_TSO | - NETIF_F_TSO_ECN | - NETIF_F_TSO6 | - NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | - NETIF_F_GSO_IPXIP4 | - NETIF_F_GSO_IPXIP6 | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL | - NETIF_F_SCTP_CRC | - NETIF_F_RXHASH | - NETIF_F_RXCSUM | - 0; + hw_enc_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + 0; if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + netdev->hw_enc_features |= hw_enc_features; + /* record features VLANs can make use of */ - netdev->vlan_features |= netdev->hw_enc_features | - NETIF_F_TSO_MANGLEID; + netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) netdev->hw_features |= NETIF_F_NTUPLE; + hw_features = hw_enc_features | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; - netdev->hw_features |= netdev->hw_enc_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= hw_features; - netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); - /* The following steps are necessary to prevent reception - * of tagged packets - some older NVM configurations load a - * default a MAC-VLAN filter that accepts any tagged packet - * which must be replaced by a normal filter. + /* The following steps are necessary for two reasons. 
First, + * some older NVM configurations load a default MAC-VLAN + * filter that will accept any tagged packet, and we want to + * replace this with a normal filter. Additionally, it is + * possible our MAC address was provided by the platform using + * Open Firmware or similar. + * + * Thus, we need to remove the default filter and install one + * specific to the MAC address. */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -9489,9 +9465,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->netdev_ops = &i40e_netdev_ops; netdev->watchdog_timeo = 5 * HZ; i40e_set_ethtool_ops(netdev); -#ifdef I40E_FCOE - i40e_fcoe_config_netdev(netdev, vsi); -#endif /* MTU range: 68 - 9706 */ netdev->min_mtu = ETH_MIN_MTU; @@ -9715,16 +9688,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; -#ifdef I40E_FCOE - case I40E_VSI_FCOE: - ret = i40e_fcoe_vsi_init(vsi, &ctxt); - if (ret) { - dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); - return ret; - } - break; - -#endif /* I40E_FCOE */ case I40E_VSI_IWARP: /* send down message to iWARP */ break; @@ -9751,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } vsi->active_filters = 0; - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); spin_lock_bh(&vsi->mac_filter_hash_lock); /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { @@ -9804,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) return -ENODEV; } if (vsi == pf->vsi[pf->lan_vsi] && - !test_bit(__I40E_DOWN, &pf->state)) { + !test_bit(__I40E_VSI_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } @@ -10141,7 +10104,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, } } case I40E_VSI_VMDQ2: - case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; @@ -10789,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_ptp_init(pf); + /* repopulate tunnel port filters */ + i40e_sync_udp_filters(pf); + return ret; } @@ -10801,9 +10766,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) int queues_left; pf->num_lan_qps = 0; -#ifdef I40E_FCOE - pf->num_fcoe_qps = 0; -#endif /* Find the max queues to be put into basic use. 
We'll always be * using TC0, whether or not DCB is running, and TC0 will get the @@ -10820,9 +10782,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | -#ifdef I40E_FCOE - I40E_FLAG_FCOE_ENABLED | -#endif I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | @@ -10839,9 +10798,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | -#ifdef I40E_FCOE - I40E_FLAG_FCOE_ENABLED | -#endif I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_ENABLED | @@ -10862,22 +10818,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= pf->num_lan_qps; } -#ifdef I40E_FCOE - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - if (I40E_DEFAULT_FCOE <= queues_left) { - pf->num_fcoe_qps = I40E_DEFAULT_FCOE; - } else if (I40E_MINIMUM_FCOE <= queues_left) { - pf->num_fcoe_qps = I40E_MINIMUM_FCOE; - } else { - pf->num_fcoe_qps = 0; - pf->flags &= ~I40E_FLAG_FCOE_ENABLED; - dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); - } - - queues_left -= pf->num_fcoe_qps; - } - -#endif if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ @@ -10909,9 +10849,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); -#ifdef I40E_FCOE - dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); -#endif } /** @@ -10978,10 +10915,6 @@ static void i40e_print_features(struct i40e_pf *pf) i += snprintf(&buf[i], REMAIN(i), " Geneve"); if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); -#ifdef I40E_FCOE - if (pf->flags & I40E_FLAG_FCOE_ENABLED) - i += snprintf(&buf[i], REMAIN(i), " FCOE"); -#endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i += snprintf(&buf[i], REMAIN(i), " VEB"); else @@ -10994,20 +10927,18 @@ static void i40e_print_features(struct i40e_pf *pf) /** * i40e_get_platform_mac_addr - get platform-specific MAC address - * * @pdev: PCI device information struct * @pf: board private structure * - * Look up the MAC address in Open Firmware on systems that support it, - * and use IDPROM on SPARC if no OF address is found. On return, the - * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value - * has been selected. + * Look up the MAC address for the device. First we'll try + * eth_platform_get_mac_address, which will check Open Firmware, or arch + * specific fallback. Otherwise, we'll default to the stored value in + * firmware. 
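The rewritten MAC lookup described above tries the platform first and only falls back to what the firmware reports. A minimal sketch of that ordering, using stand-in functions rather than the driver's eth_platform_get_mac_address()/i40e_get_mac_addr():

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins model the fallback order only; not the driver's APIs. */
    static int platform_get_mac(unsigned char *mac)
    {
        (void)mac;
        return -1;              /* pretend no Open Firmware address */
    }

    static void nvm_get_mac(unsigned char *mac)
    {
        static const unsigned char stored[6] = { 0x02, 0, 0, 0, 0, 1 };

        memcpy(mac, stored, sizeof(stored));
    }

    int main(void)
    {
        unsigned char mac[6];

        if (platform_get_mac(mac))      /* non-zero: not found */
            nvm_get_mac(mac);           /* firmware-stored default */

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }

This ordering also removes the need for the old I40E_FLAG_PF_MAC bookkeeping, since the result is the address itself rather than a flag plus a later read.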
**/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { - pf->flags &= ~I40E_FLAG_PF_MAC; - if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) - pf->flags |= I40E_FLAG_PF_MAC; + if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) + i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); } /** @@ -11072,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } pf->next_vsi = 0; pf->pdev = pdev; - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); hw = &pf->hw; hw->back = pf; @@ -11098,6 +11029,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; + INIT_LIST_HEAD(&pf->l3_flex_pit_list); + INIT_LIST_HEAD(&pf->l4_flex_pit_list); + /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ @@ -11196,8 +11130,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, - hw->func_caps.num_rx_qp, - pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + hw->func_caps.num_rx_qp, 0, 0); if (err) { dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); goto err_init_lan_hmc; @@ -11219,9 +11152,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_aq_stop_lldp(hw, true, NULL); } - i40e_get_mac_addr(hw, hw->mac.addr); /* allow a platform config to override the HW addr */ i40e_get_platform_mac_addr(pdev, pf); + if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; @@ -11232,18 +11165,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) pf->flags |= I40E_FLAG_PORT_ID_VALID; -#ifdef I40E_FCOE - err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); - if (err) - dev_info(&pdev->dev, - "(non-fatal) SAN MAC retrieval failed: %d\n", err); - if (!is_valid_ether_addr(hw->mac.san_addr)) { - dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", - hw->mac.san_addr); - ether_addr_copy(hw->mac.san_addr, hw->mac.addr); - } - dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); -#endif /* I40E_FCOE */ pci_set_drvdata(pdev, pf); pci_save_state(pdev); @@ -11261,8 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); - pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; + clear_bit(__I40E_SERVICE_SCHED, pf->state); /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); @@ -11300,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && - !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } @@ -11373,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * before setting up the misc vector or we get a race and the vector * ends up disabled forever. 
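The pattern of dropping the & in these set_bit()/clear_bit()/test_bit() calls is consistent with the state word becoming a DECLARE_BITMAP-style array (an inference from the diff, not spelled out in it): the array name already decays to the unsigned long pointer the bit helpers take. A self-contained model of that change, with illustrative names:

    #include <stdio.h>

    enum { STATE_DOWN, STATE_SUSPENDED, STATE_NBITS };

    #define LONG_BITS (8 * sizeof(unsigned long))
    #define BITMAP_LONGS(n) (((n) + LONG_BITS - 1) / LONG_BITS)

    static void set_bit_(unsigned int nr, unsigned long *map)
    {
        map[nr / LONG_BITS] |= 1UL << (nr % LONG_BITS);
    }

    static int test_bit_(unsigned int nr, const unsigned long *map)
    {
        return (map[nr / LONG_BITS] >> (nr % LONG_BITS)) & 1;
    }

    int main(void)
    {
        unsigned long state[BITMAP_LONGS(STATE_NBITS)] = { 0 };

        set_bit_(STATE_DOWN, state);   /* array name is already a pointer */
        printf("down=%d suspended=%d\n",
               test_bit_(STATE_DOWN, state),
               test_bit_(STATE_SUSPENDED, state));
        return 0;
    }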
*/ - clear_bit(__I40E_DOWN, &pf->state); + clear_bit(__I40E_VSI_DOWN, pf->state); /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI @@ -11393,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && - !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; @@ -11434,16 +11354,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) round_jiffies(jiffies + pf->service_timer_period)); /* add this PF to client device list and launch a client service task */ - err = i40e_lan_add_device(pf); - if (err) - dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", - err); - -#ifdef I40E_FCOE - /* create FCoE interface */ - i40e_fcoe_vsi_setup(pf); + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + err = i40e_lan_add_device(pf); + if (err) + dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", + err); + } -#endif #define PCI_SPEED_SIZE 8 #define PCI_WIDTH_SIZE 8 /* Devices on the IOSF bus do not have this information @@ -11531,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Unwind what we've done if something failed in the setup */ err_vsis: - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); err_switch_setup: @@ -11582,13 +11499,18 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); if (pf->service_timer.data) del_timer_sync(&pf->service_timer); if (pf->service_task.func) cancel_work_sync(&pf->service_task); + /* Client close must be called explicitly here because the timer + * has been stopped. 
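The err_vsis/err_switch_setup labels visible above follow the standard kernel goto-unwind idiom: each label releases only what was acquired before the failing step, so the error paths stay in strict reverse order of setup. A tiny runnable illustration, with heap allocations standing in for interrupt schemes and VSI arrays:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe(void)
    {
        char *a, *b;

        a = malloc(16);
        if (!a)
            goto err_a;

        b = malloc(16);
        if (!b)
            goto err_b;

        /* success; freed here only to keep the demo leak-free */
        free(b);
        free(a);
        return 0;

    err_b:
        free(a);        /* undo only the earlier step */
    err_a:
        return -1;
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }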
+ */ + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; @@ -11615,10 +11537,11 @@ static void i40e_remove(struct pci_dev *pdev) i40e_vsi_release(pf->vsi[pf->lan_vsi]); /* remove attached clients */ - ret_code = i40e_lan_del_device(pf); - if (ret_code) { - dev_warn(&pdev->dev, "Failed to delete client device: %d\n", - ret_code); + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + ret_code = i40e_lan_del_device(pf); + if (ret_code) + dev_warn(&pdev->dev, "Failed to delete client device: %d\n", + ret_code); } /* shutdown and destroy the HMC */ @@ -11685,9 +11608,9 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, } /* shutdown all operations */ - if (!test_bit(__I40E_SUSPENDED, &pf->state)) { + if (!test_bit(__I40E_SUSPENDED, pf->state)) { rtnl_lock(); - i40e_prep_for_reset(pf); + i40e_prep_for_reset(pf, true); rtnl_unlock(); } @@ -11752,11 +11675,11 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) struct i40e_pf *pf = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); - if (test_bit(__I40E_SUSPENDED, &pf->state)) + if (test_bit(__I40E_SUSPENDED, pf->state)) return; rtnl_lock(); - i40e_handle_reset_warning(pf); + i40e_handle_reset_warning(pf, true); rtnl_unlock(); } @@ -11816,10 +11739,10 @@ static void i40e_shutdown(struct pci_dev *pdev) struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); rtnl_lock(); - i40e_prep_for_reset(pf); + i40e_prep_for_reset(pf, true); rtnl_unlock(); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); @@ -11829,11 +11752,16 @@ static void i40e_shutdown(struct pci_dev *pdev) cancel_work_sync(&pf->service_task); i40e_fdir_teardown(pf); + /* Client close must be called explicitly here because the timer + * has been stopped. + */ + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); rtnl_lock(); - i40e_prep_for_reset(pf); + i40e_prep_for_reset(pf, true); rtnl_unlock(); wr32(hw, I40E_PFPM_APM, @@ -11860,14 +11788,14 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) struct i40e_hw *hw = &pf->hw; int retval = 0; - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); rtnl_lock(); - i40e_prep_for_reset(pf); + i40e_prep_for_reset(pf, true); rtnl_unlock(); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? 
I40E_PFPM_APM_APME_MASK : 0)); @@ -11912,10 +11840,10 @@ static int i40e_resume(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); /* handling the reset will rebuild the device state */ - if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { - clear_bit(__I40E_DOWN, &pf->state); + if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { + clear_bit(__I40E_VSI_DOWN, pf->state); rtnl_lock(); - i40e_reset_and_rebuild(pf, false); + i40e_reset_and_rebuild(pf, false, true); rtnl_unlock(); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 38ee18f1112444df1ad753f845d1dd1b1fd6ddcc..800bd55d0159c083c3d2267c5eda0289b1d6435c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, { enum i40e_status_code ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_read_nvm_word_aq(hw, offset, data); - i40e_release_nvm(hw); + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); } - } else { - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + i40e_release_nvm(hw); } return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index fea81ed065db8a57a26d89e31b87758b5c069d20..80e66da6b145e07ac4faaf761b9788d67118fc40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -78,7 +78,4 @@ do { \ } while (0) typedef enum i40e_status_code i40e_status; -#ifdef CONFIG_I40E_FCOE -#define I40E_FCOE -#endif #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 2551fc8274441f81196c64d34c6eef7d9dbecad1..c56d976cf85aa8a73606fe6b45febbb758461bea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); i40e_status i40e_validate_mac_addr(u8 *mac_addr); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); -#ifdef I40E_FCOE -i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -#endif /* prototype for functions used for NVM access */ i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_acquire_nvm(struct i40e_hw *hw, @@ -380,4 +377,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); +i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details); +struct i40e_generic_seg_header * +i40e_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_header); +enum i40e_status_code +i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, + u32 track_id); +enum i40e_status_code +i40e_add_pinfo_to_list(struct i40e_hw *hw, + struct 
i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 2caee35528fad6151f2c6f1a4eeac4d336596a8f..18c1cc08da97da972daf97a8df01e69f8039d31c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -358,7 +358,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); dev_kfree_skb_any(pf->ptp_tx_skb); pf->ptp_tx_skb = NULL; - clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); } /** @@ -768,7 +768,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) if (pf->ptp_tx_skb) { dev_kfree_skb_any(pf->ptp_tx_skb); pf->ptp_tx_skb = NULL; - clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); } if (pf->ptp_clock) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..d3e55f54a05e2007399f457e01c7424b5196c9ed --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -0,0 +1,229 @@ +/******************************************************************************* + * + * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver + * Copyright(c) 2013 - 2017 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* Modeled on trace-events-sample.h */ + +/* The trace subsystem name for i40e will be "i40e". + * + * This file is named i40e_trace.h. + * + * Since this include file's name is different from the trace + * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end + * of this file. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i40e + +/* See trace-events-sample.h for a detailed description of why this + * guard clause is different from most normal include files. + */ +#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _I40E_TRACE_H_ + +#include + +/** + * i40e_trace() macro enables shared code to refer to trace points + * like: + * + * trace_i40e{,vf}_example(args...) + * + * ... as: + * + * i40e_trace(example, args...) + * + * ... to resolve to the PF or VF version of the tracepoint without + * ifdefs, and to allow tracepoints to be disabled entirely at build + * time. + * + * Trace point should always be referred to in the driver via this + * macro. + * + * Similarly, i40e_trace_enabled(trace_name) wraps references to + * trace_i40e{,vf}__enabled() functions. 
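The macros introduced here rely on preprocessor token pasting so shared code resolves to the PF or VF tracepoint name at build time. This stand-alone demo shows the same expansion with a plain stub in place of a generated tracepoint (the args... form is a GNU extension, as in the header itself):

    #include <stdio.h>

    /* Stub; a real build has trace_i40e_* / trace_i40evf_* generated
     * by the tracing framework. */
    static void trace_i40e_example(int arg)
    {
        printf("trace_i40e_example(%d)\n", arg);
    }

    #define _I40E_TRACE_NAME(trace_name) (trace_ ## i40e ## _ ## trace_name)
    #define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
    #define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)

    int main(void)
    {
        i40e_trace(example, 42);   /* expands to trace_i40e_example(42) */
        return 0;
    }

In the i40evf twin of this header the pasted prefix would be i40evf, which is the whole point of routing every call site through the macro.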
+ */ +#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40e ## _ ## trace_name) +#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name) + +#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args) + +#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() + +/* Events common to PF and VF. Corresponding versions will be defined + * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() + * macro above will select the right trace point name for the driver + * being built from shared code. + */ + +/* Events related to a vsi & ring */ +DECLARE_EVENT_CLASS( + i40e_tx_template, + + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf), + + /* The convention here is to make the first fields in the + * TP_STRUCT match the TP_PROTO exactly. This enables the use + * of the args struct generated by the tplist tool (from the + * bcc-tools package) to be used for those fields. To access + * fields other than the tracepoint args will require the + * tplist output to be adjusted. + */ + TP_STRUCT__entry( + __field(void*, ring) + __field(void*, desc) + __field(void*, buf) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->desc = desc; + __entry->buf = buf; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s ring: %p desc: %p buf %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->buf) +); + +DEFINE_EVENT( + i40e_tx_template, i40e_clean_tx_irq, + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf)); + +DEFINE_EVENT( + i40e_tx_template, i40e_clean_tx_irq_unmap, + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf)); + +DECLARE_EVENT_CLASS( + i40e_rx_template, + + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb), + + TP_STRUCT__entry( + __field(void*, ring) + __field(void*, desc) + __field(void*, skb) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->desc = desc; + __entry->skb = skb; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s ring: %p desc: %p skb %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->skb) +); + +DEFINE_EVENT( + i40e_rx_template, i40e_clean_rx_irq, + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb)); + +DEFINE_EVENT( + i40e_rx_template, i40e_clean_rx_irq_rx, + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb)); + +DECLARE_EVENT_CLASS( + i40e_xmit_template, + + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring), + + TP_STRUCT__entry( + __field(void*, skb) + __field(void*, ring) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->skb = skb; + __entry->ring = ring; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s skb: %p ring: %p", + __get_str(devname), __entry->skb, + __entry->ring) +); + +DEFINE_EVENT( + i40e_xmit_template, i40e_xmit_frame_ring, + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring)); + +DEFINE_EVENT( + i40e_xmit_template, i40e_xmit_frame_ring_drop, + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), 
+ + TP_ARGS(skb, ring)); + +/* Events unique to the PF. */ + +#endif /* _I40E_TRACE_H_ */ +/* This must be outside ifdef _I40E_TRACE_H */ + +/* This trace include file is not located in the .../include/trace + * with the kernel tracepoint definitions, because we're a loadable + * module. + */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE i40e_trace +#include diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 97d46058d71d3c118dfa8f5408b888a63fd1a933..29321a6167a6675757e74ab4e3cd1a100cb423b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -27,6 +27,7 @@ #include #include #include "i40e.h" +#include "i40e_trace.h" #include "i40e_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, @@ -71,6 +72,9 @@ static void i40e_fdir(struct i40e_ring *tx_ring, flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK & (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); + flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK & + (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT); + /* Use LAN VSI Id if not programmed by user */ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK & ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) << @@ -203,7 +207,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; struct udphdr *udp; struct iphdr *ip; - bool err = false; u8 *raw_packet; int ret; static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, @@ -219,18 +222,28 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET + sizeof(struct iphdr)); - ip->daddr = fd_data->dst_ip[0]; + ip->daddr = fd_data->dst_ip; udp->dest = fd_data->dst_port; - ip->saddr = fd_data->src_ip[0]; + ip->saddr = fd_data->src_ip; udp->source = fd_data->src_port; + if (fd_data->flex_filter) { + u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN; + __be16 pattern = fd_data->flex_word; + u16 off = fd_data->flex_offset; + + *((__force __be16 *)(payload + off)) = pattern; + } + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); if (ret) { dev_info(&pf->pdev->dev, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); - err = true; + /* Free the packet buffer since it wasn't added to the ring */ + kfree(raw_packet); + return -EOPNOTSUPP; } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, @@ -241,10 +254,13 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "Filter deleted for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); } - if (err) - kfree(raw_packet); - return err ? 
-EOPNOTSUPP : 0; + if (add) + pf->fd_udp4_filter_cnt++; + else + pf->fd_udp4_filter_cnt--; + + return 0; } #define I40E_TCPIP_DUMMY_PACKET_LEN 54 @@ -263,7 +279,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; struct tcphdr *tcp; struct iphdr *ip; - bool err = false; u8 *raw_packet; int ret; /* Dummy packet */ @@ -281,39 +296,110 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET + sizeof(struct iphdr)); - ip->daddr = fd_data->dst_ip[0]; + ip->daddr = fd_data->dst_ip; tcp->dest = fd_data->dst_port; - ip->saddr = fd_data->src_ip[0]; + ip->saddr = fd_data->src_ip; tcp->source = fd_data->src_port; + if (fd_data->flex_filter) { + u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN; + __be16 pattern = fd_data->flex_word; + u16 off = fd_data->flex_offset; + + *((__force __be16 *)(payload + off)) = pattern; + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + /* Free the packet buffer since it wasn't added to the ring */ + kfree(raw_packet); + return -EOPNOTSUPP; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + if (add) { - pf->fd_tcp_rule++; + pf->fd_tcp4_filter_cnt++; if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); - pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; } else { - pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ? 
- (pf->fd_tcp_rule - 1) : 0; - if (pf->fd_tcp_rule == 0) { - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - } + pf->fd_tcp4_filter_cnt--; } - fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + return 0; +} + +#define I40E_SCTPIP_DUMMY_PACKET_LEN 46 +/** + * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct sctphdr *sctp; + struct iphdr *ip; + u8 *raw_packet; + int ret; + /* Dummy packet */ + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + ip->daddr = fd_data->dst_ip; + sctp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip; + sctp->source = fd_data->src_port; + + if (fd_data->flex_filter) { + u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN; + __be16 pattern = fd_data->flex_word; + u16 off = fd_data->flex_offset; + + *((__force __be16 *)(payload + off)) = pattern; + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); if (ret) { dev_info(&pf->pdev->dev, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); - err = true; + /* Free the packet buffer since it wasn't added to the ring */ + kfree(raw_packet); + return -EOPNOTSUPP; } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", fd_data->pctype, fd_data->fd_id); else dev_info(&pf->pdev->dev, @@ -321,10 +407,12 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, fd_data->pctype, fd_data->fd_id); } - if (err) - kfree(raw_packet); + if (add) + pf->fd_sctp4_filter_cnt++; + else + pf->fd_sctp4_filter_cnt--; - return err ? 
-EOPNOTSUPP : 0; + return 0; } #define I40E_IP_DUMMY_PACKET_LEN 34 @@ -343,7 +431,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, { struct i40e_pf *pf = vsi->back; struct iphdr *ip; - bool err = false; u8 *raw_packet; int ret; int i; @@ -359,18 +446,29 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); - ip->saddr = fd_data->src_ip[0]; - ip->daddr = fd_data->dst_ip[0]; + ip->saddr = fd_data->src_ip; + ip->daddr = fd_data->dst_ip; ip->protocol = 0; + if (fd_data->flex_filter) { + u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN; + __be16 pattern = fd_data->flex_word; + u16 off = fd_data->flex_offset; + + *((__force __be16 *)(payload + off)) = pattern; + } + fd_data->pctype = i; ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); - if (ret) { dev_info(&pf->pdev->dev, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); - err = true; + /* The packet buffer wasn't added to the ring so we + * need to free it now. + */ + kfree(raw_packet); + return -EOPNOTSUPP; } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, @@ -383,10 +481,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, } } - if (err) - kfree(raw_packet); + if (add) + pf->fd_ip4_filter_cnt++; + else + pf->fd_ip4_filter_cnt--; - return err ? -EOPNOTSUPP : 0; + return 0; } /** @@ -409,6 +509,9 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, case UDP_V4_FLOW: ret = i40e_add_del_fdir_udpv4(vsi, input, add); break; + case SCTP_V4_FLOW: + ret = i40e_add_del_fdir_sctpv4(vsi, input, add); + break; case IP_USER_FLOW: switch (input->ip4_proto) { case IPPROTO_TCP: @@ -417,19 +520,23 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, case IPPROTO_UDP: ret = i40e_add_del_fdir_udpv4(vsi, input, add); break; + case IPPROTO_SCTP: + ret = i40e_add_del_fdir_sctpv4(vsi, input, add); + break; case IPPROTO_IP: ret = i40e_add_del_fdir_ipv4(vsi, input, add); break; default: /* We cannot support masking based on protocol */ - goto unsupported_flow; + dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n", + input->ip4_proto); + return -EINVAL; } break; default: -unsupported_flow: - dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", + dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n", input->flow_type); - ret = -EINVAL; + return -EINVAL; } /* The buffer allocated here will be normally be freed by @@ -476,7 +583,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, * progress do nothing, once flush is complete the state will * be cleared. 
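Each of the flex-filter branches in these hunks stamps a 16-bit match word into the dummy packet at the user's chosen offset before the filter is programmed. A runnable sketch of that one step, with invented sizes and offsets, using memcpy() in place of the driver's cast-store (which is fine on kernel-mapped memory but not in portable C):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DUMMY_PACKET_LEN 46    /* e.g. the SCTPv4 dummy header length */

    int main(void)
    {
        uint8_t raw_packet[64] = { 0 };
        uint16_t pattern = 0xabcd; /* flex_word, already byte-swapped */
        size_t off = 6;            /* flex_offset into the payload */
        uint8_t *payload = raw_packet + DUMMY_PACKET_LEN;

        /* stamp the match word into the payload before programming */
        memcpy(payload + off, &pattern, sizeof(pattern));

        printf("payload[%zu..%zu] = %02x %02x\n", off, off + 1,
               payload[off], payload[off + 1]);
        return 0;
    }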
*/ - if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) return; pf->fd_add_err++; @@ -484,9 +591,9 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && - (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { - pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) { + pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } /* filter programming failed most likely due to table full */ @@ -498,12 +605,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, */ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - !(pf->auto_disable_flags & - I40E_FLAG_FD_SB_ENABLED)) { + !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) { + pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED; if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); - pf->auto_disable_flags |= - I40E_FLAG_FD_SB_ENABLED; } } } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { @@ -599,19 +704,15 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors - * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) +u32 i40e_get_tx_pending(struct i40e_ring *ring) { u32 head, tail; - if (!in_sw) - head = i40e_get_head(ring); - else - head = ring->next_to_clean; + head = i40e_get_head(ring); tail = readl(ring->tail); if (head != tail) @@ -657,6 +758,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* prevent any other reads prior to eop_desc */ read_barrier_depends(); + i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ if (tx_head == tx_desc) break; @@ -683,6 +785,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* unmap remaining buffers */ while (tx_desc != eop_desc) { + i40e_trace(clean_tx_irq_unmap, + tx_ring, tx_desc, tx_buf); tx_buf++; tx_desc++; @@ -734,11 +838,11 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. 
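i40e_get_tx_pending(), simplified above to always use the software head copy, is ring-occupancy arithmetic with wraparound. A compilable version of that math with arbitrary indices:

    #include <stdio.h>

    /* head: last descriptor reported done;
     * tail: next descriptor software hands to hardware. */
    static unsigned int tx_pending(unsigned int head, unsigned int tail,
                                   unsigned int count)
    {
        if (head == tail)
            return 0;

        return head < tail ? tail - head : tail + count - head;
    }

    int main(void)
    {
        printf("%u\n", tx_pending(2, 7, 512));     /* 5, no wrap */
        printf("%u\n", tx_pending(510, 3, 512));   /* 5, wrapped */
        return 0;
    }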
*/ - unsigned int j = i40e_get_tx_pending(tx_ring, false); + unsigned int j = i40e_get_tx_pending(tx_ring); if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &vsi->state) && + !test_bit(__I40E_VSI_DOWN, vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -756,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &vsi->state)) { + !test_bit(__I40E_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -929,10 +1033,30 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) return false; } +/** + * i40e_rx_is_programming_status - check for programming status descriptor + * @qw: qword representing status_error_len in CPU ordering + * + * The value of in the descriptor length field indicate if this + * is a programming status descriptor for flow director or FCoE + * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise + * it is a packet descriptor. + **/ +static inline bool i40e_rx_is_programming_status(u64 qw) +{ + /* The Rx filter programming status and SPH bit occupy the same + * spot in the descriptor. Since we don't support packet split we + * can just reuse the bit as an indication that this is a + * programming status descriptor. + */ + return qw & I40E_RXD_QW1_LENGTH_SPH_MASK; +} + /** * i40e_clean_programming_status - clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @rx_desc: the rx descriptor written back by HW + * @qw: qword representing status_error_len in CPU ordering * * Flow director should handle FD_FILTER_STATUS to check its filter programming * status being successful or not and take actions accordingly. FCoE should @@ -940,22 +1064,23 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) * **/ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc) + union i40e_rx_desc *rx_desc, + u64 qw) { - u64 qw; + u32 ntc = rx_ring->next_to_clean + 1; u8 id; - qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, rx_desc, id); -#ifdef I40E_FCOE - else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) || - (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS)) - i40e_fcoe_handle_status(rx_ring, rx_desc, id); -#endif } /** @@ -1010,7 +1135,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) **/ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) { - struct device *dev = rx_ring->dev; unsigned long bi_size; u16 i; @@ -1030,8 +1154,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_bi->page) continue; - dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(rx_bi->page, 0); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
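i40e_rx_is_programming_status() and the nearby length extraction both decode fields of the 64-bit writeback qword. The demo below uses shift/mask positions matching my reading of the driver defines (packet length at bit 38, SPH at bit 63), but treat the exact values as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define QW1_LENGTH_SHIFT 38
    #define QW1_LENGTH_MASK  (0x3fffULL << QW1_LENGTH_SHIFT)
    #define QW1_SPH_MASK     (1ULL << 63) /* reused as "programming status" */

    int main(void)
    {
        uint64_t qw = (1500ULL << QW1_LENGTH_SHIFT) | QW1_SPH_MASK;

        printf("len=%llu prog_status=%d\n",
               (unsigned long long)((qw & QW1_LENGTH_MASK) >> QW1_LENGTH_SHIFT),
               (int)!!(qw & QW1_SPH_MASK));
        return 0;
    }

Reusing the SPH bit works, per the comment above, only because packet split is never enabled, so the bit is otherwise guaranteed clear.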
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + I40E_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); rx_bi->page = NULL; rx_bi->page_offset = 0; @@ -1131,6 +1269,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) writel(val, rx_ring->tail); } +/** + * i40e_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. + */ +static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; +} + /** * i40e_alloc_mapped_page - recycle or make a new page * @rx_ring: ring to use @@ -1152,27 +1301,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, } /* alloc new page for storage */ - page = dev_alloc_page(); + page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + I40E_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, 0); + __free_pages(page, i40e_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = i40e_rx_offset(rx_ring); + + /* initialize pagecnt_bias to 1 representing we fully own page */ + bi->pagecnt_bias = 1; return true; } @@ -1219,6 +1374,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) if (!i40e_alloc_mapped_page(rx_ring, bi)) goto no_buffers; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ @@ -1259,8 +1420,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) * @vsi: the VSI we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * - * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, @@ -1422,12 +1581,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); - i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); skb_record_rx_queue(skb, rx_ring->queue_index); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); } /** @@ -1472,7 +1631,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } /** @@ -1493,8 +1655,6 @@ static inline bool i40e_page_is_reusable(struct page *page) * the adapter for another receive * * @rx_buffer: buffer containing the page - * @page: page address from rx_buffer - * @truesize: actual size of the buffer in this page * * If page is reusable, rx_buffer->page_offset is adjusted to point to * an unused region in the page. @@ -1517,13 +1677,10 @@ static inline bool i40e_page_is_reusable(struct page *page) * * In either case, if the page is reusable its refcount is increased. **/ -static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, - struct page *page, - const unsigned int truesize) +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) { -#if (PAGE_SIZE >= 8192) - unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; -#endif + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; /* Is any reuse possible? */ if (unlikely(!i40e_page_is_reusable(page))) @@ -1531,21 +1688,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) +#define I40E_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) + if (rx_buffer->page_offset > I40E_LAST_OFFSET) return false; #endif - /* Inc ref count on page before passing it up to the stack */ - get_page(page); + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } return true; } @@ -1554,145 +1713,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @size: packet length from rx_desc * @skb: sk_buff to place the data into + * @size: packet length from rx_desc * * This function will add the data contained in rx_buffer->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. + * It will just attach the page as a frag to the skb. * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. + * The function will then update the page offset. 
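The reuse path in this hunk alternates between the two halves of a 4K page and tracks driver-owned references in a local pagecnt_bias instead of touching the atomic page refcount on every packet. A toy model of that accounting (simplified: it assumes the stack releases its reference before the buffer comes back around):

    #include <stdio.h>

    struct rx_buffer {
        unsigned int page_offset;
        unsigned int pagecnt_bias;
    };

    #define TRUESIZE 2048  /* half of a 4K page */

    int main(void)
    {
        struct rx_buffer buf = { .page_offset = 0, .pagecnt_bias = 1 };
        int i;

        for (i = 0; i < 4; i++) {
            buf.pagecnt_bias--;            /* half handed to the stack */
            printf("rx %d uses offset %u\n", i, buf.page_offset);
            buf.page_offset ^= TRUESIZE;   /* flip to the other half */
            buf.pagecnt_bias++;            /* reference reclaimed */
        }
        return 0;
    }

When the bias is exhausted, the real code recharges it in bulk with page_ref_add(page, USHRT_MAX), amortizing the atomic operation.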
**/ -static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, +static void i40e_add_rx_frag(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, - unsigned int size, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = I40E_RXBUFFER_2048; + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); #endif - unsigned int pull_len; - - if (unlikely(skb_is_nonlinear(skb))) - goto add_tail_frag; - - /* will the data fit in the skb we allocated? if so, just - * copy it as it is pretty small anyway - */ - if (size <= I40E_RX_HDR_SIZE) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* page is reusable, we can reuse buffer as-is */ - if (likely(i40e_page_is_reusable(page))) - return true; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); - /* this page cannot be reused so discard it */ - __free_pages(page, 0); - return false; - } + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} - /* we need the header to contain the greater of either - * ETH_HLEN or 60 bytes if the skb->len is less than - * 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); +/** + * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use + * @rx_ring: rx descriptor ring to transact packets on + * @size: size of buffer to add to skb + * + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. + */ +static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, + const unsigned int size) +{ + struct i40e_rx_buffer *rx_buffer; - /* align pull length to size of long to optimize - * memcpy performance - */ - memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); - /* update all of the pointers */ - va += pull_len; - size -= pull_len; + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); -add_tail_frag: - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - (unsigned long)va & ~PAGE_MASK, size, truesize); + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; - return i40e_can_reuse_rx_page(rx_buffer, page, truesize); + return rx_buffer; } /** - * i40e_fetch_rx_buffer - Allocate skb and populate it + * i40e_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on - * @rx_desc: descriptor containing info written by hardware + * @rx_buffer: rx buffer to pull data from + * @size: size of buffer to add to skb * - * This function allocates an skb on the fly, and populates it with the page - * data from the current receive descriptor, taking care to set up the skb - * correctly, as well as handling calling the page recycle function if - * necessary. + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. 
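The construct path described above copies at most a small header budget into the skb's linear area and attaches whatever remains as a page fragment. A userspace sketch of that split, with a fixed cap standing in for eth_get_headlen():

    #include <stdio.h>
    #include <string.h>

    #define RX_HDR_SIZE 256    /* linear header budget */

    int main(void)
    {
        unsigned char page_data[1024];
        unsigned char linear[RX_HDR_SIZE];
        unsigned int size = 700;   /* bytes the NIC wrote */
        unsigned int headlen = size;

        if (headlen > RX_HDR_SIZE)
            headlen = RX_HDR_SIZE; /* stand-in for eth_get_headlen() */

        memset(page_data, 0xee, sizeof(page_data));
        memcpy(linear, page_data, headlen);    /* copy headers only */

        /* the remaining bytes stay in the page as a fragment */
        printf("linear=%u frag=%u\n", headlen, size - headlen);
        return 0;
    }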
*/ -static inline -struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - struct sk_buff *skb) +static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + unsigned int size) { - u64 local_status_error_len = - le64_to_cpu(rx_desc->wb.qword1.status_error_len); - unsigned int size = - (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - struct i40e_rx_buffer *rx_buffer; - struct page *page; + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > I40E_RX_HDR_SIZE) + headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset + headlen, + size, truesize); - if (likely(!skb)) { - void *page_addr = page_address(page) + rx_buffer->page_offset; + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + rx_buffer->pagecnt_bias++; + } - /* prefetch first cache line of first page */ - prefetch(page_addr); + return skb; +} + +/** + * i40e_build_skb - Build skb around an existing buffer + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buffer: Rx buffer to pull data from + * @size: size of buffer to add to skb + * + * This function builds an skb around an existing Rx buffer, taking care + * to set up the skb correctly and avoid any memcpy overhead. 
+ */ +static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + prefetch(va + L1_CACHE_BYTES); #endif + /* build an skb around the page buffer */ + skb = build_skb(va - I40E_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_buff_failed++; - return NULL; - } + /* update pointers within the skb to store the data */ + skb_reserve(skb, I40E_SKB_PAD); + __skb_put(skb, size); - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - } + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); + return skb; +} - /* pull page into skb */ - if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { +/** + * i40e_put_rx_buffer - Clean up used buffer and either recycle or free + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from + * + * This function will clean up the contents of the rx_buffer. It will + * either recycle the bufer or unmap it and free the associated resources. 
+ */ +static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer) +{ + if (i40e_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, - DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); } /* clear contents of buffer_info */ rx_buffer->page = NULL; - - return skb; } /** @@ -1718,11 +1933,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, prefetch(I40E_RX_DESC(rx_ring, ntc)); -#define staterrlen rx_desc->wb.qword1.status_error_len - if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) { - i40e_clean_programming_status(rx_ring, rx_desc); - return true; - } /* if we are the last buffer then there is nothing else to do */ #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) @@ -1753,7 +1963,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) bool failure = false; while (likely(total_rx_packets < budget)) { + struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; + unsigned int size; u16 vlan_tag; u8 rx_ptype; u64 qword; @@ -1770,22 +1982,44 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr * which is always zero because packet split isn't used, if the - * hardware wrote DD then it will be non-zero + * hardware wrote DD then the length will be non-zero */ - if (!i40e_test_staterr(rx_desc, - BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. 
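+ *
+ * dma_rmb() is enough here (as opposed to a full rmb()) because the
+ * descriptor ring lives in coherent DMA memory; the barrier only has
+ * to order the qword read above against the field reads that follow.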
*/ dma_rmb(); - skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb); - if (!skb) + if (unlikely(i40e_rx_is_programming_status(qword))) { + i40e_clean_programming_status(rx_ring, rx_desc, qword); + continue; + } + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + if (!size) break; + i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); + rx_buffer = i40e_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = i40e_build_skb(rx_ring, rx_buffer, size); + else + skb = i40e_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + i40e_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; if (i40e_is_non_eop(rx_ring, rx_desc, skb)) @@ -1798,6 +2032,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); + skb = NULL; continue; } @@ -1816,18 +2051,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* populate checksum, VLAN, and protocol */ i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); -#ifdef I40E_FCOE - if (unlikely( - i40e_rx_is_fcoe(rx_ptype) && - !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { - dev_kfree_skb_any(skb); - continue; - } -#endif - vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); i40e_receive_skb(rx_ring, skb, vlan_tag); skb = NULL; @@ -1944,7 +2171,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, } enable_int: - if (!test_bit(__I40E_DOWN, &vsi->state)) + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) wr32(hw, INTREG(vector - 1), txval); if (q_vector->itr_countdown) @@ -1973,13 +2200,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) int budget_per_ring; int work_done = 0; - if (test_bit(__I40E_DOWN, &vsi->state)) { + if (test_bit(__I40E_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } - /* Clear hung_detected bit */ - clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected); /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ @@ -2079,7 +2304,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; - if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) return; /* if sampling is disabled do nothing */ @@ -2113,10 +2338,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ - if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) return; - if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && - (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) { + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. 
*/ @@ -2178,8 +2402,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && - (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@ -2200,15 +2423,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. **/ -#ifdef I40E_FCOE -inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) -#else static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) -#endif { __be16 protocol = skb->protocol; u32 tx_flags = 0; @@ -2409,7 +2626,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, return 0; if (pf->ptp_tx && - !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { + !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; pf->ptp_tx_skb = skb_get(skb); } else { @@ -2716,15 +2933,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -#ifdef I40E_FCOE -inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) -#else static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) -#endif { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); @@ -2925,6 +3136,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* prefetch the data, we'll need it later */ prefetch(skb->data); + i40e_trace(xmit_frame_ring, skb, tx_ring); + count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) { @@ -3003,6 +3216,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: + i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index f80979025c0131a07e7b956826f17877b8b7081a..f5de51124caee489cac1db58826f8c28a7ecccb6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -117,10 +117,9 @@ enum i40e_dyn_idx_t { /* Supported Rx Buffer Sizes (a multiple of 128) */ #define I40E_RXBUFFER_256 256 +#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ #define I40E_RXBUFFER_2048 2048 -#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ -#define I40E_RXBUFFER_4096 4096 -#define I40E_RXBUFFER_8192 8192 +#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ #define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we @@ -133,6 +132,61 @@ enum i40e_dyn_idx_t { #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +/* Attempt 
to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the legacy + * receive path. + */ +#if (PAGE_SIZE < 8192) +#define I40E_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048)) + +static inline int i40e_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int i40e_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (I40E_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = I40E_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return i40e_compute_pad(rx_buf_len); +} + +#define I40E_SKB_PAD i40e_skb_pad() +#else +#define I40E_2K_TOO_SMALL_WITH_PADDING false +#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + /** * i40e_test_staterr - tests bits in Rx descriptor status and error fields * @rx_desc: pointer to receive descriptor (in le64 format) @@ -255,7 +309,12 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct i40e_queue_stats { @@ -269,7 +328,6 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; - u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { @@ -335,7 +393,8 @@ struct i40e_ring { u8 packet_stride; u16 flags; -#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) /* stats structs */ struct i40e_queue_stats stats; @@ -363,6 +422,21 @@ struct i40e_ring { */ } ____cacheline_internodealigned_in_smp; +static inline bool ring_uses_build_skb(struct i40e_ring *ring) +{ + return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); +} + +static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) +{ + ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; +} + +static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) +{ + ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; +} + enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, @@ -384,6 +458,17 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) +static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) + bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void 
i40e_clean_tx_ring(struct i40e_ring *tx_ring); @@ -393,15 +478,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40e_free_tx_resources(struct i40e_ring *tx_ring); void i40e_free_rx_resources(struct i40e_ring *rx_ring); int i40e_napi_poll(struct napi_struct *napi, int budget); -#ifdef I40E_FCOE -void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset); -int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, u32 *flags); -#endif void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); +u32 i40e_get_tx_pending(struct i40e_ring *ring); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); @@ -482,16 +560,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) return count != I40E_MAX_BUFFER_TXD; } -/** - * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE - * @ptype: the packet type field from Rx descriptor write-back - **/ -static inline bool i40e_rx_is_fcoe(u16 ptype) -{ - return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && - (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); -} - /** * txring_txq - Find the netdev Tx ring based on the i40e Tx ring * @ring: Tx ring to find the netdev equivalent of diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 939f9fdc8f8573fa35554d6917e1722348d2da29..3a18ed13edc4283ee3da71b47aa5f4757bc360b2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -78,6 +78,7 @@ enum i40e_debug_mask { I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, I40E_DEBUG_IWARP = 0x00F00000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, @@ -1213,25 +1214,6 @@ struct i40e_veb_tc_stats { u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; }; -#ifdef I40E_FCOE -/* Statistics collected per function for FCoE */ -struct i40e_fcoe_stats { - u64 rx_fcoe_packets; /* fcoeprc */ - u64 rx_fcoe_dwords; /* focedwrc */ - u64 rx_fcoe_dropped; /* fcoerpdc */ - u64 tx_fcoe_packets; /* fcoeptc */ - u64 tx_fcoe_dwords; /* focedwtc */ - u64 fcoe_bad_fccrc; /* fcoecrc */ - u64 fcoe_last_error; /* fcoelast */ - u64 fcoe_ddp_count; /* fcoeddpc */ -}; - -/* offset to per function FCoE statistics block */ -#define I40E_FCOE_VF_STAT_OFFSET 0 -#define I40E_FCOE_PF_STAT_OFFSET 128 -#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF) - -#endif /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ @@ -1319,125 +1301,6 @@ struct i40e_hw_port_stats { #define I40E_SRRD_SRCTL_ATTEMPTS 100000 -#ifdef I40E_FCOE -/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */ - -enum i40E_fcoe_tx_ctx_desc_cmd_bits { - I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */ - 
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */ - I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10, - I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20, - I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40, - I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80 -}; - -/* FCoE DDP Context descriptor */ -struct i40e_fcoe_ddp_context_desc { - __le64 rsvd; - __le64 type_cmd_foff_lsize; -}; - -#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \ - I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT) - -#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4 -#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \ - I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) - -enum i40e_fcoe_ddp_ctx_desc_cmd_bits { - I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */ - I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */ - I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */ - I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */ - I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */ - I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */ -}; - -#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16 -#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \ - I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) - -#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32 -#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \ - I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) - -/* FCoE DDP/DWO Queue Context descriptor */ -struct i40e_fcoe_queue_context_desc { - __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */ - __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */ -}; - -#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0 -#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \ - I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT) - -#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12 -#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \ - I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT) - -#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0 -#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \ - I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) - -#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13 -#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \ - I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) - -enum i40e_fcoe_queue_ctx_desc_tph_bits { - I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1, - I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2 -}; - -#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30 -#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \ - I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT) - -/* FCoE DDP/DWO Filter Context descriptor */ -struct i40e_fcoe_filter_context_desc { - __le32 param; - __le16 seqn; - - /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */ - __le16 rsvd_dmaindx; - - /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */ - __le64 flags_rsvd_lanq; -}; - -#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4 -#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \ - I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT) - -enum i40e_fcoe_filter_ctx_desc_flags_bits { - I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00, - I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01, - I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00, - I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02, - I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00, - I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04 -}; - -#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0 -#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \ - I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT) - -#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 -#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \ - I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT) - -#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53 -#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \ - I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT) - 
-#endif /* I40E_FCOE */ enum i40e_switch_element_types { I40E_SWITCH_ELEMENT_TYPE_MAC = 1, I40E_SWITCH_ELEMENT_TYPE_PF = 2, @@ -1600,4 +1463,83 @@ struct i40e_lldp_variables { #define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) #define I40E_FLEX_57_SHIFT 6 #define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) + +/* Version format for PPP */ +struct i40e_ppp_version { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define I40E_PPP_NAME_SIZE 32 + +/* Package header */ +struct i40e_package_header { + struct i40e_ppp_version version; + u32 segment_count; + u32 segment_offset[1]; +}; + +/* Generic segment header */ +struct i40e_generic_seg_header { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_NOTES 0x00000002 +#define SEGMENT_TYPE_I40E 0x00000011 +#define SEGMENT_TYPE_X722 0x00000012 + u32 type; + struct i40e_ppp_version version; + u32 size; + char name[I40E_PPP_NAME_SIZE]; +}; + +struct i40e_metadata_segment { + struct i40e_generic_seg_header header; + struct i40e_ppp_version version; + u32 track_id; + char name[I40E_PPP_NAME_SIZE]; +}; + +struct i40e_device_id_entry { + u32 vendor_dev_id; + u32 sub_vendor_dev_id; +}; + +struct i40e_profile_segment { + struct i40e_generic_seg_header header; + struct i40e_ppp_version version; + char name[I40E_PPP_NAME_SIZE]; + u32 device_table_count; + struct i40e_device_id_entry device_table[1]; +}; + +struct i40e_section_table { + u32 section_count; + u32 section_offset[1]; +}; + +struct i40e_profile_section_header { + u16 tbl_size; + u16 data_end; + struct { +#define SECTION_TYPE_INFO 0x00000010 +#define SECTION_TYPE_MMIO 0x00000800 +#define SECTION_TYPE_AQ 0x00000801 +#define SECTION_TYPE_NOTE 0x80000000 +#define SECTION_TYPE_NAME 0x80000001 + u32 type; + u32 offset; + u32 size; + } section; +}; + +struct i40e_profile_info { + u32 track_id; + struct i40e_ppp_version version; + u8 op; +#define I40E_PPP_ADD_TRACKID 0x01 +#define I40E_PPP_REMOVE_TRACKID 0x02 + u8 reserved[7]; + u8 name[I40E_PPP_NAME_SIZE]; +}; #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 974ba2baf6ea006d2f3dae4e7aa841e7bffb96d3..8552192a5bde7b804bb7f38c9142ac36f9031926 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -163,7 +163,8 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000 +#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78460c52b7c445112cd777385f0e81058f26e918..95c23fbaa2113214d9cd34ddfa176a7d1cd8d89e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -50,8 +50,8 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; /* Not all vfs are enabled so skip the ones that are not */ - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + if (!test_bit(I40E_VF_STATE_INIT, 
&vf->vf_states) && + !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) continue; /* Ignore return value on purpose - a given VF may fail, but @@ -137,8 +137,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) return; /* verify if the VF is in either init or active before proceeding */ - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && - !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) return; abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; @@ -702,10 +702,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) dev_info(&pf->pdev->dev, "Could not allocate VF broadcast filter\n"); spin_unlock_bh(&vsi->mac_filter_hash_lock); - i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), - (u32)hena); - i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), - (u32)(hena >> 32)); + wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); + wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); } /* program mac filter */ @@ -811,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf) u32 reg_idx, reg; int i, msix_vf; + /* Start by disabling VF's configuration API to prevent the OS from + * accessing the VF's VSI after it's freed / invalidated. + */ + clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); + /* free vsi & disconnect it from the parent uplink */ if (vf->lan_vsi_idx) { i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); @@ -850,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf) /* reset some of the state variables keeping track of the resources */ vf->num_queue_pairs = 0; vf->vf_states = 0; - clear_bit(I40E_VF_STAT_INIT, &vf->vf_states); } /** @@ -882,7 +884,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) vf->num_queue_pairs = total_queue_pairs; /* VF is now completely initialized */ - set_bit(I40E_VF_STAT_INIT, &vf->vf_states); + set_bit(I40E_VF_STATE_INIT, &vf->vf_states); error_alloc: if (ret) @@ -921,25 +923,30 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) } /** - * i40e_reset_vf + * i40e_trigger_vf_reset * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * reset the VF + * Trigger hardware to start a reset for a particular VF. Expects the caller + * to wait the proper amount of time to allow hardware to reset the VF before + * it cleans up and restores VF functionality. **/ -void i40e_reset_vf(struct i40e_vf *vf, bool flr) +static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg, reg_idx, bit_idx; - bool rsd = false; - int i; - - if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) - return; /* warn the VF */ - clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + + /* Disable VF's configuration API during reset. The flag is re-enabled + * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. + * It's normally disabled in i40e_free_vf_res(), but it's safer + * to do it earlier to give some time to finish to any VF config + * functions that may still be running at this point. + */ + clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. 
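The trigger/cleanup split introduced here exists so that the multi-millisecond hardware settle time after VFRTRIG can be paid once for a whole group of VFs rather than once per VF. A minimal sketch of the amortized-wait pattern, with hypothetical trigger()/reset_done()/cleanup() helpers standing in for the driver's real routines:

	/* serialized: total wait grows as nr_vfs * settle time */
	for (v = 0; v < nr_vfs; v++) {
		trigger(v);
		while (!reset_done(v))
			usleep_range(10000, 20000);
		cleanup(v);
	}

	/* overlapped: the settle times run concurrently, so the total
	 * wait stays close to a single settle time
	 */
	for (v = 0; v < nr_vfs; v++)
		trigger(v);
	for (v = 0; v < nr_vfs; v++)
		while (!reset_done(v))
			usleep_range(10000, 20000);
	for (v = 0; v < nr_vfs; v++)
		cleanup(v);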
@@ -960,6 +967,79 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) if (i40e_quiesce_vf_pci(vf)) dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", vf->vf_id); +} + +/** + * i40e_cleanup_reset_vf + * @vf: pointer to the VF structure + * + * Cleanup a VF after the hardware reset is finished. Expects the caller to + * have verified whether the reset is finished properly, and ensure the + * minimum amount of wait time has passed. + **/ +static void i40e_cleanup_reset_vf(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg; + + /* free VF resources to begin resetting the VSI state */ + i40e_free_vf_res(vf); + + /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg. + * By doing this we allow HW to access VF memory at any point. If we + * did it any sooner, HW could access memory while it was being freed + * in i40e_free_vf_res(), causing an IOMMU fault. + * + * On the other hand, this needs to be done ASAP, because the VF driver + * is waiting for this to happen and may report a timeout. It's + * harmless, but it gets logged into Guest OS kernel log, so best avoid + * it. + */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + + /* reallocate VF resources to finish resetting the VSI state */ + if (!i40e_alloc_vf_res(vf)) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); + /* Do not notify the client during VF init */ + if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, + &vf->vf_states)) + i40e_notify_client_of_vf_reset(pf, abs_vf_id); + vf->num_vlan = 0; + } + + /* Tell the VF driver the reset is done. This needs to be done only + * after VF has been fully initialized, because the VF driver may + * request resources immediately after setting this flag. 
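+ *
+ * (On the other side, the i40evf driver polls I40E_VFGEN_RSTAT for
+ * I40E_VFR_VFACTIVE before requesting resources, so this write is
+ * what releases it.)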
+ */ + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); +} + +/** + * i40e_reset_vf + * @vf: pointer to the VF structure + * @flr: VFLR was issued or not + * + * reset the VF + **/ +void i40e_reset_vf(struct i40e_vf *vf, bool flr) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + bool rsd = false; + u32 reg; + int i; + + /* If VFs have been disabled, there is no need to reset */ + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) + return; + + i40e_trigger_vf_reset(vf, flr); /* poll VPGEN_VFRSTAT reg to make sure * that reset is complete @@ -984,35 +1064,116 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) if (!rsd) dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", vf->vf_id); - wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); - /* clear the reset bit in the VPGEN_VFRTRIG reg */ - reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); - reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + usleep_range(10000, 20000); - /* On initial reset, we won't have any queues */ - if (vf->lan_vsi_idx == 0) - goto complete_reset; + /* On initial reset, we don't have any queues to disable */ + if (vf->lan_vsi_idx != 0) + i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); - i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); -complete_reset: - /* reallocate VF resources to reset the VSI state */ - i40e_free_vf_res(vf); - if (!i40e_alloc_vf_res(vf)) { - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - i40e_enable_vf_mappings(vf); - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); - clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); - /* Do not notify the client during VF init */ - if (vf->pf->num_alloc_vfs) - i40e_notify_client_of_vf_reset(pf, abs_vf_id); - vf->num_vlan = 0; + i40e_cleanup_reset_vf(vf); + + i40e_flush(hw); + clear_bit(__I40E_VF_DISABLE, pf->state); +} + +/** + * i40e_reset_all_vfs + * @pf: pointer to the PF structure + * @flr: VFLR was issued or not + * + * Reset all allocated VFs in one go. First, tell the hardware to reset each + * VF, then do all the waiting in one chunk, and finally finish restoring each + * VF after the wait. This is useful during PF routines which need to reset + * all VFs, as otherwise it must perform these resets in a serialized fashion. + **/ +void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + int i, v; + u32 reg; + + /* If we don't have any VFs, then there is nothing to reset */ + if (!pf->num_alloc_vfs) + return; + + /* If VFs have been disabled, there is no need to reset */ + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) + return; + + /* Begin reset on all VFs at once */ + for (v = 0; v < pf->num_alloc_vfs; v++) + i40e_trigger_vf_reset(&pf->vf[v], flr); + + /* HW requires some time to make sure it can flush the FIFO for a VF + * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in + * sequence to make sure that it has completed. We'll keep track of + * the VFs using a simple iterator that increments once that VF has + * finished resetting. + */ + for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { + usleep_range(10000, 20000); + + /* Check each VF in sequence, beginning with the VF to fail + * the previous check. + */ + while (v < pf->num_alloc_vfs) { + vf = &pf->vf[v]; + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) + break; + + /* If the current VF has finished resetting, move on + * to the next VF in sequence. 
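+ *
+ * (Note that i, not v, bounds the outer loop: at most ten 10-20ms
+ * sleeps are taken for the entire set of VFs, not ten per VF,
+ * since v is never reset between iterations.)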
+ */ + v++; + } } - /* tell the VF the reset is done */ - wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + + if (flr) + usleep_range(10000, 20000); + + /* Display a warning if at least one VF didn't manage to reset in + * time, but continue on with the operation. + */ + if (v < pf->num_alloc_vfs) + dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", + pf->vf[v].vf_id); + usleep_range(10000, 20000); + + /* Begin disabling all the rings associated with VFs, but do not wait + * between each VF. + */ + for (v = 0; v < pf->num_alloc_vfs; v++) { + /* On initial reset, we don't have any queues to disable */ + if (pf->vf[v].lan_vsi_idx == 0) + continue; + + i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); + } + + /* Now that we've notified HW to disable all of the VF rings, wait + * until they finish. + */ + for (v = 0; v < pf->num_alloc_vfs; v++) { + /* On initial reset, we don't have any queues to disable */ + if (pf->vf[v].lan_vsi_idx == 0) + continue; + + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); + } + + /* Hw may need up to 50ms to finish disabling the RX queues. We + * minimize the wait by delaying only once for all VFs. + */ + mdelay(50); + + /* Finish the reset on each VF */ + for (v = 0; v < pf->num_alloc_vfs; v++) + i40e_cleanup_reset_vf(&pf->vf[v]); i40e_flush(hw); - clear_bit(__I40E_VF_DISABLE, &pf->state); + clear_bit(__I40E_VF_DISABLE, pf->state); } /** @@ -1029,13 +1190,25 @@ void i40e_free_vfs(struct i40e_pf *pf) if (!pf->vf) return; - while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + while (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) usleep_range(1000, 2000); i40e_notify_client_of_vf_enable(pf, 0); - for (i = 0; i < pf->num_alloc_vfs; i++) - if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) - i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); + + /* Amortize wait time by stopping all VFs at the same time */ + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) + continue; + + i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]); + } + + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) + continue; + + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]); + } /* Disable IOV before freeing resources. 
This lets any VF drivers * running in the host get themselves cleaned up before we yank @@ -1046,13 +1219,11 @@ void i40e_free_vfs(struct i40e_pf *pf) else dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); - msleep(20); /* let any messages in transit get finished up */ - /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { - if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) i40e_free_vf_res(&pf->vf[i]); /* disable qp mappings */ i40e_disable_vf_mappings(&pf->vf[i]); @@ -1075,7 +1246,7 @@ void i40e_free_vfs(struct i40e_pf *pf) wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } - clear_bit(__I40E_VF_DISABLE, &pf->state); + clear_bit(__I40E_VF_DISABLE, pf->state); } #ifdef CONFIG_PCI_IOV @@ -1120,12 +1291,15 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - /* VF resources get allocated during reset */ - i40e_reset_vf(&vfs[i], false); + + set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states); } pf->num_alloc_vfs = num_alloc_vfs; + /* VF resources get allocated during reset */ + i40e_reset_all_vfs(pf, false); + i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); err_alloc: @@ -1152,7 +1326,7 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int pre_existing_vfs = pci_num_vf(pdev); int err = 0; - if (test_bit(__I40E_TESTING, &pf->state)) { + if (test_bit(__I40E_TESTING, pf->state)) { dev_warn(&pdev->dev, "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); err = -EPERM; @@ -1258,7 +1432,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, "Number of invalid messages exceeded for VF %d\n", vf->vf_id); dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } else { vf->num_valid_msgs++; @@ -1333,7 +1507,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) int len = 0; int ret; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -1359,10 +1533,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (!vsi->info.pvid) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; - if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) && + if (i40e_vf_client_capable(pf, vf->vf_id) && (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; - set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); + set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); } if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { @@ -1383,6 +1557,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP; + + if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && + (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) { if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, @@ -1416,7 +1597,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) 
ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); } - set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); err: /* send the response back to the VF */ @@ -1439,7 +1620,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) **/ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { - if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) i40e_reset_vf(vf, false); } @@ -1487,7 +1668,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, int bkt; vsi = i40e_find_vsi_from_id(pf, info->vsi_id); - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || !vsi) { aq_ret = I40E_ERR_PARAM; @@ -1548,9 +1729,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, "VF %d successfully set multicast promiscuous mode\n", vf->vf_id); if (allmulti) - set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); else - clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); } if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) @@ -1599,9 +1780,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, "VF %d successfully set unicast promiscuous mode\n", vf->vf_id); if (alluni) - set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); else - clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); } error_param: @@ -1630,7 +1811,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1687,7 +1868,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) unsigned long tempmap; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1747,7 +1928,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1786,7 +1967,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1828,7 +2009,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) memset(&stats, 0, sizeof(struct i40e_eth_stats)); - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -1853,7 +2034,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } /* If the VF is not trusted restrict the number of MAC/VLAN it can program */ -#define I40E_VC_MAX_MAC_ADDR_PER_VF 8 +#define I40E_VC_MAX_MAC_ADDR_PER_VF 12 #define I40E_VC_MAX_VLAN_PER_VF 8 /** @@ -1915,7 +2096,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf 
*vf, u8 *msg, u16 msglen) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -1984,7 +2165,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2050,7 +2231,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); goto error_param; } - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2077,12 +2258,12 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (!ret) vf->num_vlan++; - if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, true, vfl->vlan_id[i], NULL); - if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, true, vfl->vlan_id[i], @@ -2117,7 +2298,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -2140,12 +2321,12 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); vf->num_vlan--; - if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, false, vfl->vlan_id[i], NULL); - if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, false, vfl->vlan_id[i], @@ -2171,8 +2352,8 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2202,8 +2383,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, (struct i40e_virtchnl_iwarp_qvlist_info *)msg; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2240,7 +2421,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) u16 vsi_id = vrk->vsi_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) || (vrk->key_len != 
I40E_HKEY_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; @@ -2272,7 +2453,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) u16 vsi_id = vrl->vsi_id; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vsi_id) || (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { aq_ret = I40E_ERR_PARAM; @@ -2302,7 +2483,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2339,7 +2520,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2369,7 +2550,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, int valid_len = 0; /* Check if VF is disabled. */ - if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) + if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) return I40E_ERR_PARAM; /* Validate message length. */ @@ -2637,7 +2818,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) struct i40e_vf *vf; int vf_id; - if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) return 0; /* Re-enable the VFLR interrupt cause here, before looking for which @@ -2650,7 +2831,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); - clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; @@ -2693,7 +2874,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -2782,7 +2963,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -2914,7 +3095,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; @@ -2995,7 +3176,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, vf = &(pf->vf[vf_id]); /* first vsi is always the LAN vsi */ vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", vf_id); ret = -EAGAIN; @@ -3114,7 +3295,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); - if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); ret = -EAGAIN; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 4012d069939ab3211cd9f668a3de89dbe4561a07..20d7c8160e9ed9c7381ff178e13e85ecb612da9a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -56,13 +56,14 @@ enum i40e_queue_ctrl { /* VF states */ enum i40e_vf_states { - I40E_VF_STAT_INIT = 0, - I40E_VF_STAT_ACTIVE, - I40E_VF_STAT_IWARPENA, - I40E_VF_STAT_FCOEENA, - I40E_VF_STAT_DISABLED, - I40E_VF_STAT_MC_PROMISC, - I40E_VF_STAT_UC_PROMISC, + I40E_VF_STATE_INIT = 0, + I40E_VF_STATE_ACTIVE, + I40E_VF_STATE_IWARPENA, + I40E_VF_STATE_FCOEENA, + I40E_VF_STATE_DISABLED, + I40E_VF_STATE_MC_PROMISC, + I40E_VF_STATE_UC_PROMISC, + I40E_VF_STATE_PRE_ENABLE, }; /* VF capabilities */ @@ -87,7 +88,6 @@ struct i40e_vf { u16 stag; struct i40e_virtchnl_ether_addr default_lan_addr; - struct i40e_virtchnl_ether_addr default_fcoe_addr; u16 port_vlan_id; bool pf_set_mac; /* The VMM admin set the VF MAC address */ bool trusted; @@ -125,6 +125,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); void i40e_reset_vf(struct i40e_vf *vf, bool flr); +void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); /* VF configuration related iplink handlers */ diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile index 3a423836a565294aa82dadbeb93e4d45619ccd86..a393f4a07f06dbf6f9ec18446f6d7f0c4c76ca23 100644 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ b/drivers/net/ethernet/intel/i40evf/Makefile @@ -29,8 +29,11 @@ # # +ccflags-y += -I$(src) +subdir-ccflags-y += -I$(src) + obj-$(CONFIG_I40EVF) += i40evf.o i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ - i40e_txrx.o i40e_common.o i40e_adminq.o + i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 96385156b82451e652eb95e0b5fbc2459b4836c3..8b0d4b255dea3b9f833aeff53091b00a9620f084 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -797,8 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if (i40evf_asq_done(hw)) break; - usleep_range(1000, 2000); - total_delay++; + udelay(50); + total_delay += 50; } while (total_delay < hw->aq.asq_cmd_timeout); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 1f9b3b5d946d3e02ee1aff52d1671feaa7195b2c..e0bfaa3d4a21341efc4bcddf2b23290e9492a5e6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ +#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 
opcode); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index eeb9864bc5b152a90508af879cd5f32b43c3bead..91d8786d386d7bbc6f8198004f6ef0bc6da88679 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -132,6 +132,10 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, + /* Proxy commands */ + i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + /* LAA */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -139,6 +143,10 @@ enum i40e_admin_queue_opc { /* PXE */ i40e_aqc_opc_clear_pxe_mode = 0x0110, + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + /* internal switch commands */ i40e_aqc_opc_get_switch_config = 0x0200, i40e_aqc_opc_add_statistics = 0x0201, @@ -177,10 +185,15 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_control_packet_filter = 0x025B, i40e_aqc_opc_add_cloud_filters = 0x025C, i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, + /* Pipeline Personalization Profile */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + /* DCB commands */ i40e_aqc_opc_dcb_ignore_pfc = 0x0301, i40e_aqc_opc_dcb_updated = 0x0302, @@ -558,6 +571,56 @@ struct i40e_aqc_clear_pxe { I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); +/* Set WoL Filter (0x0120) */ + +struct i40e_aqc_set_wol_filter { + __le16 filter_index; +#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ + I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) + +#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 +#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ + I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) + __le16 cmd_flags; +#define I40E_AQC_SET_WOL_FILTER 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 +#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 +#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 + __le16 valid_flags; +#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 + u8 reserved[2]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); + +struct i40e_aqc_set_wol_filter_data { + u8 filter[128]; + u8 mask[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); + +/* Get Wake Reason (0x0121) */ + +struct i40e_aqc_get_wake_reason_completion { + u8 reserved_1[2]; + __le16 wake_reason; +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) + u8 reserved_2[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); + /* Switch configuration commands (0x02xx) */ /* Used by many indirect commands that only pass an seid and a buffer in the @@ -640,6 +703,8 @@ struct i40e_aqc_set_port_parameters { #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; @@ -691,6 +756,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); /* Set Switch Configuration (direct 0x0205) */ struct i40e_aqc_set_switch_config { __le16 flags; +/* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; @@ -1364,6 +1430,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); +/* Pipeline Personalization Profile */ +struct i40e_aqc_write_personalization_profile { + u8 flags; + u8 reserved[3]; + __le32 profile_track_id; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); + +struct i40e_aqc_write_ppp_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +struct i40e_aqc_get_applied_profiles { + u8 flags; +#define I40E_AQC_GET_PPP_GET_CONF 0x1 +#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2 + u8 rsv[3]; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); + /* DCB 0x03xx*/ /* PFC Ignore (direct 0x0301) @@ -1839,11 +1935,12 @@ struct i40e_aqc_get_link_status { #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 external_power_ability; + u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 #define I40E_AQ_LINK_POWER_CLASS_4 0x03 +#define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 89dfdbca13db794afe5ac0cbecee9c5ee4720626..43f10761f4ba0ac966f0d4a01cd7ee666ff54201 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -958,7 +958,9 @@ u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) int retry = 5; u32 val = 0; - use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, @@ -1019,7 +1021,9 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) bool use_register; int retry = 5; - use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); if (!use_register) { do_retry: status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, @@ -1127,3 +1131,215 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw) return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, 0, NULL, 0, NULL); } + +/** + * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @track_id: package tracking id + * @error_offset: returns error offset + * @error_info: returns error information + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40evf_aq_write_ppp(struct 
i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_write_personalization_profile *cmd = + (struct i40e_aqc_write_personalization_profile *) + &desc.params.raw; + struct i40e_aqc_write_ppp_resp *resp; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_write_personalization_profile); + + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(buff_size); + + cmd->profile_track_id = cpu_to_le32(track_id); + + status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_applied_profiles *cmd = + (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_personalization_profile_list); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(buff_size); + + cmd->flags = flags; + + status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40evf_find_segment_in_package + * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + **/ +struct i40e_generic_seg_header * +i40evf_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_hdr) +{ + struct i40e_generic_seg_header *segment; + u32 i; + + /* Search all package segments for the requested segment type */ + for (i = 0; i < pkg_hdr->segment_count; i++) { + segment = + (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + + pkg_hdr->segment_offset[i]); + + if (segment->type == segment_type) + return segment; + } + + return NULL; +} + +/** + * i40evf_write_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be downloaded + * @track_id: package tracking id + * + * Handles the download of a complete package. 
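+ *
+ * A minimal caller sketch (hypothetical; pkg_hdr, hw, track_id and the
+ * surrounding firmware-request logic are assumed to come from the caller
+ * and are not part of this patch):
+ *
+ *	struct i40e_profile_segment *seg;
+ *
+ *	seg = (struct i40e_profile_segment *)
+ *	      i40evf_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ *	if (seg)
+ *		status = i40evf_write_profile(hw, seg, track_id);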
+ */ +enum i40e_status_code +i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id) +{ + i40e_status status = 0; + struct i40e_section_table *sec_tbl; + struct i40e_profile_section_header *sec = NULL; + u32 dev_cnt; + u32 vendor_dev_id; + u32 *nvm; + u32 section_size = 0; + u32 offset = 0, info = 0; + u32 i; + + if (!track_id) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0."); + return I40E_NOT_SUPPORTED; + } + + dev_cnt = profile->device_table_count; + + for (i = 0; i < dev_cnt; i++) { + vendor_dev_id = profile->device_table[i].vendor_dev_id; + if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) + if (hw->device_id == (vendor_dev_id & 0xFFFF)) + break; + } + if (i == dev_cnt) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + nvm = (u32 *)&profile->device_table[dev_cnt]; + sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; + + for (i = 0; i < sec_tbl->section_count; i++) { + sec = (struct i40e_profile_section_header *)((u8 *)profile + + sec_tbl->section_offset[i]); + + /* Skip 'AQ', 'note' and 'name' sections */ + if (sec->section.type != SECTION_TYPE_MMIO) + continue; + + section_size = sec->section.size + + sizeof(struct i40e_profile_section_header); + + /* Write profile */ + status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size, + track_id, &offset, &info, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to write profile: offset %d, info %d", + offset, info); + break; + } + } + return status; +} + +/** + * i40evf_add_pinfo_to_list + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package + * @profile_info_sec: buffer for information section + * @track_id: package tracking id + * + * Register a profile to the list of loaded profiles. 
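+ *
+ * profile_info_sec must have room for one section header plus one
+ * profile info entry, matching the sec->data_end computation below,
+ * e.g. (sketch; the buffer name is hypothetical):
+ *
+ *	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
+ *		     sizeof(struct i40e_profile_info)];
+ *
+ *	status = i40evf_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);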
+ */ +enum i40e_status_code +i40evf_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id) +{ + i40e_status status = 0; + struct i40e_profile_section_header *sec = NULL; + struct i40e_profile_info *pinfo; + u32 offset = 0, info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + pinfo->version = profile->version; + pinfo->op = I40E_PPP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE); + + status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + return status; +} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index ba6c6bda0e22b6f4c13fc800dd6de18e21b90cbd..741223d5d809dd902f77468d9ae734c47e3b4223 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -122,4 +122,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); +i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details); +struct i40e_generic_seg_header * +i40evf_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_header); +enum i40e_status_code +i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, + u32 track_id); +enum i40e_status_code +i40evf_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..9a5100b2b7c74fb4da000ab3182b3dcf21efe4ae --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h @@ -0,0 +1,229 @@ +/******************************************************************************* + * + * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver + * Copyright(c) 2013 - 2017 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* Modeled on trace-events-sample.h */ + +/* The trace subsystem name for i40evf will be "i40evf". + * + * This file is named i40e_trace.h. + * + * Since this include file's name is different from the trace + * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end + * of this file. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i40evf + +/* See trace-events-sample.h for a detailed description of why this + * guard clause is different from most normal include files. + */ +#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _I40E_TRACE_H_ + +#include <linux/tracepoint.h> + +/** + * i40e_trace() macro enables shared code to refer to trace points + * like: + * + * trace_i40e{,vf}_example(args...) + * + * ... as: + * + * i40e_trace(example, args...) + * + * ... to resolve to the PF or VF version of the tracepoint without + * ifdefs, and to allow tracepoints to be disabled entirely at build + * time. + * + * Trace point should always be referred to in the driver via this + * macro. + * + * Similarly, i40e_trace_enabled(trace_name) wraps references to + * trace_i40e{,vf}_<trace_name>_enabled() functions. + */ +#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name) +#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name) + +#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args) + +#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() + +/* Events common to PF and VF. Corresponding versions will be defined + * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() + * macro above will select the right trace point name for the driver + * being built from shared code. + */ + +/* Events related to a vsi & ring */ +DECLARE_EVENT_CLASS( + i40evf_tx_template, + + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf), + + /* The convention here is to make the first fields in the + * TP_STRUCT match the TP_PROTO exactly. This enables the use + * of the args struct generated by the tplist tool (from the + * bcc-tools package) to be used for those fields. To access + * fields other than the tracepoint args will require the + * tplist output to be adjusted.
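+ *
+ * For example, with the tplist tool from bcc installed, an invocation
+ * along the lines of (hypothetical; exact flags may vary by version)
+ *
+ *	tplist -v 'i40evf:*'
+ *
+ * should list these tracepoints together with the fields declared here.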
+ */ + TP_STRUCT__entry( + __field(void*, ring) + __field(void*, desc) + __field(void*, buf) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->desc = desc; + __entry->buf = buf; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s ring: %p desc: %p buf %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->buf) +); + +DEFINE_EVENT( + i40evf_tx_template, i40evf_clean_tx_irq, + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf)); + +DEFINE_EVENT( + i40evf_tx_template, i40evf_clean_tx_irq_unmap, + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf)); + +DECLARE_EVENT_CLASS( + i40evf_rx_template, + + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb), + + TP_STRUCT__entry( + __field(void*, ring) + __field(void*, desc) + __field(void*, skb) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->desc = desc; + __entry->skb = skb; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s ring: %p desc: %p skb %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->skb) +); + +DEFINE_EVENT( + i40evf_rx_template, i40evf_clean_rx_irq, + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb)); + +DEFINE_EVENT( + i40evf_rx_template, i40evf_clean_rx_irq_rx, + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb)); + +DECLARE_EVENT_CLASS( + i40evf_xmit_template, + + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring), + + TP_STRUCT__entry( + __field(void*, skb) + __field(void*, ring) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->skb = skb; + __entry->ring = ring; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s skb: %p ring: %p", + __get_str(devname), __entry->skb, + __entry->ring) +); + +DEFINE_EVENT( + i40evf_xmit_template, i40evf_xmit_frame_ring, + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring)); + +DEFINE_EVENT( + i40evf_xmit_template, i40evf_xmit_frame_ring_drop, + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring)); + +/* Events unique to the VF. */ + +#endif /* _I40E_TRACE_H_ */ +/* This must be outside ifdef _I40E_TRACE_H */ + +/* This trace include file is not located in the .../include/trace + * with the kernel tracepoint definitions, because we're a loadable + * module. + */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
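+
+/* Usage sketch (illustrative only, mirroring the i40e_trace() macro
+ * defined above): shared Tx/Rx code can emit the VF flavor of a
+ * tracepoint as
+ *
+ *	i40e_trace(xmit_frame_ring, skb, tx_ring);
+ *
+ * which expands to trace_i40evf_xmit_frame_ring(skb, tx_ring), and can
+ * guard expensive argument preparation with
+ *
+ *	if (i40e_trace_enabled(xmit_frame_ring))
+ *		...
+ */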
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE i40e_trace +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index c91fcf43ccbc5eb7bfe95a9a3eec9c559519fcd0..dfe241a12ad0756d10a77b954486d2194a31c5ca 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -28,6 +28,7 @@ #include <linux/prefetch.h> #include "i40evf.h" +#include "i40e_trace.h" #include "i40e_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, @@ -137,10 +138,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - if (!in_sw) - head = i40e_get_head(ring); - else - head = ring->next_to_clean; + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -165,7 +163,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; - struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = vsi->work_limit; @@ -174,8 +171,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; - tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); - do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -186,8 +181,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* prevent any other reads prior to eop_desc */ read_barrier_depends(); - /* we have caught up to head, no work left to do */ - if (tx_head == tx_desc) + i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->cmd_type_offset_bsz & + cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ @@ -212,6 +209,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* unmap remaining buffers */ while (tx_desc != eop_desc) { + i40e_trace(clean_tx_irq_unmap, + tx_ring, tx_desc, tx_buf); tx_buf++; tx_desc++; @@ -267,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, if (budget && ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &vsi->state) && + !test_bit(__I40E_VSI_DOWN, vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -285,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &vsi->state)) { + !test_bit(__I40E_VSI_DOWN, vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -464,10 +463,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); - /* add u32 for head writeback, align after this takes care of - * guaranteeing this is at least one cache line in size - */ - tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -493,7 +488,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) **/ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) { - struct device *dev = rx_ring->dev; unsigned long bi_size; u16 i; @@ -513,8 +507,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_bi->page) continue; - dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(rx_bi->page, 0); + /* Invalidate cache lines that may
have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + I40E_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); rx_bi->page = NULL; rx_bi->page_offset = 0; @@ -614,6 +622,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) writel(val, rx_ring->tail); } +/** + * i40e_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. + */ +static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; +} + /** * i40e_alloc_mapped_page - recycle or make a new page * @rx_ring: ring to use @@ -635,27 +654,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, } /* alloc new page for storage */ - page = dev_alloc_page(); + page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + I40E_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, 0); + __free_pages(page, i40e_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = i40e_rx_offset(rx_ring); + + /* initialize pagecnt_bias to 1 representing we fully own page */ + bi->pagecnt_bias = 1; return true; } @@ -702,6 +727,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) if (!i40e_alloc_mapped_page(rx_ring, bi)) goto no_buffers; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ @@ -742,8 +773,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) * @vsi: the VSI we care about * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * - * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, @@ -806,13 +835,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If there is an outer header present that might contain a checksum - * we need to bump the checksum level by 1 to reflect the fact that - * we are indicating we validated the inner checksum. 
- */ - if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) - skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: @@ -895,12 +917,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, { i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); - i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); skb_record_rx_queue(skb, rx_ring->queue_index); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); } /** @@ -945,7 +967,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } /** @@ -966,8 +991,6 @@ static inline bool i40e_page_is_reusable(struct page *page) * the adapter for another receive * * @rx_buffer: buffer containing the page - * @page: page address from rx_buffer - * @truesize: actual size of the buffer in this page * * If page is reusable, rx_buffer->page_offset is adjusted to point to * an unused region in the page. @@ -990,13 +1013,10 @@ static inline bool i40e_page_is_reusable(struct page *page) * * In either case, if the page is reusable its refcount is increased. **/ -static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, - struct page *page, - const unsigned int truesize) +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) { -#if (PAGE_SIZE >= 8192) - unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; -#endif + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; /* Is any reuse possible? */ if (unlikely(!i40e_page_is_reusable(page))) @@ -1004,21 +1024,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) +#define I40E_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) + if (rx_buffer->page_offset > I40E_LAST_OFFSET) return false; #endif - /* Inc ref count on page before passing it up to the stack */ - get_page(page); + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } return true; } @@ -1027,145 +1049,201 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @size: packet length from rx_desc * @skb: sk_buff to place the data into + * @size: packet length from rx_desc * * This function will add the data contained in rx_buffer->page to the skb. 
- * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. + * It will just attach the page as a frag to the skb. * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. + * The function will then update the page offset. **/ -static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, +static void i40e_add_rx_frag(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, - unsigned int size, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = I40E_RXBUFFER_2048; + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); #endif - unsigned int pull_len; - - if (unlikely(skb_is_nonlinear(skb))) - goto add_tail_frag; - /* will the data fit in the skb we allocated? if so, just - * copy it as it is pretty small anyway - */ - if (size <= I40E_RX_HDR_SIZE) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is reusable, we can reuse buffer as-is */ - if (likely(i40e_page_is_reusable(page))) - return true; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); - /* this page cannot be reused so discard it */ - __free_pages(page, 0); - return false; - } + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} - /* we need the header to contain the greater of either - * ETH_HLEN or 60 bytes if the skb->len is less than - * 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); +/** + * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use + * @rx_ring: rx descriptor ring to transact packets on + * @size: size of buffer to add to skb + * + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. 
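+ *
+ * The returned buffer's pagecnt_bias has already been decremented, so
+ * each successful call is paired with a later i40e_put_rx_buffer()
+ * once the data has been consumed, as the Rx clean loop below does
+ * (sketch):
+ *
+ *	rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ *	skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+ *	i40e_put_rx_buffer(rx_ring, rx_buffer);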
+ */ +static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, + const unsigned int size) +{ + struct i40e_rx_buffer *rx_buffer; - /* align pull length to size of long to optimize - * memcpy performance - */ - memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); - /* update all of the pointers */ - va += pull_len; - size -= pull_len; + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); -add_tail_frag: - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - (unsigned long)va & ~PAGE_MASK, size, truesize); + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; - return i40e_can_reuse_rx_page(rx_buffer, page, truesize); + return rx_buffer; } /** - * i40evf_fetch_rx_buffer - Allocate skb and populate it + * i40e_construct_skb - Allocate skb and populate it * @rx_ring: rx descriptor ring to transact packets on - * @rx_desc: descriptor containing info written by hardware + * @rx_buffer: rx buffer to pull data from + * @size: size of buffer to add to skb * - * This function allocates an skb on the fly, and populates it with the page - * data from the current receive descriptor, taking care to set up the skb - * correctly, as well as handling calling the page recycle function if - * necessary. + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. */ -static inline -struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - struct sk_buff *skb) +static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + unsigned int size) { - u64 local_status_error_len = - le64_to_cpu(rx_desc->wb.qword1.status_error_len); - unsigned int size = - (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - struct i40e_rx_buffer *rx_buffer; - struct page *page; + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > I40E_RX_HDR_SIZE) + headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset + headlen, + size, truesize); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + 
rx_buffer->pagecnt_bias++; + } + + return skb; +} - if (likely(!skb)) { - void *page_addr = page_address(page) + rx_buffer->page_offset; +/** + * i40e_build_skb - Build skb around an existing buffer + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buffer: Rx buffer to pull data from + * @size: size of buffer to add to skb + * + * This function builds an skb around an existing Rx buffer, taking care + * to set up the skb correctly and avoid any memcpy overhead. + */ +static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + struct sk_buff *skb; - /* prefetch first cache line of first page */ - prefetch(page_addr); + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + prefetch(va + L1_CACHE_BYTES); #endif + /* build an skb around the page buffer */ + skb = build_skb(va - I40E_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_buff_failed++; - return NULL; - } + /* update pointers within the skb to store the data */ + skb_reserve(skb, I40E_SKB_PAD); + __skb_put(skb, size); - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - } + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); + return skb; +} - /* pull page into skb */ - if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { +/** + * i40e_put_rx_buffer - Clean up used buffer and either recycle or free + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from + * + * This function will clean up the contents of the rx_buffer. It will + * either recycle the bufer or unmap it and free the associated resources. 
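+ *
+ * Recycling is decided by i40e_can_reuse_rx_page() above: on
+ * PAGE_SIZE < 8192 systems the page is reused only while the driver
+ * holds every reference except its own pagecnt_bias, i.e. while
+ * page_count(page) - pagecnt_bias is at most 1.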
+ */ +static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer) +{ + if (i40e_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, - DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); } /* clear contents of buffer_info */ rx_buffer->page = NULL; - - return skb; } /** @@ -1221,7 +1299,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) bool failure = false; while (likely(total_rx_packets < budget)) { + struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; + unsigned int size; u16 vlan_tag; u8 rx_ptype; u64 qword; @@ -1238,22 +1318,40 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* status_error_len will always be zero for unused descriptors * because it's cleared in cleanup, and overlaps with hdr_addr * which is always zero because packet split isn't used, if the - * hardware wrote DD then it will be non-zero + * hardware wrote DD then the length will be non-zero */ - if (!i40e_test_staterr(rx_desc, - BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. */ dma_rmb(); - skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb); - if (!skb) + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + if (!size) break; + i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); + rx_buffer = i40e_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = i40e_build_skb(rx_ring, rx_buffer, size); + else + skb = i40e_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + i40e_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; if (i40e_is_non_eop(rx_ring, rx_desc, skb)) @@ -1266,6 +1364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); + skb = NULL; continue; } @@ -1288,6 +1387,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); i40e_receive_skb(rx_ring, skb, vlan_tag); skb = NULL; @@ -1408,7 +1508,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, } enable_int: - if (!test_bit(__I40E_DOWN, &vsi->state)) + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) wr32(hw, INTREG(vector - 1), txval); if (q_vector->itr_countdown) @@ -1437,7 +1537,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) int budget_per_ring; int work_done = 0; - if (test_bit(__I40E_DOWN, &vsi->state)) { + if (test_bit(__I40E_VSI_DOWN, vsi->state)) { napi_complete(napi); return 0; } @@ -1980,7 +2080,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, u16 i = tx_ring->next_to_use; u32 td_tag = 0; dma_addr_t dma; - u16 desc_count = 1; if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; @@ -2016,7 +2115,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; - desc_count++; if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); @@ -2038,7 +2136,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc++; i++; - desc_count++; if (i == tx_ring->count) { tx_desc = I40E_TX_DESC(tx_ring, 0); @@ -2064,46 +2161,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); - /* write last descriptor with EOP bit */ - td_cmd |= I40E_TX_DESC_CMD_EOP; - - /* We can OR these values together as they both are checked against - * 4 below and at this point desc_count will be used as a boolean value - * after this if/else block. - */ - desc_count |= ++tx_ring->packet_stride; - - /* Algorithm to optimize tail and RS bit setting: - * if queue is stopped - * mark RS bit - * reset packet counter - * else if xmit_more is supported and is true - * advance packet counter to 4 - * reset desc_count to 0 - * - * if desc_count >= 4 - * mark RS bit - * reset packet counter - * if desc_count > 0 - * update tail - * - * Note: If there are less than 4 descriptors - * pending and interrupts were disabled the service task will - * trigger a force WB. 
- */ - if (netif_xmit_stopped(txring_txq(tx_ring))) { - goto do_rs; - } else if (skb->xmit_more) { - /* set stride to arm on next packet and reset desc_count */ - tx_ring->packet_stride = WB_STRIDE; - desc_count = 0; - } else if (desc_count >= WB_STRIDE) { -do_rs: - /* write last descriptor with RS bit set */ - td_cmd |= I40E_TX_DESC_CMD_RS; - tx_ring->packet_stride = 0; - } - + /* write last descriptor with RS and EOP bits */ + td_cmd |= I40E_TXD_CMD; tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); @@ -2119,7 +2178,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, first->next_to_watch = tx_desc; /* notify HW of packet */ - if (desc_count) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail @@ -2170,6 +2229,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* prefetch the data, we'll need it later */ prefetch(skb->data); + i40e_trace(xmit_frame_ring, skb, tx_ring); + count = i40e_xmit_descriptor_count(skb); if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) { @@ -2237,6 +2298,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: + i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 8274ba68bd32a6583538f7af6fd80417dc25e6ec..901282c87cf6ca67f44edcc51b4f990eedbba1d9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -104,10 +104,9 @@ enum i40e_dyn_idx_t { /* Supported Rx Buffer Sizes (a multiple of 128) */ #define I40E_RXBUFFER_256 256 +#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ #define I40E_RXBUFFER_2048 2048 -#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ -#define I40E_RXBUFFER_4096 4096 -#define I40E_RXBUFFER_8192 8192 +#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ #define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we @@ -120,6 +119,61 @@ enum i40e_dyn_idx_t { #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the legacy + * receive path. + */ +#if (PAGE_SIZE < 8192) +#define I40E_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048)) + +static inline int i40e_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int i40e_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. 
+ * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (I40E_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = I40E_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return i40e_compute_pad(rx_buf_len); +} + +#define I40E_SKB_PAD i40e_skb_pad() +#else +#define I40E_2K_TOO_SMALL_WITH_PADDING false +#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + /** * i40e_test_staterr - tests bits in Rx descriptor status and error fields * @rx_desc: pointer to receive descriptor (in le64 format) @@ -241,7 +295,12 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct i40e_queue_stats { @@ -321,7 +380,8 @@ struct i40e_ring { u8 packet_stride; u16 flags; -#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) /* stats structs */ struct i40e_queue_stats stats; @@ -349,6 +409,21 @@ struct i40e_ring { */ } ____cacheline_internodealigned_in_smp; +static inline bool ring_uses_build_skb(struct i40e_ring *ring) +{ + return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); +} + +static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) +{ + ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; +} + +static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) +{ + ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; +} + enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, @@ -370,6 +445,17 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) +static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) + bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); @@ -384,20 +470,6 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40evf_chk_linearize(struct sk_buff *skb); -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: Tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - /** * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed * @skb: send buffer @@ -460,19 +532,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) /* we can support up to 8 data buffers for a single send */ return count != I40E_MAX_BUFFER_TXD; } - -/** - * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE - * @ptype: the packet type field from Rx descriptor write-back - **/ -static inline bool i40e_rx_is_fcoe(u16 ptype) -{ - return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && - (ptype <= 
I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); -} - /** - * txring_txq - Find the netdev Tx ring based on the i40e Tx ring * @ring: Tx ring to find the netdev equivalent of **/ static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 16bb88084bb977f900e91dcbce154e0e6ef447c8..bde7f24af1c6f3a01af9773f5e721819bd49eb1d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -78,6 +78,7 @@ enum i40e_debug_mask { I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, @@ -1396,4 +1397,83 @@ enum i40e_reset_type { #define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 #define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ I40E_FD_INSET_FLEX_WORD57_SHIFT) + +/* Version format for PPP */ +struct i40e_ppp_version { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define I40E_PPP_NAME_SIZE 32 + +/* Package header */ +struct i40e_package_header { + struct i40e_ppp_version version; + u32 segment_count; + u32 segment_offset[1]; +}; + +/* Generic segment header */ +struct i40e_generic_seg_header { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_NOTES 0x00000002 +#define SEGMENT_TYPE_I40E 0x00000011 +#define SEGMENT_TYPE_X722 0x00000012 + u32 type; + struct i40e_ppp_version version; + u32 size; + char name[I40E_PPP_NAME_SIZE]; +}; + +struct i40e_metadata_segment { + struct i40e_generic_seg_header header; + struct i40e_ppp_version version; + u32 track_id; + char name[I40E_PPP_NAME_SIZE]; +}; + +struct i40e_device_id_entry { + u32 vendor_dev_id; + u32 sub_vendor_dev_id; +}; + +struct i40e_profile_segment { + struct i40e_generic_seg_header header; + struct i40e_ppp_version version; + char name[I40E_PPP_NAME_SIZE]; + u32 device_table_count; + struct i40e_device_id_entry device_table[1]; +}; + +struct i40e_section_table { + u32 section_count; + u32 section_offset[1]; +}; + +struct i40e_profile_section_header { + u16 tbl_size; + u16 data_end; + struct { +#define SECTION_TYPE_INFO 0x00000010 +#define SECTION_TYPE_MMIO 0x00000800 +#define SECTION_TYPE_AQ 0x00000801 +#define SECTION_TYPE_NOTE 0x80000000 +#define SECTION_TYPE_NAME 0x80000001 + u32 type; + u32 offset; + u32 size; + } section; +}; + +struct i40e_profile_info { + u32 track_id; + struct i40e_ppp_version version; + u8 op; +#define I40E_PPP_ADD_TRACKID 0x01 +#define I40E_PPP_REMOVE_TRACKID 0x02 + u8 reserved[7]; + u8 name[I40E_PPP_NAME_SIZE]; +}; #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index d38a2b2aea2b20d2b0bbf9700c90578ba9fc0197..c5ad0388c3d577be4991ff0b36283151f0be3824 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -81,7 +81,9 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_IWARP = 20, I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, + I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, @@ -161,7 +163,8 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define 
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000 +#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \ @@ -393,6 +396,37 @@ struct i40e_virtchnl_pf_event { int severity; }; +/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. + * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. + */ +#define I40E_QUEUE_TYPE_PE_AEQ 0x80 +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct i40e_virtchnl_iwarp_qv_info qv_info[1]; +}; + /* VF reset states - these are written into the RSTAT register: * I40E_VFGEN_RSTAT1 on the PF * I40E_VFGEN_RSTAT on the VF diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 00c42d80327668ef3cc8c614570c44ae58738791..b8ada6d8d890565bbbda69e4d50a52b980ede01a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -49,6 +49,13 @@ #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "i40evf: " +/* VSI state flags shared with common code */ +enum i40evf_vsi_state_t { + __I40E_VSI_DOWN, + /* This must be last as it determines the size of the BITMAP */ + __I40E_VSI_STATE_SIZE__, +}; + /* dummy struct to make common code less painful */ struct i40e_vsi { struct i40evf_adapter *back; @@ -56,10 +63,11 @@ struct i40e_vsi { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u16 seid; u16 id; - unsigned long state; + DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); int base_vector; u16 work_limit; u16 qs_handle; + void *priv; /* client driver data reference. */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ @@ -71,10 +79,6 @@ struct i40e_vsi { #define I40EVF_MAX_RXD 4096 #define I40EVF_MIN_RXD 64 #define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 - -/* Supported Rx Buffer Sizes */ -#define I40EVF_RXBUFFER_2048 2048 -#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 #define I40EVF_AQ_LEN 32 #define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ @@ -169,15 +173,15 @@ enum i40evf_state_t { enum i40evf_critical_section_t { __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __I40EVF_IN_CLIENT_TASK, }; -/* make common code happy */ -#define __I40E_DOWN __I40EVF_DOWN /* board specific private data structure */ struct i40evf_adapter { struct timer_list watchdog_timer; struct work_struct reset_task; struct work_struct adminq_task; + struct delayed_work client_task; struct delayed_work init_task; struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; @@ -195,15 +199,16 @@ struct i40evf_adapter { u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; + int num_iwarp_msix; + int iwarp_base_vector; u32 client_pending; + struct i40e_client_instance *cinst; struct msix_entry *msix_entries; u32 flags; #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_IN_NETPOLL BIT(4) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) -#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) #define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) #define I40EVF_FLAG_RESET_PENDING BIT(9) #define I40EVF_FLAG_RESET_NEEDED BIT(10) @@ -211,15 +216,18 @@ struct i40evf_adapter { #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) #define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14) -#define I40EVF_FLAG_PROMISC_ON BIT(15) -#define I40EVF_FLAG_ALLMULTI_ON BIT(16) +#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15) +#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16) +#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17) +#define I40EVF_FLAG_PROMISC_ON BIT(18) +#define I40EVF_FLAG_ALLMULTI_ON BIT(19) +#define I40EVF_FLAG_LEGACY_RX BIT(20) /* duplicates for common code */ -#define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE +#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX /* flags for admin queue service task */ u32 aq_required; #define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) @@ -246,7 +254,6 @@ struct i40evf_adapter { /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; - struct net_device_stats net_stats; struct i40e_hw hw; /* defined in i40e_type.h */ @@ -258,10 +265,11 @@ struct i40evf_adapter { bool link_up; enum i40e_aq_link_speed link_speed; enum i40e_virtchnl_ops current_op; -#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \ +#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ (_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ 0) +#define CLIENT_ENABLED(_a) ((_a)->cinst) /* RSS by the PF should be preferred over RSS via other methods. 
*/ #define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -292,6 +300,12 @@ struct i40evf_adapter { /* Ethtool Private Flags */ +/* lan device */ +struct i40e_device { + struct list_head list; + struct i40evf_adapter *vf; +}; + /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; extern const char i40evf_driver_version[]; @@ -337,4 +351,11 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); int i40evf_config_rss(struct i40evf_adapter *adapter); +int i40evf_lan_add_device(struct i40evf_adapter *adapter); +int i40evf_lan_del_device(struct i40evf_adapter *adapter); +void i40evf_client_subtask(struct i40evf_adapter *adapter); +void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len); +void i40evf_notify_client_l2_params(struct i40e_vsi *vsi); +void i40evf_notify_client_open(struct i40e_vsi *vsi); +void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c new file mode 100644 index 0000000000000000000000000000000000000000..ee737680a0e9de4db7cd7db6f65575fabbe0b0e4 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c @@ -0,0 +1,564 @@ +#include <linux/list.h> +#include <linux/errno.h> + +#include "i40evf.h" +#include "i40e_prototype.h" +#include "i40evf_client.h" + +static +const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR; +static struct i40e_client *vf_registered_client; +static LIST_HEAD(i40evf_devices); +static DEFINE_MUTEX(i40evf_device_mutex); + +static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len); + +static int i40evf_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info); + +static struct i40e_ops i40evf_lan_ops = { + .virtchnl_send = i40evf_client_virtchnl_send, + .setup_qvlist = i40evf_client_setup_qvlist, +}; + +/** + * i40evf_notify_client_message - call the client message receive callback + * @vsi: the VSI associated with this client + * @msg: message buffer + * @len: length of message + * + * If there is a client to this VSI, call the client + **/ +void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) +{ + struct i40e_client_instance *cinst; + + if (!vsi) + return; + + cinst = vsi->back->cinst; + if (!cinst || !cinst->client || !cinst->client->ops || + !cinst->client->ops->virtchnl_receive) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance virtchnl_receive function\n"); + return; + } + cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client, + msg, len); +} + +/** + * i40evf_notify_client_l2_params - call the client notify callback + * @vsi: the VSI with l2 param changes + * + * If there is a client to this VSI, call the client + **/ +void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) +{ + struct i40e_client_instance *cinst; + struct i40e_params params; + + if (!vsi) + return; + + cinst = vsi->back->cinst; + memset(&params, 0, sizeof(params)); + params.mtu = vsi->netdev->mtu; + params.link_up = vsi->back->link_up; + params.qos.prio_qos[0].qs_handle = vsi->qs_handle; + + if (!cinst || !cinst->client || !cinst->client->ops || + !cinst->client->ops->l2_param_change) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance l2_param_change function\n"); + return; + } +
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, + &params); +} + +/** + * i40evf_notify_client_open - call the client open callback + * @vsi: the VSI with netdev opened + * + * If there is a client to this netdev, call the client with open + **/ +void i40evf_notify_client_open(struct i40e_vsi *vsi) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_client_instance *cinst = adapter->cinst; + int ret; + + if (!cinst || !cinst->client || !cinst->client->ops || + !cinst->client->ops->open) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance open function\n"); + return; + } + if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) { + ret = cinst->client->ops->open(&cinst->lan_info, cinst->client); + if (!ret) + set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); + } +} + +/** + * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map + * @ldev: pointer to L2 context. + * + * Return 0 on success or < 0 on error + **/ +static int i40evf_client_release_qvlist(struct i40e_info *ldev) +{ + struct i40evf_adapter *adapter = ldev->vf; + i40e_status err; + + if (adapter->aq_required) + return -EAGAIN; + + err = i40e_aq_send_msg_to_pf(&adapter->hw, + I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + I40E_SUCCESS, NULL, 0, NULL); + + if (err) + dev_err(&adapter->pdev->dev, + "Unable to send iWarp vector release message to PF, error %d, aq status %d\n", + err, adapter->hw.aq.asq_last_status); + + return err; +} + +/** + * i40evf_notify_client_close - call the client close callback + * @vsi: the VSI with netdev closed + * @reset: true when close called due to reset pending + * + * If there is a client to this netdev, call the client with close + **/ +void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) +{ + struct i40evf_adapter *adapter = vsi->back; + struct i40e_client_instance *cinst = adapter->cinst; + + if (!cinst || !cinst->client || !cinst->client->ops || + !cinst->client->ops->close) { + dev_dbg(&vsi->back->pdev->dev, + "Cannot locate client instance close function\n"); + return; + } + cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); + i40evf_client_release_qvlist(&cinst->lan_info); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); +} + +/** + * i40evf_client_add_instance - add a client instance to the instance list + * @adapter: pointer to the board struct
+ * + * Returns cinst ptr on success, NULL on failure + **/ +static struct i40e_client_instance * +i40evf_client_add_instance(struct i40evf_adapter *adapter) +{ + struct i40e_client_instance *cinst = NULL; + struct netdev_hw_addr *mac = NULL; + struct i40e_vsi *vsi = &adapter->vsi; + int i; + + if (!vf_registered_client) + goto out; + + if (adapter->cinst) { + cinst = adapter->cinst; + goto out; + } + + cinst = kzalloc(sizeof(*cinst), GFP_KERNEL); + if (!cinst) + goto out; + + cinst->lan_info.vf = (void *)adapter; + cinst->lan_info.netdev = vsi->netdev; + cinst->lan_info.pcidev = adapter->pdev; + cinst->lan_info.fid = 0; + cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF; + cinst->lan_info.hw_addr = adapter->hw.hw_addr; + cinst->lan_info.ops = &i40evf_lan_ops; + cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR; + cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR; + cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD; + set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state); + + cinst->lan_info.msix_count = adapter->num_iwarp_msix; + cinst->lan_info.msix_entries = + &adapter->msix_entries[adapter->iwarp_base_vector]; + + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + cinst->lan_info.params.qos.prio_qos[i].tc = 0; + cinst->lan_info.params.qos.prio_qos[i].qs_handle = + vsi->qs_handle; + } + + mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, + struct netdev_hw_addr, list); + if (mac) + ether_addr_copy(cinst->lan_info.lanmac, mac->addr); + else + dev_err(&adapter->pdev->dev, "MAC address list is empty!\n"); + + cinst->client = vf_registered_client; + adapter->cinst = cinst; +out: + return cinst; +} + +/** + * i40evf_client_del_instance - removes a client instance from the list + * @adapter: pointer to the board struct + * + **/ +static +void i40evf_client_del_instance(struct i40evf_adapter *adapter) +{ + kfree(adapter->cinst); + adapter->cinst = NULL; +} + +/** + * i40evf_client_subtask - client maintenance work + * @adapter: board private structure + **/ +void i40evf_client_subtask(struct i40evf_adapter *adapter) +{ + struct i40e_client *client = vf_registered_client; + struct i40e_client_instance *cinst; + int ret = 0; + + if (adapter->state < __I40EVF_DOWN) + return; + + /* first check client is registered */ + if (!client) + return; + + /* Add the client instance to the instance list */ + cinst = i40evf_client_add_instance(adapter); + if (!cinst) + return; + + dev_info(&adapter->pdev->dev, "Added instance of Client %s\n", + client->name); + + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { + /* Send an Open request to the client */ + + if (client->ops && client->ops->open) + ret = client->ops->open(&cinst->lan_info, client); + if (!ret) + set_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cinst->state); + else + /* remove client instance */ + i40evf_client_del_instance(adapter); + } +} + +/** + * i40evf_lan_add_device - add a lan device struct to the list of lan devices + * @adapter: pointer to the board struct + * + * Returns 0 on success or non-0 on error + **/ +int i40evf_lan_add_device(struct i40evf_adapter *adapter) +{ + struct i40e_device *ldev; + int ret = 0; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry(ldev, &i40evf_devices, list) { + if (ldev->vf == adapter) { + ret = -EEXIST; + goto out; + } + } + ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); + if (!ldev) { + ret = -ENOMEM; + goto out; + } + ldev->vf = adapter; + INIT_LIST_HEAD(&ldev->list); + list_add(&ldev->list, &i40evf_devices);
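+ /* from this point on, a client registering later can discover this adapter on the i40evf_devices list */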
+ dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", + adapter->hw.bus.bus_id, adapter->hw.bus.device, + adapter->hw.bus.func); + + /* Since in some cases register may have happened before a device gets + * added, we can schedule a subtask to go initiate the clients. + */ + adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + +out: + mutex_unlock(&i40evf_device_mutex); + return ret; +} + +/** + * i40evf_lan_del_device - removes a lan device from the device list + * @adapter: pointer to the board struct + * + * Returns 0 on success or non-0 on error + **/ +int i40evf_lan_del_device(struct i40evf_adapter *adapter) +{ + struct i40e_device *ldev, *tmp; + int ret = -ENODEV; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) { + if (ldev->vf == adapter) { + dev_info(&adapter->pdev->dev, + "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", + adapter->hw.bus.bus_id, adapter->hw.bus.device, + adapter->hw.bus.func); + list_del(&ldev->list); + kfree(ldev); + ret = 0; + break; + } + } + + mutex_unlock(&i40evf_device_mutex); + return ret; +} + +/** + * i40evf_client_release - release client specific resources + * @client: pointer to the registered client + * + **/ +static void i40evf_client_release(struct i40e_client *client) +{ + struct i40e_client_instance *cinst; + struct i40e_device *ldev; + struct i40evf_adapter *adapter; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry(ldev, &i40evf_devices, list) { + adapter = ldev->vf; + cinst = adapter->cinst; + if (!cinst) + continue; + if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { + if (client->ops && client->ops->close) + client->ops->close(&cinst->lan_info, client, + false); + i40evf_client_release_qvlist(&cinst->lan_info); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); + + dev_warn(&adapter->pdev->dev, + "Client %s instance closed\n", client->name); + } + /* delete the client instance */ + i40evf_client_del_instance(adapter); + dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", + client->name); + } + mutex_unlock(&i40evf_device_mutex); +} + +/** + * i40evf_client_prepare - prepare client specific resources + * @client: pointer to the registered client + * + **/ +static void i40evf_client_prepare(struct i40e_client *client) +{ + struct i40e_device *ldev; + struct i40evf_adapter *adapter; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry(ldev, &i40evf_devices, list) { + adapter = ldev->vf; + /* Signal the watchdog to service the client */ + adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + } + mutex_unlock(&i40evf_device_mutex); +} + +/** + * i40evf_client_virtchnl_send - send a message to the PF instance + * @ldev: pointer to L2 context. + * @client: Client pointer. + * @msg: pointer to message buffer + * @len: message length + * + * Return 0 on success or < 0 on error + **/ +static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len) +{ + struct i40evf_adapter *adapter = ldev->vf; + i40e_status err; + + if (adapter->aq_required) + return -EAGAIN; + + err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP, + I40E_SUCCESS, msg, len, NULL); + if (err) + dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", + err, adapter->hw.aq.asq_last_status); + + return err; +} + +/** + * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map + * @ldev: pointer to L2 context. 
+ * @client: Client pointer. + * @qv_info: queue and vector list + * + * Return 0 on success or < 0 on error + **/ +static int i40evf_client_setup_qvlist(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_qvlist_info *qvlist_info) +{ + struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info; + struct i40evf_adapter *adapter = ldev->vf; + struct i40e_qv_info *qv_info; + i40e_status err; + u32 v_idx, i; + u32 msg_size; + + if (adapter->aq_required) + return -EAGAIN; + + /* A quick check on whether the vectors belong to the client */ + for (i = 0; i < qvlist_info->num_vectors; i++) { + qv_info = &qvlist_info->qv_info[i]; + if (!qv_info) + continue; + v_idx = qv_info->v_idx; + if ((v_idx >= + (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) || + (v_idx < adapter->iwarp_base_vector)) + return -EINVAL; + } + + v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info; + msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + + (sizeof(struct i40e_virtchnl_iwarp_qv_info) * + (v_qvlist_info->num_vectors - 1)); + + adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); + err = i40e_aq_send_msg_to_pf(&adapter->hw, + I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, + I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); + + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to send iWarp vector config message to PF, error %d, aq status %d\n", + err, adapter->hw.aq.asq_last_status); + goto out; + } + + err = -EBUSY; + for (i = 0; i < 5; i++) { + msleep(100); + if (!(adapter->client_pending & + BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { + err = 0; + break; + } + } +out: + return err; +} + +/** + * i40evf_register_client - Register an i40e client driver with the L2 driver + * @client: pointer to the i40e_client struct + * + * Returns 0 on success or non-0 on error + **/ +int i40evf_register_client(struct i40e_client *client) +{ + int ret = 0; + + if (!client) { + ret = -EIO; + goto out; + } + + if (strlen(client->name) == 0) { + pr_info("i40evf: Failed to register client with no name\n"); + ret = -EIO; + goto out; + } + + if (vf_registered_client) { + pr_info("i40evf: Client %s has already been registered!\n", + client->name); + ret = -EEXIST; + goto out; + } + + if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) || + (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) { + pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n", + client->name); + pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", + client->version.major, client->version.minor, + client->version.build, + i40evf_client_interface_version_str); + ret = -EIO; + goto out; + } + + vf_registered_client = client; + + i40evf_client_prepare(client); + + pr_info("i40evf: Registered client %s with return code %d\n", + client->name, ret); +out: + return ret; +} +EXPORT_SYMBOL(i40evf_register_client); + +/** + * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver + * @client: pointer to the i40e_client struct + * + * Returns 0 on success or non-0 on error + **/ +int i40evf_unregister_client(struct i40e_client *client) +{ + int ret = 0; + + /* When an unregister request comes through we would have to send + * a close for each of the client instances that were opened. + * The client_release function is called to handle this.
*/ + i40evf_client_release(client); + + if (vf_registered_client != client) { + pr_info("i40evf: Client %s has not been registered\n", + client->name); + ret = -ENODEV; + goto out; + } + vf_registered_client = NULL; + pr_info("i40evf: Unregistered client %s\n", client->name); +out: + return ret; +} +EXPORT_SYMBOL(i40evf_unregister_client); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h new file mode 100644 index 0000000000000000000000000000000000000000..7d283c7506a595b870bc1045586c5384a98e834d --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h @@ -0,0 +1,166 @@ +#ifndef _I40E_CLIENT_H_ +#define _I40E_CLIENT_H_ + +#define I40EVF_CLIENT_STR_LENGTH 10 + +/* Client interface version should be updated anytime there is a change in the + * existing APIs or data structures. + */ +#define I40EVF_CLIENT_VERSION_MAJOR 0 +#define I40EVF_CLIENT_VERSION_MINOR 01 +#define I40EVF_CLIENT_VERSION_BUILD 00 +#define I40EVF_CLIENT_VERSION_STR \ + __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \ + __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \ + __stringify(I40EVF_CLIENT_VERSION_BUILD) + +struct i40e_client_version { + u8 major; + u8 minor; + u8 build; + u8 rsvd; +}; + +enum i40e_client_state { + __I40E_CLIENT_NULL, + __I40E_CLIENT_REGISTERED +}; + +enum i40e_client_instance_state { + __I40E_CLIENT_INSTANCE_NONE, + __I40E_CLIENT_INSTANCE_OPENED, +}; + +struct i40e_ops; +struct i40e_client; + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. + * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. + */ +#define I40E_QUEUE_TYPE_PE_AEQ 0x80 +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_qvlist_info { + u32 num_vectors; + struct i40e_qv_info qv_info[1]; +}; + +#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF + +/* set of LAN parameters useful for clients managed by LAN */ + +/* Struct to hold per priority info */ +struct i40e_prio_qos_params { + u16 qs_handle; /* qs handle for prio */ + u8 tc; /* TC mapped to prio */ + u8 reserved; +}; + +#define I40E_CLIENT_MAX_USER_PRIORITY 8 +/* Struct to hold Client QoS */ +struct i40e_qos_params { + struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; +}; + +struct i40e_params { + struct i40e_qos_params qos; + u16 mtu; + u16 link_up; /* boolean */ +}; + +/* Structure to hold LAN device info for a client device */ +struct i40e_info { + struct i40e_client_version version; + u8 lanmac[6]; + struct net_device *netdev; + struct pci_dev *pcidev; + u8 __iomem *hw_addr; + u8 fid; /* function id, PF id or VF id */ +#define I40E_CLIENT_FTYPE_PF 0 +#define I40E_CLIENT_FTYPE_VF 1 + u8 ftype; /* function type, PF or VF */ + void *vf; /* cast to i40evf_adapter */ + + /* All L2 params that could change during the life span of the device + * and need to be communicated to the client when they change + */ + struct i40e_params params; + struct i40e_ops *ops; + + u16 msix_count; /* number of msix vectors */ + /* The array below is dynamically allocated based on msix_count */ + struct msix_entry *msix_entries; + u16 itr_index; /* Which ITR index the PE driver is supposed to use */ +}; + +struct i40e_ops { + /* setup_q_vector_list enables queues with a particular vector */ + int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, + struct i40e_qvlist_info *qv_info); + + u32 (*virtchnl_send)(struct i40e_info *ldev,
struct i40e_client *client, + u8 *msg, u16 len); + + /* If the PE Engine is unresponsive, the RDMA driver can request a reset. */ + void (*request_reset)(struct i40e_info *ldev, + struct i40e_client *client); +}; + +struct i40e_client_ops { + /* Should be called from register_client() or whenever the driver is + * ready to create a specific client instance. + */ + int (*open)(struct i40e_info *ldev, struct i40e_client *client); + + /* Should be closed when netdev is unavailable or when unregister + * call comes in. If the close happens due to a reset, set the reset + * bit to true. + */ + void (*close)(struct i40e_info *ldev, struct i40e_client *client, + bool reset); + + /* called when some l2 managed parameters change - mss */ + void (*l2_param_change)(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_params *params); + + /* called when a message is received from the PF */ + int (*virtchnl_receive)(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len); +}; + +/* Client device */ +struct i40e_client_instance { + struct list_head list; + struct i40e_info lan_info; + struct i40e_client *client; + unsigned long state; +}; + +struct i40e_client { + struct list_head list; /* list of registered clients */ + char name[I40EVF_CLIENT_STR_LENGTH]; + struct i40e_client_version version; + unsigned long state; /* client state */ + atomic_t ref_cnt; /* Count of all the client devices of this kind */ + u32 flags; +#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) +#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) + u8 type; +#define I40E_CLIENT_IWARP 0 + struct i40e_client_ops *ops; /* client ops provided by the client */ +}; + +/* used by clients */ +int i40evf_register_client(struct i40e_client *client); +int i40evf_unregister_client(struct i40e_client *client); +#endif /* _I40E_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 272d600c1ed06bf7b891bd88b4576c2964512ca4..9bb2cc7dd4e4afa5d98ac4f76b3794ffa359e7aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -63,52 +63,74 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) +/* For now we have one and only one private flag and it is only defined + * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead + * of leaving all this code sitting around empty we will strip it unless + * our one private flag is actually available. + */ +struct i40evf_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u32 flag; + bool read_only; +}; + +#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = { + I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0), +}; + +#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags) + /** - * i40evf_get_settings - Get Link Speed and Duplex settings + * i40evf_get_link_ksettings - Get Link Speed and Duplex settings * @netdev: network interface device structure - * @ecmd: ethtool command + * @cmd: ethtool command * * Reports speed/duplex settings. Because this is a VF, we don't know what * kind of link we really have, so we fake it.
**/ -static int i40evf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int i40evf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct i40evf_adapter *adapter = netdev_priv(netdev); - ecmd->supported = 0; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; - ecmd->port = PORT_NONE; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; /* Set speed and duplex */ switch (adapter->link_speed) { case I40E_LINK_SPEED_40GB: - ethtool_cmd_speed_set(ecmd, SPEED_40000); + cmd->base.speed = SPEED_40000; break; case I40E_LINK_SPEED_25GB: #ifdef SPEED_25000 - ethtool_cmd_speed_set(ecmd, SPEED_25000); + cmd->base.speed = SPEED_25000; #else netdev_info(netdev, "Speed is 25G, display not supported by this version of ethtool.\n"); #endif break; case I40E_LINK_SPEED_20GB: - ethtool_cmd_speed_set(ecmd, SPEED_20000); + cmd->base.speed = SPEED_20000; break; case I40E_LINK_SPEED_10GB: - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; break; case I40E_LINK_SPEED_1GB: - ethtool_cmd_speed_set(ecmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; case I40E_LINK_SPEED_100MB: - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; break; default: break; } - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; return 0; } @@ -125,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); + else if (sset == ETH_SS_PRIV_FLAGS) + return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -190,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } + } else if (sset == ETH_SS_PRIV_FLAGS) { + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40evf_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } + } +} + +/** + * i40evf_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The string set count and the string set itself must be kept in sync with + * each flag returned here. Add new strings for each flag to the + * i40evf_gstrings_priv_flags array. + * + * Returns a u32 bitmap of flags.
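+ * + * For example (illustrative values only): with "legacy-rx" as the sole + * private flag, bit 0 of the returned bitmap mirrors I40EVF_FLAG_LEGACY_RX, + * so a return value of 0x1 means legacy-rx is currently enabled.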
+ **/ +static u32 i40evf_get_priv_flags(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u32 i, ret_flags = 0; + + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + const struct i40evf_priv_flags *priv_flags; + + priv_flags = &i40evf_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->flags) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +/** + * i40evf_set_priv_flags - set private flags + * @netdev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u64 changed_flags; + u32 i; + + changed_flags = adapter->flags; + + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + const struct i40evf_priv_flags *priv_flags; + + priv_flags = &i40evf_gstrings_priv_flags[i]; + + if (priv_flags->read_only) + continue; + + if (flags & BIT(i)) + adapter->flags |= priv_flags->flag; + else + adapter->flags &= ~(priv_flags->flag); + } + + /* check for flags that changed */ + changed_flags ^= adapter->flags; + + /* Process any additional changes needed as a result of flag changes. */ + + /* issue a reset to force legacy-rx change to take effect */ + if (changed_flags & I40EVF_FLAG_LEGACY_RX) { + if (netif_running(netdev)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } } + + return 0; } /** @@ -239,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -643,7 +744,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, } static const struct ethtool_ops i40evf_ethtool_ops = { - .get_settings = i40evf_get_settings, .get_drvinfo = i40evf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = i40evf_get_ringparam, @@ -651,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, + .get_priv_flags = i40evf_get_priv_flags, + .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, @@ -663,6 +765,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .set_rxfh = i40evf_set_rxfh, .get_channels = i40evf_get_channels, .get_rxfh_key_size = i40evf_get_rxfh_key_size, + .get_link_ksettings = i40evf_get_link_ksettings, }; /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index f35dcaac5bb7bd9bf86412c1bb40f8e971d086c7..ea110a730e1676fa2fad0ffd7ed6c959fc56bfcb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -26,6 +26,14 @@ #include "i40evf.h" #include "i40e_prototype.h" +#include "i40evf_client.h" +/* All i40evf tracepoints are defined by the include below, which must + * be included exactly once across the whole kernel with + * CREATE_TRACE_POINTS defined + */ +#define CREATE_TRACE_POINTS +#include "i40e_trace.h" + static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); static int i40evf_close(struct net_device *netdev); @@ -36,9 +44,9 @@ static const char i40evf_driver_string[]
= #define DRV_KERN "-k" -#define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 27 +#define DRV_VERSION_MAJOR 2 +#define DRV_VERSION_MINOR 1 +#define DRV_VERSION_BUILD 14 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -489,7 +497,7 @@ static void i40evf_netpoll(struct net_device *netdev) int i; /* if interface is down do nothing */ - if (test_bit(__I40E_DOWN, &adapter->vsi.state)) + if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) return; for (i = 0; i < q_vectors; i++) @@ -685,12 +693,39 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) **/ static void i40evf_configure_rx(struct i40evf_adapter *adapter) { + unsigned int rx_buf_len = I40E_RXBUFFER_2048; struct i40e_hw *hw = &adapter->hw; int i; + /* Legacy Rx will always default to a 2048 buffer size. */ +#if (PAGE_SIZE < 8192) + if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { + struct net_device *netdev = adapter->netdev; + + /* For jumbo frames on systems with 4K pages we have to use + * an order 1 page, so we might as well increase the size + * of our Rx buffer to make better use of the available space + */ + rx_buf_len = I40E_RXBUFFER_3072; + + /* We use a 1536 buffer size for configurations with + * standard Ethernet mtu. On x86 this gives us enough room + * for shared info and 192 bytes of padding. + */ + if (!I40E_2K_TOO_SMALL_WITH_PADDING && + (netdev->mtu <= ETH_DATA_LEN)) + rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + } +#endif + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048; + adapter->rx_rings[i].rx_buf_len = rx_buf_len; + + if (adapter->flags & I40EVF_FLAG_LEGACY_RX) + clear_ring_build_skb_enabled(&adapter->rx_rings[i]); + else + set_ring_build_skb_enabled(&adapter->rx_rings[i]); } } @@ -1053,11 +1088,13 @@ static void i40evf_configure(struct i40evf_adapter *adapter) static void i40evf_up_complete(struct i40evf_adapter *adapter) { adapter->state = __I40EVF_RUNNING; - clear_bit(__I40E_DOWN, &adapter->vsi.state); + clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); i40evf_napi_enable_all(adapter); adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + if (CLIENT_ENABLED(adapter)) + adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } @@ -1235,13 +1272,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) } pairs = adapter->num_active_queues; - /* It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) twice the number of vectors as there are CPU's. + /* It's easy to be greedy for MSI-X vectors, but it really doesn't do + * us much good if we have more vectors than CPUs. However, we already + * limit the total number of queues by the number of CPUs so we do not + * need any further limiting here. 
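+ * For example (illustrative numbers): with 8 queue pairs and a single + * non-queue vector (NONQ_VECS), the budget below works out to + * min(8 + 1, vf_res->max_vectors).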
*/ - v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; - v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); + v_budget = min_t(int, pairs + NONQ_VECS, + (int)adapter->vf_res->max_vectors); adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -1472,6 +1509,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) { int err; + err = i40evf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + rtnl_lock(); err = i40evf_set_interrupt_capability(adapter); rtnl_unlock(); @@ -1488,23 +1532,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) goto err_alloc_q_vectors; } - err = i40evf_alloc_queues(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", adapter->num_active_queues); return 0; -err_alloc_queues: - i40evf_free_q_vectors(adapter); err_alloc_q_vectors: i40evf_reset_interrupt_capability(adapter); err_set_interrupt: + i40evf_free_queues(adapter); +err_alloc_queues: return err; } @@ -1685,6 +1722,7 @@ static void i40evf_watchdog_task(struct work_struct *work) i40evf_set_promiscuous(adapter, 0); goto watchdog_done; } + schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); @@ -1716,7 +1754,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; if (netif_running(adapter->netdev)) { - set_bit(__I40E_DOWN, &adapter->vsi.state); + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); adapter->link_up = false; @@ -1773,10 +1811,17 @@ static void i40evf_reset_task(struct work_struct *work) u32 reg_val; int i = 0, err; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) usleep_range(500, 1000); - + if (CLIENT_ENABLED(adapter)) { + adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN | + I40EVF_FLAG_CLIENT_NEEDS_CLOSE | + I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + I40EVF_FLAG_SERVICE_CLIENT_REQUESTED); + cancel_delayed_work_sync(&adapter->client_task); + i40evf_notify_client_close(&adapter->vsi, true); + } i40evf_misc_irq_disable(adapter); if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; @@ -1819,6 +1864,7 @@ static void i40evf_reset_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); i40evf_disable_vf(adapter); + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); return; /* Do not attempt to reinit. It's dead, Jim. 
*/ } @@ -1861,9 +1907,8 @@ static void i40evf_reset_task(struct work_struct *work) } adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - /* Open RDMA Client again */ - adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1979,6 +2024,48 @@ static void i40evf_adminq_task(struct work_struct *work) i40evf_misc_irq_enable(adapter); } +/** + * i40evf_client_task - worker thread to perform client work + * @work: pointer to work_struct containing our data + * + * This task handles client interactions. Because client calls can be + * reentrant, we can't handle them in the watchdog. + **/ +static void i40evf_client_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = + container_of(work, struct i40evf_adapter, client_task.work); + + /* If we can't get the client bit, just give up. We'll be rescheduled + * later. + */ + + if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) + return; + + if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) { + i40evf_client_subtask(adapter); + adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + goto out; + } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { + i40evf_notify_client_close(&adapter->vsi, false); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + goto out; + } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { + i40evf_notify_client_open(&adapter->vsi); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; + goto out; + } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { + i40evf_notify_client_l2_params(&adapter->vsi); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; + } +out: + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); +} + /** * i40evf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure @@ -2147,7 +2234,9 @@ static int i40evf_close(struct net_device *netdev) return 0; - set_bit(__I40E_DOWN, &adapter->vsi.state); + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + if (CLIENT_ENABLED(adapter)) + adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; i40evf_down(adapter); adapter->state = __I40EVF_DOWN_PENDING; @@ -2161,21 +2250,6 @@ static int i40evf_close(struct net_device *netdev) return 0; } -/** - * i40evf_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. 
- **/ -static struct net_device_stats *i40evf_get_stats(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - /* only return the current stats */ - return &adapter->net_stats; -} - /** * i40evf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure @@ -2188,6 +2262,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) struct i40evf_adapter *adapter = netdev_priv(netdev); netdev->mtu = new_mtu; + if (CLIENT_ENABLED(adapter)) { + i40evf_notify_client_l2_params(&adapter->vsi); + adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + } adapter->flags |= I40EVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); @@ -2278,7 +2356,6 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_open = i40evf_open, .ndo_stop = i40evf_close, .ndo_start_xmit = i40evf_xmit_frame, - .ndo_get_stats = i40evf_get_stats, .ndo_set_rx_mode = i40evf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40evf_set_mac, @@ -2328,6 +2405,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct i40e_vsi *vsi = &adapter->vsi; int i; + netdev_features_t hw_enc_features; + netdev_features_t hw_features; /* got VF config message back from PF, now we can parse it */ for (i = 0; i < vfres->num_vsis; i++) { @@ -2339,46 +2418,52 @@ int i40evf_process_config(struct i40evf_adapter *adapter) return -ENODEV; } - netdev->hw_enc_features |= NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HIGHDMA | - NETIF_F_SOFT_FEATURES | - NETIF_F_TSO | - NETIF_F_TSO_ECN | - NETIF_F_TSO6 | + hw_enc_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + 0; + + /* advertise to stack only if offloads for encapsulated packets is + * supported + */ + if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) { + hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6 | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL | - NETIF_F_SCTP_CRC | - NETIF_F_RXHASH | - NETIF_F_RXCSUM | 0; - if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)) - netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + if (!(vfres->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) + netdev->gso_partial_features |= + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= hw_enc_features; + } /* record features VLANs can make use of */ - netdev->vlan_features |= netdev->hw_enc_features | - NETIF_F_TSO_MANGLEID; + netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; /* Write features and hw_features separately to avoid polluting - * with, or dropping, features that are set when we registgered. + * with, or dropping, features that are set when we registered. 
*/ - netdev->hw_features |= netdev->hw_enc_features; + hw_features = hw_enc_features; - netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES; - netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + netdev->hw_features |= hw_features; - /* disable VLAN features if not supported */ - if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)) - netdev->features ^= I40EVF_VLAN_FEATURES; + netdev->features |= hw_features | I40EVF_VLAN_FEATURES; adapter->vsi.id = adapter->vsi_res->vsi_id; @@ -2519,9 +2604,6 @@ static void i40evf_init_task(struct work_struct *work) goto err_alloc; } - if (hw->mac.type == I40E_MAC_X722_VF) - adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE; - if (i40evf_process_config(adapter)) goto err_alloc; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; @@ -2581,13 +2663,19 @@ static void i40evf_init_task(struct work_struct *work) adapter->netdev_registered = true; netif_tx_stop_all_queues(netdev); + if (CLIENT_ALLOWED(adapter)) { + err = i40evf_lan_add_device(adapter); + if (err) + dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", + err); + } dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); adapter->state = __I40EVF_DOWN; - set_bit(__I40E_DOWN, &adapter->vsi.state); + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); i40evf_misc_irq_enable(adapter); adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); @@ -2745,6 +2833,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->reset_task, i40evf_reset_task); INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); + INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task); INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); @@ -2857,14 +2946,21 @@ static void i40evf_remove(struct pci_dev *pdev) struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40evf_mac_filter *f, *ftmp; struct i40e_hw *hw = &adapter->hw; + int err; cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); - + cancel_delayed_work_sync(&adapter->client_task); if (adapter->netdev_registered) { unregister_netdev(netdev); adapter->netdev_registered = false; } + if (CLIENT_ALLOWED(adapter)) { + err = i40evf_lan_del_device(adapter); + if (err) + dev_warn(&pdev->dev, "Failed to delete client device: %d\n", + err); + } /* Shut down all the garbage mashers on the detention level */ adapter->state = __I40EVF_REMOVE; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index bee58af390e1c633006ddc38efddf256a307161a..deb2cb8dac6b2051ed99a05b11076fc561e141c0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -26,6 +26,7 @@ #include "i40evf.h" #include "i40e_prototype.h" +#include "i40evf_client.h" /* busy wait delay in msec */ #define I40EVF_BUSY_WAIT_DELAY 10 @@ -158,7 +159,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | I40E_VIRTCHNL_VF_OFFLOAD_VLAN | I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | + I40E_VIRTCHNL_VF_OFFLOAD_ENCAP | + I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; 
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; @@ -233,7 +236,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; int pairs = adapter->num_active_queues; - int i, len; + int i, len, max_frame = I40E_MAX_RXBUFFER; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -248,6 +251,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) if (!vqci) return; + /* Limit maximum frame size when jumbo frames are not enabled */ + if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) && + (adapter->netdev->mtu <= ETH_DATA_LEN)) + max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; @@ -259,17 +267,14 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->txq.queue_id = i; vqpi->txq.ring_len = adapter->tx_rings[i].count; vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; - vqpi->txq.headwb_enabled = 1; - vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + - (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); - vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; - vqpi->rxq.max_pkt_size = adapter->netdev->mtu - + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; - vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; + vqpi->rxq.max_pkt_size = max_frame; + vqpi->rxq.databuffer_size = + ALIGN(adapter->rx_rings[i].rx_buf_len, + BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); vqpi++; } @@ -955,17 +960,17 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case I40E_VIRTCHNL_OP_GET_STATS: { struct i40e_eth_stats *stats = (struct i40e_eth_stats *)msg; - adapter->net_stats.rx_packets = stats->rx_unicast + - stats->rx_multicast + - stats->rx_broadcast; - adapter->net_stats.tx_packets = stats->tx_unicast + - stats->tx_multicast + - stats->tx_broadcast; - adapter->net_stats.rx_bytes = stats->rx_bytes; - adapter->net_stats.tx_bytes = stats->tx_bytes; - adapter->net_stats.tx_errors = stats->tx_errors; - adapter->net_stats.rx_dropped = stats->rx_discards; - adapter->net_stats.tx_dropped = stats->tx_discards; + netdev->stats.rx_packets = stats->rx_unicast + + stats->rx_multicast + + stats->rx_broadcast; + netdev->stats.tx_packets = stats->tx_unicast + + stats->tx_multicast + + stats->tx_broadcast; + netdev->stats.rx_bytes = stats->rx_bytes; + netdev->stats.tx_bytes = stats->tx_bytes; + netdev->stats.tx_errors = stats->tx_errors; + netdev->stats.rx_dropped = stats->rx_discards; + netdev->stats.tx_dropped = stats->tx_discards; adapter->current_stats = *stats; } break; @@ -999,6 +1004,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; + case I40E_VIRTCHNL_OP_IWARP: + /* Gobble zero-length replies from the PF. They indicate that + * a previous message was received OK, and the client doesn't + * care about that.
+ */ + if (msglen && CLIENT_ENABLED(adapter)) + i40evf_notify_client_message(&adapter->vsi, + msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: adapter->client_pending &= ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); @@ -1014,7 +1029,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, } break; default: - if (v_opcode != adapter->current_op) + if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", adapter->current_op, v_opcode); break; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 8aee314332a87a9cfe696d8451aeb8b321b7411d..d8517779439bc87da745aadb78283da1350b4397 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -39,6 +39,27 @@ #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +/* Wake Up Status */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact */ +#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + E1000_WUS_EX | \ + E1000_WUS_ARPD | \ + E1000_WUS_IPV4 | \ + E1000_WUS_IPV6 | \ + E1000_WUS_NSD) + +/* Wake Up Packet Length */ +#define E1000_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define E1000_WUPM_BYTES 128 + /* Extended Device Control */ #define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index d20af6b2f581698098a972d557a0a93fe19d1d48..3e7fed73df15f09e24d447cbfce43be8b7d411af 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -55,6 +55,10 @@ #define E1000_VF_RESET 0x01 /* VF requests reset */ #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +/* VF requests to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) #define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ #define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ #define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index acbc3abe2dddfc7bdf87a3724a95b136237a519e..bf9bf9056d0c8f320872507f18e3dc92bfa3e875 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -111,6 +111,16 @@ struct vf_data_storage { bool spoofchk_enabled; }; +/* Number of unicast MAC filters reserved for the PF in the RAR registers */ +#define IGB_PF_MAC_FILTERS_RESERVED 3 + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[ETH_ALEN]; +}; + #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ #define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ #define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ @@ -142,12 
+152,24 @@ struct vf_data_storage { /* Supported Rx Buffer Sizes */ #define IGB_RXBUFFER_256 256 #define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_3072 3072 #define IGB_RX_HDR_LEN IGB_RXBUFFER_256 -#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 +#define IGB_TS_HDR_LEN 16 + +#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define IGB_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN) +#else +#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN) +#endif /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IGB_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + #define AUTO_ALL_MODES 0 #define IGB_EEPROM_APME 0x0400 @@ -301,12 +323,51 @@ struct igb_q_vector { }; enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_LB_VLAN_BSWAP, IGB_RING_FLAG_TX_CTX_IDX, IGB_RING_FLAG_TX_DETECT_HANG }; +#define ring_uses_large_buffer(ring) \ + test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define set_ring_build_skb_enabled(ring) \ + set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igb_rx_bufsz(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGB_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN; +#endif + return IGB_RXBUFFER_2048; +} + +static inline unsigned int igb_rx_pg_order(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring)) + #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) #define IGB_RX_DESC(R, i) \ @@ -398,6 +459,15 @@ struct igb_nfc_filter { u16 action; }; +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u8 queue; + u8 state; /* bitmask */ +}; + +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_IN_USE 0x2 + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -524,6 +594,10 @@ struct igb_adapter { /* lock for RX network flow classification filter */ spinlock_t nfc_lock; bool etype_bitmap[MAX_ETYPE_FILTER]; + + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; }; /* flags controlling PTP/1588 function */ @@ -545,6 +619,7 @@ struct igb_adapter { #define IGB_FLAG_HAS_MSIX BIT(13) #define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_VLAN_PROMISC BIT(15) +#define IGB_FLAG_RX_LEGACY BIT(16) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 @@ -558,7 +633,6 @@ struct igb_adapter { #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ #define IGB_82576_TSYNC_SHIFT 19 -#define IGB_TS_HDR_LEN 16 enum e1000_state_t { __IGB_TESTING, __IGB_RESETTING, @@ -591,7 +665,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); void igb_setup_tctl(struct igb_adapter *); void igb_setup_rctl(struct 
igb_adapter *); netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); -void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); void igb_alloc_rx_buffers(struct igb_ring *, u16); void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); bool igb_has_link(struct igb_adapter *adapter); @@ -604,7 +677,7 @@ void igb_ptp_reset(struct igb_adapter *adapter); void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, struct sk_buff *skb); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 737b664d004cbb372222f6f63acc5c9f38ad4c95..0efb62db6efdd0fa2bfa7d531343790a3156f2be 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -144,7 +144,15 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) -static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings) + +static int igb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -152,76 +160,73 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; u32 status; u32 speed; + u32 supported, advertising; status = rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full| - SUPPORTED_Autoneg | - SUPPORTED_TP | - SUPPORTED_Pause); - ecmd->advertising = ADVERTISED_TP; + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + advertising = ADVERTISED_TP; if (hw->mac.autoneg == 1) { - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; /* the e1000 autoneg seems to match ethtool nicely */ - ecmd->advertising |= hw->phy.autoneg_advertised; + advertising |= hw->phy.autoneg_advertised; } - ecmd->port = PORT_TP; - ecmd->phy_address = hw->phy.addr; - ecmd->transceiver = XCVR_INTERNAL; + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; } else { - ecmd->supported = (SUPPORTED_FIBRE | - SUPPORTED_1000baseKX_Full | - SUPPORTED_Autoneg | - SUPPORTED_Pause); - ecmd->advertising = (ADVERTISED_FIBRE | - ADVERTISED_1000baseKX_Full); + supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause); + advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); if (hw->mac.type == e1000_i354) { if ((hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && !(status & E1000_STATUS_2P5_SKU_OVER)) { - ecmd->supported 
|= SUPPORTED_2500baseX_Full; - ecmd->supported &= - ~SUPPORTED_1000baseKX_Full; - ecmd->advertising |= ADVERTISED_2500baseX_Full; - ecmd->advertising &= - ~ADVERTISED_1000baseKX_Full; + supported |= SUPPORTED_2500baseX_Full; + supported &= ~SUPPORTED_1000baseKX_Full; + advertising |= ADVERTISED_2500baseX_Full; + advertising &= ~ADVERTISED_1000baseKX_Full; } } if (eth_flags->e100_base_fx) { - ecmd->supported |= SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_100baseT_Full; + supported |= SUPPORTED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; } if (hw->mac.autoneg == 1) - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.port = PORT_FIBRE; } if (hw->mac.autoneg != 1) - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); switch (hw->fc.requested_mode) { case e1000_fc_full: - ecmd->advertising |= ADVERTISED_Pause; + advertising |= ADVERTISED_Pause; break; case e1000_fc_rx_pause: - ecmd->advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); break; case e1000_fc_tx_pause: - ecmd->advertising |= ADVERTISED_Asym_Pause; + advertising |= ADVERTISED_Asym_Pause; break; default: - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); } if (status & E1000_STATUS_LU) { if ((status & E1000_STATUS_2P5_SKU) && @@ -236,39 +241,46 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } if ((status & E1000_STATUS_FD) || hw->phy.media_type != e1000_media_type_copper) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; } else { speed = SPEED_UNKNOWN; - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(ecmd, speed); + cmd->base.speed = speed; if ((hw->phy.media_type == e1000_media_type_fiber) || hw->mac.autoneg) - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; else - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; /* MDI-X => 2; MDI =>1; Invalid =>0 */ if (hw->phy.media_type == e1000_media_type_copper) - ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI; else - ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; if (hw->phy.mdix == AUTO_ALL_MODES) - ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; else - ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static int igb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 advertising; /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed @@ -283,12 +295,12 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) * some hardware doesn't allow MDI setting when speed or * duplex is forced. 
*/ - if (ecmd->eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl) { if (hw->phy.media_type != e1000_media_type_copper) return -EOPNOTSUPP; - if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && - (ecmd->autoneg != AUTONEG_ENABLE)) { + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); return -EINVAL; } @@ -297,10 +309,13 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) usleep_range(1000, 2000); - if (ecmd->autoneg == AUTONEG_ENABLE) { + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; if (hw->phy.media_type == e1000_media_type_fiber) { - hw->phy.autoneg_advertised = ecmd->advertising | + hw->phy.autoneg_advertised = advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; switch (adapter->link_speed) { @@ -320,31 +335,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) break; } } else { - hw->phy.autoneg_advertised = ecmd->advertising | + hw->phy.autoneg_advertised = advertising | ADVERTISED_TP | ADVERTISED_Autoneg; } - ecmd->advertising = hw->phy.autoneg_advertised; + advertising = hw->phy.autoneg_advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = e1000_fc_default; } else { - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; /* calling this overrides forced MDI setting */ - if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { + if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) { clear_bit(__IGB_RESETTING, &adapter->state); return -EINVAL; } } /* MDI-X => 2; MDI => 1; Auto => 3 */ - if (ecmd->eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl) { /* fix up the value for auto (3 => 0) as zero is mapped * internally to auto */ - if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) hw->phy.mdix = AUTO_ALL_MODES; else - hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; } /* reset the link */ @@ -852,6 +867,8 @@ static void igb_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN; } static void igb_get_ringparam(struct net_device *netdev, @@ -1811,14 +1828,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, tx_ntc = tx_ring->next_to_clean; rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); - while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { + while (rx_desc->wb.upper.length) { /* check Rx buffer */ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; /* sync Rx buffer for CPU read */ dma_sync_single_for_cpu(rx_ring->dev, rx_buffer_info->dma, - IGB_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* verify contents of skb */ @@ -1828,12 +1845,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, /* sync Rx buffer for device write */ dma_sync_single_for_device(rx_ring->dev, rx_buffer_info->dma, - IGB_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* unmap buffer on Tx side */ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; - igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer_info->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer_info, 
dma), + dma_unmap_len(tx_buffer_info, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer_info, len, 0); /* increment Rx/Tx next to clean counters */ rx_ntc++; @@ -2271,6 +2297,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset) return IGB_STATS_LEN; case ETH_SS_TEST: return IGB_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGB_PRIV_FLAGS_STR_LEN; default: return -ENOTSUPP; } @@ -2376,6 +2404,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igb_priv_flags_strings, + IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; } } @@ -3388,9 +3420,38 @@ static int igb_set_channels(struct net_device *netdev, return 0; } +static u32 igb_get_priv_flags(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGB_FLAG_RX_LEGACY; + if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX) + flags |= IGB_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + } + + return 0; +} + static const struct ethtool_ops igb_ethtool_ops = { - .get_settings = igb_get_settings, - .set_settings = igb_set_settings, .get_drvinfo = igb_get_drvinfo, .get_regs_len = igb_get_regs_len, .get_regs = igb_get_regs, @@ -3426,8 +3487,12 @@ static const struct ethtool_ops igb_ethtool_ops = { .set_rxfh = igb_set_rxfh, .get_channels = igb_get_channels, .set_channels = igb_set_channels, + .get_priv_flags = igb_get_priv_flags, + .set_priv_flags = igb_set_priv_flags, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, + .get_link_ksettings = igb_get_link_ksettings, + .set_link_ksettings = igb_set_link_ksettings, }; void igb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index be456bae816906e24338006a8b3597b539f86959..1cf74aa4ebd9ee9705ea9386d7ee755b348d44cb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -161,11 +161,16 @@ static void igb_vlan_mode(struct net_device *netdev, static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); static void igb_restore_vlan(struct igb_adapter *); -static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); +static void igb_rar_set_index(struct igb_adapter *, u32); static void igb_ping_all_vfs(struct igb_adapter *); static void igb_msg_task(struct igb_adapter *); static void igb_vmm_control(struct igb_adapter *); static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_flush_mac_table(struct igb_adapter *); +static int igb_available_rars(struct igb_adapter *, u8); +static void igb_set_default_mac_filter(struct igb_adapter *); +static int igb_uc_sync(struct net_device *, const unsigned char *); +static int igb_uc_unsync(struct net_device *, const unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); static int 
igb_ndo_set_vf_vlan(struct net_device *netdev, @@ -554,7 +559,7 @@ static void igb_dump(struct igb_adapter *adapter) 16, 1, page_address(buffer_info->page) + buffer_info->page_offset, - IGB_RX_BUFSZ, true); + igb_rx_bufsz(rx_ring), true); } } } @@ -1153,6 +1158,8 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) pci_disable_sriov(adapter->pdev); msleep(500); + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; kfree(adapter->vf_data); adapter->vf_data = NULL; wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); @@ -1987,6 +1994,13 @@ void igb_reset(struct igb_adapter *adapter) if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); + /* RAR registers were cleared during init_hw, clear mac table */ + igb_flush_mac_table(adapter); + __dev_uc_unsync(adapter->netdev, NULL); + + /* Recover default RAR entry */ + igb_set_default_mac_filter(adapter); + /* Flow control settings reset on hardware reset, so guarantee flow * control is off when forcing speed. */ @@ -2095,11 +2109,9 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { struct igb_adapter *adapter = netdev_priv(dev); - struct e1000_hw *hw = &adapter->hw; int vfn = adapter->vfs_allocated_count; - int rar_entries = hw->mac.rar_entry_count - (vfn + 1); - if (netdev_uc_count(dev) >= rar_entries) + if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) return -ENOMEM; } @@ -2517,6 +2529,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } + igb_set_default_mac_filter(adapter); + /* get firmware version for ethtool -i */ igb_set_fw_version(adapter); @@ -2761,6 +2775,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->mac_table); kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV @@ -2796,6 +2811,8 @@ static int igb_disable_sriov(struct pci_dev *pdev) msleep(500); } + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; kfree(adapter->vf_data); adapter->vf_data = NULL; adapter->vfs_allocated_count = 0; @@ -2816,8 +2833,9 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); int old_vfs = pci_num_vf(pdev); + struct vf_mac_filter *mac_list; int err = 0; - int i; + int num_vf_mac_filters, i; if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { err = -EPERM; @@ -2845,6 +2863,38 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) goto out; } + /* Due to the limited number of RAR entries calculate potential + * number of MAC filters available for the VFs. Reserve entries + * for PF default MAC, PF MAC filters and at least one RAR entry + * for each VF for VF MAC. 
+ */ + num_vf_mac_filters = adapter->hw.mac.rar_entry_count - + (1 + IGB_PF_MAC_FILTERS_RESERVED + + adapter->vfs_allocated_count); + + adapter->vf_mac_list = kcalloc(num_vf_mac_filters, + sizeof(struct vf_mac_filter), + GFP_KERNEL); + + mac_list = adapter->vf_mac_list; + INIT_LIST_HEAD(&adapter->vf_macs.l); + + if (adapter->vf_mac_list) { + /* Initialize list of VF MAC filters */ + for (i = 0; i < num_vf_mac_filters; i++) { + mac_list->vf = -1; + mac_list->free = true; + list_add(&mac_list->l, &adapter->vf_macs.l); + mac_list++; + } + } else { + /* If we could not allocate memory for the VF MAC filters + * we can continue without this feature but warn user. + */ + dev_err(&pdev->dev, + "Unable to allocate memory for VF MAC filter list\n"); + } + /* only call pci_enable_sriov() if no VFs are allocated already */ if (!old_vfs) { err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); @@ -2861,6 +2911,8 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) goto out; err_out: + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; kfree(adapter->vf_data); adapter->vf_data = NULL; adapter->vfs_allocated_count = 0; @@ -2937,6 +2989,7 @@ static void igb_remove(struct pci_dev *pdev) iounmap(hw->flash_address); pci_release_mem_regions(pdev); + kfree(adapter->mac_table); kfree(adapter->shadow_vfta); free_netdev(netdev); @@ -3099,6 +3152,11 @@ static int igb_sw_init(struct igb_adapter *adapter) /* Assume MSI-X interrupts, will be checked during IRQ allocation */ adapter->flags |= IGB_FLAG_HAS_MSIX; + adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) * + hw->mac.rar_entry_count, GFP_ATOMIC); + if (!adapter->mac_table) + return -ENOMEM; + igb_probe_vfs(adapter); igb_init_queue_configuration(adapter); @@ -3293,7 +3351,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) size = sizeof(struct igb_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc(size); + tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; @@ -3404,6 +3462,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= IGB_TX_HTHRESH << 8; txdctl |= IGB_TX_WTHRESH << 16; + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct igb_tx_buffer) * ring->count); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); } @@ -3435,7 +3497,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) size = sizeof(struct igb_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc(size); + rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; @@ -3720,6 +3782,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, struct igb_ring *ring) { struct e1000_hw *hw = &adapter->hw; + union e1000_adv_rx_desc *rx_desc; u64 rdba = ring->dma; int reg_idx = ring->reg_idx; u32 srrctl = 0, rxdctl = 0; @@ -3741,7 +3804,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, /* set descriptor configuration */ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; @@ -3758,11 +3824,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; + /* initialize rx_buffer_info */ + 
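Aside: a worked example of the RAR budgeting done in igb_enable_sriov() above. The figures are assumptions for illustration only: a part with 24 receive-address registers, the full 7 VFs, and 3 entries held back for PF filters (standing in for IGB_PF_MAC_FILTERS_RESERVED, whose real value is defined in igb.h):

    #include <stdio.h>

    int main(void)
    {
        int rar_entry_count = 24;     /* assumed register count for the example */
        int vfs_allocated_count = 7;  /* maximum VFs supported by igb */
        int pf_filters_reserved = 3;  /* assumed IGB_PF_MAC_FILTERS_RESERVED */

        /* one entry for the PF default MAC, a few for PF filters, one per
         * VF for its primary MAC; whatever is left becomes the shared pool
         * of VF MAC filters handed out over the mailbox
         */
        int num_vf_mac_filters = rar_entry_count -
                                 (1 + pf_filters_reserved + vfs_allocated_count);

        printf("VF MAC filter slots: %d\n", num_vf_mac_filters); /* 13 */
        return 0;
    }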
memset(ring->rx_buffer_info, 0, + sizeof(struct igb_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGB_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + /* enable receive descriptor fetching */ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; wr32(E1000_RXDCTL(reg_idx), rxdctl); } +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) +{ + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + return; + + set_ring_build_skb_enabled(rx_ring); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); +#endif +} + /** * igb_configure_rx - Configure receive Unit after Reset * @adapter: board private structure @@ -3774,14 +3868,17 @@ static void igb_configure_rx(struct igb_adapter *adapter) int i; /* set the correct pool for the PF default MAC address in entry 0 */ - igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, - adapter->vfs_allocated_count); + igb_set_default_mac_filter(adapter); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) - igb_configure_rx_ring(adapter, adapter->rx_ring[i]); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } } /** @@ -3822,55 +3919,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) igb_free_tx_resources(adapter->tx_ring[i]); } -void igb_unmap_and_free_tx_resource(struct igb_ring *ring, - struct igb_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* buffer_info must be completely set up in the transmit path */ -} - /** * igb_clean_tx_ring - Free Tx Buffers * @tx_ring: ring to be cleaned **/ static void igb_clean_tx_ring(struct igb_ring *tx_ring) { - struct igb_tx_buffer *buffer_info; - unsigned long size; - u16 i; + u16 i = tx_ring->next_to_clean; + struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; - if (!tx_ring->tx_buffer_info) - return; - /* Free all the Tx ring sk_buffs */ + while (i != tx_ring->next_to_use) { + union e1000_adv_tx_desc *eop_desc, *tx_desc; - for (i = 0; i < tx_ring->count; i++) { - buffer_info = &tx_ring->tx_buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, buffer_info); - } + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); - netdev_tx_reset_queue(txring_txq(tx_ring)); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); - size = sizeof(struct igb_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGB_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + 
i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } @@ -3932,50 +4037,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) **/ static void igb_clean_rx_ring(struct igb_ring *rx_ring) { - unsigned long size; - u16 i; + u16 i = rx_ring->next_to_clean; if (rx_ring->skb) dev_kfree_skb(rx_ring->skb); rx_ring->skb = NULL; - if (!rx_ring->rx_buffer_info) - return; - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { + while (i != rx_ring->next_to_alloc) { struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; - if (!buffer_info->page) - continue; - /* Invalidate cache lines that may have been written to by * device so that we avoid corrupting memory. */ dma_sync_single_range_for_cpu(rx_ring->dev, buffer_info->dma, buffer_info->page_offset, - IGB_RX_BUFSZ, + igb_rx_bufsz(rx_ring), DMA_FROM_DEVICE); /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, buffer_info->dma, - PAGE_SIZE, + igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); + IGB_RX_DMA_ATTR); __page_frag_cache_drain(buffer_info->page, buffer_info->pagecnt_bias); - buffer_info->page = NULL; + i++; + if (i == rx_ring->count) + i = 0; } - size = sizeof(struct igb_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -4014,8 +4108,7 @@ static int igb_set_mac(struct net_device *netdev, void *p) memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); /* set the correct pool for the new PF MAC address in entry 0 */ - igb_rar_set_qsel(adapter, hw->mac.addr, 0, - adapter->vfs_allocated_count); + igb_set_default_mac_filter(adapter); return 0; } @@ -4059,49 +4152,6 @@ static int igb_write_mc_addr_list(struct net_device *netdev) return netdev_mc_count(netdev); } -/** - * igb_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * - * Writes unicast address list to the RAR table. 
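Aside: both reworked cleanup paths above (igb_clean_tx_ring() and igb_clean_rx_ring()) stop sweeping the whole ring and instead walk only the occupied span, from next_to_clean up to next_to_use (Tx) or next_to_alloc (Rx), wrapping at the ring size; that is also what lets the rings switch from vzalloc() to plain vmalloc() with a targeted memset at ring bring-up. A standalone model of the wrap-around walk:

    #include <stdio.h>

    #define RING_COUNT 8  /* toy ring; real rings are hundreds of entries */

    static void walk_occupied(unsigned int next_to_clean,
                              unsigned int next_to_use)
    {
        unsigned int i = next_to_clean;

        /* visit only the slots actually handed out, not the whole ring */
        while (i != next_to_use) {
            printf("clean slot %u\n", i);
            i++;
            if (i == RING_COUNT)  /* wrap, as tx_buffer/tx_desc do above */
                i = 0;
        }
    }

    int main(void)
    {
        walk_occupied(6, 2);  /* wraps: cleans slots 6, 7, 0, 1 */
        return 0;
    }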
- * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int igb_write_uc_addr_list(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - unsigned int vfn = adapter->vfs_allocated_count; - unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > rar_entries) - return -ENOMEM; - - if (!netdev_uc_empty(netdev) && rar_entries) { - struct netdev_hw_addr *ha; - - netdev_for_each_uc_addr(ha, netdev) { - if (!rar_entries) - break; - igb_rar_set_qsel(adapter, ha->addr, - rar_entries--, - vfn); - count++; - } - } - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) { - wr32(E1000_RAH(rar_entries), 0); - wr32(E1000_RAL(rar_entries), 0); - } - wrfl(); - - return count; -} - static int igb_vlan_promisc_enable(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -4240,7 +4290,7 @@ static void igb_set_rx_mode(struct net_device *netdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int vfn = adapter->vfs_allocated_count; - u32 rctl = 0, vmolr = 0; + u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; int count; /* Check for Promiscuous and All Multicast modes */ @@ -4274,8 +4324,7 @@ static void igb_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = igb_write_uc_addr_list(netdev); - if (count < 0) { + if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { rctl |= E1000_RCTL_UPE; vmolr |= E1000_VMOLR_ROPE; } @@ -4298,6 +4347,14 @@ static void igb_set_rx_mode(struct net_device *netdev) E1000_RCTL_VFE); wr32(E1000_RCTL, rctl); +#if (PAGE_SIZE < 8192) + if (!adapter->vfs_allocated_count) { + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + rlpml = IGB_MAX_FRAME_BUILD_SKB; + } +#endif + wr32(E1000_RLPML, rlpml); + /* In order to support SR-IOV and eventually VMDq it is necessary to set * the VMOLR to enable the appropriate modes. 
Without this workaround * we will have issues with VLAN tag stripping not being done for frames @@ -4312,12 +4369,17 @@ static void igb_set_rx_mode(struct net_device *netdev) vmolr |= rd32(E1000_VMOLR(vfn)) & ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); - /* enable Rx jumbo frames, no need for restriction */ + /* enable Rx jumbo frames, restrict as needed to support build_skb */ vmolr &= ~E1000_VMOLR_RLPML_MASK; - vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE; +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + vmolr |= IGB_MAX_FRAME_BUILD_SKB; + else +#endif + vmolr |= MAX_JUMBO_FRAME_SIZE; + vmolr |= E1000_VMOLR_LPE; wr32(E1000_VMOLR(vfn), vmolr); - wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE); igb_restore_vf_multicasts(adapter); } @@ -5256,18 +5318,32 @@ static void igb_tx_map(struct igb_ring *tx_ring, dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i--) + i += tx_ring->count; tx_buffer = &tx_ring->tx_buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); - if (tx_buffer == first) - break; - if (i == 0) - i = tx_ring->count; - i--; } + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + tx_ring->next_to_use = i; } @@ -5339,7 +5415,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; out_drop: - igb_unmap_and_free_tx_resource(tx_ring, first); + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } @@ -6304,7 +6381,6 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - int rar_entry = hw->mac.rar_entry_count - (vf + 1); u32 reg, msgbuf[3]; u8 *addr = (u8 *)(&msgbuf[1]); @@ -6312,7 +6388,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) igb_vf_reset(adapter, vf); /* set vf mac address */ - igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + igb_set_vf_mac(adapter, vf, vf_mac); /* enable transmit and receive for vf */ reg = rd32(E1000_VFTE); @@ -6332,18 +6408,238 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) igb_write_mbx(hw, msgbuf, 3, vf); } +static void igb_flush_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].queue = 0; + igb_rar_set_index(adapter, i); + } +} + +static int igb_available_rars(struct igb_adapter *adapter, u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + /* do not count rar entries reserved for VFs MAC addresses */ + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i, count = 0; + + for (i = 0; i < rar_entries; i++) { + /* do not count default entries */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) + continue; + + /* do not count "in use" entries for different queues */ + if 
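Aside: dropping igb_write_uc_addr_list() for __dev_uc_sync() (earlier in this hunk) hands list bookkeeping to the net core: the driver supplies a per-address sync callback (which may fail) and an unsync callback, and the core invokes them only for the set difference against what was previously synced; a failing sync is what flips the device into unicast promiscuous mode above. A simplified reconciliation model in plain C (the real core keeps per-address sync counters rather than diffing arrays):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6
    typedef unsigned char mac_t[ETH_ALEN];

    static int in_list(const mac_t *list, int n, const unsigned char *a)
    {
        for (int i = 0; i < n; i++)
            if (!memcmp(list[i], a, ETH_ALEN))
                return 1;
        return 0;
    }

    /* model of __dev_uc_sync(): sync() new entries, unsync() vanished ones */
    static void uc_sync(mac_t *wanted, int nw, mac_t *active, int na,
                        int (*sync)(const unsigned char *),
                        void (*unsync)(const unsigned char *))
    {
        for (int i = 0; i < nw; i++)
            if (!in_list(active, na, wanted[i]))
                sync(wanted[i]);
        for (int i = 0; i < na; i++)
            if (!in_list(wanted, nw, active[i]))
                unsync(active[i]);
    }

    static int add_filter(const unsigned char *a)
    { printf("sync %02x:..:%02x\n", a[0], a[5]); return 0; }

    static void del_filter(const unsigned char *a)
    { printf("unsync %02x:..:%02x\n", a[0], a[5]); }

    int main(void)
    {
        mac_t wanted[2] = { {2,0,0,0,0,1}, {2,0,0,0,0,2} };
        mac_t active[2] = { {2,0,0,0,0,2}, {2,0,0,0,0,3} };

        uc_sync(wanted, 2, active, 2, add_filter, del_filter);
        return 0;
    }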
((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && + (adapter->mac_table[i].queue != queue)) + continue; + + count++; + } + + return count; +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igb_set_default_mac_filter(struct igb_adapter *adapter) +{ + struct igb_mac_addr *mac_table = &adapter->mac_table[0]; + + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); + mac_table->queue = adapter->vfs_allocated_count; + mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + + igb_rar_set_index(adapter, 0); +} + +int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE; + + igb_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].queue = 0; + + igb_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return min_t(int, ret, 0); +} + +static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return 0; +} + +int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, + const u32 info, const u8 *addr) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + struct list_head *pos; + struct vf_mac_filter *entry = NULL; + int ret = 0; + + switch (info) { + case E1000_VF_MAC_FILTER_CLR: + /* remove all unicast MAC filters related to the current VF */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + igb_del_mac_filter(adapter, entry->vf_mac, vf); + } + } + break; + case E1000_VF_MAC_FILTER_ADD: + if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + + if 
(!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + + /* try to find empty slot in the list */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->free) + break; + } + + if (entry && entry->free) { + entry->free = false; + entry->vf = vf; + ether_addr_copy(entry->vf_mac, addr); + + ret = igb_add_mac_filter(adapter, addr, vf); + ret = min_t(int, ret, 0); + } else { + ret = -ENOSPC; + } + + if (ret == -ENOSPC) + dev_warn(&pdev->dev, + "VF %d has requested MAC filter but there is no space for it\n", + vf); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) { + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 info = msg[0] & E1000_VT_MSGINFO_MASK; + /* The VF MAC Address is stored in a packed array of bytes * starting at the second 32 bit word of the msg array */ - unsigned char *addr = (char *)&msg[1]; - int err = -1; + unsigned char *addr = (unsigned char *)&msg[1]; + int ret = 0; - if (is_valid_ether_addr(addr)) - err = igb_set_vf_mac(adapter, vf, addr); + if (!info) { + if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) { + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", + vf); + return -EINVAL; + } - return err; + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC\n", + vf); + return -EINVAL; + } + + ret = igb_set_vf_mac(adapter, vf, addr); + } else { + ret = igb_set_vf_mac_filter(adapter, vf, info, addr); + } + + return ret; } static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) @@ -6400,13 +6696,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) switch ((msgbuf[0] & 0xFFFF)) { case E1000_VF_SET_MAC_ADDR: - retval = -EINVAL; - if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) - retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); - else - dev_warn(&pdev->dev, - "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", - vf); + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); break; case E1000_VF_SET_PROMISC: retval = igb_set_vf_promisc(adapter, msgbuf, vf); @@ -6686,7 +6976,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) DMA_TO_DEVICE); /* clear tx_buffer data */ - tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* clear last DMA location and unmap remaining buffers */ @@ -6822,8 +7111,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. 
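Aside: the filter table above is a simple positional allocator over the software mac_table mirror: add claims the first free slot, delete matches on address plus owning queue, and every state change is pushed to hardware through igb_rar_set_index(). A condensed model (the driver additionally protects the default entry at index 0 and the tail reserved for VF MACs):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN     6
    #define RAR_ENTRIES  8   /* toy table size */
    #define STATE_IN_USE 0x1

    struct mac_entry {
        unsigned char addr[ETH_ALEN];
        unsigned char queue;
        unsigned char state;
    };

    static struct mac_entry table[RAR_ENTRIES];

    static int add_mac_filter(const unsigned char *addr, unsigned char queue)
    {
        for (int i = 0; i < RAR_ENTRIES; i++) {
            if (table[i].state & STATE_IN_USE)
                continue;
            memcpy(table[i].addr, addr, ETH_ALEN);
            table[i].queue = queue;
            table[i].state |= STATE_IN_USE;
            return i;        /* caller syncs RAR[i] to hardware */
        }
        return -1;           /* -ENOSPC in the driver */
    }

    static int del_mac_filter(const unsigned char *addr, unsigned char queue)
    {
        for (int i = 0; i < RAR_ENTRIES; i++) {
            if (!(table[i].state & STATE_IN_USE))
                continue;
            if (table[i].queue != queue ||
                memcmp(table[i].addr, addr, ETH_ALEN))
                continue;
            memset(&table[i], 0, sizeof(table[i]));
            return 0;        /* caller syncs the now-empty RAR[i] */
        }
        return -2;           /* -ENOENT in the driver */
    }

    int main(void)
    {
        unsigned char mac[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
        int slot = add_mac_filter(mac, 0);

        printf("added at slot %d, del -> %d\n", slot, del_mac_filter(mac, 0));
        return 0;
    }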
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static inline bool igb_page_is_reserved(struct page *page) @@ -6831,11 +7126,10 @@ static inline bool igb_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } -static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, - struct page *page, - unsigned int truesize) +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) { - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; /* avoid re-using remote pages */ if (unlikely(igb_page_is_reserved(page))) @@ -6843,16 +7137,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_ref_count(page) != pagecnt_bias)) + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buffer->page_offset ^= IGB_RX_BUFSZ; #else - /* move offset up to the next cache line */ - rx_buffer->page_offset += truesize; +#define IGB_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) - if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + if (rx_buffer->page_offset > IGB_LAST_OFFSET) return false; #endif @@ -6860,7 +7151,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, * the pagecnt_bias and page count so that we fully restock the * number of references the driver holds. */ - if (unlikely(pagecnt_bias == 1)) { + if (unlikely(!pagecnt_bias)) { page_ref_add(page, USHRT_MAX); rx_buffer->pagecnt_bias = USHRT_MAX; } @@ -6872,34 +7163,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, * igb_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into + * @size: size of buffer to be added * * This function will add the data contained in rx_buffer->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. - * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. **/ -static bool igb_add_rx_frag(struct igb_ring *rx_ring, +static void igb_add_rx_frag(struct igb_ring *rx_ring, struct igb_rx_buffer *rx_buffer, - unsigned int size, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned char *va = page_address(page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) - unsigned int truesize = IGB_RX_BUFSZ; + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + union e1000_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(size); #endif - unsigned int pull_len; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif - if (unlikely(skb_is_nonlinear(skb))) - goto add_tail_frag; + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); @@ -6907,95 +7220,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, size -= IGB_TS_HDR_LEN; } - if (likely(size <= IGB_RX_HDR_LEN)) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!igb_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - return false; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGB_RX_HDR_LEN) + headlen = eth_get_headlen(va, IGB_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); /* update all of the pointers */ - va += pull_len; - size -= pull_len; - -add_tail_frag: - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - (unsigned long)va & ~PAGE_MASK, size, truesize); + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } - return igb_can_reuse_rx_page(rx_buffer, page, truesize); + return skb; } -static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) +static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + union e1000_adv_rx_desc *rx_desc, + unsigned int size) { - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); - struct igb_rx_buffer *rx_buffer; - struct page *page; - - rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); - - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + void *va = page_address(rx_buffer->page) + 
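Aside: the reuse test above charges page references out of a prepaid pool instead of touching the atomic page refcount on every recycle: pagecnt_bias is decremented once per buffer handed to the stack, reuse is allowed while page_ref_count(page) - pagecnt_bias is at most 1 (nobody else still holds the page), and the pool is topped up in bulk with page_ref_add(page, USHRT_MAX) when it runs out. A toy model of the accounting:

    #include <stdio.h>
    #include <limits.h>

    struct toy_page { unsigned int refcount; };

    struct rx_buffer {
        struct toy_page *page;
        unsigned short pagecnt_bias;
    };

    static int can_reuse(struct rx_buffer *b)
    {
        /* the network stack still owns part of the page */
        if (b->page->refcount - b->pagecnt_bias > 1)
            return 0;

        /* refill the prepaid pool before it underflows */
        if (!b->pagecnt_bias) {
            b->page->refcount += USHRT_MAX;  /* page_ref_add(page, USHRT_MAX) */
            b->pagecnt_bias = USHRT_MAX;
        }
        return 1;
    }

    int main(void)
    {
        struct toy_page pg = { .refcount = 1 };   /* fresh page */
        struct rx_buffer buf = { .page = &pg, .pagecnt_bias = 1 };

        buf.pagecnt_bias--;                       /* igb_get_rx_buffer() */
        printf("reuse: %d (bias now %u)\n", can_reuse(&buf),
               buf.pagecnt_bias);
        return 0;
    }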
rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGB_SKB_PAD + size); +#endif + struct sk_buff *skb; - /* prefetch first cache line of first page */ - prefetch(page_addr); + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); + prefetch(va + L1_CACHE_BYTES); #endif - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_failed++; - return NULL; - } + /* build an skb around the page buffer */ + skb = build_skb(va - IGB_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - } + /* update pointers within the skb to store the data */ + skb_reserve(skb, IGB_SKB_PAD); + __skb_put(skb, size); - /* pull page into skb */ - if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { - /* hand second half of page back to the ring */ - igb_reuse_rx_page(rx_ring, rx_buffer); - } else { - /* We are not reusing the buffer so unmap it and free - * any references we are holding to it - */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - __page_frag_cache_drain(page, rx_buffer->pagecnt_bias); + /* pull timestamp out of packet data */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); + __skb_pull(skb, IGB_TS_HDR_LEN); } - /* clear contents of rx_buffer */ - rx_buffer->page = NULL; + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif return skb; } @@ -7154,6 +7445,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, skb->protocol = eth_type_trans(skb, rx_ring->netdev); } +static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, + const unsigned int size) +{ + struct igb_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igb_put_rx_buffer(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer) +{ + if (igb_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { struct igb_ring *rx_ring = q_vector->rx.ring; @@ -7163,6 +7495,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) while (likely(total_packets < budget)) { union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer; + unsigned int size; /* return 
some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { @@ -7171,8 +7505,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) } rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); - - if (!rx_desc->wb.upper.status_error) + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) break; /* This memory barrier is needed to keep us from reading @@ -7181,13 +7515,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) */ dma_rmb(); + rx_buffer = igb_get_rx_buffer(rx_ring, size); + /* retrieve a buffer from the ring */ - skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); + if (skb) + igb_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size); + else + skb = igb_construct_skb(rx_ring, rx_buffer, + rx_desc, size); /* exit if we failed to retrieve a buffer */ - if (!skb) + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; break; + } + igb_put_rx_buffer(rx_ring, rx_buffer); cleaned_count++; /* fetch next buffer in frame if non-eop */ @@ -7231,6 +7577,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) return total_packets; } +static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0; +} + static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, struct igb_rx_buffer *bi) { @@ -7242,21 +7593,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; /* alloc new page for storage */ - page = dev_alloc_page(); + page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; } /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, - DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_page(page); + __free_pages(page, igb_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_failed++; return false; @@ -7264,7 +7617,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = igb_rx_offset(rx_ring); bi->pagecnt_bias = 1; return true; @@ -7279,6 +7632,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) union e1000_adv_rx_desc *rx_desc; struct igb_rx_buffer *bi; u16 i = rx_ring->next_to_use; + u16 bufsz; /* nothing to do */ if (!cleaned_count) @@ -7288,14 +7642,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; + bufsz = igb_rx_bufsz(rx_ring); + do { if (!igb_alloc_mapped_page(rx_ring, bi)) break; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - IGB_RX_BUFSZ, + bi->page_offset, bufsz, DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change @@ -7312,8 +7667,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while 
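Aside: the polling change above replaces the DD status-bit test with a test on the write-back length. This works because device write-back always stores a non-zero packet length, while igb_alloc_rx_buffers() (end of this hunk) and ring bring-up explicitly zero wb.upper.length on every descriptor returned to hardware, so a zero length reliably means "not yet written by hardware". A model of the handshake:

    #include <stdint.h>
    #include <stdio.h>

    #define RING 4

    struct desc { uint16_t length; };  /* stand-in for wb.upper.length */

    static struct desc ring[RING];     /* zeroed: nothing received yet */

    static void hw_receive(unsigned int i, uint16_t len)
    {
        ring[i].length = len;          /* device write-back, len != 0 */
    }

    static void clean_rx(unsigned int *next_to_clean)
    {
        while (ring[*next_to_clean].length) {
            /* the driver issues dma_rmb() here so the rest of the
             * descriptor is not read ahead of the length check
             */
            printf("packet of %u bytes at slot %u\n",
                   ring[*next_to_clean].length, *next_to_clean);
            ring[*next_to_clean].length = 0;  /* refill re-arms the slot */
            *next_to_clean = (*next_to_clean + 1) % RING;
        }
    }

    int main(void)
    {
        unsigned int ntc = 0;

        hw_receive(0, 64);
        hw_receive(1, 1500);
        clean_rx(&ntc);    /* drains slots 0 and 1, stops at empty slot 2 */
        return 0;
    }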
(cleaned_count); @@ -7630,6 +7985,36 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, return 0; } +static void igb_deliver_wake_packet(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) + return; + + skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP static int igb_suspend(struct device *dev) @@ -7659,7 +8044,7 @@ static int igb_resume(struct device *dev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 err; + u32 err, val; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -7690,6 +8075,10 @@ static int igb_resume(struct device *dev) */ igb_get_hw_control(adapter); + val = rd32(E1000_WUS); + if (val & WAKE_PKT_WUS) + igb_deliver_wake_packet(netdev); + wr32(E1000_WUS, ~0); rtnl_lock(); @@ -7948,11 +8337,16 @@ static void igb_io_resume(struct pci_dev *pdev) igb_get_hw_control(adapter); } -static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, - u8 qsel) +/** + * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table + * @adapter: Pointer to adapter structure + * @index: Index of the RAR entry which need to be synced with MAC table + **/ +static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) { struct e1000_hw *hw = &adapter->hw; u32 rar_low, rar_high; + u8 *addr = adapter->mac_table[index].addr; /* HW expects these to be in network order when they are plugged * into the registers which are little endian. In order to guarantee @@ -7963,12 +8357,16 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, rar_high = le16_to_cpup((__le16 *)(addr + 4)); /* Indicate to hardware the Address is Valid. 
*/ - rar_high |= E1000_RAH_AV; + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { + rar_high |= E1000_RAH_AV; - if (hw->mac.type == e1000_82575) - rar_high |= E1000_RAH_POOL_1 * qsel; - else - rar_high |= E1000_RAH_POOL_1 << qsel; + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * + adapter->mac_table[index].queue; + else + rar_high |= E1000_RAH_POOL_1 << + adapter->mac_table[index].queue; + } wr32(E1000_RAL(index), rar_low); wrfl(); @@ -7984,10 +8382,13 @@ static int igb_set_vf_mac(struct igb_adapter *adapter, * towards the first, as a result a collision should not be possible */ int rar_entry = hw->mac.rar_entry_count - (vf + 1); + unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; - memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - - igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); + ether_addr_copy(vf_mac_addr, mac_addr); + ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); + adapter->mac_table[rar_entry].queue = vf; + adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; + igb_rar_set_index(adapter, rar_entry); return 0; } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c4477552ce9ef2e153eb1678c783ff9e9fb08eb8..7a3fd4d745928c809961c589e05ce25abc0077a9 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) * incoming frame. The value is stored in little endian format starting on * byte 8. **/ -void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, - unsigned char *va, +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, struct sk_buff *skb) { __le64 *regval = (__le64 *)va; diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 8dea1b1367ef65603592d9949fe55af0521cbcf8..34faa113a8a018e1e42a5ff1eebc867be65ad10d 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -71,45 +71,45 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) -static int igbvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int igbvf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 status; - ecmd->supported = SUPPORTED_1000baseT_Full; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); - ecmd->advertising = ADVERTISED_1000baseT_Full; - - ecmd->port = -1; - ecmd->transceiver = XCVR_DUMMY1; + cmd->base.port = -1; status = er32(STATUS); if (status & E1000_STATUS_LU) { if (status & E1000_STATUS_SPEED_1000) - ethtool_cmd_speed_set(ecmd, SPEED_1000); + cmd->base.speed = SPEED_1000; else if (status & E1000_STATUS_SPEED_100) - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; else - ethtool_cmd_speed_set(ecmd, SPEED_10); + cmd->base.speed = SPEED_10; if (status & E1000_STATUS_FD) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; 
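Aside: igb_rar_set_index() in the hunk above packs three things into the RAR high word: the top two MAC bytes, an address-valid flag, and a pool (queue) selector whose encoding differs by generation: 82575 stores a pool number (hence the multiply), later parts use a one-hot pool bit (hence the shift), and a not-in-use entry keeps the valid bit clear so the filter stays disabled. A sketch with assumed bit positions (the real E1000_RAH_* values live in the register headers):

    #include <stdint.h>
    #include <stdio.h>

    #define RAH_AV     (1u << 31)  /* assumed address-valid bit position */
    #define RAH_POOL_1 (1u << 18)  /* assumed base of the pool select field */

    static uint32_t make_rar_high(const unsigned char *addr,
                                  unsigned int queue,
                                  int is_82575, int in_use)
    {
        uint32_t rar_high = addr[4] | ((uint32_t)addr[5] << 8);

        if (in_use) {
            rar_high |= RAH_AV;
            if (is_82575)
                rar_high |= RAH_POOL_1 * queue;   /* numeric pool field */
            else
                rar_high |= RAH_POOL_1 << queue;  /* one-hot pool bitmap */
        }
        return rar_high;  /* AV clear on unused entries disables the filter */
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        printf("%#x\n", (unsigned)make_rar_high(mac, 2, 0, 1));
        return 0;
    }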
+ cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } -static int igbvf_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int igbvf_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { return -EOPNOTSUPP; } @@ -443,8 +443,6 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset, } static const struct ethtool_ops igbvf_ethtool_ops = { - .get_settings = igbvf_get_settings, - .set_settings = igbvf_set_settings, .get_drvinfo = igbvf_get_drvinfo, .get_regs_len = igbvf_get_regs_len, .get_regs = igbvf_get_regs, @@ -467,6 +465,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = { .get_ethtool_stats = igbvf_get_ethtool_stats, .get_coalesce = igbvf_get_coalesce, .set_coalesce = igbvf_set_coalesce, + .get_link_ksettings = igbvf_get_link_ksettings, + .set_link_ksettings = igbvf_set_link_ksettings, }; void igbvf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 6f4290d6dc9f3e8fec845b50597150b690210726..bf69f01f8467754cea8ecdfd7f2f8bc68c82dc21 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -101,6 +101,8 @@ enum latency_range { #define IGBVF_MNG_VLAN_NONE (-1) +#define IGBVF_MAX_MAC_FILTERS 3 + /* Number of packet split data buffers (not including the header buffer) */ #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) @@ -241,7 +243,6 @@ struct igbvf_adapter { /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; - struct net_device_stats net_stats; spinlock_t stats_lock; /* prevent concurrent stats updates */ /* structs defined in e1000_hw.h */ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index f800bf8eedaedbbb3e0836828f2f0fe5e6edee2e..30d58c4a444ed8bec2089ce4c2c0951706b80926 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -62,6 +62,10 @@ #define E1000_VF_RESET 0x01 /* VF requests reset */ #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +/* VF requests PF to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests PF to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 839ba110f7fb93c185f41930f0ab539f6ece1627..1b9cbbe88f6f41f9dc74d5b27fd38af0cb6612e2 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -400,8 +400,8 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, adapter->total_rx_packets += total_packets; adapter->total_rx_bytes += total_bytes; - adapter->net_stats.rx_bytes += total_bytes; - adapter->net_stats.rx_packets += total_packets; + netdev->stats.rx_bytes += total_bytes; + netdev->stats.rx_packets += total_packets; return cleaned; } @@ -864,8 +864,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) } } - adapter->net_stats.tx_bytes += total_bytes; - adapter->net_stats.tx_packets += total_packets; + netdev->stats.tx_bytes += 
total_bytes; + netdev->stats.tx_packets += total_packets; return count < tx_ring->count; } @@ -1432,13 +1432,53 @@ static void igbvf_set_multi(struct net_device *netdev) kfree(mta_list); } +/** + * igbvf_set_uni - Configure unicast MAC filters + * @netdev: network interface device structure + * + * This routine is responsible for configuring the hardware for proper + * unicast filters. + **/ +static int igbvf_set_uni(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) { + pr_err("Too many unicast filters - No Space\n"); + return -ENOSPC; + } + + /* Clear all unicast MAC filters */ + hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + /* Add MAC filters one by one */ + netdev_for_each_uc_addr(ha, netdev) { + hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, + ha->addr); + udelay(200); + } + } + + return 0; +} + +static void igbvf_set_rx_mode(struct net_device *netdev) +{ + igbvf_set_multi(netdev); + igbvf_set_uni(netdev); +} + /** * igbvf_configure - configure the hardware for Rx and Tx * @adapter: private board structure **/ static void igbvf_configure(struct igbvf_adapter *adapter) { - igbvf_set_multi(adapter->netdev); + igbvf_set_rx_mode(adapter->netdev); igbvf_restore_vlan(adapter); @@ -1798,7 +1838,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); /* Fill out the OS statistics structure */ - adapter->net_stats.multicast = adapter->stats.mprc; + adapter->netdev->stats.multicast = adapter->stats.mprc; } static void igbvf_print_link_info(struct igbvf_adapter *adapter) @@ -2333,21 +2373,6 @@ static void igbvf_reset_task(struct work_struct *work) igbvf_reinit_locked(adapter); } -/** - * igbvf_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. 
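Aside: the igbvf side above drives the PF's shared filter pool purely over the mailbox: igbvf_set_uni() sends one E1000_VF_MAC_FILTER_CLR to drop everything the VF owns, then one E1000_VF_MAC_FILTER_ADD per address, with a short delay between messages. The subcommand rides in the VT_MSGINFO bits of word 0 on top of the E1000_VF_SET_MAC_ADDR opcode, and the MAC is packed from word 1 onward, as e1000_set_uc_addr_vf() below shows. A model of the message layout (the 16-bit VT_MSGINFO shift is an assumption in this sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN          6
    #define VF_SET_MAC_ADDR   0x02               /* base mailbox opcode */
    #define VT_MSGINFO_SHIFT  16                 /* assumed subcommand shift */
    #define VF_MAC_FILTER_CLR (0x01u << VT_MSGINFO_SHIFT)
    #define VF_MAC_FILTER_ADD (0x02u << VT_MSGINFO_SHIFT)

    static void build_msg(uint32_t msgbuf[3], uint32_t subcmd,
                          const uint8_t *addr)
    {
        memset(msgbuf, 0, 3 * sizeof(uint32_t));
        msgbuf[0] = VF_SET_MAC_ADDR | subcmd;
        if (addr)                                /* CLR carries no address */
            memcpy(&msgbuf[1], addr, ETH_ALEN);  /* MAC starts at word 1 */
    }

    int main(void)
    {
        uint32_t msg[3];
        uint8_t mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x2a };

        build_msg(msg, VF_MAC_FILTER_CLR, NULL);
        printf("clr: %#x\n", (unsigned)msg[0]);
        build_msg(msg, VF_MAC_FILTER_ADD, mac);
        printf("add: %#x %#x %#x\n", (unsigned)msg[0],
               (unsigned)msg[1], (unsigned)msg[2]);
        return 0;
    }

The PF answers each message with ACK or NACK; a NACK on an ADD is how the VF learns the shared pool is exhausted (the -ENOSPC path in the helper below).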
- **/ -static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) -{ - struct igbvf_adapter *adapter = netdev_priv(netdev); - - /* only return the current stats */ - return &adapter->net_stats; -} - /** * igbvf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure @@ -2635,8 +2660,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_open = igbvf_open, .ndo_stop = igbvf_close, .ndo_start_xmit = igbvf_xmit_frame, - .ndo_get_stats = igbvf_get_stats, - .ndo_set_rx_mode = igbvf_set_multi, + .ndo_set_rx_mode = igbvf_set_rx_mode, .ndo_set_mac_address = igbvf_set_mac, .ndo_change_mtu = igbvf_change_mtu, .ndo_do_ioctl = igbvf_ioctl, diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 335ba66421458232cdd73e7cf1b6141c95ce48e3..528be116184ee35f9f86e264c40310d704acf344 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -36,6 +36,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32, u32, u32); static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); +static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 subcmd, u8 *addr); static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); /** @@ -66,6 +67,8 @@ static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) mac->ops.rar_set = e1000_rar_set_vf; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + /* set mac filter */ + mac->ops.set_uc_addr = e1000_set_uc_addr_vf; /* set vlan filter table array */ mac->ops.set_vfta = e1000_set_vfta_vf; @@ -337,6 +340,44 @@ static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) return E1000_SUCCESS; } +/** + * e1000_set_uc_addr_vf - Set or clear unicast filters + * @hw: pointer to the HW structure + * @sub_cmd: add or clear filters + * @addr: pointer to the filter MAC address + **/ +static s32 e1000_set_uc_addr_vf(struct e1000_hw *hw, u32 sub_cmd, u8 *addr) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3], msgbuf_chk; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, sizeof(msgbuf)); + msgbuf[0] |= sub_cmd; + msgbuf[0] |= E1000_VF_SET_MAC_ADDR; + msgbuf_chk = msgbuf[0]; + + if (addr) + memcpy(msg_addr, addr, ETH_ALEN); + + ret_val = mbx->ops.write_posted(hw, msgbuf, 3); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + if (!ret_val) { + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + if (msgbuf[0] == (msgbuf_chk | E1000_VT_MSGTYPE_NACK)) + return -ENOSPC; + } + + return ret_val; +} + /** * e1000_check_for_link_vf - Check for link for a virtual interface * @hw: pointer to the HW structure diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index f00a41d9a1ca6528364e839e4240234b833cfdad..4cf78b0dec50baa73992743e0972726ddcf4f661 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -179,6 +179,7 @@ struct e1000_mac_operations { s32 (*get_bus_info)(struct e1000_hw *); s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); + s32 (*set_uc_addr)(struct e1000_hw *, u32, u8 *); s32 (*reset_hw)(struct e1000_hw *); s32 (*init_hw)(struct e1000_hw *); s32 (*setup_link)(struct e1000_hw *); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index 
e5d72559cca9b060002525e5086ccfc008cc99d6..d10a0d242dda5db4f8c474a0814bc7a1b9eae491 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -94,24 +94,30 @@ static struct ixgb_stats ixgb_gstrings_stats[] = { #define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats) static int -ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +ixgb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ixgb_adapter *adapter = netdev_priv(netdev); - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + cmd->base.port = PORT_FIBRE; if (netif_carrier_ok(adapter->netdev)) { - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -126,13 +132,14 @@ void ixgb_set_speed_duplex(struct net_device *netdev) } static int -ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +ixgb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ixgb_adapter *adapter = netdev_priv(netdev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; - if (ecmd->autoneg == AUTONEG_ENABLE || - (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + if (cmd->base.autoneg == AUTONEG_ENABLE || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) return -EINVAL; if (netif_running(adapter->netdev)) { @@ -630,8 +637,6 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } static const struct ethtool_ops ixgb_ethtool_ops = { - .get_settings = ixgb_get_settings, - .set_settings = ixgb_set_settings, .get_drvinfo = ixgb_get_drvinfo, .get_regs_len = ixgb_get_regs_len, .get_regs = ixgb_get_regs, @@ -649,6 +654,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = { .set_phys_id = ixgb_set_phys_id, .get_sset_count = ixgb_get_sset_count, .get_ethtool_stats = ixgb_get_ethtool_stats, + .get_link_ksettings = ixgb_get_link_ksettings, + .set_link_ksettings = ixgb_set_link_ksettings, }; void ixgb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index fbd220d137b34ed78e059ab4c1473d1129ef43a1..5a713199653c181e88cfbff7ff3964739826b6f5 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -86,7 +86,6 @@ static void ixgb_set_multi(struct net_device *netdev); static void ixgb_watchdog(unsigned long data); static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -static struct net_device_stats *ixgb_get_stats(struct net_device *netdev); static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); static 
int ixgb_set_mac(struct net_device *netdev, void *p); static irqreturn_t ixgb_intr(int irq, void *data); @@ -367,7 +366,6 @@ static const struct net_device_ops ixgb_netdev_ops = { .ndo_open = ixgb_open, .ndo_stop = ixgb_close, .ndo_start_xmit = ixgb_xmit_frame, - .ndo_get_stats = ixgb_get_stats, .ndo_set_rx_mode = ixgb_set_multi, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgb_set_mac, @@ -1596,20 +1594,6 @@ ixgb_tx_timeout_task(struct work_struct *work) ixgb_up(adapter); } -/** - * ixgb_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. - **/ - -static struct net_device_stats * -ixgb_get_stats(struct net_device *netdev) -{ - return &netdev->stats; -} - /** * ixgb_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index b1ecc2627a5aee9727382ba7bf7cbee4796129a0..76263762bea17eb27bd4f42c0064f4e8a9747b2d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -86,17 +86,62 @@ /* Supported Rx Buffer Sizes */ #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_1536 1536 #define IXGBE_RXBUFFER_2K 2048 #define IXGBE_RXBUFFER_3K 3072 #define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ -#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the 3K + * buffers. + */ #if (PAGE_SIZE < 8192) -#define IXGBE_MAX_FRAME_BUILD_SKB \ - (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) +#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN) +#define IXGBE_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K)) + +static inline int ixgbe_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int ixgbe_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. 
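The headroom comment above is easier to follow with concrete numbers. The figures below assume a common x86-64 configuration (PAGE_SIZE 4096, NET_SKB_PAD 64, NET_IP_ALIGN 0) and an aligned skb_shared_info of about 320 bytes; exact values vary by kernel version and config, so treat this as illustrative only.

/* IXGBE_2K_TOO_SMALL_WITH_PADDING:
 *   (NET_SKB_PAD + 1536) > SKB_WITH_OVERHEAD(2048)
 *   (64 + 1536)          > (2048 - 320)
 *   1600                 > 1728            -> false (2K is enough)
 *
 * rx_buf_len = IXGBE_RXBUFFER_1536 - NET_IP_ALIGN = 1536
 *
 * ixgbe_compute_pad(1536):
 *   page_size = ALIGN(1536, 4096 / 2)           = 2048
 *   pad_size  = SKB_WITH_OVERHEAD(2048) - 1536  = 1728 - 1536 = 192
 *
 * so IXGBE_SKB_PAD works out to roughly 192 bytes of headroom, which
 * is also the room the Rx path hands to XDP ahead of xdp.data in the
 * ixgbe_main.c changes below.
 */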
+ */ + if (IXGBE_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = IXGBE_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return ixgbe_compute_pad(rx_buf_len); +} + +#define IXGBE_SKB_PAD ixgbe_skb_pad() #else -#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K +#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #endif /* @@ -190,7 +235,11 @@ struct vf_macvlans { struct ixgbe_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; - struct sk_buff *skb; + union { + struct sk_buff *skb; + /* XDP uses address ptr on irq_clean */ + void *data; + }; unsigned int bytecount; unsigned short gso_segs; __be16 protocol; @@ -243,6 +292,7 @@ enum ixgbe_ring_state_t { __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, + __IXGBE_TX_XDP_RING, }; #define ring_uses_build_skb(ring) \ @@ -269,10 +319,17 @@ struct ixgbe_fwd_adapter { set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) #define clear_ring_rsc_enabled(ring) \ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define ring_is_xdp(ring) \ + test_bit(__IXGBE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(__IXGBE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state) struct ixgbe_ring { struct ixgbe_ring *next; /* pointer to next ring in q_vector */ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ struct net_device *netdev; /* netdev ring belongs to */ + struct bpf_prog *xdp_prog; struct device *dev; /* device for DMA mapping */ struct ixgbe_fwd_adapter *l2_accel_priv; void *desc; /* descriptor ring memory */ @@ -334,6 +391,7 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FCOE_INDICES 8 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 #define IXGBE_MAX_MACVLANS 31 @@ -361,7 +419,7 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) return IXGBE_RXBUFFER_3K; #if (PAGE_SIZE < 8192) if (ring_uses_build_skb(ring)) - return IXGBE_MAX_FRAME_BUILD_SKB; + return IXGBE_MAX_2K_FRAME_BUILD_SKB; #endif return IXGBE_RXBUFFER_2K; } @@ -510,6 +568,7 @@ struct ixgbe_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; /* OS defined structs */ struct net_device *netdev; + struct bpf_prog *xdp_prog; struct pci_dev *pdev; unsigned long state; @@ -576,6 +635,10 @@ struct ixgbe_adapter { __be16 vxlan_port; __be16 geneve_port; + /* XDP */ + int num_xdp_queues; + struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -622,6 +685,7 @@ struct ixgbe_adapter { u64 tx_busy; unsigned int tx_ring_count; + unsigned int xdp_ring_count; unsigned int rx_ring_count; u32 link_speed; @@ -705,7 +769,7 @@ struct ixgbe_adapter { u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; + u32 *rss_key; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -762,6 +826,7 @@ enum ixgbe_boards { board_X540, board_X550, board_X550EM_x, + board_x550em_x_fw, board_x550em_a, board_x550em_a_fw, }; @@ -771,6 +836,7 @@ extern const struct ixgbe_info ixgbe_82599_info; extern const struct ixgbe_info ixgbe_X540_info; extern const struct ixgbe_info ixgbe_X550_info; extern 
const struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_x550em_x_fw_info; extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB @@ -790,7 +856,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter); void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); void ixgbe_reset(struct ixgbe_adapter *adapter); void ixgbe_set_ethtool_ops(struct net_device *netdev); -int ixgbe_setup_rx_resources(struct ixgbe_ring *); +int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); int ixgbe_setup_tx_resources(struct ixgbe_ring *); void ixgbe_free_rx_resources(struct ixgbe_ring *); void ixgbe_free_tx_resources(struct ixgbe_ring *); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 90fa5bf23d1b5f6d636478626b7d7f45d6a8871c..7e5e336d7dcc0ec297a300e984fbb4ecda40be34 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -179,6 +179,7 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598_BX: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_XFI: return SUPPORTED_10000baseKR_Full; default: return SUPPORTED_10000baseKX4_Full | @@ -186,60 +187,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) } } -static int ixgbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int ixgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; bool autoneg = false; + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); /* set the supported link speeds */ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->supported |= ixgbe_get_supported_10gtypes(hw); + supported |= ixgbe_get_supported_10gtypes(hw); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? + supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? 
SUPPORTED_1000baseKX_Full : SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= SUPPORTED_100baseT_Full; + supported |= SUPPORTED_100baseT_Full; if (supported_link & IXGBE_LINK_SPEED_10_FULL) - ecmd->supported |= SUPPORTED_10baseT_Full; + supported |= SUPPORTED_10baseT_Full; /* default advertised speed if phy.autoneg_advertised isn't set */ - ecmd->advertising = ecmd->supported; + advertising = supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { - ecmd->advertising = 0; + advertising = 0; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G; + advertising |= supported & ADVRTSD_MSK_10G; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { - if (ecmd->supported & SUPPORTED_1000baseKX_Full) - ecmd->advertising |= ADVERTISED_1000baseKX_Full; + if (supported & SUPPORTED_1000baseKX_Full) + advertising |= ADVERTISED_1000baseKX_Full; else - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; } } else { if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising = ADVERTISED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; } } if (autoneg) { - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; } else - ecmd->autoneg = AUTONEG_DISABLE; - - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.autoneg = AUTONEG_DISABLE; /* Determine the remaining settings based on the PHY type. 
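Everything above accumulates legacy u32 bitmaps and only converts to the new link-mode bitmaps at the end of the function. A condensed sketch of that round-trip, with hypothetical masks rather than the driver's full logic:

/* Accumulate legacy SUPPORTED_xxx / ADVERTISED_xxx bits in u32s,
 * then convert once into the ksettings link-mode bitmaps.
 */
u32 supported = 0, advertising = 0;

supported  |= SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
advertising  = supported;	/* default: advertise all supported */

ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
					supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
					advertising);

This keeps the large PHY-type switch below untouched while still filling in bitmaps that can describe more than 32 link modes.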
*/ switch (adapter->hw.phy.type) { @@ -248,14 +251,14 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_x550em_ext_t: case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; break; case ixgbe_phy_qt: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; break; case ixgbe_phy_nl: case ixgbe_phy_sfp_passive_tyco: @@ -273,9 +276,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_DA; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_DA; break; case ixgbe_sfp_type_sr: case ixgbe_sfp_type_lr: @@ -285,102 +288,113 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; break; case ixgbe_sfp_type_not_present: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_NONE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; break; case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; break; case ixgbe_sfp_type_unknown: default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; break; } break; case ixgbe_phy_xaui: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_NONE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; break; case ixgbe_phy_unknown: case ixgbe_phy_generic: case ixgbe_phy_sfp_unsupported: default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_OTHER; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; break; } /* Indicate pause support */ - ecmd->supported |= SUPPORTED_Pause; + supported |= SUPPORTED_Pause; switch (hw->fc.requested_mode) { case ixgbe_fc_full: - ecmd->advertising |= ADVERTISED_Pause; + advertising |= ADVERTISED_Pause; break; case ixgbe_fc_rx_pause: - ecmd->advertising |= ADVERTISED_Pause | + advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; break; case ixgbe_fc_tx_pause: - ecmd->advertising |= ADVERTISED_Asym_Pause; + advertising |= ADVERTISED_Asym_Pause; break; default: - ecmd->advertising &= ~(ADVERTISED_Pause | + advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); } if (netif_carrier_ok(netdev)) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; break; case 
IXGBE_LINK_SPEED_5GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_5000); + cmd->base.speed = SPEED_5000; break; case IXGBE_LINK_SPEED_2_5GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_2500); + cmd->base.speed = SPEED_2500; break; case IXGBE_LINK_SPEED_1GB_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; case IXGBE_LINK_SPEED_100_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_100); + cmd->base.speed = SPEED_100; break; case IXGBE_LINK_SPEED_10_FULL: - ethtool_cmd_speed_set(ecmd, SPEED_10); + cmd->base.speed = SPEED_10; break; default: break; } - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int ixgbe_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int ixgbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; s32 err = 0; + u32 supported, advertising; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if ((hw->phy.media_type == ixgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { @@ -388,12 +402,12 @@ static int ixgbe_set_settings(struct net_device *netdev, * this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ - if (ecmd->advertising & ~ecmd->supported) + if (advertising & ~supported) return -EINVAL; /* only allow one speed at a time if no autoneg */ - if (!ecmd->autoneg && hw->phy.multispeed_fiber) { - if (ecmd->advertising == + if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { + if (advertising == (ADVERTISED_10000baseT_Full | ADVERTISED_1000baseT_Full)) return -EINVAL; @@ -401,16 +415,16 @@ static int ixgbe_set_settings(struct net_device *netdev, old = hw->phy.autoneg_advertised; advertised = 0; - if (ecmd->advertising & ADVERTISED_10000baseT_Full) + if (advertising & ADVERTISED_10000baseT_Full) advertised |= IXGBE_LINK_SPEED_10GB_FULL; - if (ecmd->advertising & ADVERTISED_1000baseT_Full) + if (advertising & ADVERTISED_1000baseT_Full) advertised |= IXGBE_LINK_SPEED_1GB_FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) advertised |= IXGBE_LINK_SPEED_100_FULL; - if (ecmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) advertised |= IXGBE_LINK_SPEED_10_FULL; if (old == advertised) @@ -428,10 +442,11 @@ static int ixgbe_set_settings(struct net_device *netdev, clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } else { /* in this case we currently only support 10Gb/FULL */ - u32 speed = ethtool_cmd_speed(ecmd); - if ((ecmd->autoneg == AUTONEG_ENABLE) || - (ecmd->advertising != ADVERTISED_10000baseT_Full) || - (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + u32 speed = cmd->base.speed; + + if ((cmd->base.autoneg == AUTONEG_ENABLE) || + (advertising != ADVERTISED_10000baseT_Full) || + (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) return -EINVAL; } @@ -1056,15 +1071,19 @@ static int ixgbe_set_ringparam(struct net_device *netdev, 
if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_xdp_queues; i++) + adapter->xdp_ring[i]->count = new_tx_count; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->count = new_rx_count; adapter->tx_ring_count = new_tx_count; + adapter->xdp_ring_count = new_tx_count; adapter->rx_ring_count = new_rx_count; goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + i = max_t(int, i, adapter->num_xdp_queues); temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); if (!temp_ring) { @@ -1096,12 +1115,33 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } } + for (i = 0; i < adapter->num_xdp_queues; i++) { + memcpy(&temp_ring[i], adapter->xdp_ring[i], + sizeof(struct ixgbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + for (i = 0; i < adapter->num_tx_queues; i++) { ixgbe_free_tx_resources(adapter->tx_ring[i]); memcpy(adapter->tx_ring[i], &temp_ring[i], sizeof(struct ixgbe_ring)); } + for (i = 0; i < adapter->num_xdp_queues; i++) { + ixgbe_free_tx_resources(adapter->xdp_ring[i]); + + memcpy(adapter->xdp_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } adapter->tx_ring_count = new_tx_count; } @@ -1113,7 +1153,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, sizeof(struct ixgbe_ring)); temp_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(&temp_ring[i]); + err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); if (err) { while (i) { i--; @@ -1746,7 +1786,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; - err = ixgbe_setup_rx_resources(rx_ring); + err = ixgbe_setup_rx_resources(adapter, rx_ring); if (err) { ret_val = 4; goto err_nomem; @@ -2927,9 +2967,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - return sizeof(adapter->rss_key); + return IXGBE_RSS_KEY_SIZE; } static u32 ixgbe_rss_indir_size(struct net_device *netdev) @@ -3402,8 +3440,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) } static const struct ethtool_ops ixgbe_ethtool_ops = { - .get_settings = ixgbe_get_settings, - .set_settings = ixgbe_set_settings, .get_drvinfo = ixgbe_get_drvinfo, .get_regs_len = ixgbe_get_regs_len, .get_regs = ixgbe_get_regs, @@ -3442,6 +3478,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_ts_info = ixgbe_get_ts_info, .get_module_info = ixgbe_get_module_info, .get_module_eeprom = ixgbe_get_module_eeprom, + .get_link_ksettings = ixgbe_get_link_ksettings, + .set_link_ksettings = ixgbe_set_link_ksettings, }; void ixgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 1b8be7d813bd992bc8e869f6c458d5197ac44fc4..b45fdc98033dc4b052ebdd6492ecbc437fe4071b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -267,12 +267,14 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) **/ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) { - int i; + int i, 
reg_idx; for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; + for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) + adapter->xdp_ring[i]->reg_idx = reg_idx; return true; } @@ -308,6 +310,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) ixgbe_cache_ring_rss(adapter); } +static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) +{ + return adapter->xdp_prog ? nr_cpu_ids : 0; +} + #define IXGBE_RSS_64Q_MASK 0x3F #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 @@ -382,6 +389,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues_per_pool = tcs; adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = vmdq_i * tcs; #ifdef IXGBE_FCOE @@ -479,6 +487,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) netdev_set_tc_queue(dev, i, rss_i, rss_i * i); adapter->num_tx_queues = rss_i * tcs; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = rss_i * tcs; return true; @@ -549,6 +558,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues = vmdq_i * rss_i; adapter->num_tx_queues = vmdq_i * rss_i; + adapter->num_xdp_queues = 0; /* disable ATR as it is not supported when VMDq is enabled */ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -669,6 +679,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ adapter->num_rx_queues = rss_i; adapter->num_tx_queues = rss_i; + adapter->num_xdp_queues = ixgbe_xdp_queues(adapter); return true; } @@ -689,6 +700,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; + adapter->num_xdp_queues = 0; adapter->num_rx_pools = adapter->num_rx_queues; adapter->num_rx_queues_per_pool = 1; @@ -719,8 +731,11 @@ static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int i, vectors, vector_threshold; - /* We start by asking for one vector per queue pair */ + /* We start by asking for one vector per queue pair with XDP queues + * being stacked with TX queues. + */ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + vectors = max(vectors, adapter->num_xdp_queues); /* It is easy to be greedy for MSI-X vectors. However, it really * doesn't do much good if we have a lot more vectors than CPUs. 
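A quick illustration of the register-index layout that ixgbe_cache_ring_rss() above produces, for a hypothetical machine with num_tx_queues = 8 and nr_cpu_ids = 4 (so num_xdp_queues = 4 once an XDP program is attached):

/* tx_ring[0..7]  -> reg_idx 0..7
 * xdp_ring[0..3] -> reg_idx 8..11
 *
 * XDP rings are ordinary hardware Tx queues stacked after the regular
 * ones, which is why the MSI-X sizing above only needs
 * max(num_rx_queues, num_tx_queues, num_xdp_queues).
 */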
We'll @@ -800,6 +815,8 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, * @v_idx: index of vector in adapter struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate + * @xdp_count: total number of XDP rings to allocate + * @xdp_idx: index of first XDP ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * @@ -808,6 +825,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_count, int v_idx, int txr_count, int txr_idx, + int xdp_count, int xdp_idx, int rxr_count, int rxr_idx) { struct ixgbe_q_vector *q_vector; @@ -817,7 +835,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int ring_count, size; u8 tcs = netdev_get_num_tc(adapter->netdev); - ring_count = txr_count + rxr_count; + ring_count = txr_count + rxr_count + xdp_count; size = sizeof(struct ixgbe_q_vector) + (sizeof(struct ixgbe_ring) * ring_count); @@ -909,6 +927,33 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring++; } + while (xdp_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = xdp_idx; + set_ring_xdp(ring); + + /* assign ring to adapter */ + adapter->xdp_ring[xdp_idx] = ring; + + /* update count and index */ + xdp_count--; + xdp_idx++; + + /* push pointer to next ring */ + ring++; + } + while (rxr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; @@ -1002,17 +1047,18 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) int q_vectors = adapter->num_q_vectors; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; - int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int xdp_remaining = adapter->num_xdp_queues; + int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; int err; /* only one q_vector if MSI-X is disabled. 
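The ring-to-vector spread in the allocation loop below is easiest to see with sample numbers; assume q_vectors = 4, rxr_remaining = 8, txr_remaining = 8 and xdp_remaining = 4 (hypothetical, illustrative only):

/* Each iteration takes a proportional share of what is left:
 *
 *   rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx) = 2
 *   tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx) = 2
 *   xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx) = 1
 *
 * so every q_vector ends up with 2 Rx + 2 Tx + 1 XDP ring, and XDP
 * Tx completions are cleaned by the same NAPI context as the rings
 * they share a vector with.
 */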
*/ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) q_vectors = 1; - if (q_vectors >= (rxr_remaining + txr_remaining)) { + if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) { for (; rxr_remaining; v_idx++) { err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, - 0, 0, 1, rxr_idx); + 0, 0, 0, 0, 1, rxr_idx); if (err) goto err_out; @@ -1026,8 +1072,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) for (; v_idx < q_vectors; v_idx++) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, + xqpv, xdp_idx, rqpv, rxr_idx); if (err) @@ -1036,14 +1085,17 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) /* update counts and index */ rxr_remaining -= rqpv; txr_remaining -= tqpv; + xdp_remaining -= xqpv; rxr_idx++; txr_idx++; + xdp_idx += xqpv; } return 0; err_out: adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; @@ -1066,6 +1118,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) int v_idx = adapter->num_q_vectors; adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; @@ -1172,9 +1225,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) ixgbe_cache_ring_register(adapter); - e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", - adapter->num_rx_queues, adapter->num_tx_queues); + adapter->num_rx_queues, adapter->num_tx_queues, + adapter->num_xdp_queues); set_bit(__IXGBE_DOWN, &adapter->state); @@ -1195,6 +1249,7 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) { adapter->num_tx_queues = 0; + adapter->num_xdp_queues = 0; adapter->num_rx_queues = 0; ixgbe_free_q_vectors(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a7a430a7be2cd9201cc36022249219e94bfb41ca..22a29df1d29eab871818353111e6abc9197e73bf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -49,6 +49,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -85,6 +88,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X540] = &ixgbe_X540_info, [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, + [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, [board_x550em_a] = &ixgbe_x550em_a_info, [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, }; @@ -131,9 +135,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, @@ -508,7 +514,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { */ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) { - int i = 0, j = 0; + int i; char rname[16]; u32 regs[64]; @@ -570,21 +576,38 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); break; default: - pr_info("%-15s %08x\n", reginfo->name, - IXGBE_READ_REG(hw, reginfo->ofs)); + pr_info("%-15s %08x\n", + reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); return; } - for (i = 0; i < 8; i++) { - snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); - pr_err("%-15s", rname); + i = 0; + while (i < 64) { + int j; + char buf[9 * 8 + 1]; + char *p = buf; + + snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); for (j = 0; j < 8; j++) - pr_cont(" %08x", regs[i*8+j]); - pr_cont("\n"); + p += sprintf(p, " %08x", regs[i++]); + pr_err("%-15s%s\n", rname, buf); } } +static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) +{ + struct ixgbe_tx_buffer *tx_buffer; + + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, ring->next_to_use, ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); +} + /* * ixgbe_dump - Print registers, tx-rings and rx-rings */ @@ -594,14 +617,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_reg_info *reginfo; int n = 0; - struct ixgbe_ring *tx_ring; + struct ixgbe_ring *ring; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; struct my_u0 { u64 a; u64 b; } *u0; struct ixgbe_ring *rx_ring; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer_info; - u32 staterr; int i = 0; if (!netif_msg_hw(adapter)) @@ -635,14 +657,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) "Queue [NTU] [NTC] [bi(ntc)->dma ]", "leng", "ntw", "timestamp"); for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; - tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", - n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp); + ring = adapter->tx_ring[n]; + ixgbe_print_buffer(ring, n); + } + + for (n = 0; n < adapter->num_xdp_queues; n++) { + ring = adapter->xdp_ring[n]; + ixgbe_print_buffer(ring, n); } /* Print TX Rings */ @@ -687,21 +708,32 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) */ for (n = 0; n < adapter->num_tx_queues; n++) { - tx_ring = adapter->tx_ring[n]; + ring = adapter->tx_ring[n]; pr_info("------------------------------------\n"); - pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); pr_info("------------------------------------\n"); pr_info("%s%s %s %s %s %s\n", "T [desc] [address 63:0 ] ", "[PlPOIdStDDt Ln] [bi->dma ] ", "leng", "ntw", "timestamp", "bi->skb"); - for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = IXGBE_TX_DESC(tx_ring, i); - tx_buffer = &tx_ring->tx_buffer_info[i]; + for (i = 0; ring->desc && (i < ring->count); i++) { + tx_desc = IXGBE_TX_DESC(ring, i); + tx_buffer = &ring->tx_buffer_info[i]; u0 = (struct my_u0 
*)tx_desc; if (dma_unmap_len(tx_buffer, len) > 0) { - pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", + const char *ring_desc; + + if (i == ring->next_to_use && + i == ring->next_to_clean) + ring_desc = " NTC/U"; + else if (i == ring->next_to_use) + ring_desc = " NTU"; + else if (i == ring->next_to_clean) + ring_desc = " NTC"; + else + ring_desc = ""; + pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), @@ -709,16 +741,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) dma_unmap_len(tx_buffer, len), tx_buffer->next_to_watch, (u64)tx_buffer->time_stamp, - tx_buffer->skb); - if (i == tx_ring->next_to_use && - i == tx_ring->next_to_clean) - pr_cont(" NTC/U\n"); - else if (i == tx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == tx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); + tx_buffer->skb, + ring_desc); if (netif_msg_pktdata(adapter) && tx_buffer->skb) @@ -797,34 +821,44 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("------------------------------------\n"); pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("%s%s%s", + pr_info("%s%s%s\n", "R [desc] [ PktBuf A0] ", "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", - "<-- Adv Rx Read format\n"); - pr_info("%s%s%s", + "<-- Adv Rx Read format"); + pr_info("%s%s%s\n", "RWB[desc] [PcsmIpSHl PtRs] ", "[vl er S cks ln] ---------------- [bi->skb ] ", - "<-- Adv Rx Write-Back format\n"); + "<-- Adv Rx Write-Back format"); for (i = 0; i < rx_ring->count; i++) { + const char *ring_desc; + + if (i == rx_ring->next_to_use) + ring_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + ring_desc = " NTC"; + else + ring_desc = ""; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; rx_desc = IXGBE_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - if (staterr & IXGBE_RXD_STAT_DD) { + if (rx_desc->wb.upper.length) { /* Descriptor Done */ - pr_info("RWB[0x%03X] %016llX " - "%016llX ---------------- %p", i, + pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", + i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - rx_buffer_info->skb); + rx_buffer_info->skb, + ring_desc); } else { - pr_info("R [0x%03X] %016llX " - "%016llX %016llX %p", i, + pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n", + i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)rx_buffer_info->dma, - rx_buffer_info->skb); + rx_buffer_info->skb, + ring_desc); if (netif_msg_pktdata(adapter) && rx_buffer_info->dma) { @@ -835,14 +869,6 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) ixgbe_rx_bufsz(rx_ring), true); } } - - if (i == rx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == rx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - } } } @@ -972,6 +998,10 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) clear_bit(__IXGBE_HANG_CHECK_ARMED, &adapter->tx_ring[i]->state); + + for (i = 0; i < adapter->num_xdp_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + &adapter->xdp_ring[i]->state); } static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) @@ -1016,6 +1046,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) if (xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + tc = xdp_ring->dcb_tc; + if 
(xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); + } } static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) @@ -1167,7 +1205,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - napi_consume_skb(tx_buffer->skb, napi_budget); + if (ring_is_xdp(tx_ring)) + page_frag_free(tx_buffer->data); + else + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1228,7 +1269,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ struct ixgbe_hw *hw = &adapter->hw; - e_err(drv, "Detected Tx Unit Hang\n" + e_err(drv, "Detected Tx Unit Hang %s\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" " next_to_use <%x>\n" @@ -1236,13 +1277,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, "tx_buffer_info[next_to_clean]\n" " time_stamp <%lx>\n" " jiffies <%lx>\n", + ring_is_xdp(tx_ring) ? "(XDP)" : "", tx_ring->queue_index, IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), tx_ring->next_to_use, i, tx_ring->tx_buffer_info[i].time_stamp, jiffies); - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + if (!ring_is_xdp(tx_ring)) + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); e_info(probe, "tx hang %d detected on queue %d, resetting adapter\n", @@ -1255,6 +1299,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, return true; } + if (ring_is_xdp(tx_ring)) + return !!budget; + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); @@ -1846,6 +1893,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed * + * Check if the skb is valid in the XDP case it will be an error pointer. + * Return true in this case to abort processing and advance to next + * descriptor. + * * Check for corrupted packet headers caused by senders on the local L2 * embedded NIC switch not setting up their Tx Descriptors right. These * should be very rare. 
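A condensed sketch of the ERR_PTR convention that the kdoc below describes, pulled together from the ixgbe_run_xdp() and ixgbe_clean_rx_irq() hunks later in this patch (surrounding error handling trimmed for brevity):

struct sk_buff *skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);

if (IS_ERR(skb)) {
	/* not a real skb: PTR_ERR() recovers the XDP verdict */
	if (PTR_ERR(skb) == -IXGBE_XDP_TX)
		/* queued on an XDP Tx ring; flip to the other half page */
		ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
	else
		/* XDP_DROP / XDP_ABORTED: hand the buffer straight back */
		rx_buffer->pagecnt_bias++;
}

XDP_PASS maps to ERR_PTR(0), i.e. NULL, so the ordinary skb build path runs unchanged for passed frames.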
@@ -1864,6 +1915,10 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, { struct net_device *netdev = rx_ring->netdev; + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + /* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && @@ -2039,7 +2094,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { - if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) { /* the page has been released from the ring */ IXGBE_CB(skb)->page_released = true; } else { @@ -2060,21 +2115,22 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp, + union ixgbe_adv_rx_desc *rx_desc) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = xdp->data_end - xdp->data; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); #endif struct sk_buff *skb; /* prefetch first cache line of first page */ - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(xdp->data + L1_CACHE_BYTES); #endif /* allocate a skb to store the frags */ @@ -2087,7 +2143,7 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, IXGBE_CB(skb)->dma = rx_buffer->dma; skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset, + xdp->data - page_address(rx_buffer->page), size, truesize); #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; @@ -2095,7 +2151,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, rx_buffer->page_offset += truesize; #endif } else { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + memcpy(__skb_put(skb, size), + xdp->data, ALIGN(size, sizeof(long))); rx_buffer->pagecnt_bias++; } @@ -2104,32 +2161,32 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp, + union ixgbe_adv_rx_desc *rx_desc) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); #endif struct sk_buff *skb; /* prefetch first cache line of first page */ - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(xdp->data + L1_CACHE_BYTES); #endif /* build an skb to around the page buffer */ - skb = build_skb(va - IXGBE_SKB_PAD, truesize); + skb = build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IXGBE_SKB_PAD); - __skb_put(skb, size); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); /* record DMA address if this is 
the start of a chain of buffers */ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) @@ -2145,6 +2202,65 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED 1 +#define IXGBE_XDP_TX 2 + +static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_buff *xdp); + +static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int result = IXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = ixgbe_xmit_xdp_ring(adapter, xdp); + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); + return ERR_PTR(-result); +} + +static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; + + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif +} + /** * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information @@ -2163,17 +2279,19 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; -#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter = q_vector->adapter; +#ifdef IXGBE_FCOE int ddp_bytes; unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); + bool xdp_xmit = false; while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; + struct xdp_buff xdp; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ @@ -2196,14 +2314,34 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); /* retrieve a buffer from the ring */ - if (skb) + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + xdp.data_hard_start = xdp.data - + ixgbe_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + + skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + if (PTR_ERR(skb) == -IXGBE_XDP_TX) { + xdp_xmit = true; + ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); - else if (ring_uses_build_skb(rx_ring)) + } else if (ring_uses_build_skb(rx_ring)) { skb = ixgbe_build_skb(rx_ring, rx_buffer, - rx_desc, size); - else + &xdp, rx_desc); + } else { skb = ixgbe_construct_skb(rx_ring, rx_buffer, - rx_desc, size); + &xdp, rx_desc); + } /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -2260,6 +2398,16 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; } + if (xdp_xmit) { + struct ixgbe_ring *ring = 
adapter->xdp_ring[smp_processor_id()]; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); + } + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -3364,6 +3512,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]); } static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, @@ -3488,6 +3638,28 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); } +/** + * ixgbe_init_rss_key - Initialize adapter RSS key + * @adapter: device handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) +{ + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + /** * ixgbe_store_reta - Write the RETA table to HW * @adapter: device handle @@ -3590,7 +3762,7 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) /* Fill out hash function seeds */ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), - adapter->rss_key[i]); + *(adapter->rss_key + i)); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { @@ -3651,7 +3823,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { unsigned int pf_pool = adapter->num_vfs; @@ -3802,7 +3973,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, /* Limit the maximum frame size so we don't overrun the skb */ if (ring_uses_build_skb(ring) && !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) - rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB | + rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | IXGBE_RXDCTL_RLPML_EN; #endif } @@ -3972,8 +4143,8 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) || - (max_frame > IXGBE_MAX_FRAME_BUILD_SKB)) + if (IXGBE_2K_TOO_SMALL_WITH_PADDING || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); #endif } @@ -5505,7 +5676,10 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) union ixgbe_adv_tx_desc *eop_desc, *tx_desc; /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + if (ring_is_xdp(tx_ring)) + page_frag_free(tx_buffer->data); + else + dev_kfree_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -5546,7 +5720,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) } /* reset BQL for queue */ - netdev_tx_reset_queue(txring_txq(tx_ring)); + if (!ring_is_xdp(tx_ring)) + netdev_tx_reset_queue(txring_txq(tx_ring)); /* reset next_to_use and next_to_clean */ 
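On the RSS key rework visible above: the 40-byte key now lives in a separately allocated buffer rather than an array embedded in the adapter struct. A minimal usage sketch combining the two pieces this patch adds (allocation at sw_init time, then programming the hardware from the same buffer):

/* ixgbe_init_rss_key(): kzalloc(IXGBE_RSS_KEY_SIZE) once, seeded via
 * netdev_rss_key_fill(); the hardware is later programmed from the
 * very same buffer, as in ixgbe_store_key():
 */
if (ixgbe_init_rss_key(adapter))
	return -ENOMEM;

for (i = 0; i < 10; i++)	/* 10 x 32-bit words = 40-byte key */
	IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);

Making the key a pointer also lets ethtool report IXGBE_RSS_KEY_SIZE directly instead of taking sizeof() on an embedded array, as the ixgbe_get_rxfh_key_size() change earlier in this patch shows.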
tx_ring->next_to_use = 0; @@ -5575,6 +5750,8 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_clean_tx_ring(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + ixgbe_clean_tx_ring(adapter->xdp_ring[i]); } static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) @@ -5669,6 +5846,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter) u8 reg_idx = adapter->tx_ring[i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } + for (i = 0; i < adapter->num_xdp_queues; i++) { + u8 reg_idx = adapter->xdp_ring[i]->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } /* Disable the Tx DMA engine on 82599 and later MAC */ switch (hw->mac.type) { @@ -5854,6 +6036,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, if (!adapter->mac_table) return -ENOMEM; + if (ixgbe_init_rss_key(adapter)) + return -ENOMEM; + /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -5944,10 +6129,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, /* assign number of SR-IOV VFs */ if (hw->mac.type != ixgbe_mac_82598EB) { if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { - adapter->num_vfs = 0; + max_vfs = 0; e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); - } else { - adapter->num_vfs = max_vfs; } } #endif /* CONFIG_PCI_IOV */ @@ -6041,7 +6224,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) **/ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) { - int i, err = 0; + int i, j = 0, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); @@ -6051,10 +6234,20 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) e_err(probe, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } + for (j = 0; j < adapter->num_xdp_queues; j++) { + err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", j); + goto err_setup_tx; + } return 0; err_setup_tx: /* rewind the index freeing the rings as we go */ + while (j--) + ixgbe_free_tx_resources(adapter->xdp_ring[j]); while (i--) ixgbe_free_tx_resources(adapter->tx_ring[i]); return err; @@ -6066,7 +6259,8 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) * * Returns 0 on success, negative on failure **/ -int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); @@ -6105,6 +6299,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + rx_ring->xdp_prog = adapter->xdp_prog; + return 0; err: vfree(rx_ring->rx_buffer_info); @@ -6128,7 +6324,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); + err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); if (!err) continue; @@ -6184,6 +6380,9 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i]->desc) ixgbe_free_tx_resources(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + if (adapter->xdp_ring[i]->desc) + 
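A detail worth noting in the two-loop ixgbe_setup_all_tx_resources() above: j is initialized to 0 at declaration, so when the plain Tx loop fails before the XDP loop ever runs, the while (j--) rewind is a harmless no-op and only the regular rings are unwound. The shape of the idiom (a sketch with hypothetical setup()/release() helpers, for shape only):

        struct ring;
        int setup(struct ring *r);
        void release(struct ring *r);

        static int setup_all(struct ring **tx, int num_tx,
                             struct ring **xdp, int num_xdp)
        {
                int i, j = 0, err = 0;

                for (i = 0; i < num_tx; i++) {
                        err = setup(tx[i]);
                        if (err)
                                goto err_setup; /* j is still 0 here */
                }
                for (j = 0; j < num_xdp; j++) {
                        err = setup(xdp[j]);
                        if (err)
                                goto err_setup;
                }
                return 0;
        err_setup:
                while (j--)     /* undo only what the second loop completed */
                        release(xdp[j]);
                while (i--)
                        release(tx[i]);
                return err;
        }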
ixgbe_free_tx_resources(adapter->xdp_ring[i]); } /** @@ -6196,6 +6395,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) { ixgbe_clean_rx_ring(rx_ring); + rx_ring->xdp_prog = NULL; vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -6602,6 +6802,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } adapter->restart_queue = restart_queue; adapter->tx_busy = tx_busy; netdev->stats.tx_bytes = bytes; @@ -6795,6 +7003,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &(adapter->tx_ring[i]->state)); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &adapter->xdp_ring[i]->state); /* re-enable flow director interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); } else { @@ -6828,6 +7039,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) if (netif_carrier_ok(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); + for (i = 0; i < adapter->num_xdp_queues; i++) + set_check_for_tx_hang(adapter->xdp_ring[i]); } if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { @@ -7058,6 +7271,13 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) return true; } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = adapter->xdp_ring[i]; + + if (ring->next_to_use != ring->next_to_clean) + return true; + } + return false; } @@ -8015,6 +8235,62 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, #endif } +static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_buff *xdp) +{ + struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + u32 len, cmd_type; + dma_addr_t dma; + u16 i; + + len = xdp->data_end - xdp->data; + + if (unlikely(!ixgbe_desc_unused(ring))) + return IXGBE_XDP_CONSUMED; + + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) + return IXGBE_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = IXGBE_TX_DESC(ring, i); + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->data = xdp->data; + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + cmd_type |= len | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + + return IXGBE_XDP_TX; +} + netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff 
*skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -8306,6 +8582,23 @@ static void ixgbe_netpoll(struct net_device *netdev) #endif +static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, + struct ixgbe_ring *ring) +{ + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } +} + static void ixgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -8331,18 +8624,13 @@ static void ixgbe_get_stats64(struct net_device *netdev, for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); - u64 bytes, packets; - unsigned int start; - if (ring) { - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - stats->tx_packets += packets; - stats->tx_bytes += bytes; - } + ixgbe_get_ring_stats64(stats, ring); + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); + + ixgbe_get_ring_stats64(stats, ring); } rcu_read_unlock(); @@ -8948,7 +9236,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return ixgbe_setup_tc(dev, tc->tc); + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return ixgbe_setup_tc(dev, tc->mqprio->num_tc); } #ifdef CONFIG_PCI_IOV @@ -9459,6 +9749,68 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return -EINVAL; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) + return -EINVAL; + + /* verify ixgbe ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring_is_rsc_enabled(ring)) + return -EINVAL; + + if (frame_size > ixgbe_rx_bufsz(ring)) + return -EINVAL; + } + + if (nr_cpu_ids > MAX_XDP_QUEUES) + return -ENOMEM; + + old_prog = xchg(&adapter->xdp_prog, prog); + + /* If transitioning XDP modes reconfigure rings */ + if (!!prog != !!old_prog) { + int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); + + if (err) { + rcu_assign_pointer(adapter->xdp_prog, old_prog); + return -EINVAL; + } + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ixgbe_xdp_setup(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = !!(adapter->xdp_prog); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -9504,6 +9856,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, 
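The retry loop factored into ixgbe_get_ring_stats64() above pairs with the u64_stats_update_begin()/u64_stats_update_end() writers on the ring clean paths (one such writer appears earlier, in ixgbe_clean_rx_irq()). The general shape of that seqcount pairing, reduced to essentials (a sketch, not driver code):

        #include <linux/u64_stats_sync.h>

        struct ring_stats {
                u64 packets;
                u64 bytes;
                struct u64_stats_sync syncp;
        };

        /* writer: a single producer per ring, e.g. the clean path */
        static void ring_account(struct ring_stats *s, u64 pkts, u64 len)
        {
                u64_stats_update_begin(&s->syncp);
                s->packets += pkts;
                s->bytes += len;
                u64_stats_update_end(&s->syncp);
        }

        /* reader: loops until it sees a consistent snapshot; this matters
         * on 32-bit, where a 64-bit counter update is not atomic
         */
        static void ring_snapshot(struct ring_stats *s, u64 *pkts, u64 *len)
        {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        *pkts = s->packets;
                        *len = s->bytes;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));
        }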
.ndo_features_check = ixgbe_features_check, + .ndo_xdp = ixgbe_xdp, }; /** @@ -9808,7 +10161,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbe_init_mbx_params_pf(hw); hw->mbx.ops = ii->mbx_ops; pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); - ixgbe_enable_sriov(adapter); + ixgbe_enable_sriov(adapter, max_vfs); skip_sriov: #endif @@ -9934,6 +10287,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + for (i = 0; i < adapter->num_xdp_queues; i++) + u64_stats_init(&adapter->xdp_ring[i]->syncp); + /* WOL not supported for all devices */ adapter->wol = 0; hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); @@ -10059,6 +10415,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iounmap(adapter->io_addr); kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); + kfree(adapter->rss_key); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -10143,6 +10500,7 @@ static void ixgbe_remove(struct pci_dev *pdev) } kfree(adapter->mac_table); + kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e55b2602f37166d7eea6442fa3bc5797e97d470c..654a402f0e9e3b26989d5edc2de901a720e0a096 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -792,7 +792,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; /* Setup link based on the new speed settings */ - hw->phy.ops.setup_link(hw); + if (hw->phy.ops.setup_link) + hw->phy.ops.setup_link(hw); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 044cb44747cfe1909f1d5bbc399f1d7dabe80409..8baf298a85160c9d270e930083b5100c97f826c5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -46,88 +46,96 @@ #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV -static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, + unsigned int num_vfs) { struct ixgbe_hw *hw = &adapter->hw; - int num_vf_macvlans, i; struct vf_macvlans *mv_list; - - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; - e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); - - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; - if (!adapter->ring_feature[RING_F_VMDQ].limit) - adapter->ring_feature[RING_F_VMDQ].limit = 1; - adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + int num_vf_macvlans, i; num_vf_macvlans = hw->mac.num_rar_entries - - (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); + if (!num_vf_macvlans) + return; - adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, - sizeof(struct vf_macvlans), - GFP_KERNEL); + mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), + GFP_KERNEL); if (mv_list) { /* Initialize list of VF macvlans */ INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { - mv_list->vf = -1; - mv_list->free = true; - list_add(&mv_list->l, &adapter->vf_mvs.l); - mv_list++; + mv_list[i].vf = -1; + mv_list[i].free = true; + list_add(&mv_list[i].l, &adapter->vf_mvs.l); 
} + adapter->mv_list = mv_list; } +} + +static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, + unsigned int num_vfs) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + + /* Allocate memory for per VF control structures */ + adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!adapter->vfinfo) + return -ENOMEM; + + adapter->num_vfs = num_vfs; + + ixgbe_alloc_vf_macvlans(adapter, num_vfs); + adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; /* Initialize default switching mode VEB */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); adapter->bridge_mode = BRIDGE_MODE_VEB; - /* If call to enable VFs succeeded then allocate memory - * for per VF control structures. - */ - adapter->vfinfo = - kcalloc(adapter->num_vfs, - sizeof(struct vf_data_storage), GFP_KERNEL); - if (adapter->vfinfo) { - /* limit trafffic classes based on VFs enabled */ - if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && - (adapter->num_vfs < 16)) { - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - } else if (adapter->num_vfs < 32) { - adapter->dcb_cfg.num_tcs.pg_tcs = 4; - adapter->dcb_cfg.num_tcs.pfc_tcs = 4; - } else { - adapter->dcb_cfg.num_tcs.pg_tcs = 1; - adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - } - - /* Disable RSC when in SR-IOV mode */ - adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | - IXGBE_FLAG2_RSC_ENABLED); + /* limit trafffic classes based on VFs enabled */ + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) { + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + } else if (num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } - for (i = 0; i < adapter->num_vfs; i++) { - /* enable spoof checking for all VFs */ - adapter->vfinfo[i].spoofchk_enabled = true; + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | + IXGBE_FLAG2_RSC_ENABLED); - /* We support VF RSS querying only for 82599 and x540 - * devices at the moment. These devices share RSS - * indirection table and RSS hash key with PF therefore - * we want to disable the querying by default. - */ - adapter->vfinfo[i].rss_query_enabled = 0; + for (i = 0; i < num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; - /* Untrust all VFs */ - adapter->vfinfo[i].trusted = false; + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. 
+ */ + adapter->vfinfo[i].rss_query_enabled = 0; - /* set the default xcast mode */ - adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; - } + /* Untrust all VFs */ + adapter->vfinfo[i].trusted = false; - return 0; + /* set the default xcast mode */ + adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; } - return -ENOMEM; + e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs); + return 0; } /** @@ -165,12 +173,13 @@ static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) /* Note this function is called when the user wants to enable SR-IOV * VFs using the now deprecated module parameter */ -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) { int pre_existing_vfs = 0; + unsigned int num_vfs; pre_existing_vfs = pci_num_vf(adapter->pdev); - if (!pre_existing_vfs && !adapter->num_vfs) + if (!pre_existing_vfs && !max_vfs) return; /* If there are pre-existing VFs then we have to force @@ -180,7 +189,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * have been created via the new PCI SR-IOV sysfs interface. */ if (pre_existing_vfs) { - adapter->num_vfs = pre_existing_vfs; + num_vfs = pre_existing_vfs; dev_warn(&adapter->pdev->dev, "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); } else { @@ -192,17 +201,16 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * physical function. If the user requests greater than * 63 VFs then it is an error - reset to default of zero. */ - adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); + num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT); - err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) { e_err(probe, "Failed to enable PCI sriov: %d\n", err); - adapter->num_vfs = 0; return; } } - if (!__ixgbe_enable_sriov(adapter)) { + if (!__ixgbe_enable_sriov(adapter, num_vfs)) { ixgbe_get_vfs(adapter); return; } @@ -298,6 +306,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) #ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = pci_get_drvdata(dev); int err = 0; + u8 num_tc; int i; int pre_existing_vfs = pci_num_vf(dev); @@ -310,23 +319,41 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) return err; /* While the SR-IOV capability structure reports total VFs to be 64, - * we have to limit the actual number allocated based on two factors. + * we limit the actual number allocated as below based on two factors. + * Num_TCs MAX_VFs + * 1 63 + * <=4 31 + * >4 15 * First, we reserve some transmit/receive resources for the PF. * Second, VMDQ also uses the same pools that SR-IOV does. We need to * account for this, so that we don't accidentally allocate more VFs * than we have available pools. The PCI bus driver already checks for * other values out of range. 
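The ceiling table in the comment above reads naturally as a lookup; a hypothetical helper expressing the same policy (the IXGBE_MAX_VFS_*TC defines are added to ixgbe_sriov.h below, and each limit counts the PF's own pool alongside the VFs, which is why the checks compare num_vfs + num_rx_pools against it):

        static unsigned int pool_limit_for_tcs(unsigned int num_tc) /* hypothetical */
        {
                if (num_tc > 4)
                        return 16;      /* IXGBE_MAX_VFS_8TC: 15 VFs + PF pool */
                if (num_tc > 1)
                        return 32;      /* IXGBE_MAX_VFS_4TC: 31 VFs + PF pool */
                return 64;              /* IXGBE_MAX_VFS_1TC: 63 VFs + PF pool */
        }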
*/ - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS) - return -EPERM; + num_tc = netdev_get_num_tc(adapter->netdev); - adapter->num_vfs = num_vfs; + if (num_tc > 4) { + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) { + e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); + return -EPERM; + } + } else if ((num_tc > 1) && (num_tc <= 4)) { + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) { + e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); + return -EPERM; + } + } else { + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) { + e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); + return -EPERM; + } + } - err = __ixgbe_enable_sriov(adapter); + err = __ixgbe_enable_sriov(adapter, num_vfs); if (err) return err; - for (i = 0; i < adapter->num_vfs; i++) + for (i = 0; i < num_vfs; i++) ixgbe_vf_configuration(dev, (i | 0x10000000)); /* reset before enabling SRIOV to avoid mailbox issues */ @@ -650,58 +677,6 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) } } -static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; - u8 num_tcs = netdev_get_num_tc(adapter->netdev); - - /* remove VLAN filters beloning to this VF */ - ixgbe_clear_vf_vlans(adapter, vf); - - /* add back PF assigned VLAN or VLAN 0 */ - ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); - - /* reset offloads to defaults */ - ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); - - /* set outgoing tags for VFs */ - if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { - ixgbe_clear_vmvir(adapter, vf); - } else { - if (vfinfo->pf_qos || !num_tcs) - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - vfinfo->pf_qos, vf); - else - ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, - adapter->default_up, vf); - - if (vfinfo->spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - } - - /* reset multicast table array for vf */ - adapter->vfinfo[vf].num_vf_mc_hashes = 0; - - /* Flush and reset the mta with the new values */ - ixgbe_set_rx_mode(adapter->netdev); - - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - - /* reset VF api back to unknown */ - adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; -} - -static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, - int vf, unsigned char *mac_addr) -{ - ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); - - return 0; -} - static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { @@ -757,6 +732,59 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, return 0; } +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* remove VLAN filters beloning to this VF */ + ixgbe_clear_vf_vlans(adapter, vf); + + /* add back PF assigned VLAN or VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + 
ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ixgbe_set_rx_mode(adapter->netdev); + + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; +} + +static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + return 0; +} + int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); @@ -1085,7 +1113,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, return -EOPNOTSUPP; } - memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); return 0; } @@ -1319,18 +1347,26 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + + if (vf >= adapter->num_vfs) + return -EINVAL; + + if (is_zero_ether_addr(mac)) { + adapter->vfinfo[vf].pf_set_mac = false; + dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective."); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { return -EINVAL; - adapter->vfinfo[vf].pf_set_mac = true; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" - " change effective."); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, "Bring the PF device up before" - " attempting to use the VF device.\n"); } + return ixgbe_set_vf_mac(adapter, vf, mac); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 0c7977d27b710e6d57a566df6231c00826375bba..cf67b9b18ed7fa9b96a856d05ae35271b9ec8a17 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -33,6 +33,9 @@ * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) */ #define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) +#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VF_FUNCTIONS +#define IXGBE_MAX_VFS_4TC 
32 +#define IXGBE_MAX_VFS_8TC 16 #ifdef CONFIG_PCI_IOV void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); @@ -56,7 +59,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV -void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs); #endif int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 1d07f2ead914ccd361ed5084693e968114dd1514..9c2460c5ef1b1421b4729967942ab63dd0850f85 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -85,6 +85,7 @@ #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 #define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 #define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 @@ -1387,9 +1388,6 @@ struct ixgbe_thermal_sensor_data { #define ATH_PHY_ID 0x03429050 #define AQ_FW_REV 0x20 -/* PHY Types */ -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 - /* Special PHY Init Routine */ #define IXGBE_PHY_INIT_OFFSET_NL 0x002B #define IXGBE_PHY_INIT_END_NL 0xFFFF @@ -3128,7 +3126,9 @@ enum ixgbe_phy_type { ixgbe_phy_aq, ixgbe_phy_x550em_kr, ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_xfi, ixgbe_phy_x550em_ext_t, + ixgbe_phy_ext_1g_t, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, @@ -3754,15 +3754,6 @@ struct ixgbe_info { #define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) #define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) -#define IXGBE_KX4_LINK_CNTL_1 0x4C -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29) -#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31) - #define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 #define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 @@ -3779,12 +3770,14 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 #define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 -#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 -#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 -#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 #define IXGBE_NW_MNG_IF_SEL 0x00011178 #define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) +#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) #define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) #define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 84a467a8ed3d5f1aaab77b53dc158d3feba95c98..6ea0d6a5fb90c53df768d3532bd07691f8e48ca0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -95,6 +95,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; + u32 swfw_mask = 
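The new IXGBE_NW_MNG_IF_SEL_PHY_SPEED_* bits give firmware a way to advertise which speeds the managed PHY is wired for; code later in this series reads them off the latched hw->phy.nw_mng_if_sel (the 2.5G test in ixgbe_get_link_capabilities_X550em below does exactly this). The decoding pattern, schematically (a sketch, not a function from the patch):

        u32 sel = hw->phy.nw_mng_if_sel;
        u32 speeds = 0;

        if (sel & IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M)    /* BIT(17) */
                speeds |= IXGBE_LINK_SPEED_10_FULL;
        if (sel & IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M)   /* BIT(18) */
                speeds |= IXGBE_LINK_SPEED_100_FULL;
        if (sel & IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G)     /* BIT(19) */
                speeds |= IXGBE_LINK_SPEED_1GB_FULL;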
hw->phy.phy_semaphore_mask; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -105,10 +106,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) ixgbe_clear_tx_pending(hw); mac_reset_top: + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_dbg(hw, "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl = IXGBE_CTRL_RST; ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(1000, 1200); /* Poll for reset bit to self-clear indicating reset is complete */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 200f847fd8f31e58005ac72f3d90d4e97b11f96d..2ba024b575ea84f619d719e7bb25676e9534a478 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -49,6 +49,18 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) return 0; } +static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + /* Start with X540 invariants, since so similar */ + ixgbe_get_invariants_X540(hw); + + phy->ops.set_phy_power = NULL; + + return 0; +} + static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -320,6 +332,9 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->phy.type = ixgbe_phy_x550em_xfi; + break; case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: @@ -331,9 +346,21 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; /* Fallthrough */ - case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); + case IXGBE_DEV_ID_X550EM_X_1G_T: + hw->phy.type = ixgbe_phy_ext_1g_t; + break; + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + hw->phy.type = ixgbe_phy_fw; + hw->phy.ops.read_reg = NULL; + hw->phy.ops.write_reg = NULL; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + break; default: break; } @@ -2145,6 +2172,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) + break; mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; @@ -2215,8 +2244,39 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, else *speed = IXGBE_LINK_SPEED_10GB_FULL; } else { - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + *speed = IXGBE_LINK_SPEED_1GB_FULL | + IXGBE_LINK_SPEED_2_5GB_FULL | + IXGBE_LINK_SPEED_10GB_FULL; + break; + case ixgbe_phy_x550em_xfi: + *speed = IXGBE_LINK_SPEED_1GB_FULL | + IXGBE_LINK_SPEED_10GB_FULL; + break; + case ixgbe_phy_ext_1g_t: + case ixgbe_phy_sgmii: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case ixgbe_phy_x550em_kr: + if (hw->mac.type == ixgbe_mac_x550em_a) { + /* check different backplane modes */ + if (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { + *speed = 
IXGBE_LINK_SPEED_2_5GB_FULL; + break; + } else if (hw->device_id == + IXGBE_DEV_ID_X550EM_A_KR_L) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + /* fall through */ + default: + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + break; + } *autoneg = true; } return 0; @@ -2473,44 +2533,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, return ixgbe_restart_an_internal_phy_x550em(hw); } -/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. - * @hw: pointer to hardware structure - * - * Configures the integrated KX4 PHY. - **/ -static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) -{ - s32 status; - u32 reg_val; - - status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, ®_val); - if (status) - return status; - - reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | - IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); - - reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; - - /* Advertise 10G support. */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; - - /* Advertise 1G support. */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; - - /* Restart auto-negotiation. */ - reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; - status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, reg_val); - - return status; -} - /** * ixgbe_setup_kr_x550em - Configure the KR PHY * @hw: pointer to hardware structure @@ -2521,6 +2543,9 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) return 0; + if (ixgbe_check_reset_blocked(hw)) + return 0; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } @@ -3134,7 +3159,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* Set functions pointers based on phy type */ switch (hw->phy.type) { case ixgbe_phy_x550em_kx4: - phy->ops.setup_link = ixgbe_setup_kx4_x550em; + phy->ops.setup_link = NULL; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; @@ -3143,6 +3168,12 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; + case ixgbe_phy_x550em_xfi: + /* link is managed by HW */ + phy->ops.setup_link = NULL; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; case ixgbe_phy_x550em_ext_t: /* Save NW management interface connected on board. 
This is used * to determine internal PHY mode @@ -3164,10 +3195,18 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; + case ixgbe_phy_sgmii: + phy->ops.setup_link = NULL; + break; case ixgbe_phy_fw: phy->ops.setup_link = ixgbe_setup_fw_link; phy->ops.reset = ixgbe_reset_phy_fw; break; + case ixgbe_phy_ext_1g_t: + phy->ops.setup_link = NULL; + phy->ops.read_reg = NULL; + phy->ops.write_reg = NULL; + break; default: break; } @@ -3193,6 +3232,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_XFI: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: media_type = ixgbe_media_type_backplane; @@ -3300,6 +3340,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) u32 ctrl = 0; u32 i; bool link_up = false; + u32 swfw_mask = hw->phy.phy_semaphore_mask; /* Call adapter stop to disable Tx/Rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -3345,9 +3386,16 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) ctrl = IXGBE_CTRL_RST; } + status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (status) { + hw_dbg(hw, "semaphore failed with %d", status); + return IXGBE_ERR_SWFW_SYNC; + } + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); usleep_range(1000, 1200); /* Poll for reset bit to self-clear meaning reset is complete */ @@ -3780,7 +3828,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, - .setup_link = NULL, /* defined later */ + .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = ixgbe_get_link_capabilities_X550em, .get_bus_info = ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, @@ -3862,6 +3910,17 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = { .write_reg = &ixgbe_write_phy_reg_generic, }; +static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = { + X550_COMMON_PHY + .check_overtemp = NULL, + .init = ixgbe_init_phy_ops_X550em, + .identify = ixgbe_identify_phy_x550em, + .read_reg = NULL, + .write_reg = NULL, + .read_reg_mdi = NULL, + .write_reg_mdi = NULL, +}; + static const struct ixgbe_phy_operations phy_ops_x550em_a = { X550_COMMON_PHY .check_overtemp = &ixgbe_tn_check_overtemp, @@ -3924,6 +3983,16 @@ const struct ixgbe_info ixgbe_X550EM_x_info = { .link_ops = &link_ops_x550em_x, }; +const struct ixgbe_info ixgbe_x550em_x_fw_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = ixgbe_get_invariants_X550_x_fw, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_x_fw, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, +}; + const struct ixgbe_info ixgbe_x550em_a_info = { .mac = ixgbe_mac_x550em_a, .get_invariants = &ixgbe_get_invariants_X550_a, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 1f6c0ecd50bbbcf8e20fa9f883712003b055648f..ff9d05f308eef583cc3268e00bf7d089a07340d7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -80,7 +80,7 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { #define IXGBEVF_QUEUE_STATS_LEN ( \ (((struct ixgbevf_adapter 
*)netdev_priv(netdev))->num_tx_queues + \ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ - (sizeof(struct ixgbe_stats) / sizeof(u64))) + (sizeof(struct ixgbevf_stats) / sizeof(u64))) #define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) #define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) @@ -91,18 +91,18 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { #define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) -static int ixgbevf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int ixgbevf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 link_speed = 0; bool link_up; - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; - ecmd->port = -1; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = -1; hw->mac.get_link_status = 1; hw->mac.ops.check_link(hw, &link_speed, &link_up, false); @@ -122,11 +122,11 @@ static int ixgbevf_get_settings(struct net_device *netdev, break; } - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = DUPLEX_FULL; + cmd->base.speed = speed; + cmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } return 0; @@ -855,7 +855,8 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { if (key) - memcpy(key, adapter->rss_key, sizeof(adapter->rss_key)); + memcpy(key, adapter->rss_key, + ixgbevf_get_rxfh_key_size(netdev)); if (indir) { int i; @@ -885,7 +886,6 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, } static const struct ethtool_ops ixgbevf_ethtool_ops = { - .get_settings = ixgbevf_get_settings, .get_drvinfo = ixgbevf_get_drvinfo, .get_regs_len = ixgbevf_get_regs_len, .get_regs = ixgbevf_get_regs, @@ -905,6 +905,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, .get_rxfh = ixgbevf_get_rxfh, + .get_link_ksettings = ixgbevf_get_link_ksettings, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index a8cbc2dda0dd65456c082247693a009b20504d9b..581f44bbd7b3bca08dde63f1b6ec1418453e03b9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -319,7 +319,7 @@ struct ixgbevf_adapter { spinlock_t mbx_lock; unsigned long last_reset; - u32 rss_key[IXGBEVF_VFRSSRK_REGS]; + u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 80bab261a0ec778f09ee8437f5e9618c4c8a8372..eee29bddddc117569788c73e4a93f0d2b6b14bfc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1660,6 +1660,28 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, reg_idx); } +/** + * ixgbevf_init_rss_key - Initialize adapter RSS key + * @adapter: device 
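The ethtool conversion above is the stock get_settings to get_link_ksettings migration: speed and duplex move into cmd->base, and the old u32 supported mask becomes a link-mode bitmap manipulated through helpers instead of OR'd flags. The two helpers used here expand to roughly the following (per the linux/ethtool.h macro definitions):

        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);

        /* which is approximately:
         *
         * bitmap_zero(cmd->link_modes.supported,
         *             __ETHTOOL_LINK_MODE_MASK_NBITS);
         * __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
         *           cmd->link_modes.supported);
         */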
handle + * + * Allocates and initializes the RSS key if it is not allocated. + **/ +static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter) +{ + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +} + static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1668,9 +1690,8 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) u8 i, j; /* Fill out hash function seeds */ - netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) @@ -2611,6 +2632,12 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) hw->mbx.ops.init_params(hw); + if (hw->mac.type >= ixgbe_mac_X550_vf) { + err = ixgbevf_init_rss_key(adapter); + if (err) + goto out; + } + /* assume legacy case in which PF would only give VF 2 queues */ hw->mac.max_tx_queues = 2; hw->mac.max_rx_queues = 2; @@ -4127,6 +4154,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); + kfree(adapter->rss_key); err_ioremap: disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); @@ -4173,6 +4201,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) hw_dbg(&adapter->hw, "Remove complete\n"); + kfree(adapter->rss_key); disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 8a5db9d7219d14d9845215e6fa7e76036350d5db..b6d0c01eab10d1bfade260b3f4cc34de1e0622d9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -333,7 +333,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) switch (hw->api_version) { case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: - if (hw->mac.type >= ixgbe_mac_X550_vf) + if (hw->mac.type < ixgbe_mac_X550_vf) break; default: return -EOPNOTSUPP; @@ -399,7 +399,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) switch (hw->api_version) { case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: - if (hw->mac.type >= ixgbe_mac_X550_vf) + if (hw->mac.type < ixgbe_mac_X550_vf) break; default: return -EOPNOTSUPP; @@ -419,7 +419,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* If the operation has been refused by a PF return -EPERM */ - if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK)) return -EPERM; /* If we didn't get an ACK there must have been diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index d2555e8b947ee2a93053f4ac54191193c4a50529..da6fb825afeafd28a56909aa3baa4e97c3704d58 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -82,13 +82,13 @@ config MVNETA_BM that all dependencies are met. 
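Two quiet bugfixes ride along in the vf.c hunks above. First, the mac.type tests were inverted: the mailbox GET_RETA/GET_RSS_KEY operations exist for pre-X550 VFs, which share the PF's RSS table (as the PF-side comment in the ixgbe_sriov.c hunks earlier notes), so the supported case is mac.type < ixgbe_mac_X550_vf. Second, the RSS-key path was checking for a NACK of the wrong opcode, so a PF refusal could never be reported; the reply has to be matched against the opcode that was actually sent:

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
        /* match against IXGBE_VF_GET_RSS_KEY here, not IXGBE_VF_GET_RETA */
        if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
                return -EPERM;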
config MVPP2 - tristate "Marvell Armada 375 network interface support" + tristate "Marvell Armada 375/7K/8K network interface support" depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA select MVMDIO ---help--- This driver supports the network interface units in the - Marvell ARMADA 375 SoC. + Marvell ARMADA 375, 7K and 8K SoCs. config PXA168_ETH tristate "Marvell pxa168 ethernet support" diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index a0d1b084ecec9820aead869a281289a2d4249d9c..90a60b98c28efcd58bb7222ff30734c7619162bc 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -53,7 +53,7 @@ struct orion_mdio_dev { struct mutex lock; void __iomem *regs; - struct clk *clk; + struct clk *clk[3]; /* * If we have access to the error interrupt pin (which is * somewhat misnamed as it not only reflects internal errors @@ -187,7 +187,7 @@ static int orion_mdio_probe(struct platform_device *pdev) struct resource *r; struct mii_bus *bus; struct orion_mdio_dev *dev; - int ret; + int i, ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -216,11 +216,20 @@ static int orion_mdio_probe(struct platform_device *pdev) init_waitqueue_head(&dev->smi_busy_wait); - dev->clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(dev->clk)) - clk_prepare_enable(dev->clk); + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + dev->clk[i] = of_clk_get(pdev->dev.of_node, i); + if (IS_ERR(dev->clk[i])) + break; + clk_prepare_enable(dev->clk[i]); + } dev->err_interrupt = platform_get_irq(pdev, 0); + if (dev->err_interrupt > 0 && + resource_size(r) < MVMDIO_ERR_INT_MASK + 4) { + dev_err(&pdev->dev, + "disabling interrupt, resource size is too small\n"); + dev->err_interrupt = 0; + } if (dev->err_interrupt > 0) { ret = devm_request_irq(&pdev->dev, dev->err_interrupt, orion_mdio_err_irq, @@ -251,8 +260,16 @@ static int orion_mdio_probe(struct platform_device *pdev) return 0; out_mdio: - if (!IS_ERR(dev->clk)) - clk_disable_unprepare(dev->clk); + if (dev->err_interrupt > 0) + writel(0, dev->regs + MVMDIO_ERR_INT_MASK); + + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + if (IS_ERR(dev->clk[i])) + break; + clk_disable_unprepare(dev->clk[i]); + clk_put(dev->clk[i]); + } + return ret; } @@ -260,11 +277,18 @@ static int orion_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct orion_mdio_dev *dev = bus->priv; + int i; - writel(0, dev->regs + MVMDIO_ERR_INT_MASK); + if (dev->err_interrupt > 0) + writel(0, dev->regs + MVMDIO_ERR_INT_MASK); mdiobus_unregister(bus); - if (!IS_ERR(dev->clk)) - clk_disable_unprepare(dev->clk); + + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + if (IS_ERR(dev->clk[i])) + break; + clk_disable_unprepare(dev->clk[i]); + clk_put(dev->clk[i]); + } return 0; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 61dd4462411c03511d6121d75ca65dc36c7f688f..d297011b535dc812beba6765033a3349abffb0a9 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -431,6 +431,7 @@ struct mvneta_port { /* Flags for special SoC configurations */ bool neta_armada3700; u16 rx_offset_correction; + const struct mbus_dram_target_info *dram_target_info; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the @@ -1106,7 +1107,7 @@ static void mvneta_port_up(struct mvneta_port *pp) q_map = 0; for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; - if 
(txq->descs != NULL) + if (txq->descs) q_map |= (1 << queue); } mvreg_write(pp, MVNETA_TXQ_CMD, q_map); @@ -1115,7 +1116,7 @@ static void mvneta_port_up(struct mvneta_port *pp) for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; - if (rxq->descs != NULL) + if (rxq->descs) q_map |= (1 << queue); } mvreg_write(pp, MVNETA_RXQ_CMD, q_map); @@ -2849,7 +2850,7 @@ static int mvneta_rxq_init(struct mvneta_port *pp, rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, &rxq->descs_phys, GFP_KERNEL); - if (rxq->descs == NULL) + if (!rxq->descs) return -ENOMEM; rxq->last_desc = rxq->size - 1; @@ -2919,7 +2920,7 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, &txq->descs_phys, GFP_KERNEL); - if (txq->descs == NULL) + if (!txq->descs) return -ENOMEM; txq->last_desc = txq->size - 1; @@ -2932,8 +2933,9 @@ static int mvneta_txq_init(struct mvneta_port *pp, mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); - txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); - if (txq->tx_skb == NULL) { + txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), + GFP_KERNEL); + if (!txq->tx_skb) { dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -2944,7 +2946,7 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * TSO_HEADER_SIZE, &txq->tso_hdrs_phys, GFP_KERNEL); - if (txq->tso_hdrs == NULL) { + if (!txq->tso_hdrs) { kfree(txq->tx_skb); dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, @@ -3317,6 +3319,7 @@ static void mvneta_adjust_link(struct net_device *ndev) static int mvneta_mdio_probe(struct mvneta_port *pp) { struct phy_device *phy_dev; + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, pp->phy_interface); @@ -3325,6 +3328,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) return -ENODEV; } + phy_ethtool_get_wol(phy_dev, &wol); + device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->advertising = phy_dev->supported; @@ -3941,10 +3947,16 @@ static void mvneta_ethtool_get_wol(struct net_device *dev, static int mvneta_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { + int ret; + if (!dev->phydev) return -EOPNOTSUPP; - return phy_ethtool_set_wol(dev->phydev, wol); + ret = phy_ethtool_set_wol(dev->phydev, wol); + if (!ret) + device_set_wakeup_enable(&dev->dev, !!wol->wolopts); + + return ret; } static const struct net_device_ops mvneta_netdev_ops = { @@ -3991,8 +4003,7 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp) /* Set port default values */ mvneta_defaults_set(pp); - pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue), - GFP_KERNEL); + pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); if (!pp->txqs) return -ENOMEM; @@ -4004,8 +4015,7 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp) txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; } - pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue), - GFP_KERNEL); + pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); if (!pp->rxqs) return -ENOMEM; @@ -4016,9 +4026,11 @@ 
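The mvneta WoL changes above wire PHY-level Wake-on-LAN into the driver-model wakeup machinery, and the two calls are deliberately split: device_set_wakeup_capable() at mdio-probe time advertises the capability (it is what makes the sysfs power/wakeup attribute appear at all), while device_set_wakeup_enable() records the user's ethtool opt-in. The pairing, reduced (sketch; wol_req stands for the ethtool request and error handling is trimmed):

        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

        /* probe: expose wakeup support if the PHY offers any WoL mode */
        phy_ethtool_get_wol(phy_dev, &wol);
        device_set_wakeup_capable(&ndev->dev, !!wol.supported);

        /* ethtool -s ethX wol g: record the opt-in only on success */
        ret = phy_ethtool_set_wol(phy_dev, wol_req);
        if (!ret)
                device_set_wakeup_enable(&ndev->dev, !!wol_req->wolopts);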
static int mvneta_init(struct device *dev, struct mvneta_port *pp) rxq->size = pp->rx_ring_size; rxq->pkts_coal = MVNETA_RX_COAL_PKTS; rxq->time_coal = MVNETA_RX_COAL_USEC; - rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent, - rxq->size * sizeof(void *), - GFP_KERNEL); + rxq->buf_virt_addr + = devm_kmalloc_array(pp->dev->dev.parent, + rxq->size, + sizeof(*rxq->buf_virt_addr), + GFP_KERNEL); if (!rxq->buf_virt_addr) return -ENOMEM; } @@ -4098,6 +4110,8 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: ctrl |= MVNETA_GMAC2_PORT_RGMII; break; default: @@ -4118,7 +4132,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* Device initialization routine */ static int mvneta_probe(struct platform_device *pdev) { - const struct mbus_dram_target_info *dram_target_info; struct resource *res; struct device_node *dn = pdev->dev.of_node; struct device_node *phy_node; @@ -4267,13 +4280,13 @@ static int mvneta_probe(struct platform_device *pdev) pp->tx_csum_limit = tx_csum_limit; - dram_target_info = mv_mbus_dram_info(); + pp->dram_target_info = mv_mbus_dram_info(); /* Armada3700 requires setting default configuration of Mbus * windows, however without using filled mbus_dram_target_info * structure. */ - if (dram_target_info || pp->neta_armada3700) - mvneta_conf_mbus_windows(pp, dram_target_info); + if (pp->dram_target_info || pp->neta_armada3700) + mvneta_conf_mbus_windows(pp, pp->dram_target_info); pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; @@ -4405,6 +4418,61 @@ static int mvneta_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int mvneta_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct mvneta_port *pp = netdev_priv(dev); + + if (netif_running(dev)) + mvneta_stop(dev); + netif_device_detach(dev); + clk_disable_unprepare(pp->clk_bus); + clk_disable_unprepare(pp->clk); + return 0; +} + +static int mvneta_resume(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct net_device *dev = dev_get_drvdata(device); + struct mvneta_port *pp = netdev_priv(dev); + int err; + + clk_prepare_enable(pp->clk); + if (!IS_ERR(pp->clk_bus)) + clk_prepare_enable(pp->clk_bus); + if (pp->dram_target_info || pp->neta_armada3700) + mvneta_conf_mbus_windows(pp, pp->dram_target_info); + if (pp->bm_priv) { + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + dev_info(&pdev->dev, "use SW buffer management\n"); + pp->bm_priv = NULL; + } + } + mvneta_defaults_set(pp); + err = mvneta_port_power_up(pp, pp->phy_interface); + if (err < 0) { + dev_err(device, "can't power up port\n"); + return err; + } + + if (pp->use_inband_status) + mvneta_fixed_link_update(pp, dev->phydev); + + netif_device_attach(dev); + if (netif_running(dev)) { + mvneta_open(dev); + mvneta_set_rx_mode(dev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); + static const struct of_device_id mvneta_match[] = { { .compatible = "marvell,armada-370-neta" }, { .compatible = "marvell,armada-xp-neta" }, @@ -4419,6 +4487,7 @@ static struct platform_driver mvneta_driver = { .driver = { .name = MVNETA_DRIVER_NAME, .of_match_table = mvneta_match, + .pm = &mvneta_pm_ops, }, }; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 
d00421b9ffea7c0569417e4ee3814469532802c8..9b875d776b296d165c4ae3a994bd527a8dafb698 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -49,9 +50,11 @@ #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) #define MVPP2_RXQ_POOL_SHORT_OFFS 20 -#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 #define MVPP2_RXQ_POOL_LONG_OFFS 24 -#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 #define MVPP2_RXQ_DISABLE_MASK BIT(31) @@ -99,6 +102,7 @@ /* Descriptor Manager Top Registers */ #define MVPP2_RXQ_NUM_REG 0x2040 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP22_DESC_ADDR_OFFS 8 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) @@ -117,9 +121,6 @@ #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 -#define MVPP2_TXQ_THRESH_REG 0x2094 -#define MVPP2_TRANSMITTED_THRESH_OFFSET 16 -#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000 #define MVPP2_TXQ_INDEX_REG 0x2098 #define MVPP2_TXQ_PREF_BUF_REG 0x209c #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) @@ -140,6 +141,7 @@ #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) @@ -152,10 +154,49 @@ #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) #define MVPP2_BASE_ADDR_ENABLE 0x4060 +/* AXI Bridge Registers */ +#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 +#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 +#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 +#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 +#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 +#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c +#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 +#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 +#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 +#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 +#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 +#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 + +/* Values for AXI Bridge registers */ +#define MVPP22_AXI_ATTR_CACHE_OFFS 0 +#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 + +#define MVPP22_AXI_CODE_CACHE_OFFS 0 +#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 + +#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 +#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 +#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb + +#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 +#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 + /* Interrupt Cause and Mask registers */ #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) +#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) + +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 +
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 + #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) @@ -210,14 +254,19 @@ #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) #define MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 +#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) #define MVPP2_BM_VIRT_RLS_REG 0x64c0 -#define MVPP2_BM_MC_RLS_REG 0x64c4 -#define MVPP2_BM_MC_ID_MASK 0xfff -#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) +#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 +#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 /* TX Scheduler registers */ #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 @@ -287,6 +336,24 @@ #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP22_GMAC_CTRL_4_REG 0x90 +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) + +/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, + * relative to port->base. + */ +#define MVPP22_XLG_CTRL3_REG 0x11c +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) + +/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ +#define MVPP22_SMI_MISC_CFG_REG 0x1204 +#define MVPP22_SMI_POLLING_EN BIT(10) + +#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff @@ -335,15 +402,9 @@ /* Maximum number of TXQs used by single port */ #define MVPP2_MAX_TXQ 8 -/* Maximum number of RXQs used by single port */ -#define MVPP2_MAX_RXQ 8 - /* Dfault number of RXQs in use */ #define MVPP2_DEFAULT_RXQ 4 -/* Total number of RXQs available to all ports */ -#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ) - /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD 128 @@ -615,6 +676,11 @@ enum mvpp2_prs_l3_cast { */ #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) +#define MVPP21_ADDR_SPACE_SZ 0 +#define MVPP22_ADDR_SPACE_SZ SZ_64K + +#define MVPP2_MAX_CPUS 4 + enum mvpp2_bm_type { MVPP2_BM_FREE, MVPP2_BM_SWF_LONG, @@ -626,12 +692,19 @@ enum mvpp2_bm_type { /* Shared Packet Processor resources */ struct mvpp2 { /* Shared registers' base addresses */ - void __iomem *base; void __iomem *lms_base; + void __iomem *iface_base; + + /* On PPv2.2, each CPU can access the base register through a + * separate address space, each 64 KB apart from each + * other. 
+ */ + void __iomem *cpu_base[MVPP2_MAX_CPUS]; /* Common clocks */ struct clk *pp_clk; struct clk *gop_clk; + struct clk *mg_clk; /* List of pointers to port structures */ struct mvpp2_port **port_list; @@ -649,6 +722,12 @@ struct mvpp2 { /* Tclk value */ u32 tclk; + + /* HW version */ + enum { MVPP21, MVPP22 } hw_version; + + /* Maximum number of RXQs per port */ + unsigned int max_port_rxqs; }; struct mvpp2_pcpu_stats { @@ -670,6 +749,11 @@ struct mvpp2_port_pcpu { struct mvpp2_port { u8 id; + /* Index of the port from the "group of ports" complex point + * of view + */ + int gop_id; + int irq; struct mvpp2 *priv; @@ -741,22 +825,24 @@ struct mvpp2_port { #define MVPP2_RXD_L3_IP6 BIT(30) #define MVPP2_RXD_BUF_HDR BIT(31) -struct mvpp2_tx_desc { +/* HW TX descriptor for PPv2.1 */ +struct mvpp21_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_phys_addr; /* physical addr of transmitted buffer */ + u32 buf_dma_addr; /* physical addr of transmitted buffer */ u32 buf_cookie; /* cookie for access to TX buffer in tx path */ u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ u32 reserved2; /* reserved (for future use) */ }; -struct mvpp2_rx_desc { +/* HW RX descriptor for PPv2.1 */ +struct mvpp21_rx_desc { u32 status; /* info about received packet */ u16 reserved1; /* parser_info (for future use, PnC) */ u16 data_size; /* size of received packet in bytes */ - u32 buf_phys_addr; /* physical address of the buffer */ + u32 buf_dma_addr; /* physical address of the buffer */ u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved2; /* gem_port_id (for future use, PON) */ u16 reserved3; /* csum_l4 (for future use, PnC) */ @@ -767,12 +853,51 @@ struct mvpp2_rx_desc { u32 reserved8; }; +/* HW TX descriptor for PPv2.2 */ +struct mvpp22_tx_desc { + u32 command; + u8 packet_offset; + u8 phys_txq; + u16 data_size; + u64 reserved1; + u64 buf_dma_addr_ptp; + u64 buf_cookie_misc; +}; + +/* HW RX descriptor for PPv2.2 */ +struct mvpp22_rx_desc { + u32 status; + u16 reserved1; + u16 data_size; + u32 reserved2; + u32 reserved3; + u64 buf_dma_addr_key_hash; + u64 buf_cookie_misc; +}; + +/* Opaque type used by the driver to manipulate the HW TX and RX + * descriptors + */ +struct mvpp2_tx_desc { + union { + struct mvpp21_tx_desc pp21; + struct mvpp22_tx_desc pp22; + }; +}; + +struct mvpp2_rx_desc { + union { + struct mvpp21_rx_desc pp21; + struct mvpp22_rx_desc pp22; + }; +}; + struct mvpp2_txq_pcpu_buf { /* Transmitted SKB */ struct sk_buff *skb; /* Physical address of transmitted buffer */ - dma_addr_t phys; + dma_addr_t dma; /* Size transmitted */ size_t size; @@ -825,7 +950,7 @@ struct mvpp2_tx_queue { struct mvpp2_tx_desc *descs; /* DMA address of the Tx DMA descriptors array */ - dma_addr_t descs_phys; + dma_addr_t descs_dma; /* Index of the last Tx DMA descriptor */ int last_desc; @@ -848,7 +973,7 @@ struct mvpp2_rx_queue { struct mvpp2_rx_desc *descs; /* DMA address of the RX DMA descriptors array */ - dma_addr_t descs_phys; + dma_addr_t descs_dma; /* Index of the last RX DMA descriptor */ int last_desc; @@ -912,6 +1037,8 @@ struct mvpp2_bm_pool { /* Buffer Pointers Pool External (BPPE) size */ int size; + /* BPPE size in bytes */ + int size_bytes; /* Number of buffers for this pool */ int buf_num; /* Pool buffer size */ @@ -922,29 +1049,13 @@ struct mvpp2_bm_pool { /* BPPE virtual 
base address */ u32 *virt_addr; - /* BPPE physical base address */ - dma_addr_t phys_addr; + /* BPPE DMA base address */ + dma_addr_t dma_addr; /* Ports using BM pool */ u32 port_map; }; -struct mvpp2_buff_hdr { - u32 next_buff_phys_addr; - u32 next_buff_virt_addr; - u16 byte_count; - u16 info; - u8 reserved1; /* bm_qset (for future use, BM) */ -}; - -/* Buffer header info bits */ -#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff -#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) -#define MVPP2_B_HDR_INFO_LAST_OFFS 12 -#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) -#define MVPP2_B_HDR_INFO_IS_LAST(info) \ - ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) - /* Static declaractions */ /* Number of RXQs used by single port */ @@ -959,12 +1070,177 @@ static int txq_number = MVPP2_MAX_TXQ; static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { - writel(data, priv->base + offset); + writel(data, priv->cpu_base[0] + offset); } static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { - return readl(priv->base + offset); + return readl(priv->cpu_base[0] + offset); +} + +/* These accessors should be used to access: + * + * - per-CPU registers, where each CPU has its own copy of the + * register. + * + * MVPP2_BM_VIRT_ALLOC_REG + * MVPP2_BM_ADDR_HIGH_ALLOC + * MVPP22_BM_ADDR_HIGH_RLS_REG + * MVPP2_BM_VIRT_RLS_REG + * MVPP2_ISR_RX_TX_CAUSE_REG + * MVPP2_ISR_RX_TX_MASK_REG + * MVPP2_TXQ_NUM_REG + * MVPP2_AGGR_TXQ_UPDATE_REG + * MVPP2_TXQ_RSVD_REQ_REG + * MVPP2_TXQ_RSVD_RSLT_REG + * MVPP2_TXQ_SENT_REG + * MVPP2_RXQ_NUM_REG + * + * - global registers that must be accessed through a specific CPU + * window, because they are related to an access to a per-CPU + * register + * + * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) + * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) + * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + */ +static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel(data, priv->cpu_base[cpu] + offset); +} + +static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl(priv->cpu_base[cpu] + offset); +} + +static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.buf_dma_addr; + else + return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); +} + +static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + dma_addr_t dma_addr) +{ + if (port->priv->hw_version == MVPP21) { + tx_desc->pp21.buf_dma_addr = dma_addr; + } else { + u64 val = (u64)dma_addr; + + tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); + tx_desc->pp22.buf_dma_addr_ptp |= val; + } +} + +static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.data_size; + else + 
return tx_desc->pp22.data_size; +} + +static void mvpp2_txdesc_size_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + size_t size) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.data_size = size; + else + tx_desc->pp22.data_size = size; +} + +static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int txq) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.phys_txq = txq; + else + tx_desc->pp22.phys_txq = txq; +} + +static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int command) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.command = command; + else + tx_desc->pp22.command = command; +} + +static void mvpp2_txdesc_offset_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int offset) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.packet_offset = offset; + else + tx_desc->pp22.packet_offset = offset; +} + +static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.packet_offset; + else + return tx_desc->pp22.packet_offset; +} + +static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_dma_addr; + else + return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); +} + +static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_cookie; + else + return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); +} + +static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.data_size; + else + return rx_desc->pp22.data_size; +} + +static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.status; + else + return rx_desc->pp22.status; } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) @@ -974,15 +1250,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) txq_pcpu->txq_get_index = 0; } -static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, +static void mvpp2_txq_inc_put(struct mvpp2_port *port, + struct mvpp2_txq_pcpu *txq_pcpu, struct sk_buff *skb, struct mvpp2_tx_desc *tx_desc) { struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_put_index; tx_buf->skb = skb; - tx_buf->size = tx_desc->data_size; - tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset; + tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); + tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + + mvpp2_txdesc_offset_get(port, tx_desc); txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; @@ -1411,7 +1689,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); pe.index = MVPP2_PE_DROP_ALL; @@ -1448,7 +1726,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ - memset(&pe, 0, 
sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); pe.index = MVPP2_PE_MAC_PROMISCUOUS; @@ -1494,7 +1772,7 @@ static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); pe.index = index; @@ -1546,7 +1824,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; @@ -1609,7 +1887,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; @@ -1743,10 +2021,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, if (tid <= tid_aux) { ret = -EINVAL; - goto error; + goto free_pe; } - memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); + memset(pe, 0, sizeof(*pe)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); pe->index = tid; @@ -1775,8 +2053,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); - -error: +free_pe: kfree(pe); return ret; @@ -1861,7 +2138,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, ai = mvpp2_prs_double_vlan_ai_free_get(priv); if (ai < 0) { ret = ai; - goto error; + goto free_pe; } /* Get first single/triple vlan tid */ @@ -1884,10 +2161,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, if (tid >= tid_aux) { ret = -ERANGE; - goto error; + goto free_pe; } - memset(pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(pe, 0, sizeof(*pe)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); pe->index = tid; @@ -1911,8 +2188,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); - -error: +free_pe: kfree(pe); return ret; } @@ -1934,7 +2210,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = tid; @@ -1992,7 +2268,7 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = tid; @@ -2048,7 +2324,7 @@ static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; @@ -2087,7 +2363,7 @@ static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; @@ -2147,7 +2423,7 @@ static void mvpp2_prs_def_flow_init(struct mvpp2 
*priv) int port; for (port = 0; port < MVPP2_MAX_PORTS; port++) { - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; @@ -2169,7 +2445,7 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); pe.index = MVPP2_PE_MH_DEFAULT; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); @@ -2192,7 +2468,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); /* Non-promiscuous mode for all ports - DROP unknown packets */ pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; @@ -2253,7 +2529,7 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); /* Set default entry, in case DSA or EDSA tag not found */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = MVPP2_PE_DSA_DEFAULT; mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); @@ -2283,7 +2559,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; @@ -2309,7 +2585,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; @@ -2339,7 +2615,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; @@ -2373,7 +2649,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; @@ -2438,7 +2714,7 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; @@ -2535,7 +2811,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) return err; /* Set default double vlan entry */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_DBL; @@ -2555,7 +2831,7 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_hw_write(priv, &pe); /* Set default vlan none entry */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_NONE; @@ -2585,7 +2861,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); pe.index = tid; @@ -2635,7 +2911,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); pe.index = tid; @@ -2662,7 +2938,7 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 
*priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); pe.index = tid; @@ -2720,7 +2996,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv) return err; /* Default IPv4 entry for unknown protocols */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = MVPP2_PE_IP4_PROTO_UN; @@ -2745,7 +3021,7 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv) mvpp2_prs_hw_write(priv, &pe); /* Default IPv4 entry for unicast address */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = MVPP2_PE_IP4_ADDR_UN; @@ -2813,7 +3089,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) if (tid < 0) return tid; - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; @@ -2834,7 +3110,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) mvpp2_prs_hw_write(priv, &pe); /* Default IPv6 entry for unknown protocols */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = MVPP2_PE_IP6_PROTO_UN; @@ -2927,7 +3203,7 @@ static int mvpp2_prs_default_init(struct platform_device *pdev, mvpp2_prs_hw_inv(priv, index); priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(struct mvpp2_prs_shadow), + sizeof(*priv->prs_shadow), GFP_KERNEL); if (!priv->prs_shadow) return -ENOMEM; @@ -3378,27 +3654,39 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int size) { - int size_bytes; u32 val; - size_bytes = sizeof(u32) * size; - bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, - &bm_pool->phys_addr, + /* Number of buffer pointers must be a multiple of 16, as per + * hardware constraints + */ + if (!IS_ALIGNED(size, 16)) + return -EINVAL; + + /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 + * bytes per buffer pointer + */ + if (priv->hw_version == MVPP21) + bm_pool->size_bytes = 2 * sizeof(u32) * size; + else + bm_pool->size_bytes = 2 * sizeof(u64) * size; + + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, + &bm_pool->dma_addr, GFP_KERNEL); if (!bm_pool->virt_addr) return -ENOMEM; if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { - dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, - bm_pool->phys_addr); + dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + bm_pool->virt_addr, bm_pool->dma_addr); dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); return -ENOMEM; } mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), - bm_pool->phys_addr); + lower_32_bits(bm_pool->dma_addr)); mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); @@ -3426,6 +3714,34 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); } +static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *dma_addr, + phys_addr_t *phys_addr) +{ + int cpu = smp_processor_id(); + + *dma_addr = mvpp2_percpu_read(priv, cpu, + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + *phys_addr = mvpp2_percpu_read(priv, cpu, 
MVPP2_BM_VIRT_ALLOC_REG); + + if (priv->hw_version == MVPP22) { + u32 val; + u32 dma_addr_highbits, phys_addr_highbits; + + val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); + phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> + MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; + + if (sizeof(dma_addr_t) == 8) + *dma_addr |= (u64)dma_addr_highbits << 32; + + if (sizeof(phys_addr_t) == 8) + *phys_addr |= (u64)phys_addr_highbits << 32; + } +} + /* Free all buffers from the pool */ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) @@ -3433,21 +3749,21 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, int i; for (i = 0; i < bm_pool->buf_num; i++) { - dma_addr_t buf_phys_addr; - unsigned long vaddr; + dma_addr_t buf_dma_addr; + phys_addr_t buf_phys_addr; + void *data; - /* Get buffer virtual address (indirect access) */ - buf_phys_addr = mvpp2_read(priv, - MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); + mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, + &buf_dma_addr, &buf_phys_addr); - dma_unmap_single(dev, buf_phys_addr, + dma_unmap_single(dev, buf_dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE); - if (!vaddr) + data = (void *)phys_to_virt(buf_phys_addr); + if (!data) break; - mvpp2_frag_free(bm_pool, (void *)vaddr); + mvpp2_frag_free(bm_pool, data); } /* Update BM driver with number of buffers removed from pool */ @@ -3471,9 +3787,9 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev, val |= MVPP2_BM_STOP_MASK; mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size, + dma_free_coherent(&pdev->dev, bm_pool->size_bytes, bm_pool->virt_addr, - bm_pool->phys_addr); + bm_pool->dma_addr); return 0; } @@ -3515,7 +3831,7 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) /* Allocate and initialize BM pools */ priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, - sizeof(struct mvpp2_bm_pool), GFP_KERNEL); + sizeof(*priv->bm_pools), GFP_KERNEL); if (!priv->bm_pools) return -ENOMEM; @@ -3529,17 +3845,20 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int long_pool) { - u32 val; + u32 val, mask; int prxq; /* Get queue physical ID */ prxq = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~MVPP2_RXQ_POOL_LONG_MASK; - val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & - MVPP2_RXQ_POOL_LONG_MASK); + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_LONG_MASK; + else + mask = MVPP22_RXQ_POOL_LONG_MASK; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } @@ -3547,40 +3866,45 @@ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int short_pool) { - u32 val; + u32 val, mask; int prxq; /* Get queue physical ID */ prxq = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~MVPP2_RXQ_POOL_SHORT_MASK; - val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & - MVPP2_RXQ_POOL_SHORT_MASK); + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_SHORT_MASK; + else + mask = MVPP22_RXQ_POOL_SHORT_MASK; + val = 
mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } static void *mvpp2_buf_alloc(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, - dma_addr_t *buf_phys_addr, + dma_addr_t *buf_dma_addr, + phys_addr_t *buf_phys_addr, gfp_t gfp_mask) { - dma_addr_t phys_addr; + dma_addr_t dma_addr; void *data; data = mvpp2_frag_alloc(bm_pool); if (!data) return NULL; - phys_addr = dma_map_single(port->dev->dev.parent, data, - MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) { + dma_addr = dma_map_single(port->dev->dev.parent, data, + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { mvpp2_frag_free(bm_pool, data); return NULL; } - *buf_phys_addr = phys_addr; + *buf_dma_addr = dma_addr; + *buf_phys_addr = virt_to_phys(data); return data; } @@ -3604,37 +3928,46 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, - dma_addr_t buf_phys_addr, - unsigned long buf_virt_addr) + dma_addr_t buf_dma_addr, + phys_addr_t buf_phys_addr) { - mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr); - mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr); -} + int cpu = smp_processor_id(); -/* Release multicast buffer */ -static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, - dma_addr_t buf_phys_addr, - unsigned long buf_virt_addr, - int mc_id) -{ - u32 val = 0; + if (port->priv->hw_version == MVPP22) { + u32 val = 0; + + if (sizeof(dma_addr_t) == 8) + val |= upper_32_bits(buf_dma_addr) & + MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; - val |= (mc_id & MVPP2_BM_MC_ID_MASK); - mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val); + if (sizeof(phys_addr_t) == 8) + val |= (upper_32_bits(buf_phys_addr) + << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & + MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; - mvpp2_bm_pool_put(port, pool, - buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK, - buf_virt_addr); + mvpp2_percpu_write(port->priv, cpu, + MVPP22_BM_ADDR_HIGH_RLS_REG, val); + } + + /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply + * returned in the "cookie" field of the RX + * descriptor. 
Instead of storing the virtual address, we + * store the physical address + */ + mvpp2_percpu_write(port->priv, cpu, + MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); + mvpp2_percpu_write(port->priv, cpu, + MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); } /* Refill BM pool */ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, - dma_addr_t phys_addr, - unsigned long cookie) + dma_addr_t dma_addr, + phys_addr_t phys_addr) { int pool = mvpp2_bm_cookie_pool_get(bm); - mvpp2_bm_pool_put(port, pool, phys_addr, cookie); + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } /* Allocate buffers for the pool */ @@ -3642,7 +3975,8 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, int buf_num) { int i, buf_size, total_size; - dma_addr_t phys_addr; + dma_addr_t dma_addr; + phys_addr_t phys_addr; void *buf; buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); @@ -3657,12 +3991,13 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, } for (i = 0; i < buf_num; i++) { - buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); + buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, + &phys_addr, GFP_KERNEL); if (!buf) break; - mvpp2_bm_pool_put(port, bm_pool->id, phys_addr, - (unsigned long)buf); + mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, + phys_addr); } /* Update BM driver with number of buffers added to pool */ @@ -3830,7 +4165,8 @@ static void mvpp2_interrupts_mask(void *arg) { struct mvpp2_port *port = arg; - mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); } /* Unmask the current CPU's Rx/Tx interrupts */ @@ -3838,17 +4174,44 @@ static void mvpp2_interrupts_unmask(void *arg) { struct mvpp2_port *port = arg; - mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), - (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_ISR_RX_TX_MASK_REG(port->id), + (MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); } /* Port configuration routines */ +static void mvpp22_port_mii_set(struct mvpp2_port *port) +{ + u32 val; + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_CTRL3_REG); + val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + writel(val, port->base + MVPP22_XLG_CTRL3_REG); + } + + val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + if (port->phy_interface == PHY_INTERFACE_MODE_RGMII) + val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL; + else + val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + val &= ~MVPP22_CTRL4_DP_CLK_SEL; + val |= MVPP22_CTRL4_SYNC_BYPASS; + val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); +} + static void mvpp2_port_mii_set(struct mvpp2_port *port) { u32 val; + if (port->priv->hw_version == MVPP22) + mvpp22_port_mii_set(port); + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); switch (port->phy_interface) { @@ -3952,16 +4317,18 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) { int tx_port_num, val, queue, ptxq, lrxq; - /* Configure port to loopback if needed */ - if (port->flags & MVPP2_F_LOOPBACK) - mvpp2_port_loopback_set(port); + if (port->priv->hw_version == MVPP21) { + /* Configure port to loopback if needed */ + if (port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port); - /* Update TX FIFO MIN Threshold */ - val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); - val &= 
~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; - /* Min. TX threshold must be less than minimal packet length */ - val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); - writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + /* Update TX FIFO MIN Threshold */ + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min. TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + } /* Disable Legacy WRR, Disable EJP, Release from reset */ tx_port_num = mvpp2_egress_port(port); @@ -4048,7 +4415,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port) for (queue = 0; queue < txq_number; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; - if (txq->descs != NULL) + if (txq->descs) qmap |= (1 << queue); } @@ -4149,11 +4516,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, } /* Obtain BM cookie information from descriptor */ -static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc) +static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) { - int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; int cpu = smp_processor_id(); + int pool; + + pool = (mvpp2_rxdesc_status_get(port, rx_desc) & + MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); @@ -4161,18 +4532,6 @@ static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc) /* Tx descriptors helper methods */ -/* Get number of Tx descriptors waiting to be transmitted by HW */ -static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - u32 val; - - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); - - return val & MVPP2_TXQ_PENDING_MASK; -} - /* Get pointer to next Tx descriptor to be processed (send) by HW */ static struct mvpp2_tx_desc * mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) @@ -4187,7 +4546,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_AGGR_TXQ_UPDATE_REG, pending); } @@ -4216,11 +4576,12 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, struct mvpp2_tx_queue *txq, int num) { u32 val; + int cpu = smp_processor_id(); val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); - val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@ -4321,7 +4682,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, u32 val; /* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); + val = mvpp2_percpu_read(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(txq->id)); return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> MVPP2_TRANSMITTED_COUNT_OFFSET; @@ -4335,7 +4697,8 @@ static void mvpp2_txq_sent_counter_clear(void *arg) for (queue = 0; queue < txq_number; queue++) { int id = port->txqs[queue]->id; - 
mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); + mvpp2_percpu_read(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(id)); } } @@ -4394,12 +4757,14 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { + int cpu = smp_processor_id(); + if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, - rxq->pkts_coal); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + rxq->pkts_coal); } static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) @@ -4449,7 +4814,7 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_get_index; - dma_unmap_single(port->dev->dev.parent, tx_buf->phys, + dma_unmap_single(port->dev->dev.parent, tx_buf->dma, tx_buf->size, DMA_TO_DEVICE); if (tx_buf->skb) dev_kfree_skb_any(tx_buf->skb); @@ -4527,10 +4892,12 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, int desc_num, int cpu, struct mvpp2 *priv) { + u32 txq_dma; + /* Allocate memory for TX descriptors */ aggr_txq->descs = dma_alloc_coherent(&pdev->dev, desc_num * MVPP2_DESC_ALIGNED_SIZE, - &aggr_txq->descs_phys, GFP_KERNEL); + &aggr_txq->descs_dma, GFP_KERNEL); if (!aggr_txq->descs) return -ENOMEM; @@ -4540,10 +4907,16 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, aggr_txq->next_desc_to_proc = mvpp2_read(priv, MVPP2_AGGR_TXQ_INDEX_REG(cpu)); - /* Set Tx descriptors queue starting address */ - /* indirect access */ - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), - aggr_txq->descs_phys); + /* Set Tx descriptors queue starting address indirect + * access + */ + if (priv->hw_version == MVPP21) + txq_dma = aggr_txq->descs_dma; + else + txq_dma = aggr_txq->descs_dma >> + MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; + + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); return 0; @@ -4554,12 +4927,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { + u32 rxq_dma; + int cpu; + rxq->size = port->rx_ring_size; /* Allocate memory for RX descriptors */ rxq->descs = dma_alloc_coherent(port->dev->dev.parent, rxq->size * MVPP2_DESC_ALIGNED_SIZE, - &rxq->descs_phys, GFP_KERNEL); + &rxq->descs_dma, GFP_KERNEL); if (!rxq->descs) return -ENOMEM; @@ -4569,10 +4945,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); /* Set Rx descriptors queue starting address - indirect access */ - mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys); - mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); + cpu = smp_processor_id(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + if (port->priv->hw_version == MVPP21) + rxq_dma = rxq->descs_dma; + else + rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); /* Set Offset */ mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); @@ -4599,10 +4980,11 @@ static void 
mvpp2_rxq_drop_pkts(struct mvpp2_port *port, for (i = 0; i < rx_received; i++) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 bm = mvpp2_bm_cookie_build(rx_desc); + u32 bm = mvpp2_bm_cookie_build(port, rx_desc); - mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, - rx_desc->buf_cookie); + mvpp2_pool_refill(port, bm, + mvpp2_rxdesc_dma_addr_get(port, rx_desc), + mvpp2_rxdesc_cookie_get(port, rx_desc)); } mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); } @@ -4611,26 +4993,29 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { + int cpu; + mvpp2_rxq_drop_pkts(port, rxq); if (rxq->descs) dma_free_coherent(port->dev->dev.parent, rxq->size * MVPP2_DESC_ALIGNED_SIZE, rxq->descs, - rxq->descs_phys); + rxq->descs_dma); rxq->descs = NULL; rxq->last_desc = 0; rxq->next_desc_to_proc = 0; - rxq->descs_phys = 0; + rxq->descs_dma = 0; /* Clear Rx descriptors queue starting address and size; * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); + cpu = smp_processor_id(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); } /* Create and initialize a Tx queue */ @@ -4646,23 +5031,25 @@ static int mvpp2_txq_init(struct mvpp2_port *port, /* Allocate memory for Tx descriptors */ txq->descs = dma_alloc_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, - &txq->descs_phys, GFP_KERNEL); + &txq->descs_dma, GFP_KERNEL); if (!txq->descs) return -ENOMEM; txq->last_desc = txq->size - 1; /* Set Tx descriptors queue starting address - indirect access */ - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys); - mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & - MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, - txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); + cpu = smp_processor_id(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, + txq->descs_dma); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + txq->size & MVPP2_TXQ_DESC_SIZE_MASK); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); + val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); /* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. 
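The hunk above shows the indirect-access idiom this patch converts to per-CPU form: a write to MVPP2_TXQ_NUM_REG latches the queue that every following MVPP2_TXQ_* access operates on, so the latch and the accesses have to go through the same CPU's register window. A minimal sketch of the pattern, built from the mvpp2_percpu_read()/mvpp2_percpu_write() helpers introduced earlier in this patch (the function name here is illustrative only, not part of the patch):

static u32 mvpp2_txq_pending_sketch(struct mvpp2 *priv,
				    struct mvpp2_tx_queue *txq)
{
	int cpu = smp_processor_id();

	/* Latch the queue number in this CPU's register window... */
	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);

	/* ...so that this read is interpreted for the same queue */
	return mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_PENDING_REG) &
	       MVPP2_TXQ_PENDING_MASK;
}

The pair assumes the caller cannot migrate to another CPU between the two accesses (e.g. it runs with preemption disabled); otherwise the latch and the read could hit different register windows.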
@@ -4673,9 +5060,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port, desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq); - mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, - MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | - MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | + MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); /* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); @@ -4694,11 +5081,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); txq_pcpu->size = txq->size; - txq_pcpu->buffs = kmalloc(txq_pcpu->size * - sizeof(struct mvpp2_txq_pcpu_buf), - GFP_KERNEL); + txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, + sizeof(*txq_pcpu->buffs), + GFP_KERNEL); if (!txq_pcpu->buffs) - goto error; + goto cleanup; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -4707,8 +5094,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, } return 0; - -error: +cleanup: for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->buffs); @@ -4716,7 +5102,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, dma_free_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_phys); + txq->descs, txq->descs_dma); return -ENOMEM; } @@ -4736,20 +5122,21 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, if (txq->descs) dma_free_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_phys); + txq->descs, txq->descs_dma); txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; - txq->descs_phys = 0; + txq->descs_dma = 0; /* Set minimum bandwidth for disabled TXQs */ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); + cpu = smp_processor_id(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); } /* Cleanup Tx ports */ @@ -4759,10 +5146,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) int delay, pending, cpu; u32 val; - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); + cpu = smp_processor_id(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); /* The napi queue has been stopped so wait for all packets * to be transmitted. 
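The hunk that follows open-codes the pending-descriptor read inside the drain loop, replacing the mvpp2_txq_pend_desc_num_get() helper removed earlier in this patch, so that the poll also goes through the current CPU's window. As a rough sketch, the complete drain sequence looks as follows; the helper name is illustrative, and the timeout bound MVPP2_TX_PENDING_TIMEOUT_MSEC is assumed from the driver's existing loop (its timeout warning is elided here):

static void mvpp2_txq_drain_sketch(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = smp_processor_id();
	int pending, delay = 0;
	u32 val;

	/* Select the queue, then set its drain-enable bit */
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   val | MVPP2_TXQ_DRAIN_EN_MASK);

	/* Poll the pending counter through the same CPU window */
	do {
		mdelay(1);
		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG) &
			  MVPP2_TXQ_PENDING_MASK;
	} while (pending && ++delay < MVPP2_TX_PENDING_TIMEOUT_MSEC);

	/* Clear the drain-enable bit again once the queue is empty */
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   val & ~MVPP2_TXQ_DRAIN_EN_MASK);
}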
@@ -4778,11 +5166,13 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) mdelay(1); delay++; - pending = mvpp2_txq_pend_desc_num_get(port, txq); + pending = mvpp2_percpu_read(port->priv, cpu, + MVPP2_TXQ_PENDING_REG); + pending &= MVPP2_TXQ_PENDING_MASK; } while (pending); val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); @@ -4991,20 +5381,21 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) static void mvpp2_rx_error(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { - u32 status = rx_desc->status; + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); switch (status & MVPP2_RXD_ERR_CODE_MASK) { case MVPP2_RXD_ERR_CRC: - netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n", - status, rx_desc->data_size); + netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", + status, sz); break; case MVPP2_RXD_ERR_OVERRUN: - netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n", - status, rx_desc->data_size); + netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", + status, sz); break; case MVPP2_RXD_ERR_RESOURCE: - netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n", - status, rx_desc->data_size); + netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", + status, sz); break; } } @@ -5031,15 +5422,17 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, static int mvpp2_rx_refill(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, u32 bm) { - dma_addr_t phys_addr; + dma_addr_t dma_addr; + phys_addr_t phys_addr; void *buf; /* No recycle or too many buffers are in use, so allocate a new skb */ - buf = mvpp2_buf_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); + buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, + GFP_ATOMIC); if (!buf) return -ENOMEM; - mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)buf); + mvpp2_pool_refill(port, bm, dma_addr, phys_addr); return 0; } @@ -5075,43 +5468,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; } -static void mvpp2_buff_hdr_rx(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - struct mvpp2_buff_hdr *buff_hdr; - struct sk_buff *skb; - u32 rx_status = rx_desc->status; - dma_addr_t buff_phys_addr; - unsigned long buff_virt_addr; - dma_addr_t buff_phys_addr_next; - unsigned long buff_virt_addr_next; - int mc_id; - int pool_id; - - pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - buff_phys_addr = rx_desc->buf_phys_addr; - buff_virt_addr = rx_desc->buf_cookie; - - do { - skb = (struct sk_buff *)buff_virt_addr; - buff_hdr = (struct mvpp2_buff_hdr *)skb->head; - - mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info); - - buff_phys_addr_next = buff_hdr->next_buff_phys_addr; - buff_virt_addr_next = buff_hdr->next_buff_virt_addr; - - /* Release buffer */ - mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr, - buff_virt_addr, mc_id); - - buff_phys_addr = buff_phys_addr_next; - buff_virt_addr = buff_virt_addr_next; - - } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info)); -} - /* Main rx processing */ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, struct mvpp2_rx_queue *rxq) @@ -5132,25 +5488,23 @@ static int mvpp2_rx(struct 
mvpp2_port *port, int rx_todo, struct mvpp2_bm_pool *bm_pool; struct sk_buff *skb; unsigned int frag_size; - dma_addr_t phys_addr; + dma_addr_t dma_addr; + phys_addr_t phys_addr; u32 bm, rx_status; int pool, rx_bytes, err; void *data; rx_done++; - rx_status = rx_desc->status; - rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; - phys_addr = rx_desc->buf_phys_addr; - data = (void *)(uintptr_t)rx_desc->buf_cookie; - - bm = mvpp2_bm_cookie_build(rx_desc); + rx_status = mvpp2_rxdesc_status_get(port, rx_desc); + rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); + rx_bytes -= MVPP2_MH_SIZE; + dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); + phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + data = (void *)phys_to_virt(phys_addr); + + bm = mvpp2_bm_cookie_build(port, rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); bm_pool = &port->priv->bm_pools[pool]; - /* Check if buffer header is used */ - if (rx_status & MVPP2_RXD_BUF_HDR) { - mvpp2_buff_hdr_rx(port, rx_desc); - continue; - } /* In case of an error, release the requested buffer pointer * to the Buffer Manager. This request process is controlled @@ -5158,13 +5512,11 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, * comprised by the RX descriptor. */ if (rx_status & MVPP2_RXD_ERR_SUMMARY) { - err_drop_frame: +err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ - - mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, - rx_desc->buf_cookie); + mvpp2_pool_refill(port, bm, dma_addr, phys_addr); continue; } @@ -5185,7 +5537,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, goto err_drop_frame; } - dma_unmap_single(dev->dev.parent, phys_addr, + dma_unmap_single(dev->dev.parent, dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE); rcvd_pkts++; @@ -5216,11 +5568,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, } static inline void -tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq, +tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { - dma_unmap_single(dev, desc->buf_phys_addr, - desc->data_size, DMA_TO_DEVICE); + dma_addr_t buf_dma_addr = + mvpp2_txdesc_dma_addr_get(port, desc); + size_t buf_sz = + mvpp2_txdesc_size_get(port, desc); + dma_unmap_single(port->dev->dev.parent, buf_dma_addr, + buf_sz, DMA_TO_DEVICE); mvpp2_txq_desc_put(txq); } @@ -5232,47 +5588,49 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); struct mvpp2_tx_desc *tx_desc; int i; - dma_addr_t buf_phys_addr; + dma_addr_t buf_dma_addr; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = page_address(frag->page.p) + frag->page_offset; tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - tx_desc->phys_txq = txq->id; - tx_desc->data_size = frag->size; + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, frag->size); - buf_phys_addr = dma_map_single(port->dev->dev.parent, addr, - tx_desc->data_size, + buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, + frag->size, DMA_TO_DEVICE); - if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) { + if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { mvpp2_txq_desc_put(txq); - goto error; + goto cleanup; } - tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN; - tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_offset_set(port, tx_desc, + buf_dma_addr & 
MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, + buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ - tx_desc->command = MVPP2_TXD_L_DESC; - mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); + mvpp2_txdesc_cmd_set(port, tx_desc, + MVPP2_TXD_L_DESC); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); } else { /* Descriptor in the middle: Not First, Not Last */ - tx_desc->command = 0; - mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); } } return 0; - -error: +cleanup: /* Release all descriptors that were used to map fragments of * this packet, as well as the corresponding DMA mappings */ for (i = i - 1; i >= 0; i--) { tx_desc = txq->descs + i; - tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc); + tx_desc_unmap_put(port, txq, tx_desc); } return -ENOMEM; @@ -5285,7 +5643,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; - dma_addr_t buf_phys_addr; + dma_addr_t buf_dma_addr; int frags = 0; u16 txq_id; u32 tx_cmd; @@ -5307,35 +5665,38 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) /* Get a descriptor for the first part of the packet */ tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - tx_desc->phys_txq = txq->id; - tx_desc->data_size = skb_headlen(skb); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); - buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, - tx_desc->data_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) { + buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { mvpp2_txq_desc_put(txq); frags = 0; goto out; } - tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN; - tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN; + + mvpp2_txdesc_offset_set(port, tx_desc, + buf_dma_addr & MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, + buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); tx_cmd = mvpp2_skb_tx_csum(port, skb); if (frags == 1) { /* First and Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; - tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); } else { /* First but not Last */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; - tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { - tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc); + tx_desc_unmap_put(port, txq, tx_desc); frags = 0; goto out; } @@ -5396,6 +5757,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) u32 cause_rx_tx, cause_rx, cause_misc; int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); + int cpu = smp_processor_id(); /* Rx/Tx cause register * @@ -5407,8 +5769,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_read(port->priv, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + cause_rx_tx = mvpp2_percpu_read(port->priv, cpu, + 
MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; @@ -5417,8 +5779,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) /* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), - cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); + mvpp2_percpu_write(port->priv, cpu, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id), + cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; @@ -5530,7 +5893,7 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev, return 0; } -static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr) +static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) { u32 mac_addr_l, mac_addr_m, mac_addr_h; @@ -5698,7 +6061,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) { err = -EADDRNOTAVAIL; - goto error; + goto log_error; } if (!netif_running(dev)) { @@ -5708,7 +6071,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p) /* Reconfigure parser to accept the original MAC address */ err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); if (err) - goto error; + goto log_error; } mvpp2_stop_dev(port); @@ -5720,15 +6083,14 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p) /* Reconfigure parser accept the original MAC address */ err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); if (err) - goto error; + goto log_error; out_start: mvpp2_start_dev(port); mvpp2_egress_enable(port); mvpp2_ingress_enable(port); return 0; - -error: - netdev_err(dev, "fail to change MAC address\n"); +log_error: + netdev_err(dev, "failed to change MAC address\n"); return err; } @@ -5753,7 +6115,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) /* Reconfigure BM to the original MTU */ err = mvpp2_bm_update_mtu(dev, dev->mtu); if (err) - goto error; + goto log_error; } mvpp2_stop_dev(port); @@ -5767,7 +6129,7 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) /* Reconfigure BM to the original MTU */ err = mvpp2_bm_update_mtu(dev, dev->mtu); if (err) - goto error; + goto log_error; out_start: mvpp2_start_dev(port); @@ -5775,9 +6137,8 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mvpp2_ingress_enable(port); return 0; - -error: - netdev_err(dev, "fail to change MTU\n"); +log_error: + netdev_err(dev, "failed to change MTU\n"); return err; } @@ -5946,7 +6307,7 @@ static int mvpp2_ethtool_set_ringparam(struct net_device *dev, err_clean_rxqs: mvpp2_cleanup_rxqs(port); err_out: - netdev_err(dev, "fail to change ring parameters"); + netdev_err(dev, "failed to change ring parameters"); return err; } @@ -5975,16 +6336,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -/* Driver initialization */ - -static void mvpp2_port_power_up(struct mvpp2_port *port) -{ - mvpp2_port_mii_set(port); - mvpp2_port_periodic_xon_disable(port); - mvpp2_port_fc_adv_enable(port); - mvpp2_port_reset(port); -} - /* Initialize port HW */ static int mvpp2_port_init(struct mvpp2_port *port) { @@ -5993,7 +6344,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_txq_pcpu *txq_pcpu; int queue, cpu, err; - if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM) + if (port->first_rxq + rxq_number > + MVPP2_MAX_PORTS * priv->max_port_rxqs) return 
-EINVAL; /* Disable port */ @@ -6061,7 +6413,18 @@ static int mvpp2_port_init(struct mvpp2_port *port) } /* Configure Rx queue group interrupt for this port */ - mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number); + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + rxq_number); + } else { + u32 val; + + val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } /* Create Rx descriptor rings */ for (queue = 0; queue < rxq_number; queue++) { @@ -6103,8 +6466,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node, - struct mvpp2 *priv, - int *next_first_rxq) + struct mvpp2 *priv) { struct device_node *phy_node; struct mvpp2_port *port; @@ -6117,11 +6479,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, u32 id; int features; int phy_mode; - int priv_common_regs_num = 2; int err, i, cpu; - dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, - rxq_number); + dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number); if (!dev) return -ENOMEM; @@ -6163,16 +6523,30 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->priv = priv; port->id = id; - port->first_rxq = *next_first_rxq; + if (priv->hw_version == MVPP21) + port->first_rxq = port->id * rxq_number; + else + port->first_rxq = port->id * priv->max_port_rxqs; + port->phy_node = phy_node; port->phy_interface = phy_mode; - res = platform_get_resource(pdev, IORESOURCE_MEM, - priv_common_regs_num + id); - port->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(port->base)) { - err = PTR_ERR(port->base); - goto err_free_irq; + if (priv->hw_version == MVPP21) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); + port->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(port->base)) { + err = PTR_ERR(port->base); + goto err_free_irq; + } + } else { + if (of_property_read_u32(port_node, "gop-port-id", + &port->gop_id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing gop-port-id value\n"); + goto err_free_irq; + } + + port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); } /* Alloc per-cpu stats */ @@ -6187,7 +6561,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, mac_from = "device tree"; ether_addr_copy(dev->dev_addr, dt_mac_addr); } else { - mvpp2_get_mac_address(port, hw_mac_addr); + if (priv->hw_version == MVPP21) + mvpp21_get_mac_address(port, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { mac_from = "hardware"; ether_addr_copy(dev->dev_addr, hw_mac_addr); @@ -6207,7 +6582,14 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev_err(&pdev->dev, "failed to init port %d\n", id); goto err_free_stats; } - mvpp2_port_power_up(port); + + mvpp2_port_mii_set(port); + mvpp2_port_periodic_xon_disable(port); + + if (priv->hw_version == MVPP21) + mvpp2_port_fc_adv_enable(port); + + mvpp2_port_reset(port); port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); if (!port->pcpu) { @@ -6245,8 +6627,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); - /* Increment the first Rx queue number to be used by the next port */ - *next_first_rxq += rxq_number; priv->port_list[id] = port; return 0; @@ -6330,6 +6710,60 
@@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } +static void mvpp2_axi_init(struct mvpp2 *priv) +{ + u32 val, rdval, wrval; + + mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); + + /* AXI Bridge Configuration */ + + rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + /* BM */ + mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); + + /* Descriptors */ + mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); + + /* Buffer Data */ + mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); + + val = MVPP22_AXI_CODE_CACHE_NON_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); + mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); +} + /* Initialize network controller common part HW */ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) { @@ -6338,7 +6772,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) u32 val; /* Checks for hardware constraints */ - if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) || + if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) || (txq_number > MVPP2_MAX_TXQ)) { dev_err(&pdev->dev, "invalid queue size parameter\n"); return -EINVAL; @@ -6349,14 +6783,23 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) if (dram_target_info) mvpp2_conf_mbus_windows(dram_target_info, priv); + if (priv->hw_version == MVPP22) + mvpp2_axi_init(priv); + /* Disable HW PHY polling */ - val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); - val |= MVPP2_PHY_AN_STOP_SMI0_MASK; - writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + if (priv->hw_version == MVPP21) { + val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + val |= MVPP2_PHY_AN_STOP_SMI0_MASK; + writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + } else { + val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + val &= ~MVPP22_SMI_POLLING_EN; + writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + } /* Allocate and initialize aggregated TXQs */ priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), - sizeof(struct mvpp2_tx_queue), + sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM; @@ -6374,11 +6817,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_rx_fifo_init(priv); /* Reset Rx queue group interrupt configuration */ - for (i = 0; i < MVPP2_MAX_PORTS; i++) - mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number); + for (i = 0; i < 
MVPP2_MAX_PORTS; i++) { + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), + rxq_number); + continue; + } else { + u32 val; + + val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } + } - writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, - priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); + if (priv->hw_version == MVPP21) + writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, + priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); /* Allow cache snoop when transmiting packets */ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); @@ -6405,22 +6862,46 @@ static int mvpp2_probe(struct platform_device *pdev) struct device_node *port_node; struct mvpp2 *priv; struct resource *res; - int port_count, first_rxq; + void __iomem *base; + int port_count, cpu; int err; - priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + priv->hw_version = + (unsigned long)of_device_get_match_data(&pdev->dev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (priv->hw_version == MVPP21) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->lms_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->lms_base)) + return PTR_ERR(priv->lms_base); + } else { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->iface_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->iface_base)) + return PTR_ERR(priv->iface_base); + } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->lms_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->lms_base)) - return PTR_ERR(priv->lms_base); + for_each_present_cpu(cpu) { + u32 addr_space_sz; + + addr_space_sz = (priv->hw_version == MVPP21 ? + MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); + priv->cpu_base[cpu] = base + cpu * addr_space_sz; + } + + if (priv->hw_version == MVPP21) + priv->max_port_rxqs = 8; + else + priv->max_port_rxqs = 32; priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); if (IS_ERR(priv->pp_clk)) @@ -6438,42 +6919,70 @@ static int mvpp2_probe(struct platform_device *pdev) if (err < 0) goto err_pp_clk; + if (priv->hw_version == MVPP22) { + priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); + if (IS_ERR(priv->mg_clk)) { + err = PTR_ERR(priv->mg_clk); + goto err_gop_clk; + } + + err = clk_prepare_enable(priv->mg_clk); + if (err < 0) + goto err_gop_clk; + } + /* Get system's tclk rate */ priv->tclk = clk_get_rate(priv->pp_clk); + if (priv->hw_version == MVPP22) { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + goto err_mg_clk; + /* Sadly, the BM pools all share the same register to + * store the high 32 bits of their address. So they + * must all have the same high 32 bits, which forces + * us to restrict coherent memory to DMA_BIT_MASK(32). 
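[Editor's note: the comment above describes a real PPv2.2 quirk worth spelling out: streaming DMA may use 40-bit addresses, but all BM pools share a single register for the upper 32 address bits, so coherent allocations must stay in the low 4 GB. A minimal sketch of how such a split address would be programmed; the helper and register offsets are hypothetical, while lower_32_bits()/upper_32_bits() and writel() are standard kernel APIs.]

	/* Illustrative only: program a wide buffer address when the high
	 * 32 bits live in one register shared by all pools. Every pool's
	 * upper bits must therefore be identical.
	 */
	static void example_bm_pool_set_addr(void __iomem *base, dma_addr_t addr)
	{
		writel(lower_32_bits(addr), base + 0x10); /* hypothetical per-pool reg */
		writel(upper_32_bits(addr), base + 0x14); /* hypothetical shared reg */
	}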
+ */ + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_mg_clk; + } + /* Initialize network controller */ err = mvpp2_init(pdev, priv); if (err < 0) { dev_err(&pdev->dev, "failed to initialize controller\n"); - goto err_gop_clk; + goto err_mg_clk; } port_count = of_get_available_child_count(dn); if (port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; - goto err_gop_clk; + goto err_mg_clk; } priv->port_list = devm_kcalloc(&pdev->dev, port_count, - sizeof(struct mvpp2_port *), - GFP_KERNEL); + sizeof(*priv->port_list), + GFP_KERNEL); if (!priv->port_list) { err = -ENOMEM; - goto err_gop_clk; + goto err_mg_clk; } /* Initialize ports */ - first_rxq = 0; for_each_available_child_of_node(dn, port_node) { - err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq); + err = mvpp2_port_probe(pdev, port_node, priv); if (err < 0) - goto err_gop_clk; + goto err_mg_clk; } platform_set_drvdata(pdev, priv); return 0; +err_mg_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_clk); err_gop_clk: clk_disable_unprepare(priv->gop_clk); err_pp_clk: @@ -6506,9 +7015,10 @@ static int mvpp2_remove(struct platform_device *pdev) dma_free_coherent(&pdev->dev, MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, aggr_txq->descs, - aggr_txq->descs_phys); + aggr_txq->descs_dma); } + clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); @@ -6516,7 +7026,14 @@ static int mvpp2_remove(struct platform_device *pdev) } static const struct of_device_id mvpp2_match[] = { - { .compatible = "marvell,armada-375-pp2" }, + { + .compatible = "marvell,armada-375-pp2", + .data = (void *)MVPP21, + }, + { + .compatible = "marvell,armada-7k-pp22", + .data = (void *)MVPP22, + }, { } }; MODULE_DEVICE_TABLE(of, mvpp2_match); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 28cb36d9e50a24a798705ded2f8eb981c9f9ac63..993724959a7c32468646ae934605ba3ad81d9d1a 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -556,11 +556,11 @@ static int init_hash_table(struct pxa168_eth_private *pep) * function.Driver can dynamically switch to them if the 1/2kB hash * table is full. 
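[Editor's note: the pxa168 hunk below is a style cleanup around a common allocate-once pattern: the DMA-coherent hash table is created on first use and only zeroed on later reinitializations. A generic sketch of the idiom, with hypothetical names; dma_zalloc_coherent() is the contemporary kernel API used by this driver.]

	/* Sketch: allocate a coherent buffer once, clear it on reuse. */
	static int example_init_table(struct device *dev, void **virt,
				      dma_addr_t *dma, size_t size)
	{
		if (!*virt) {
			*virt = dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
			if (!*virt)
				return -ENOMEM;
		} else {
			memset(*virt, 0, size);	/* keep the existing mapping */
		}
		return 0;
	}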
*/ - if (pep->htpr == NULL) { + if (!pep->htpr) { pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, &pep->htpr_dma, GFP_KERNEL); - if (pep->htpr == NULL) + if (!pep->htpr) return -ENOMEM; } else { memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); @@ -1036,8 +1036,7 @@ static int rxq_init(struct net_device *dev) int rx_desc_num = pep->rx_ring_size; /* Allocate RX skb rings */ - pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, - GFP_KERNEL); + pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL); if (!pep->rx_skb) return -ENOMEM; @@ -1096,8 +1095,7 @@ static int txq_init(struct net_device *dev) int size = 0, i = 0; int tx_desc_num = pep->tx_ring_size; - pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, - GFP_KERNEL); + pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL); if (!pep->tx_skb) return -ENOMEM; @@ -1358,7 +1356,7 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - if (dev->phydev != NULL) + if (dev->phydev) return phy_mii_ioctl(dev->phydev, ifr, cmd); return -EOPNOTSUPP; @@ -1503,7 +1501,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->timeout.data = (unsigned long)pep; pep->smi_bus = mdiobus_alloc(); - if (pep->smi_bus == NULL) { + if (!pep->smi_bus) { err = -ENOMEM; goto err_netdev; } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index edb95271a4f2f784d740b3201364a5e45de15bcb..5d7d94de4e00670feeec4f929c87e1cdfdf1a496 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2657,7 +2657,7 @@ static int skge_down(struct net_device *dev) struct skge_hw *hw = skge->hw; int port = skge->port; - if (skge->mem == NULL) + if (!skge->mem) return 0; netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); @@ -3718,7 +3718,7 @@ static int skge_debug_show(struct seq_file *seq, void *v) t->csum_offs, t->csum_write, t->csum_start); } - seq_printf(seq, "\nRx Ring:\n"); + seq_puts(seq, "\nRx Ring:\n"); for (e = skge->rx_ring.to_clean; ; e = e->next) { const struct skge_rx_desc *r = e->desc; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 2b2cc3f3ca1084ef7aebcb486a1eedc4985f3fa4..1145cde2274a4cb778ba816716c3c55a5850d71e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4544,7 +4544,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) sky2_read32(hw, B0_Y2_SP_ICR)); if (!netif_running(dev)) { - seq_printf(seq, "network not running\n"); + seq_puts(seq, "network not running\n"); return 0; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 93949139e62cf92a7e593b36d6a7399edbd1b6e6..16f97552ae983b6f8e9df9fb4abf956764b49d28 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1849,6 +1849,12 @@ static int mtk_hw_init(struct mtk_eth *eth) /* GE2, Force 1000M/FD, FC ON */ mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); + /* Indicates CDM to parse the MTK special tag from CPU + * which also is working out for untag packets. 
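[Editor's note: the two added lines that follow are a plain read-modify-write, setting MTK_CDMQ_STAG_EN in MTK_CDMQ_IG_CTRL without disturbing the other bits. As a hedged sketch, the same idiom as a small helper; the helper itself is hypothetical, while mtk_r32()/mtk_w32() are this driver's register accessors.]

	/* Illustrative helper: enable bits via read-modify-write. */
	static void example_mtk_set_bits(struct mtk_eth *eth, unsigned int reg,
					 u32 bits)
	{
		mtk_w32(eth, mtk_r32(eth, reg) | bits, reg);
	}

	/* usage: example_mtk_set_bits(eth, MTK_CDMQ_IG_CTRL, MTK_CDMQ_STAG_EN); */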
+ */ + val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); + mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); + /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); @@ -1911,10 +1917,9 @@ static int __init mtk_init(struct net_device *dev) /* If the mac address is invalid, use random mac address */ if (!is_valid_ether_addr(dev->dev_addr)) { - random_ether_addr(dev->dev_addr); + eth_hw_addr_random(dev); dev_err(eth->dev, "generated random MAC address %pM\n", dev->dev_addr); - dev->addr_assign_type = NET_ADDR_RANDOM; } return mtk_phy_connect(dev); @@ -2320,6 +2325,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; eth->netdev[id]->irq = eth->irq[0]; + eth->netdev[id]->dev.of_node = np; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 08285a96ff7077f83b5ac530e8f59266922819d1..3c46a3b613b9db73300acc717db2a7e85cd9f8df 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -70,6 +70,10 @@ /* Frame Engine Interrupt Grouping Register */ #define MTK_FE_INT_GRP 0x20 +/* CDMP Ingress Control Register */ +#define MTK_CDMQ_IG_CTRL 0x1400 +#define MTK_CDMQ_STAG_EN BIT(0) + /* CDMP Exgress Control Register */ #define MTK_CDMP_EG_CTRL 0x404 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c4d714fcc7dae759998a49a1f90f9ab1ee9bdda3..ffbcb27c05e55f43630a812249bab21609886dd9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -117,7 +117,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { /* port statistics */ "tso_packets", "xmit_more", - "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", + "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages", "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", /* pf statistics */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 61420473fe5fb57032fa50de9a0d2abfa71831d6..94fab20ef146bd5874a21ec2ecbe3dea16180aec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -92,7 +92,9 @@ static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return mlx4_en_setup_tc(dev, tc->tc); + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return mlx4_en_setup_tc(dev, tc->mqprio->num_tc); } #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 9166d90e732858610b1407fe85cbf6cbe27f5e0b..e0eb695318e64ebcaf58d6edb5f9a57be6f9ddf6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -213,6 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_good = 0; priv->port_stats.rx_chksum_none = 0; priv->port_stats.rx_chksum_complete = 0; + priv->port_stats.rx_alloc_pages = 0; priv->xdp_stats.rx_xdp_drop = 0; priv->xdp_stats.rx_xdp_tx = 0; priv->xdp_stats.rx_xdp_tx_full = 0; @@ -223,6 +224,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok); priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none); priv->port_stats.rx_chksum_complete += 
READ_ONCE(ring->csum_complete); + priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages); priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop); priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx); priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 867292880c07a15124a0cf099d1fcda09926548e..aa074e57ce06fb2842fa1faabd156c3cd2fe10f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -50,173 +50,62 @@ #include "mlx4_en.h" -static int mlx4_alloc_pages(struct mlx4_en_priv *priv, - struct mlx4_en_rx_alloc *page_alloc, - const struct mlx4_en_frag_info *frag_info, - gfp_t _gfp) +static int mlx4_alloc_page(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *frag, + gfp_t gfp) { - int order; struct page *page; dma_addr_t dma; - for (order = frag_info->order; ;) { - gfp_t gfp = _gfp; - - if (order) - gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC; - page = alloc_pages(gfp, order); - if (likely(page)) - break; - if (--order < 0 || - ((PAGE_SIZE << order) < frag_info->frag_size)) - return -ENOMEM; - } - dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, - frag_info->dma_dir); + page = alloc_page(gfp); + if (unlikely(!page)) + return -ENOMEM; + dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir); if (unlikely(dma_mapping_error(priv->ddev, dma))) { - put_page(page); + __free_page(page); return -ENOMEM; } - page_alloc->page_size = PAGE_SIZE << order; - page_alloc->page = page; - page_alloc->dma = dma; - page_alloc->page_offset = 0; - /* Not doing get_page() for each frag is a big win - * on asymetric workloads. Note we can not use atomic_set(). - */ - page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1); + frag->page = page; + frag->dma = dma; + frag->page_offset = priv->rx_headroom; return 0; } static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, - struct mlx4_en_rx_alloc *ring_alloc, gfp_t gfp) { - struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; - const struct mlx4_en_frag_info *frag_info; - struct page *page; int i; - for (i = 0; i < priv->num_frags; i++) { - frag_info = &priv->frag_info[i]; - page_alloc[i] = ring_alloc[i]; - page_alloc[i].page_offset += frag_info->frag_stride; - - if (page_alloc[i].page_offset + frag_info->frag_stride <= - ring_alloc[i].page_size) - continue; - - if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i], - frag_info, gfp))) - goto out; - } - - for (i = 0; i < priv->num_frags; i++) { - frags[i] = ring_alloc[i]; - frags[i].page_offset += priv->frag_info[i].rx_headroom; - rx_desc->data[i].addr = cpu_to_be64(frags[i].dma + - frags[i].page_offset); - ring_alloc[i] = page_alloc[i]; - } - - return 0; - -out: - while (i--) { - if (page_alloc[i].page != ring_alloc[i].page) { - dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].page_size, - priv->frag_info[i].dma_dir); - page = page_alloc[i].page; - /* Revert changes done by mlx4_alloc_pages */ - page_ref_sub(page, page_alloc[i].page_size / - priv->frag_info[i].frag_stride - 1); - put_page(page); + for (i = 0; i < priv->num_frags; i++, frags++) { + if (!frags->page) { + if (mlx4_alloc_page(priv, frags, gfp)) + return -ENOMEM; + ring->rx_alloc_pages++; } - } - return -ENOMEM; -} - -static void mlx4_en_free_frag(struct mlx4_en_priv *priv, - struct mlx4_en_rx_alloc *frags, - int i) 
-{ - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; - - - if (next_frag_end > frags[i].page_size) - dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, - frag_info->dma_dir); - - if (frags[i].page) - put_page(frags[i].page); -} - -static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring) -{ - int i; - struct mlx4_en_rx_alloc *page_alloc; - - for (i = 0; i < priv->num_frags; i++) { - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - - if (mlx4_alloc_pages(priv, &ring->page_alloc[i], - frag_info, GFP_KERNEL | __GFP_COLD)) - goto out; - - en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n", - i, ring->page_alloc[i].page_size, - page_ref_count(ring->page_alloc[i].page)); + rx_desc->data[i].addr = cpu_to_be64(frags->dma + + frags->page_offset); } return 0; - -out: - while (i--) { - struct page *page; - - page_alloc = &ring->page_alloc[i]; - dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, - priv->frag_info[i].dma_dir); - page = page_alloc->page; - /* Revert changes done by mlx4_alloc_pages */ - page_ref_sub(page, page_alloc->page_size / - priv->frag_info[i].frag_stride - 1); - put_page(page); - page_alloc->page = NULL; - } - return -ENOMEM; } -static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring) +static void mlx4_en_free_frag(const struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *frag) { - struct mlx4_en_rx_alloc *page_alloc; - int i; - - for (i = 0; i < priv->num_frags; i++) { - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - - page_alloc = &ring->page_alloc[i]; - en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", - i, page_count(page_alloc->page)); - - dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, frag_info->dma_dir); - while (page_alloc->page_offset + frag_info->frag_stride < - page_alloc->page_size) { - put_page(page_alloc->page); - page_alloc->page_offset += frag_info->frag_stride; - } - page_alloc->page = NULL; + if (frag->page) { + dma_unmap_page(priv->ddev, frag->dma, + PAGE_SIZE, priv->dma_dir); + __free_page(frag->page); } + /* We need to clear all fields, otherwise a change of priv->log_rx_info + * could lead to see garbage later in frag->page. 
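[Editor's note: clearing the whole mlx4_en_rx_alloc record matters because the reworked refill path tests frags->page to decide whether to allocate, so a stale pointer would defeat that test. Later in this patch the completion path recycles half pages by flipping the offset; a condensed sketch of that decision, logic taken from mlx4_en_complete_rx_desc() in this patch, with the wrapper function itself hypothetical and a 2 KB stride on 4 KB pages assumed.]

	/* Sketch: flip to the other half of the page for the next refill and
	 * keep the page unless an extra reference, pfmemalloc, or a remote
	 * NUMA node forces its release.
	 */
	static bool example_recycle_half_page(struct mlx4_en_priv *priv,
					      struct mlx4_en_rx_alloc *frags)
	{
		struct page *page = frags->page;
		bool release;

		frags->page_offset ^= PAGE_SIZE / 2;
		release = page_count(page) != 1 ||
			  page_is_pfmemalloc(page) ||
			  page_to_nid(page) != numa_mem_id();
		if (release) {
			dma_unmap_page(priv->ddev, frags->dma, PAGE_SIZE,
				       priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);	/* one half stays in the ring */
		}
		return release;
	}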
+ */ + memset(frag, 0, sizeof(*frag)); } -static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, +static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; @@ -248,18 +137,23 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); - if (ring->page_cache.index > 0) { - frags[0] = ring->page_cache.buf[--ring->page_cache.index]; - rx_desc->data[0].addr = cpu_to_be64(frags[0].dma + - frags[0].page_offset); + /* XDP uses a single page per frame */ + if (!frags->page) { + ring->page_cache.index--; + frags->page = ring->page_cache.buf[ring->page_cache.index].page; + frags->dma = ring->page_cache.buf[ring->page_cache.index].dma; + } + frags->page_offset = XDP_PACKET_HEADROOM; + rx_desc->data[0].addr = cpu_to_be64(frags->dma + + XDP_PACKET_HEADROOM); return 0; } - return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); + return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp); } -static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) +static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring) { return ring->prod == ring->cons; } @@ -269,7 +163,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); } -static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, +/* slow path */ +static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { @@ -279,7 +174,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, frags = ring->rx_info + (index << priv->log_rx_info); for (nr = 0; nr < priv->num_frags; nr++) { en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); - mlx4_en_free_frag(priv, frags, nr); + mlx4_en_free_frag(priv, frags + nr); } } @@ -335,12 +230,12 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, ring->cons, ring->prod); /* Unmap and free Rx buffers */ - while (!mlx4_en_is_ring_empty(ring)) { - index = ring->cons & ring->size_mask; + for (index = 0; index < ring->size; index++) { en_dbg(DRV, priv, "Processing descriptor:%d\n", index); mlx4_en_free_rx_desc(priv, ring, index); - ++ring->cons; } + ring->cons = 0; + ring->prod = 0; } void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) @@ -392,9 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); - ring->rx_info = vmalloc_node(tmp, node); + ring->rx_info = vzalloc_node(tmp, node); if (!ring->rx_info) { - ring->rx_info = vmalloc(tmp); + ring->rx_info = vzalloc(tmp); if (!ring->rx_info) { err = -ENOMEM; goto err_ring; @@ -464,16 +359,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) /* Initialize all descriptors */ for (i = 0; i < ring->size; i++) mlx4_en_init_rx_desc(priv, ring, i); - - /* Initialize page allocators */ - err = mlx4_en_init_allocator(priv, ring); - if (err) { - en_err(priv, "Failed initializing ring allocator\n"); - if (ring->stride <= TXBB_SIZE) - ring->buf -= TXBB_SIZE; - ring_ind--; - goto err_allocator; - } } err = mlx4_en_fill_rx_buffers(priv); if (err) @@ -493,11 +378,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]); ring_ind = priv->rx_ring_num - 1; -err_allocator: while (ring_ind >= 
0) { if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE) priv->rx_ring[ring_ind]->buf -= TXBB_SIZE; - mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]); ring_ind--; } return err; @@ -537,7 +420,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, if (cache->index >= MLX4_EN_CACHE_SIZE) return false; - cache->buf[cache->index++] = *frame; + cache->buf[cache->index].page = frame->page; + cache->buf[cache->index].dma = frame->dma; + cache->index++; return true; } @@ -567,136 +452,91 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, int i; for (i = 0; i < ring->page_cache.index; i++) { - struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i]; - - dma_unmap_page(priv->ddev, frame->dma, frame->page_size, - priv->frag_info[0].dma_dir); - put_page(frame->page); + dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma, + PAGE_SIZE, priv->dma_dir); + put_page(ring->page_cache.buf[i].page); } ring->page_cache.index = 0; mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; - mlx4_en_destroy_allocator(priv, ring); } static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, - struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, struct sk_buff *skb, int length) { - struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags; - struct mlx4_en_frag_info *frag_info; - int nr; + const struct mlx4_en_frag_info *frag_info = priv->frag_info; + unsigned int truesize = 0; + int nr, frag_size; + struct page *page; dma_addr_t dma; + bool release; /* Collect used fragments while replacing them in the HW descriptors */ - for (nr = 0; nr < priv->num_frags; nr++) { - frag_info = &priv->frag_info[nr]; - if (length <= frag_info->frag_prefix_size) - break; - if (unlikely(!frags[nr].page)) + for (nr = 0;; frags++) { + frag_size = min_t(int, length, frag_info->frag_size); + + page = frags->page; + if (unlikely(!page)) goto fail; - dma = be64_to_cpu(rx_desc->data[nr].addr); - dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size, - DMA_FROM_DEVICE); + dma = frags->dma; + dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset, + frag_size, priv->dma_dir); + + __skb_fill_page_desc(skb, nr, page, frags->page_offset, + frag_size); - __skb_fill_page_desc(skb, nr, frags[nr].page, - frags[nr].page_offset, - frag_info->frag_size); + truesize += frag_info->frag_stride; + if (frag_info->frag_stride == PAGE_SIZE / 2) { + frags->page_offset ^= PAGE_SIZE / 2; + release = page_count(page) != 1 || + page_is_pfmemalloc(page) || + page_to_nid(page) != numa_mem_id(); + } else { + u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); - skb->truesize += frag_info->frag_stride; - frags[nr].page = NULL; + frags->page_offset += sz_align; + release = frags->page_offset + frag_info->frag_size > PAGE_SIZE; + } + if (release) { + dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir); + frags->page = NULL; + } else { + page_ref_inc(page); + } + + nr++; + length -= frag_size; + if (!length) + break; + frag_info++; } - /* Adjust size of last fragment to match actual length */ - if (nr > 0) - skb_frag_size_set(&skb_frags_rx[nr - 1], - length - priv->frag_info[nr - 1].frag_prefix_size); + skb->truesize += truesize; return nr; fail: while (nr > 0) { nr--; - __skb_frag_unref(&skb_frags_rx[nr]); + __skb_frag_unref(skb_shinfo(skb)->frags + nr); } return 0; } - -static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, - struct mlx4_en_rx_desc *rx_desc, - struct mlx4_en_rx_alloc *frags, - unsigned int length) -{ - struct sk_buff *skb; - void *va; - int 
used_frags; - dma_addr_t dma; - - skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN); - if (unlikely(!skb)) { - en_dbg(RX_ERR, priv, "Failed allocating skb\n"); - return NULL; - } - skb_reserve(skb, NET_IP_ALIGN); - skb->len = length; - - /* Get pointer to first fragment so we could copy the headers into the - * (linear part of the) skb */ - va = page_address(frags[0].page) + frags[0].page_offset; - - if (length <= SMALL_PACKET_SIZE) { - /* We are copying all relevant data to the skb - temporarily - * sync buffers for the copy */ - dma = be64_to_cpu(rx_desc->data[0].addr); - dma_sync_single_for_cpu(priv->ddev, dma, length, - DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, va, length); - skb->tail += length; - } else { - unsigned int pull_len; - - /* Move relevant fragments to skb */ - used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, - skb, length); - if (unlikely(!used_frags)) { - kfree_skb(skb); - return NULL; - } - skb_shinfo(skb)->nr_frags = used_frags; - - pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE); - /* Copy headers into the skb linear buffer */ - memcpy(skb->data, va, pull_len); - skb->tail += pull_len; - - /* Skip headers in first fragment */ - skb_shinfo(skb)->frags[0].page_offset += pull_len; - - /* Adjust size of first fragment */ - skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len); - skb->data_len = length - pull_len; - } - return skb; -} - -static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb) +static void validate_loopback(struct mlx4_en_priv *priv, void *va) { + const unsigned char *data = va + ETH_HLEN; int i; - int offset = ETH_HLEN; - for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { - if (*(skb->data + offset) != (unsigned char) (i & 0xff)) - goto out_loopback; + for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) { + if (data[i] != (unsigned char)i) + return; } /* Loopback found */ priv->loopback_ok = 1; - -out_loopback: - dev_kfree_skb_any(skb); } static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, @@ -801,7 +641,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct mlx4_cqe *cqe; struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; struct mlx4_en_rx_alloc *frags; - struct mlx4_en_rx_desc *rx_desc; struct bpf_prog *xdp_prog; int doorbell_pending; struct sk_buff *skb; @@ -834,10 +673,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cq->mcq.cons_index & cq->size)) { + void *va; frags = ring->rx_info + (index << priv->log_rx_info); - rx_desc = ring->buf + (index << ring->log_stride); - + va = page_address(frags[0].page) + frags[0].page_offset; /* * make sure we read the CQE after we read the ownership bit */ @@ -860,16 +699,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * and not performing the selftest or flb disabled */ if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) { - struct ethhdr *ethh; + const struct ethhdr *ethh = va; dma_addr_t dma; /* Get pointer to first fragment since we haven't * skb yet and cast it to ethhdr struct */ - dma = be64_to_cpu(rx_desc->data[0].addr); + dma = frags[0].dma + frags[0].page_offset; dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), DMA_FROM_DEVICE); - ethh = (struct ethhdr *)(page_address(frags[0].page) + - frags[0].page_offset); if (is_multicast_ether_addr(ethh->h_dest)) { struct mlx4_mac_entry *entry; @@ -887,13 +724,16 @@ int 
mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud } } + if (unlikely(priv->validate_loopback)) { + validate_loopback(priv, va); + goto next; + } + /* * Packet is OK - process it. */ length = be32_to_cpu(cqe->byte_cnt); length -= ring->fcs_del; - l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && - (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); /* A bpf program gets first chance to drop the packet. It may * read bytes but not past the end of the frag. @@ -904,13 +744,13 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud void *orig_data; u32 act; - dma = be64_to_cpu(rx_desc->data[0].addr); + dma = frags[0].dma + frags[0].page_offset; dma_sync_single_for_cpu(priv->ddev, dma, priv->frag_info[0].frag_size, DMA_FROM_DEVICE); - xdp.data_hard_start = page_address(frags[0].page); - xdp.data = xdp.data_hard_start + frags[0].page_offset; + xdp.data_hard_start = va - frags[0].page_offset; + xdp.data = va; xdp.data_end = xdp.data + length; orig_data = xdp.data; @@ -920,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud length = xdp.data_end - xdp.data; frags[0].page_offset = xdp.data - xdp.data_hard_start; + va = xdp.data; } switch (act) { @@ -928,8 +769,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud case XDP_TX: if (likely(!mlx4_en_xmit_frame(ring, frags, dev, length, cq->ring, - &doorbell_pending))) - goto consumed; + &doorbell_pending))) { + frags[0].page = NULL; + goto next; + } trace_xdp_exception(dev, xdp_prog, act); goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: @@ -939,8 +782,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: - if (likely(mlx4_en_rx_recycle(ring, frags))) - goto consumed; goto next; } } @@ -948,129 +789,51 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ring->bytes += length; ring->packets++; + skb = napi_get_frags(&cq->napi); + if (!skb) + goto next; + + if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), + timestamp); + } + skb_record_rx_queue(skb, cq->ring); + if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && cqe->checksum == cpu_to_be16(0xffff)) { ip_summed = CHECKSUM_UNNECESSARY; + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + if (l2_tunnel) + skb->csum_level = 1; ring->csum_ok++; } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + goto csum_none; } } else { if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6))) { - ip_summed = CHECKSUM_COMPLETE; - ring->csum_complete++; + if (check_csum(cqe, skb, va, dev->features)) { + goto csum_none; + } else { + ip_summed = CHECKSUM_COMPLETE; + ring->csum_complete++; + } } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + goto csum_none; } } } else { +csum_none: ip_summed = CHECKSUM_NONE; ring->csum_none++; } - - /* This packet is eligible for GRO if it is: - * - DIX Ethernet (type interpretation) - * - TCP/IP (v4) - * - without IP options - * - not an IP fragment - */ - if (dev->features & NETIF_F_GRO) { - struct sk_buff *gro_skb = napi_get_frags(&cq->napi); - if 
(!gro_skb) - goto next; - - nr = mlx4_en_complete_rx_desc(priv, - rx_desc, frags, gro_skb, - length); - if (!nr) - goto next; - - if (ip_summed == CHECKSUM_COMPLETE) { - void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); - if (check_csum(cqe, gro_skb, va, - dev->features)) { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; - ring->csum_complete--; - } - } - - skb_shinfo(gro_skb)->nr_frags = nr; - gro_skb->len = length; - gro_skb->data_len = length; - gro_skb->ip_summed = ip_summed; - - if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) - gro_skb->csum_level = 1; - - if ((cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) && - (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - u16 vid = be16_to_cpu(cqe->sl_vid); - - __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); - } else if ((be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_SVLAN_PRESENT_MASK) && - (dev->features & NETIF_F_HW_VLAN_STAG_RX)) { - __vlan_hwaccel_put_tag(gro_skb, - htons(ETH_P_8021AD), - be16_to_cpu(cqe->sl_vid)); - } - - if (dev->features & NETIF_F_RXHASH) - skb_set_hash(gro_skb, - be32_to_cpu(cqe->immed_rss_invalid), - (ip_summed == CHECKSUM_UNNECESSARY) ? - PKT_HASH_TYPE_L4 : - PKT_HASH_TYPE_L3); - - skb_record_rx_queue(gro_skb, cq->ring); - - if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { - timestamp = mlx4_en_get_cqe_ts(cqe); - mlx4_en_fill_hwtstamps(mdev, - skb_hwtstamps(gro_skb), - timestamp); - } - - napi_gro_frags(&cq->napi); - goto next; - } - - /* GRO not possible, complete processing here */ - skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); - if (unlikely(!skb)) { - ring->dropped++; - goto next; - } - - if (unlikely(priv->validate_loopback)) { - validate_loopback(priv, skb); - goto next; - } - - if (ip_summed == CHECKSUM_COMPLETE) { - if (check_csum(cqe, skb, skb->data, dev->features)) { - ip_summed = CHECKSUM_NONE; - ring->csum_complete--; - ring->csum_none++; - } - } - skb->ip_summed = ip_summed; - skb->protocol = eth_type_trans(skb, dev); - skb_record_rx_queue(skb, cq->ring); - - if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) - skb->csum_level = 1; - if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, be32_to_cpu(cqe->immed_rss_invalid), @@ -1078,36 +841,36 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); - if ((be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_CVLAN_PRESENT_MASK) && + + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); - else if ((be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_SVLAN_PRESENT_MASK) && + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(cqe->sl_vid)); + else if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_STAG_RX)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), be16_to_cpu(cqe->sl_vid)); - if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { - timestamp = mlx4_en_get_cqe_ts(cqe); - mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), - timestamp); + nr = mlx4_en_complete_rx_desc(priv, frags, skb, length); + if (likely(nr)) { + skb_shinfo(skb)->nr_frags = nr; + skb->len = length; + skb->data_len = length; + napi_gro_frags(&cq->napi); + } else { + skb->vlan_tci = 0; + skb_clear_hash(skb); } - - napi_gro_receive(&cq->napi, skb); next: - for (nr = 0; nr < priv->num_frags; nr++) - mlx4_en_free_frag(priv, frags, nr); - -consumed: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & 
ring->size_mask; cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; if (++polled == budget) - goto out; + break; } -out: rcu_read_unlock(); if (polled) { @@ -1178,13 +941,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) return done; } -static const int frag_sizes[] = { - FRAG_SZ0, - FRAG_SZ1, - FRAG_SZ2, - FRAG_SZ3 -}; - void mlx4_en_calc_rx_buf(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1195,33 +951,43 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) * This only works when num_frags == 1. */ if (priv->tx_ring_num[TX_XDP]) { - priv->frag_info[0].order = 0; priv->frag_info[0].frag_size = eff_mtu; - priv->frag_info[0].frag_prefix_size = 0; /* This will gain efficient xdp frame recycling at the * expense of more costly truesize accounting */ priv->frag_info[0].frag_stride = PAGE_SIZE; - priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL; - priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM; + priv->dma_dir = PCI_DMA_BIDIRECTIONAL; + priv->rx_headroom = XDP_PACKET_HEADROOM; i = 1; } else { - int buf_size = 0; + int frag_size_max = 2048, buf_size = 0; + + /* should not happen, right ? */ + if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048) + frag_size_max = PAGE_SIZE; while (buf_size < eff_mtu) { - priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER; - priv->frag_info[i].frag_size = - (eff_mtu > buf_size + frag_sizes[i]) ? - frag_sizes[i] : eff_mtu - buf_size; - priv->frag_info[i].frag_prefix_size = buf_size; - priv->frag_info[i].frag_stride = - ALIGN(priv->frag_info[i].frag_size, - SMP_CACHE_BYTES); - priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE; - priv->frag_info[i].rx_headroom = 0; - buf_size += priv->frag_info[i].frag_size; + int frag_stride, frag_size = eff_mtu - buf_size; + int pad, nb; + + if (i < MLX4_EN_MAX_RX_FRAGS - 1) + frag_size = min(frag_size, frag_size_max); + + priv->frag_info[i].frag_size = frag_size; + frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES); + /* We can only pack 2 1536-bytes frames in on 4K page + * Therefore, each frame would consume more bytes (truesize) + */ + nb = PAGE_SIZE / frag_stride; + pad = (PAGE_SIZE - nb * frag_stride) / nb; + pad &= ~(SMP_CACHE_BYTES - 1); + priv->frag_info[i].frag_stride = frag_stride + pad; + + buf_size += frag_size; i++; } + priv->dma_dir = PCI_DMA_FROMDEVICE; + priv->rx_headroom = 0; } priv->num_frags = i; @@ -1232,10 +998,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { en_err(priv, - " frag:%d - size:%d prefix:%d stride:%d\n", + " frag:%d - size:%d stride:%d\n", i, priv->frag_info[i].frag_size, - priv->frag_info[i].frag_prefix_size, priv->frag_info[i].frag_stride); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 95290e1fc9fe7600b2e3bcca334f3fad7d733c09..17112faafbccc5f7a75ee82a287be7952859ae9e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -81,14 +81,11 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) { u32 loopback_ok = 0; int i; - bool gro_enabled; priv->loopback_ok = 0; priv->validate_loopback = 1; - gro_enabled = priv->dev->features & NETIF_F_GRO; mlx4_en_update_loopback_state(priv->dev, priv->dev->features); - priv->dev->features &= ~NETIF_F_GRO; /* xmit */ if (mlx4_en_test_loopback_xmit(priv)) { @@ -111,9 +108,6 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) priv->validate_loopback = 0; - if 
(gro_enabled) - priv->dev->features |= NETIF_F_GRO; - mlx4_en_update_loopback_state(priv->dev, priv->dev->features); return !loopback_ok; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 3ed42199d3f1275f77560e92a430c0dde181e95a..3ba89bc43d74d8c023776079bcd0bbadd70fb5c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -354,13 +354,11 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc frame = { .page = tx_info->page, .dma = tx_info->map0_dma, - .page_offset = XDP_PACKET_HEADROOM, - .page_size = PAGE_SIZE, }; if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { dma_unmap_page(priv->ddev, tx_info->map0_dma, - PAGE_SIZE, priv->frag_info[0].dma_dir); + PAGE_SIZE, priv->dma_dir); put_page(tx_info->page); } @@ -980,8 +978,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->tso_packets++; - i = ((skb->len - lso_header_size) / shinfo->gso_size) + - !!((skb->len - lso_header_size) % shinfo->gso_size); + i = shinfo->gso_segs; tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; ring->packets += i; } else { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 3629ce11a68b9dec5c1659539bdc6f2c11114e35..39f401aa30474e61c0b0029463b23a829ec35fa3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -102,17 +102,6 @@ /* Use the maximum between 16384 and a single page */ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) -#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \ - PAGE_ALLOC_COSTLY_ORDER) - -/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU - * and 4K allocations) */ -enum { - FRAG_SZ0 = 1536 - NET_IP_ALIGN, - FRAG_SZ1 = 4096, - FRAG_SZ2 = 4096, - FRAG_SZ3 = MLX4_EN_ALLOC_SIZE -}; #define MLX4_EN_MAX_RX_FRAGS 4 /* Maximum ring sizes */ @@ -264,13 +253,16 @@ struct mlx4_en_rx_alloc { struct page *page; dma_addr_t dma; u32 page_offset; - u32 page_size; }; #define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT) + struct mlx4_en_page_cache { u32 index; - struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE]; + struct { + struct page *page; + dma_addr_t dma; + } buf[MLX4_EN_CACHE_SIZE]; }; struct mlx4_en_priv; @@ -335,7 +327,6 @@ struct mlx4_en_rx_desc { struct mlx4_en_rx_ring { struct mlx4_hwq_resources wqres; - struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; u32 size ; /* number of Rx descs*/ u32 actual_size; u32 size_mask; @@ -355,6 +346,7 @@ struct mlx4_en_rx_ring { unsigned long csum_ok; unsigned long csum_none; unsigned long csum_complete; + unsigned long rx_alloc_pages; unsigned long xdp_drop; unsigned long xdp_tx; unsigned long xdp_tx_full; @@ -472,11 +464,7 @@ struct mlx4_en_mc_list { struct mlx4_en_frag_info { u16 frag_size; - u16 frag_prefix_size; u32 frag_stride; - enum dma_data_direction dma_dir; - u16 order; - u16 rx_headroom; }; #ifdef CONFIG_MLX4_EN_DCB @@ -584,8 +572,10 @@ struct mlx4_en_priv { u32 rx_ring_num; u32 rx_skb_size; struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; - u16 num_frags; - u16 log_rx_info; + u8 num_frags; + u8 log_rx_info; + u8 dma_dir; + u16 rx_headroom; struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES]; struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 
48641cb0367f251a07537b82d0a16bf50d8479ef..926f3c3f3665c5d28fe5d35c41afaa0e5917c007 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -37,7 +37,7 @@ struct mlx4_en_port_stats { unsigned long queue_stopped; unsigned long wake_queue; unsigned long tx_timeout; - unsigned long rx_alloc_failed; + unsigned long rx_alloc_pages; unsigned long rx_chksum_good; unsigned long rx_chksum_none; unsigned long rx_chksum_complete; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index d8d5d161b8c7bfb82abbf19de17ea08047473cc0..4aa29ee930134cc41f54839f3e6c05a828f5e0f3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2749,7 +2749,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, int err; int index = vhcr->in_modifier; struct res_mtt *mtt; - struct res_mpt *mpt; + struct res_mpt *mpt = NULL; int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; int phys; int id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 117170014e8897f0f91cfc25464e3a03aba044ec..a84b652f9b5401eb7655eb8f9b568d5fb72d06c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -31,3 +31,10 @@ config MLX5_CORE_EN_DCB This flag is depended on the kernel's DCB support. If unsure, set to Y + +config MLX5_CORE_IPOIB + bool "Mellanox Technologies ConnectX-4 IPoIB offloads support" + depends on MLX5_CORE_EN + default y + ---help--- + MLX5 IPoIB offloads & acceleration support. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9f43beb86250cd0a800c4ac4cb53492e6a0a4493..9e644615f07a8b4e76a680cd830d36ffeed5043a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -11,3 +11,5 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o + +mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a380353a78c2d349291e33aa3a1ef7af837111c2..5bdaf3d545b2fc656a318d5b562f940e14ecd9d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -279,6 +279,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_XRC_SRQ: case MLX5_CMD_OP_DESTROY_DCT: case MLX5_CMD_OP_DEALLOC_Q_COUNTER: + case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: case MLX5_CMD_OP_DEALLOC_PD: case MLX5_CMD_OP_DEALLOC_UAR: case MLX5_CMD_OP_DETACH_FROM_MCG: @@ -305,8 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: - case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: - case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: + case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -363,6 +364,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_Q_COUNTER: case MLX5_CMD_OP_SET_RATE_LIMIT: case 
MLX5_CMD_OP_QUERY_RATE_LIMIT: + case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: case MLX5_CMD_OP_ALLOC_PD: case MLX5_CMD_OP_ALLOC_UAR: case MLX5_CMD_OP_CONFIG_INT_MODERATION: @@ -414,10 +419,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: - case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: - case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: - case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: - case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: + case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -501,6 +503,12 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); + MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); + MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); MLX5_COMMAND_STR_CASE(ALLOC_PD); MLX5_COMMAND_STR_CASE(DEALLOC_PD); MLX5_COMMAND_STR_CASE(ALLOC_UAR); @@ -576,12 +584,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); - MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); - MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); - MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); - MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); - MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); - MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); + MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); + MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3d9490cd2db19720d2b06f90d4cd322a3c87f4b0..0099a3e397bcf8758388b44e0e44888b35468920 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -111,18 +112,13 @@ #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ -#define MLX5E_SQ_BF_BUDGET 16 #define MLX5E_ICOSQ_MAX_WQEBBS \ (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB)) #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) -#define MLX5E_XDP_IHS_DS_COUNT \ - DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT \ ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) -#define MLX5E_XDP_TX_WQEBBS \ - DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS) #define MLX5E_NUM_MAIN_GROUPS 9 @@ -158,6 +154,14 @@ static inline int mlx5_max_log_rq_size(int wq_type) } } +static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) +{ + return is_kdump_kernel() ? 
+ MLX5E_MIN_NUM_CHANNELS : + min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); +} + struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -187,15 +191,15 @@ enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1), }; -#define MLX5E_SET_PFLAG(priv, pflag, enable) \ +#define MLX5E_SET_PFLAG(params, pflag, enable) \ do { \ if (enable) \ - (priv)->params.pflags |= (pflag); \ + (params)->pflags |= (pflag); \ else \ - (priv)->params.pflags &= ~(pflag); \ + (params)->pflags &= ~(pflag); \ } while (0) -#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag))) +#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag))) #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ @@ -218,7 +222,6 @@ struct mlx5e_params { bool rx_cqe_compress_def; struct mlx5e_cq_moder rx_cq_moderation; struct mlx5e_cq_moder tx_cq_moderation; - u16 min_rx_wqes; bool lro_en; u32 lro_wqe_sz; u16 tx_max_inline; @@ -227,9 +230,11 @@ struct mlx5e_params { u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; bool vlan_strip_disable; + bool scatter_fcs_en; bool rx_am_enabled; u32 lro_timeout; u32 pflags; + struct bpf_prog *xdp_prog; }; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -285,7 +290,6 @@ struct mlx5e_cq { struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; - struct mlx5e_priv *priv; /* cqe decompression */ struct mlx5_cqe64 title; @@ -295,22 +299,163 @@ struct mlx5e_cq { u16 decmprs_wqe_counter; /* control */ + struct mlx5_core_dev *mdev; struct mlx5_frag_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; -struct mlx5e_rq; -typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe); -typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, - u16 ix); +struct mlx5e_tx_wqe_info { + struct sk_buff *skb; + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +enum mlx5e_dma_map_type { + MLX5E_DMA_MAP_SINGLE, + MLX5E_DMA_MAP_PAGE +}; + +struct mlx5e_sq_dma { + dma_addr_t addr; + u32 size; + enum mlx5e_dma_map_type type; +}; + +enum { + MLX5E_SQ_STATE_ENABLED, +}; + +struct mlx5e_sq_wqe_info { + u8 opcode; + u8 num_wqebbs; +}; + +struct mlx5e_txqsq { + /* data path */ + + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + struct mlx5e_sq_stats stats; + + struct mlx5e_cq cq; + + /* write@xmit, read@completion */ + struct { + struct mlx5e_sq_dma *dma_fifo; + struct mlx5e_tx_wqe_info *wqe_info; + } db; + + /* read only */ + struct mlx5_wq_cyc wq; + u32 dma_fifo_mask; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u16 max_inline; + u8 min_inline_mode; + u16 edge; + struct device *pdev; + struct mlx5e_tstamp *tstamp; + __be32 mkey_be; + unsigned long state; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5e_channel *channel; + int txq_ix; + u32 rate_limit; +} ____cacheline_aligned_in_smp; + +struct mlx5e_xdpsq { + /* data path */ + + /* dirtied @rx completion */ + u16 cc; + u16 pc; + + struct mlx5e_cq cq; + + /* write@xmit, read@completion */ + struct { + struct mlx5e_dma_info *di; + bool doorbell; + } db; + + /* read only */ + struct mlx5_wq_cyc wq; + void __iomem *uar_map; + u32 sqn; + struct device *pdev; + __be32 mkey_be; + u8 min_inline_mode; + unsigned long state; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5e_channel *channel; +} ____cacheline_aligned_in_smp; + 
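All of the new per-type SQ structs above keep the same free-running producer (pc) and consumer (cc) counters, and the ring-room helper introduced just below compares their masked difference against the requested slot count. A self-contained sketch of that check, assuming power-of-two ring sizes and u16 wraparound (the constants are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of the wqc_has_room_for() logic: sz_m1 is ring size minus 1. */
static bool has_room_for(uint16_t sz_m1, uint16_t cc, uint16_t pc, uint16_t n)
{
    /* cc - pc wraps modulo 65536, so the masked difference counts the
     * free slots correctly even after the counters wrap; cc == pc
     * means the ring is completely empty.
     */
    return ((sz_m1 & (uint16_t)(cc - pc)) >= n) || (cc == pc);
}

int main(void)
{
    assert(has_room_for(1023, 10, 10, 1024));     /* empty 1024-entry ring */
    assert(!has_room_for(1023, 0, 1023, 2));      /* only one slot left */
    assert(has_room_for(1023, 65530, 10, 1008));  /* pc wrapped past 65535 */
    assert(!has_room_for(1023, 65530, 10, 1009));
    return 0;
}

Keeping the counters free-running rather than pre-masked is what lets a single subtraction distinguish "empty" from "full" without a separate occupancy field.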
+struct mlx5e_icosq { + /* data path */ + + /* dirtied @completion */ + u16 cc; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + u16 prev_cc; -typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix); + struct mlx5e_cq cq; + + /* write@xmit, read@completion */ + struct { + struct mlx5e_sq_wqe_info *ico_wqe; + } db; + + /* read only */ + struct mlx5_wq_cyc wq; + void __iomem *uar_map; + u32 sqn; + u16 edge; + struct device *pdev; + __be32 mkey_be; + unsigned long state; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5e_channel *channel; +} ____cacheline_aligned_in_smp; + +static inline bool +mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) +{ + return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc)); +} struct mlx5e_dma_info { struct page *page; dma_addr_t addr; }; +struct mlx5e_umr_dma_info { + __be64 *mtt; + dma_addr_t mtt_addr; + struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE]; + struct mlx5e_umr_wqe wqe; +}; + +struct mlx5e_mpw_info { + struct mlx5e_umr_dma_info umr; + u16 consumed_strides; + u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; +}; + struct mlx5e_rx_am_stats { int ppms; /* packets per msec */ int epms; /* events per msec */ @@ -347,6 +492,11 @@ struct mlx5e_page_cache { struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE]; }; +struct mlx5e_rq; +typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); +typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16); +typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -381,7 +531,10 @@ struct mlx5e_rq { u16 rx_headroom; struct mlx5e_rx_am am; /* Adaptive Moderation */ + + /* XDP */ struct bpf_prog *xdp_prog; + struct mlx5e_xdpsq xdpsq; /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -390,118 +543,10 @@ struct mlx5e_rq { u32 mpwqe_num_strides; u32 rqn; struct mlx5e_channel *channel; - struct mlx5e_priv *priv; + struct mlx5_core_dev *mdev; struct mlx5_core_mkey umr_mkey; } ____cacheline_aligned_in_smp; -struct mlx5e_umr_dma_info { - __be64 *mtt; - dma_addr_t mtt_addr; - struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE]; - struct mlx5e_umr_wqe wqe; -}; - -struct mlx5e_mpw_info { - struct mlx5e_umr_dma_info umr; - u16 consumed_strides; - u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; -}; - -struct mlx5e_tx_wqe_info { - u32 num_bytes; - u8 num_wqebbs; - u8 num_dma; -}; - -enum mlx5e_dma_map_type { - MLX5E_DMA_MAP_SINGLE, - MLX5E_DMA_MAP_PAGE -}; - -struct mlx5e_sq_dma { - dma_addr_t addr; - u32 size; - enum mlx5e_dma_map_type type; -}; - -enum { - MLX5E_SQ_STATE_ENABLED, - MLX5E_SQ_STATE_BF_ENABLE, -}; - -struct mlx5e_sq_wqe_info { - u8 opcode; - u8 num_wqebbs; -}; - -enum mlx5e_sq_type { - MLX5E_SQ_TXQ, - MLX5E_SQ_ICO, - MLX5E_SQ_XDP -}; - -struct mlx5e_sq { - /* data path */ - - /* dirtied @completion */ - u16 cc; - u32 dma_fifo_cc; - - /* dirtied @xmit */ - u16 pc ____cacheline_aligned_in_smp; - u32 dma_fifo_pc; - u16 bf_offset; - u16 prev_cc; - u8 bf_budget; - struct mlx5e_sq_stats stats; - - struct mlx5e_cq cq; - - /* pointers to per tx element info: write@xmit, read@completion */ - union { - struct { - struct sk_buff **skb; - struct mlx5e_sq_dma *dma_fifo; - struct mlx5e_tx_wqe_info *wqe_info; - } txq; - struct mlx5e_sq_wqe_info *ico_wqe; - struct { - struct mlx5e_sq_wqe_info *wqe_info; - struct mlx5e_dma_info *di; - bool doorbell; - } xdp; - } db; - - /* read only */ - struct mlx5_wq_cyc wq; - u32 dma_fifo_mask; - void __iomem *uar_map; - 
struct netdev_queue *txq; - u32 sqn; - u16 bf_buf_size; - u16 max_inline; - u8 min_inline_mode; - u16 edge; - struct device *pdev; - struct mlx5e_tstamp *tstamp; - __be32 mkey_be; - unsigned long state; - - /* control path */ - struct mlx5_wq_ctrl wq_ctrl; - struct mlx5_sq_bfreg bfreg; - struct mlx5e_channel *channel; - int tc; - u32 rate_limit; - u8 type; -} ____cacheline_aligned_in_smp; - -static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) -{ - return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) || - (sq->cc == sq->pc)); -} - enum channel_flags { MLX5E_CHANNEL_NAPI_SCHED = 1, }; @@ -509,9 +554,8 @@ enum channel_flags { struct mlx5e_channel { /* data path */ struct mlx5e_rq rq; - struct mlx5e_sq xdp_sq; - struct mlx5e_sq sq[MLX5E_MAX_NUM_TC]; - struct mlx5e_sq icosq; /* internal control operations */ + struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_icosq icosq; /* internal control operations */ bool xdp; struct napi_struct napi; struct device *pdev; @@ -522,10 +566,18 @@ struct mlx5e_channel { /* control */ struct mlx5e_priv *priv; + struct mlx5_core_dev *mdev; + struct mlx5e_tstamp *tstamp; int ix; int cpu; }; +struct mlx5e_channels { + struct mlx5e_channel **c; + unsigned int num; + struct mlx5e_params params; +}; + enum mlx5e_traffic_types { MLX5E_TT_IPV4_TCP, MLX5E_TT_IPV6_TCP, @@ -675,34 +727,17 @@ enum { MLX5E_NIC_PRIO }; -struct mlx5e_profile { - void (*init)(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, void *ppriv); - void (*cleanup)(struct mlx5e_priv *priv); - int (*init_rx)(struct mlx5e_priv *priv); - void (*cleanup_rx)(struct mlx5e_priv *priv); - int (*init_tx)(struct mlx5e_priv *priv); - void (*cleanup_tx)(struct mlx5e_priv *priv); - void (*enable)(struct mlx5e_priv *priv); - void (*disable)(struct mlx5e_priv *priv); - void (*update_stats)(struct mlx5e_priv *priv); - int (*max_nch)(struct mlx5_core_dev *mdev); - int max_tc; -}; - struct mlx5e_priv { /* priv data path fields - start */ - struct mlx5e_sq **txq_to_sq_map; - int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; - struct bpf_prog *xdp_prog; + struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; + int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; /* priv data path fields - end */ unsigned long state; struct mutex state_lock; /* Protects Interface state */ struct mlx5e_rq drop_rq; - struct mlx5e_channel **channel; + struct mlx5e_channels channels; u32 tisn[MLX5E_MAX_NUM_TC]; struct mlx5e_rqt indir_rqt; struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; @@ -712,7 +747,6 @@ struct mlx5e_priv { struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; - struct mlx5e_params params; struct workqueue_struct *wq; struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; @@ -732,9 +766,28 @@ struct mlx5e_priv { void *ppriv; }; +struct mlx5e_profile { + void (*init)(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, void *ppriv); + void (*cleanup)(struct mlx5e_priv *priv); + int (*init_rx)(struct mlx5e_priv *priv); + void (*cleanup_rx)(struct mlx5e_priv *priv); + int (*init_tx)(struct mlx5e_priv *priv); + void (*cleanup_tx)(struct mlx5e_priv *priv); + void (*enable)(struct mlx5e_priv *priv); + void (*disable)(struct mlx5e_priv *priv); + void (*update_stats)(struct mlx5e_priv *priv); + int (*max_nch)(struct mlx5_core_dev *mdev); + struct { + mlx5e_fp_handle_rx_cqe handle_rx_cqe; + mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; + } rx_handlers; + 
int max_tc; +}; + void mlx5e_build_ptys2ethtool_map(void); -void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); @@ -744,7 +797,9 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); -void mlx5e_free_sq_descs(struct mlx5e_sq *sq); +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); +void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); @@ -792,7 +847,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv, struct ptp_clock_event *event); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); -void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); @@ -801,14 +856,40 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); -int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); +struct mlx5e_redirect_rqt_param { + bool is_rss; + union { + u32 rqn; /* Direct RQN (Non-RSS) */ + struct { + u8 hfunc; + struct mlx5e_channels *channels; + } rss; /* RSS data */ + }; +}; -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc, - enum mlx5e_traffic_types tt); +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, + struct mlx5e_redirect_rqt_param rrp); +void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, + enum mlx5e_traffic_types tt, + void *tirc); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); + +int mlx5e_open_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *chs); +void mlx5e_close_channels(struct mlx5e_channels *chs); + +/* Function pointer to be used to modify HW settings while + * switching channels + */ +typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); +void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify); +void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); +void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); + void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, u32 *indirection_rqt, int len, int num_channels); @@ -816,30 +897,43 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type); +void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u8 rq_type); -static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, - struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) +static inline +struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) { - u16 ofst =
sq->bf_offset; + u16 pi = *pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + + memset(cseg, 0, sizeof(*cseg)); + + cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP); + cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01); + (*pc)++; + + return wqe; +} + +static inline +void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, + void __iomem *uar_map, + struct mlx5_wqe_ctrl_seg *ctrl) +{ + ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); - *sq->wq.db = cpu_to_be32(sq->pc); + *wq->db = cpu_to_be32(pc); /* ensure doorbell record is visible to device before ringing the * doorbell */ wmb(); - if (bf_sz) - __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); - else - mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); - /* flush the write-combining mapped buffer */ - wmb(); - sq->bf_offset ^= sq->bf_buf_size; + mlx5_write64((__be32 *)ctrl, uar_map, NULL); } static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) @@ -895,44 +989,43 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); -int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, - bool enable_uc_lb); - -struct mlx5_eswitch_rep; -int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); -void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); -int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep); -void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); -int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); -void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); -int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); -void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); -void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv); +int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb); + +/* common netdev helpers */ +int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); + +int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv); +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); -void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); +void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv); int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); +void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); + +int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn); +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); + +int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, + u32 underlay_qpn, u32 *tisn); +void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); + int mlx5e_create_tises(struct mlx5e_priv *priv); void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); void mlx5e_update_stats_work(struct work_struct *work); -struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, - const struct mlx5e_profile *profile, - void *ppriv); -void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); -int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); -void 
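mlx5e_post_nop() above packs the producer counter and opcode into one big-endian word, and mlx5e_notify_hw() then orders the doorbell-record update (dma_wmb) against the doorbell ring itself (wmb). The packing can be checked in isolation; the sketch below uses htonl() in place of cpu_to_be32(), and the 0x00 opcode byte and DS count of 1 are stand-ins matching the hunk (treat the values as illustrative):

#include <arpa/inet.h>  /* htonl() stands in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t pc = 0xabcd;   /* free-running producer counter */
    uint32_t sqn = 0x1234;  /* send queue number */
    uint8_t opcode = 0x00;  /* stand-in for MLX5_OPCODE_NOP */

    /* Bits 8..23 carry the WQE index, bits 0..7 the opcode. */
    uint32_t opmod_idx_opcode = htonl(((uint32_t)pc << 8) | opcode);
    /* SQ number over a data-segment count of 1. */
    uint32_t qpn_ds = htonl((sqn << 8) | 0x01);

    printf("opmod_idx_opcode=%08x qpn_ds=%08x\n",
           (unsigned)ntohl(opmod_idx_opcode), (unsigned)ntohl(qpn_ds));
    return 0;
}

Note that the blue-flame path (bf_offset, __iowrite64_copy) is gone entirely: every SQ type now rings the doorbell through the same plain mlx5_write64().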
mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); -int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, - void *sp); -bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id); +/* mlx5e generic netdev management API */ +struct net_device* +mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, + void *ppriv); +int mlx5e_attach_netdev(struct mlx5e_priv *priv); +void mlx5e_detach_netdev(struct mlx5e_priv *priv); +void mlx5e_destroy_netdev(struct mlx5e_priv *priv); +void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + u16 max_channels); -bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); -bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 68419a01db36e33765b1cc366455da8b55420da7..c8a005326e308561e1b4a7cb59794f5231219c91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -174,13 +174,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, enum arfs_type type) { struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; - struct mlx5_flow_act flow_act = { - .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, - .encap_id = 0, - }; - struct mlx5_flow_destination dest; struct mlx5e_tir *tir = priv->indir_tir; + struct mlx5_flow_destination dest; + MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; int err = 0; @@ -325,10 +321,16 @@ static int arfs_create_table(struct mlx5e_priv *priv, { struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft; + struct mlx5_flow_table_attr ft_attr = {}; int err; - ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0); + ft->num_groups = 0; + + ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE; + ft_attr.level = MLX5E_ARFS_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -469,15 +471,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, struct arfs_rule *arfs_rule) { - struct mlx5_flow_act flow_act = { - .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, - .encap_id = 0, - }; struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_tuple *tuple = &arfs_rule->tuple; struct mlx5_flow_handle *rule = NULL; struct mlx5_flow_destination dest; + MLX5_DECLARE_FLOW_ACT(flow_act); struct arfs_table *arfs_table; struct mlx5_flow_spec *spec; struct mlx5_flow_table *ft; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 37e66eef6fb5ea62576e0a8b012b04e6ba579d56..e706a87fc8b2b06b8f620a96f66d316e90070e04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -90,6 +90,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) { struct mlx5e_priv *priv = netdev_priv(dev); struct hwtstamp_config config; + int err; if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) return -EOPNOTSUPP; @@ -111,7 +112,7 @@ int 
mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def); + mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -129,7 +130,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Disable CQE compression */ netdev_warn(dev, "Disabling cqe compression"); - mlx5e_modify_rx_cqe_compression_locked(priv, false); + err = mlx5e_modify_rx_cqe_compression_locked(priv, false); + if (err) { + netdev_err(dev, "Failed disabling cqe compression err=%d\n", err); + mutex_unlock(&priv->state_lock); + return err; + } config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index bd898d8deda0ce0c4d6dca7f1ac26722eacf96c4..f1f17f7a3cd049de412abdff58b00f314160be22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -107,10 +107,18 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) goto err_dealloc_transport_domain; } + err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false); + if (err) { + mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err); + goto err_destroy_mkey; + } + INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); return 0; +err_destroy_mkey: + mlx5_core_destroy_mkey(mdev, &res->mkey); err_dealloc_transport_domain: mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); err_dealloc_pd: @@ -122,23 +130,26 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) { struct mlx5e_resources *res = &mdev->mlx5e_res; + mlx5_free_bfreg(mdev, &res->bfreg); mlx5_core_destroy_mkey(mdev, &res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_pd(mdev, res->pdn); } -int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, - bool enable_uc_lb) +int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) { + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_tir *tir; - void *in; + int err = -ENOMEM; + u32 tirn = 0; int inlen; - int err = 0; + void *in; + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); if (!in) - return -ENOMEM; + goto out; if (enable_uc_lb) MLX5_SET(modify_tir_in, in, ctx.self_lb_block, @@ -147,13 +158,16 @@ int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { - err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen); + tirn = tir->tirn; + err = mlx5_core_modify_tir(mdev, tirn, in, inlen); if (err) goto out; } out: kvfree(in); + if (err) + netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a004a5a1a4c22a742ef3f9939769c6b5c9445f46..ce7b09d72ff68b1850f6df3845f2fd9ae7f57bbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -42,8 +42,9 @@ static void mlx5e_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")", sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 
- "%d.%d.%d", - fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); + "%d.%d.%04d (%.16s)", + fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev), + mdev->board_id); strlcpy(drvinfo->bus_info, pci_name(mdev->pdev), sizeof(drvinfo->bus_info)); } @@ -152,12 +153,9 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) } #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) -#define MLX5E_NUM_RQ_STATS(priv) \ - (NUM_RQ_STATS * priv->params.num_channels * \ - test_bit(MLX5E_STATE_OPENED, &priv->state)) +#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num) #define MLX5E_NUM_SQ_STATS(priv) \ - (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ - test_bit(MLX5E_STATE_OPENED, &priv->state)) + (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc) #define MLX5E_NUM_PFC_COUNTERS(priv) \ ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \ NUM_PPORT_PER_PRIO_PFC_COUNTERS) @@ -262,17 +260,17 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) return; /* per channel counters */ - for (i = 0; i < priv->params.num_channels; i++) + for (i = 0; i < priv->channels.num; i++) for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); - for (tc = 0; tc < priv->params.num_tc; tc++) - for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->channels.params.num_tc; tc++) + for (i = 0; i < priv->channels.num; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, - priv->channeltc_to_txq_map[i][tc]); + priv->channel_tc2txq[i][tc]); } static void mlx5e_get_strings(struct net_device *dev, @@ -303,6 +301,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_channels *channels; struct mlx5_priv *mlx5_priv; int i, j, tc, prio, idx = 0; unsigned long pfc_combined; @@ -313,6 +312,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_update_stats(priv); + channels = &priv->channels; mutex_unlock(&priv->state_lock); for (i = 0; i < NUM_SW_COUNTERS; i++) @@ -382,16 +382,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, return; /* per channel counters */ - for (i = 0; i < priv->params.num_channels; i++) + for (i = 0; i < channels->num; i++) for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats, + MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, rq_stats_desc, j); - for (tc = 0; tc < priv->params.num_tc; tc++) - for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->channels.params.num_tc; tc++) + for (i = 0; i < channels->num; i++) for (j = 0; j < NUM_SQ_STATS; j++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats, + data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats, sq_stats_desc, j); } @@ -406,8 +406,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) return num_wqe; - stride_size = 1 << priv->params.mpwqe_log_stride_sz; - num_strides = 1 << priv->params.mpwqe_log_num_strides; + stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; + num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; wqe_size = stride_size * num_strides; 
packets_per_wqe = wqe_size / @@ -427,8 +427,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) return num_packets; - stride_size = 1 << priv->params.mpwqe_log_stride_sz; - num_strides = 1 << priv->params.mpwqe_log_num_strides; + stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz; + num_strides = 1 << priv->channels.params.mpwqe_log_num_strides; wqe_size = stride_size * num_strides; num_packets = (1 << order_base_2(num_packets)); @@ -443,26 +443,25 @@ static void mlx5e_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); - int rq_wq_type = priv->params.rq_wq_type; + int rq_wq_type = priv->channels.params.rq_wq_type; param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, 1 << mlx5_max_log_rq_size(rq_wq_type)); param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << priv->params.log_rq_size); - param->tx_pending = 1 << priv->params.log_sq_size; + 1 << priv->channels.params.log_rq_size); + param->tx_pending = 1 << priv->channels.params.log_sq_size; } static int mlx5e_set_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); - bool was_opened; - int rq_wq_type = priv->params.rq_wq_type; + int rq_wq_type = priv->channels.params.rq_wq_type; + struct mlx5e_channels new_channels = {}; u32 rx_pending_wqes; u32 min_rq_size; u32 max_rq_size; - u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; u32 num_mtts; @@ -500,7 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, } num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes); - if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && !MLX5E_VALID_NUM_MTTS(num_mtts)) { netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", __func__, param->rx_pending); @@ -522,26 +521,29 @@ static int mlx5e_set_ringparam(struct net_device *dev, log_rq_size = order_base_2(rx_pending_wqes); log_sq_size = order_base_2(param->tx_pending); - min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes); - if (log_rq_size == priv->params.log_rq_size && - log_sq_size == priv->params.log_sq_size && - min_rx_wqes == priv->params.min_rx_wqes) + if (log_rq_size == priv->channels.params.log_rq_size && + log_sq_size == priv->channels.params.log_sq_size) return 0; mutex_lock(&priv->state_lock); - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(dev); + new_channels.params = priv->channels.params; + new_channels.params.log_rq_size = log_rq_size; + new_channels.params.log_sq_size = log_sq_size; - priv->params.log_rq_size = log_rq_size; - priv->params.log_sq_size = log_sq_size; - priv->params.min_rx_wqes = min_rx_wqes; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto unlock; + } + + err = mlx5e_open_channels(priv, &new_channels); + if (err) + goto unlock; - if (was_opened) - err = mlx5e_open_locked(dev); + mlx5e_switch_priv_channels(priv, &new_channels, NULL); +unlock: mutex_unlock(&priv->state_lock); return err; @@ -553,7 +555,7 @@ static void mlx5e_get_channels(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); ch->max_combined = priv->profile->max_nch(priv->mdev); - ch->combined_count = priv->params.num_channels; + ch->combined_count = 
priv->channels.params.num_channels; } static int mlx5e_set_channels(struct net_device *dev, @@ -561,8 +563,8 @@ static int mlx5e_set_channels(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); unsigned int count = ch->combined_count; + struct mlx5e_channels new_channels = {}; bool arfs_enabled; - bool was_opened; int err = 0; if (!count) { @@ -571,27 +573,32 @@ static int mlx5e_set_channels(struct net_device *dev, return -EINVAL; } - if (priv->params.num_channels == count) + if (priv->channels.params.num_channels == count) return 0; mutex_lock(&priv->state_lock); - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(dev); + new_channels.params = priv->channels.params; + new_channels.params.num_channels = count; + mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto out; + } + + /* Create fresh channels with new parameters */ + err = mlx5e_open_channels(priv, &new_channels); + if (err) + goto out; arfs_enabled = dev->features & NETIF_F_NTUPLE; if (arfs_enabled) mlx5e_arfs_disable(priv); - priv->params.num_channels = count; - mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); - - if (was_opened) - err = mlx5e_open_locked(dev); - if (err) - goto out; + /* Switch to new channels, set new parameters and close old ones */ + mlx5e_switch_priv_channels(priv, &new_channels, NULL); if (arfs_enabled) { err = mlx5e_arfs_enable(priv); @@ -614,49 +621,24 @@ static int mlx5e_get_coalesce(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -EOPNOTSUPP; - coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; - coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; - coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec; - coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts; - coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled; + coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec; + coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts; + coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec; + coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts; + coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled; return 0; } -static int mlx5e_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) +static void +mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_channel *c; - bool restart = - !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled; - bool was_opened; - int err = 0; int tc; int i; - if (!MLX5_CAP_GEN(mdev, cq_moderation)) - return -EOPNOTSUPP; - - mutex_lock(&priv->state_lock); - - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened && restart) { - mlx5e_close_locked(netdev); - priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; - } - - priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; - priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; - priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; - priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; - - if (!was_opened || restart) - goto out; - - for (i = 
0; i < priv->params.num_channels; ++i) { - c = priv->channel[i]; + for (i = 0; i < priv->channels.num; ++i) { + struct mlx5e_channel *c = priv->channels.c[i]; for (tc = 0; tc < c->num_tc; tc++) { mlx5_core_modify_cq_moderation(mdev, @@ -669,11 +651,50 @@ static int mlx5e_set_coalesce(struct net_device *netdev, coal->rx_coalesce_usecs, coal->rx_max_coalesced_frames); } +} -out: - if (was_opened && restart) - err = mlx5e_open_locked(netdev); +static int mlx5e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err = 0; + bool reset; + + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + return -EOPNOTSUPP; + + mutex_lock(&priv->state_lock); + new_channels.params = priv->channels.params; + + new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; + new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; + new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; + new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; + new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto out; + } + /* the device is open: apply on the fly if only the moderation values changed */ + + reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled; + if (!reset) { + mlx5e_set_priv_channels_coalesce(priv, coal); + priv->channels.params = new_channels.params; + goto out; + } + + /* open fresh channels with new coal parameters */ + err = mlx5e_open_channels(priv, &new_channels); + if (err) + goto out; + + mlx5e_switch_priv_channels(priv, &new_channels, NULL); +out: mutex_unlock(&priv->state_lock); return err; } @@ -968,7 +989,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - return sizeof(priv->params.toeplitz_hash_key); + return sizeof(priv->channels.params.toeplitz_hash_key); } static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) @@ -982,15 +1003,15 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, struct mlx5e_priv *priv = netdev_priv(netdev); if (indir) - memcpy(indir, priv->params.indirection_rqt, - sizeof(priv->params.indirection_rqt)); + memcpy(indir, priv->channels.params.indirection_rqt, + sizeof(priv->channels.params.indirection_rqt)); if (key) - memcpy(key, priv->params.toeplitz_hash_key, - sizeof(priv->params.toeplitz_hash_key)); + memcpy(key, priv->channels.params.toeplitz_hash_key, + sizeof(priv->channels.params.toeplitz_hash_key)); if (hfunc) - *hfunc = priv->params.rss_hfunc; + *hfunc = priv->channels.params.rss_hfunc; return 0; } @@ -1006,7 +1027,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { memset(tirc, 0, ctxlen); - mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt); + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc); mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); } } @@ -1030,25 +1051,37 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mutex_lock(&priv->state_lock); - if (indir) { - u32 rqtn = priv->indir_rqt.rqtn; - - memcpy(priv->params.indirection_rqt, indir, - sizeof(priv->params.indirection_rqt)); - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); - } - if (hfunc != ETH_RSS_HASH_NO_CHANGE && - hfunc != priv->params.rss_hfunc) { -
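The reworked mlx5e_set_coalesce() above is the same make-before-break shape that set_ringparam, set_channels, and the pflag handlers now share: stage a copy of the parameters, open a fresh channel set, and only then switch over and close the old one, so traffic never runs on a half-configured device. A minimal sketch of the pattern with stub types (none of these names are the driver's):

#include <stdbool.h>
#include <stdio.h>

struct demo_params   { int num_channels; };
struct demo_channels { struct demo_params params; bool open; };

static int open_channels(struct demo_channels *chs)
{
    chs->open = true;   /* allocate HW queues, IRQs, etc. here */
    printf("opened %d channels\n", chs->params.num_channels);
    return 0;
}

static void close_channels(struct demo_channels *chs)
{
    chs->open = false;  /* free HW queues here */
}

static void switch_channels(struct demo_channels *cur,
                            struct demo_channels *new_chs)
{
    struct demo_channels old = *cur;

    *cur = *new_chs;        /* activate the already-open new set... */
    close_channels(&old);   /* ...then retire the old one */
}

int main(void)
{
    struct demo_channels cur = { .params = { 4 }, .open = true };
    struct demo_channels new_chs = { .params = { 8 } };

    if (open_channels(&new_chs))    /* old channels keep serving */
        return 1;
    switch_channels(&cur, &new_chs);
    return 0;
}

The early "device not open" branch in each handler is what makes the parameter store safe without any channel work at all.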
priv->params.rss_hfunc = hfunc; + hfunc != priv->channels.params.rss_hfunc) { + priv->channels.params.rss_hfunc = hfunc; hash_changed = true; } + if (indir) { + memcpy(priv->channels.params.indirection_rqt, indir, + sizeof(priv->channels.params.indirection_rqt)); + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + u32 rqtn = priv->indir_rqt.rqtn; + struct mlx5e_redirect_rqt_param rrp = { + .is_rss = true, + { + .rss = { + .hfunc = priv->channels.params.rss_hfunc, + .channels = &priv->channels, + }, + }, + }; + + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); + } + } + if (key) { - memcpy(priv->params.toeplitz_hash_key, key, - sizeof(priv->params.toeplitz_hash_key)); + memcpy(priv->channels.params.toeplitz_hash_key, key, + sizeof(priv->channels.params.toeplitz_hash_key)); hash_changed = hash_changed || - priv->params.rss_hfunc == ETH_RSS_HASH_TOP; + priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP; } if (hash_changed) @@ -1069,7 +1102,7 @@ static int mlx5e_get_rxnfc(struct net_device *netdev, switch (info->cmd) { case ETHTOOL_GRXRINGS: - info->data = priv->params.num_channels; + info->data = priv->channels.params.num_channels; break; case ETHTOOL_GRXCLSRLCNT: info->rule_cnt = priv->fs.ethtool.tot_num_rules; @@ -1097,7 +1130,7 @@ static int mlx5e_get_tunable(struct net_device *dev, switch (tuna->id) { case ETHTOOL_TX_COPYBREAK: - *(u32 *)data = priv->params.tx_max_inline; + *(u32 *)data = priv->channels.params.tx_max_inline; break; default: err = -EINVAL; @@ -1113,9 +1146,11 @@ static int mlx5e_set_tunable(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - bool was_opened; - u32 val; + struct mlx5e_channels new_channels = {}; int err = 0; + u32 val; + + mutex_lock(&priv->state_lock); switch (tuna->id) { case ETHTOOL_TX_COPYBREAK: @@ -1125,24 +1160,26 @@ static int mlx5e_set_tunable(struct net_device *dev, break; } - mutex_lock(&priv->state_lock); + new_channels.params = priv->channels.params; + new_channels.params.tx_max_inline = val; - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(dev); - - priv->params.tx_max_inline = val; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + break; + } - if (was_opened) - err = mlx5e_open_locked(dev); + err = mlx5e_open_channels(priv, &new_channels); + if (err) + break; + mlx5e_switch_priv_channels(priv, &new_channels, NULL); - mutex_unlock(&priv->state_lock); break; default: err = -EINVAL; break; } + mutex_unlock(&priv->state_lock); return err; } @@ -1442,15 +1479,15 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; bool rx_mode_changed; u8 rx_cq_period_mode; int err = 0; - bool reset; rx_cq_period_mode = enable ? 
MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode; + rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode; if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) @@ -1459,16 +1496,51 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) if (!rx_mode_changed) return 0; - reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (reset) - mlx5e_close_locked(netdev); + new_channels.params = priv->channels.params; + mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode); - mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + return 0; + } - if (reset) - err = mlx5e_open_locked(netdev); + err = mlx5e_open_channels(priv, &new_channels); + if (err) + return err; - return err; + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + return 0; +} + +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) +{ + bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); + struct mlx5e_channels new_channels = {}; + int err = 0; + + if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) + return new_val ? -EOPNOTSUPP : 0; + + if (curr_val == new_val) + return 0; + + new_channels.params = priv->channels.params; + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); + + mlx5e_set_rq_type_params(priv->mdev, &new_channels.params, + new_channels.params.rq_wq_type); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + return 0; + } + + err = mlx5e_open_channels(priv, &new_channels); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + return 0; } static int set_pflag_rx_cqe_compress(struct net_device *netdev, @@ -1486,8 +1558,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, } mlx5e_modify_rx_cqe_compression_locked(priv, enable); - priv->params.rx_cqe_compress_def = enable; - mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type); + priv->channels.params.rx_cqe_compress_def = enable; return 0; } @@ -1499,7 +1570,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); bool enable = !!(wanted_flags & flag); - u32 changes = wanted_flags ^ priv->params.pflags; + u32 changes = wanted_flags ^ priv->channels.params.pflags; int err; if (!(changes & flag)) @@ -1512,7 +1583,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev, return err; } - MLX5E_SET_PFLAG(priv, flag, enable); + MLX5E_SET_PFLAG(&priv->channels.params, flag, enable); return 0; } @@ -1541,7 +1612,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - return priv->params.pflags; + return priv->channels.params.pflags; } static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f2762e45c8ae2aadd5366ea467a5ce3b4edb3d7e..576d6787b484b6387c55a172a65fa838b50e198a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -159,14 +159,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid, struct 
mlx5_flow_spec *spec) { - struct mlx5_flow_act flow_act = { - .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, - .encap_id = 0, - }; struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_destination dest; struct mlx5_flow_handle **rule_p; + MLX5_DECLARE_FLOW_ACT(flow_act); int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; @@ -659,11 +655,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, u16 etype, u8 proto) { - struct mlx5_flow_act flow_act = { - .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, - .encap_id = 0, - }; + MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; int err = 0; @@ -800,7 +792,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) return err; } -static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) { struct mlx5e_ttc_table *ttc = &priv->fs.ttc; @@ -808,14 +800,19 @@ static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) mlx5e_destroy_flow_table(&ttc->ft); } -static int mlx5e_create_ttc_table(struct mlx5e_priv *priv) +int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) { struct mlx5e_ttc_table *ttc = &priv->fs.ttc; + struct mlx5_flow_table_attr ft_attr = {}; struct mlx5e_flow_table *ft = &ttc->ft; int err; - ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0); + ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; + ft_attr.level = MLX5E_TTC_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + ft_attr.underlay_qpn = underlay_qpn; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -848,13 +845,9 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type) { - struct mlx5_flow_act flow_act = { - .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, - .encap_id = 0, - }; struct mlx5_flow_table *ft = priv->fs.l2.ft.t; struct mlx5_flow_destination dest; + MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; int err = 0; u8 *mc_dmac; @@ -985,12 +978,16 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv) { struct mlx5e_l2_table *l2_table = &priv->fs.l2; struct mlx5e_flow_table *ft = &l2_table->ft; + struct mlx5_flow_table_attr ft_attr = {}; int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0); + ft_attr.max_fte = MLX5E_L2_TABLE_SIZE; + ft_attr.level = MLX5E_L2_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -1088,11 +1085,16 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) { struct mlx5e_flow_table *ft = &priv->fs.vlan.ft; + struct mlx5_flow_table_attr ft_attr = {}; int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0); + + ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE; + ft_attr.level = MLX5E_VLAN_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1145,7 +1147,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) 
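The mlx5_flow_table_attr conversion running through this file trades positional arguments for a zero-initialized attribute struct, which is how mlx5e_create_ttc_table() can gain an underlay_qpn parameter without disturbing callers that leave it at 0. A sketch of the convention with stand-in names (demo_ft_attr is not the driver's struct):

#include <stdio.h>

struct demo_ft_attr {
    int max_fte;                /* table capacity */
    int level;                  /* position in the steering hierarchy */
    int prio;
    unsigned int underlay_qpn;  /* 0 = none; set by the IPoIB path */
};

static int demo_create_flow_table(const struct demo_ft_attr *attr)
{
    printf("ft: max_fte=%d level=%d prio=%d underlay_qpn=%u\n",
           attr->max_fte, attr->level, attr->prio, attr->underlay_qpn);
    return 0;
}

int main(void)
{
    struct demo_ft_attr ft_attr = {0};  /* unset fields default to 0 */

    ft_attr.max_fte = 1 << 12;
    ft_attr.level = 2;
    ft_attr.prio = 0;

    return demo_create_flow_table(&ft_attr);
}

Because every caller zero-initializes the struct, new fields can keep being added with a "0 means old behavior" semantic and no call-site churn.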
priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - err = mlx5e_create_ttc_table(priv); + err = mlx5e_create_ttc_table(priv, 0); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 26fc77e80f7b38d45e52911233d5678da6334eb1..85bf4a38929529e55f724b929e0a0bfca8ccc7bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -390,7 +390,7 @@ static int validate_flow(struct mlx5e_priv *priv, if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES) return -EINVAL; - if (fs->ring_cookie >= priv->params.num_channels && + if (fs->ring_cookie >= priv->channels.params.num_channels && fs->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 15cc7b469d2ed9c066ee11a94a17a8ea5c9709f5..a61b71b6fff30358e43602b1f84e16c697e3140f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -31,28 +31,24 @@ */ #include -#include #include #include #include #include +#include "eswitch.h" #include "en.h" #include "en_tc.h" -#include "eswitch.h" +#include "en_rep.h" #include "vxlan.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; - bool am_enabled; }; struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; - u16 max_inline; - u8 min_inline_mode; - enum mlx5e_sq_type type; }; struct mlx5e_cq_param { @@ -79,49 +75,47 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, reg_umr_sq); } -void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) +void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u8 rq_type) { - priv->params.rq_wq_type = rq_type; - priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; - switch (priv->params.rq_wq_type) { + params->rq_wq_type = rq_type; + params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - priv->params.log_rq_size = is_kdump_kernel() ? + params->log_rq_size = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; - priv->params.mpwqe_log_stride_sz = - MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? - MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) : - MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev); - priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - - priv->params.mpwqe_log_stride_sz; + params->mpwqe_log_stride_sz = + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ? + MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : + MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev); + params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - + params->mpwqe_log_stride_sz; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - priv->params.log_rq_size = is_kdump_kernel() ? + params->log_rq_size = is_kdump_kernel() ? 
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; /* Extra room needed for build_skb */ - priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM + + params->lro_wqe_sz -= MLX5_RX_HEADROOM + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } - priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, - BIT(priv->params.log_rq_size)); - mlx5_core_info(priv->mdev, - "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", - priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, - BIT(priv->params.log_rq_size), - BIT(priv->params.mpwqe_log_stride_sz), - MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)); + mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", + params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, + BIT(params->log_rq_size), + BIT(params->mpwqe_log_stride_sz), + MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); } -static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv) +static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) && - !priv->xdp_prog ? + u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) && + !params->xdp_prog ? MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : MLX5_WQ_TYPE_LINKED_LIST; - mlx5e_set_rq_type_params(priv, rq_type); + mlx5e_set_rq_type_params(mdev, params, rq_type); } static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -181,8 +175,10 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) int i, j; memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->params.num_channels; i++) { - rq_stats = &priv->channel[i]->rq.stats; + for (i = 0; i < priv->channels.num; i++) { + struct mlx5e_channel *c = priv->channels.c[i]; + + rq_stats = &c->rq.stats; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; @@ -204,8 +200,8 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; - for (j = 0; j < priv->params.num_tc; j++) { - sq_stats = &priv->channel[i]->sq[j].stats; + for (j = 0; j < priv->channels.params.num_tc; j++) { + sq_stats = &c->sq[j].stats; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -402,8 +398,10 @@ static inline int mlx5e_get_wqe_mtt_sz(void) MLX5_UMR_MTT_ALIGNMENT); } -static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq, - struct mlx5e_umr_wqe *wqe, u16 ix) +static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, + struct mlx5e_icosq *sq, + struct mlx5e_umr_wqe *wqe, + u16 ix) { struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; @@ -493,11 +491,10 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq) kfree(rq->mpwqe.info); } -static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv, +static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, u64 npages, u8 page_shift, struct mlx5_core_mkey *umr_mkey) { - struct mlx5_core_dev *mdev = priv->mdev; int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; u32 *in; @@ -531,21 +528,20 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv, return err; } -static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq) +static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - struct mlx5e_priv *priv = rq->priv; - u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size)); + u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq)); - return 
mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey); + return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } -static int mlx5e_create_rq(struct mlx5e_channel *c, - struct mlx5e_rq_param *param, - struct mlx5e_rq *rq) +static int mlx5e_alloc_rq(struct mlx5e_channel *c, + struct mlx5e_params *params, + struct mlx5e_rq_param *rqp, + struct mlx5e_rq *rq) { - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; - void *rqc = param->rqc; + struct mlx5_core_dev *mdev = c->mdev; + void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); u32 byte_count; u32 frag_sz; @@ -554,9 +550,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, int err; int i; - param->wq.db_numa_node = cpu_to_node(c->cpu); + rqp->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq, + err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); if (err) return err; @@ -565,15 +561,15 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, wq_sz = mlx5_wq_ll_get_size(&rq->wq); - rq->wq_type = priv->params.rq_wq_type; + rq->wq_type = params->rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; - rq->tstamp = &priv->tstamp; + rq->tstamp = c->tstamp; rq->channel = c; rq->ix = c->ix; - rq->priv = c->priv; + rq->mdev = mdev; - rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL; + rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { err = PTR_ERR(rq->xdp_prog); rq->xdp_prog = NULL; @@ -588,24 +584,26 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->rx_headroom = MLX5_RX_HEADROOM; } - switch (priv->params.rq_wq_type) { + switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - if (mlx5e_is_vf_vport_rep(priv)) { - err = -EINVAL; - goto err_rq_wq_destroy; - } - rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); - rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); + rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe; + if (!rq->handle_rx_cqe) { + err = -EINVAL; + netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err); + goto err_rq_wq_destroy; + } + + rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz); + rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides); rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; byte_count = rq->buff.wqe_sz; - err = mlx5e_create_rq_umr_mkey(rq); + err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_wq_destroy; rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); @@ -621,18 +619,20 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, err = -ENOMEM; goto err_rq_wq_destroy; } - - if (mlx5e_is_vf_vport_rep(priv)) - rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep; - else - rq->handle_rx_cqe = mlx5e_handle_rx_cqe; - rq->alloc_wqe = mlx5e_alloc_rx_wqe; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; - rq->buff.wqe_sz = (priv->params.lro_en) ? - priv->params.lro_wqe_sz : - MLX5E_SW2HW_MTU(priv->netdev->mtu); + rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; + if (!rq->handle_rx_cqe) { + kfree(rq->dma_info); + err = -EINVAL; + netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err); + goto err_rq_wq_destroy; + } + + rq->buff.wqe_sz = params->lro_en ?
+ params->lro_wqe_sz : + MLX5E_SW2HW_MTU(c->netdev->mtu); byte_count = rq->buff.wqe_sz; /* calc the required page order */ @@ -656,8 +656,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, } INIT_WORK(&rq->am.work, mlx5e_rx_am_work); - rq->am.mode = priv->params.rx_cq_period_mode; - + rq->am.mode = params->rx_cq_period_mode; rq->page_cache.head = 0; rq->page_cache.tail = 0; @@ -674,7 +673,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, return err; } -static void mlx5e_destroy_rq(struct mlx5e_rq *rq) +static void mlx5e_free_rq(struct mlx5e_rq *rq) { int i; @@ -684,7 +683,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: mlx5e_rq_free_mpwqe_info(rq); - mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ kfree(rq->dma_info); @@ -699,10 +698,10 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) mlx5_wq_destroy(&rq->wq_ctrl); } -static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) +static int mlx5e_create_rq(struct mlx5e_rq *rq, + struct mlx5e_rq_param *param) { - struct mlx5e_priv *priv = rq->priv; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; @@ -723,7 +722,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); - MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); @@ -742,8 +740,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) { struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_dev *mdev = c->mdev; void *in; void *rqc; @@ -767,7 +764,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, return err; } -static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) +static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; @@ -785,6 +782,35 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); + MLX5_SET64(modify_rq_in, in, modify_bitmask, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS); + MLX5_SET(rqc, rqc, scatter_fcs, enable); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); + + err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + + kvfree(in); + + return err; +} + +static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5_core_dev *mdev = c->mdev; + void *in; + void *rqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); @@ -798,25 +824,28 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) return err; } -static void mlx5e_disable_rq(struct mlx5e_rq *rq) +static void mlx5e_destroy_rq(struct mlx5e_rq *rq) { - mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn); + 
mlx5_core_destroy_rq(rq->mdev, rq->rqn); } static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) { unsigned long exp_time = jiffies + msecs_to_jiffies(20000); struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5_wq_ll *wq = &rq->wq; + u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq)); while (time_before(jiffies, exp_time)) { - if (wq->cur_sz >= priv->params.min_rx_wqes) + if (wq->cur_sz >= min_wqes) return 0; msleep(20); } + netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", + rq->rqn, wq->cur_sz, min_wqes); return -ETIMEDOUT; } @@ -842,83 +871,128 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) } static int mlx5e_open_rq(struct mlx5e_channel *c, + struct mlx5e_params *params, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { - struct mlx5e_sq *sq = &c->icosq; - u16 pi = sq->pc & sq->wq.sz_m1; int err; - err = mlx5e_create_rq(c, param, rq); + err = mlx5e_alloc_rq(c, params, param, rq); if (err) return err; - err = mlx5e_enable_rq(rq, param); + err = mlx5e_create_rq(rq, param); if (err) - goto err_destroy_rq; + goto err_free_rq; - set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) - goto err_disable_rq; + goto err_destroy_rq; - if (param->am_enabled) + if (params->rx_am_enabled) set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - sq->db.ico_wqe[pi].num_wqebbs = 1; - mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */ - return 0; -err_disable_rq: - clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - mlx5e_disable_rq(rq); err_destroy_rq: mlx5e_destroy_rq(rq); +err_free_rq: + mlx5e_free_rq(rq); return err; } -static void mlx5e_close_rq(struct mlx5e_rq *rq) +static void mlx5e_activate_rq(struct mlx5e_rq *rq) +{ + struct mlx5e_icosq *sq = &rq->channel->icosq; + u16 pi = sq->pc & sq->wq.sz_m1; + struct mlx5e_tx_wqe *nopwqe; + + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + sq->db.ico_wqe[pi].num_wqebbs = 1; + nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl); +} + +static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) { clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ - cancel_work_sync(&rq->am.work); +} - mlx5e_disable_rq(rq); - mlx5e_free_rx_descs(rq); +static void mlx5e_close_rq(struct mlx5e_rq *rq) +{ + cancel_work_sync(&rq->am.work); mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); + mlx5e_free_rq(rq); } -static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq) +static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kfree(sq->db.xdp.di); - kfree(sq->db.xdp.wqe_info); + kfree(sq->db.di); } -static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa) +static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz, + sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz, GFP_KERNEL, numa); - sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz, - GFP_KERNEL, numa); - if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) { - mlx5e_free_sq_xdp_db(sq); + if (!sq->db.di) { + mlx5e_free_xdpsq_db(sq); return -ENOMEM; } return 0; } -static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq) +static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, + struct mlx5e_params 
*params, + struct mlx5e_sq_param *param, + struct mlx5e_xdpsq *sq) +{ + void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); + struct mlx5_core_dev *mdev = c->mdev; + int err; + + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->min_inline_mode = params->tx_min_inline_mode; + + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + if (err) + return err; + sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + + err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); + if (err) + goto err_sq_wq_destroy; + + return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + + return err; +} + +static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) +{ + mlx5e_free_xdpsq_db(sq); + mlx5_wq_destroy(&sq->wq_ctrl); +} + +static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) { kfree(sq->db.ico_wqe); } -static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa) +static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); @@ -930,155 +1004,128 @@ static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa) return 0; } -static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq) +static int mlx5e_alloc_icosq(struct mlx5e_channel *c, + struct mlx5e_sq_param *param, + struct mlx5e_icosq *sq) { - kfree(sq->db.txq.wqe_info); - kfree(sq->db.txq.dma_fifo); - kfree(sq->db.txq.skb); -} + void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); + struct mlx5_core_dev *mdev = c->mdev; + int err; -static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa) -{ - int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->uar_map = mdev->mlx5e_res.bfreg.map; - sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb), - GFP_KERNEL, numa); - sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo), - GFP_KERNEL, numa); - sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info), - GFP_KERNEL, numa); - if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) { - mlx5e_free_sq_txq_db(sq); - return -ENOMEM; - } + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + if (err) + return err; + sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - sq->dma_fifo_mask = df_sz - 1; + err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); + if (err) + goto err_sq_wq_destroy; + + sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS; return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + + return err; } -static void mlx5e_free_sq_db(struct mlx5e_sq *sq) +static void mlx5e_free_icosq(struct mlx5e_icosq *sq) { - switch (sq->type) { - case MLX5E_SQ_TXQ: - mlx5e_free_sq_txq_db(sq); - break; - case MLX5E_SQ_ICO: - mlx5e_free_sq_ico_db(sq); - break; - case MLX5E_SQ_XDP: - mlx5e_free_sq_xdp_db(sq); - break; - } + mlx5e_free_icosq_db(sq); + mlx5_wq_destroy(&sq->wq_ctrl); } -static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa) +static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { - switch (sq->type) { - case MLX5E_SQ_TXQ: - return mlx5e_alloc_sq_txq_db(sq, numa); - case MLX5E_SQ_ICO: - return mlx5e_alloc_sq_ico_db(sq, numa); - case MLX5E_SQ_XDP: - return mlx5e_alloc_sq_xdp_db(sq, numa); - } - - return 0; + kfree(sq->db.wqe_info); + kfree(sq->db.dma_fifo); } -static int mlx5e_sq_get_max_wqebbs(u8 sq_type) +static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq
*sq, int numa) { - switch (sq_type) { - case MLX5E_SQ_ICO: - return MLX5E_ICOSQ_MAX_WQEBBS; - case MLX5E_SQ_XDP: - return MLX5E_XDP_TX_WQEBBS; + int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + + sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo), + GFP_KERNEL, numa); + sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info), + GFP_KERNEL, numa); + if (!sq->db.dma_fifo || !sq->db.wqe_info) { + mlx5e_free_txqsq_db(sq); + return -ENOMEM; } - return MLX5_SEND_WQE_MAX_WQEBBS; + + sq->dma_fifo_mask = df_sz - 1; + + return 0; } -static int mlx5e_create_sq(struct mlx5e_channel *c, - int tc, - struct mlx5e_sq_param *param, - struct mlx5e_sq *sq) +static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, + int txq_ix, + struct mlx5e_params *params, + struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; - - void *sqc = param->sqc; - void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); + void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); + struct mlx5_core_dev *mdev = c->mdev; int err; - sq->type = param->type; sq->pdev = c->pdev; - sq->tstamp = &priv->tstamp; + sq->tstamp = c->tstamp; sq->mkey_be = c->mkey_be; sq->channel = c; - sq->tc = tc; - - err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false); - if (err) - return err; + sq->txq_ix = txq_ix; + sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->max_inline = params->tx_max_inline; + sq->min_inline_mode = params->tx_min_inline_mode; - sq->uar_map = sq->bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); - - err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, - &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) - goto err_unmap_free_uar; - - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - if (sq->bfreg.wc) - set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); - - sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; - sq->max_inline = param->max_inline; - sq->min_inline_mode = param->min_inline_mode; + return err; + sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); + err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; - if (sq->type == MLX5E_SQ_TXQ) { - int txq_ix; - - txq_ix = c->ix + tc * priv->params.num_channels; - sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); - priv->txq_to_sq_map[txq_ix] = sq; - } - - sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type); - sq->bf_budget = MLX5E_SQ_BF_BUDGET; + sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; return 0; err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); -err_unmap_free_uar: - mlx5_free_bfreg(mdev, &sq->bfreg); - return err; } -static void mlx5e_destroy_sq(struct mlx5e_sq *sq) +static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) { - struct mlx5e_channel *c = sq->channel; - struct mlx5e_priv *priv = c->priv; - - mlx5e_free_sq_db(sq); + mlx5e_free_txqsq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); - mlx5_free_bfreg(priv->mdev, &sq->bfreg); } -static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) -{ - struct mlx5e_channel *c = sq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; +struct mlx5e_create_sq_param { + struct mlx5_wq_ctrl *wq_ctrl; + u32 cqn; + u32 tisn; + u8 tis_lst_sz; + u8 min_inline_mode; +}; +static int mlx5e_create_sq(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param, + struct mlx5e_create_sq_param *csp, + u32 *sqn) +{
void *in; void *sqc; void *wq; @@ -1086,7 +1133,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) int err; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + - sizeof(u64) * sq->wq_ctrl.buf.npages; + sizeof(u64) * csp->wq_ctrl->buf.npages; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; @@ -1095,40 +1142,40 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) wq = MLX5_ADDR_OF(sqc, sqc, wq); memcpy(sqc, param->sqc, sizeof(param->sqc)); - - MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ? - 0 : priv->tisn[sq->tc]); - MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); + MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz); + MLX5_SET(sqc, sqc, tis_num_0, csp->tisn); + MLX5_SET(sqc, sqc, cqn, csp->cqn); if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) - MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); + MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); - MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); - MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); - MLX5_SET(wq, wq, uar_page, sq->bfreg.index); - MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - + MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); + MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); - MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); - mlx5_fill_page_array(&sq->wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); - err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + err = mlx5_core_create_sq(mdev, in, inlen, sqn); kvfree(in); return err; } -static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, - int next_state, bool update_rl, int rl_index) -{ - struct mlx5e_channel *c = sq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; +struct mlx5e_modify_sq_param { + int curr_state; + int next_state; + bool rl_update; + int rl_index; +}; +static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + struct mlx5e_modify_sq_param *p) +{ void *in; void *sqc; int inlen; @@ -1141,68 +1188,94 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); - MLX5_SET(modify_sq_in, in, sq_state, curr_state); - MLX5_SET(sqc, sqc, state, next_state); - if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET(modify_sq_in, in, sq_state, p->curr_state); + MLX5_SET(sqc, sqc, state, p->next_state); + if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) { MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); - MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); } - err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); + err = mlx5_core_modify_sq(mdev, sqn, in, inlen); kvfree(in); return err; } -static void mlx5e_disable_sq(struct mlx5e_sq *sq) +static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) { - struct mlx5e_channel *c = sq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; - - mlx5_core_destroy_sq(mdev, sq->sqn); - if (sq->rate_limit) - mlx5_rl_remove_rate(mdev, sq->rate_limit); + mlx5_core_destroy_sq(mdev, sqn); } -static int mlx5e_open_sq(struct mlx5e_channel *c, - int tc, - struct mlx5e_sq_param *param, 
- struct mlx5e_sq *sq) +static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param, + struct mlx5e_create_sq_param *csp, + u32 *sqn) { + struct mlx5e_modify_sq_param msp = {0}; int err; - err = mlx5e_create_sq(c, tc, param, sq); + err = mlx5e_create_sq(mdev, param, csp, sqn); if (err) return err; - err = mlx5e_enable_sq(sq, param); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + err = mlx5e_modify_sq(mdev, *sqn, &msp); if (err) - goto err_destroy_sq; + mlx5e_destroy_sq(mdev, *sqn); - set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, - false, 0); + return err; +} + +static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_txqsq *sq, u32 rate); + +static int mlx5e_open_txqsq(struct mlx5e_channel *c, + u32 tisn, + int txq_ix, + struct mlx5e_params *params, + struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq) +{ + struct mlx5e_create_sq_param csp = {}; + u32 tx_rate; + int err; + + err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq); if (err) - goto err_disable_sq; + return err; - if (sq->txq) { - netdev_tx_reset_queue(sq->txq); - netif_tx_start_queue(sq->txq); - } + csp.tisn = tisn; + csp.tis_lst_sz = 1; + csp.cqn = sq->cq.mcq.cqn; + csp.wq_ctrl = &sq->wq_ctrl; + csp.min_inline_mode = sq->min_inline_mode; + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + if (err) + goto err_free_txqsq; + + tx_rate = c->priv->tx_rates[sq->txq_ix]; + if (tx_rate) + mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); return 0; -err_disable_sq: +err_free_txqsq: clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - mlx5e_disable_sq(sq); -err_destroy_sq: - mlx5e_destroy_sq(sq); + mlx5e_free_txqsq(sq); return err; } +static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) +{ + sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); + set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + netdev_tx_reset_queue(sq->txq); + netif_tx_start_queue(sq->txq); +} + static inline void netif_tx_disable_queue(struct netdev_queue *txq) { __netif_tx_lock_bh(txq); @@ -1210,43 +1283,153 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) __netif_tx_unlock_bh(txq); } -static void mlx5e_close_sq(struct mlx5e_sq *sq) +static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { + struct mlx5e_channel *c = sq->channel; + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); /* prevent netif_tx_wake_queue */ - napi_synchronize(&sq->channel->napi); + napi_synchronize(&c->napi); - if (sq->txq) { - netif_tx_disable_queue(sq->txq); + netif_tx_disable_queue(sq->txq); - /* last doorbell out, godspeed .. */ - if (mlx5e_sq_has_room_for(sq, 1)) { - sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL; - mlx5e_send_nop(sq, true); - } + /* last doorbell out, godspeed .. 
*/ + if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) { + struct mlx5e_tx_wqe *nop; + + sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL; + nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl); } +} + +static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5_core_dev *mdev = c->mdev; - mlx5e_disable_sq(sq); - mlx5e_free_sq_descs(sq); - mlx5e_destroy_sq(sq); + mlx5e_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); + mlx5e_free_txqsq_descs(sq); + mlx5e_free_txqsq(sq); } -static int mlx5e_create_cq(struct mlx5e_channel *c, - struct mlx5e_cq_param *param, - struct mlx5e_cq *cq) +static int mlx5e_open_icosq(struct mlx5e_channel *c, + struct mlx5e_params *params, + struct mlx5e_sq_param *param, + struct mlx5e_icosq *sq) +{ + struct mlx5e_create_sq_param csp = {}; + int err; + + err = mlx5e_alloc_icosq(c, param, sq); + if (err) + return err; + + csp.cqn = sq->cq.mcq.cqn; + csp.wq_ctrl = &sq->wq_ctrl; + csp.min_inline_mode = params->tx_min_inline_mode; + set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + if (err) + goto err_free_icosq; + + return 0; + +err_free_icosq: + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + mlx5e_free_icosq(sq); + + return err; +} + +static void mlx5e_close_icosq(struct mlx5e_icosq *sq) +{ + struct mlx5e_channel *c = sq->channel; + + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + napi_synchronize(&c->napi); + + mlx5e_destroy_sq(c->mdev, sq->sqn); + mlx5e_free_icosq(sq); +} + +static int mlx5e_open_xdpsq(struct mlx5e_channel *c, + struct mlx5e_params *params, + struct mlx5e_sq_param *param, + struct mlx5e_xdpsq *sq) +{ + unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; + struct mlx5e_create_sq_param csp = {}; + unsigned int inline_hdr_sz = 0; + int err; + int i; + + err = mlx5e_alloc_xdpsq(c, params, param, sq); + if (err) + return err; + + csp.tis_lst_sz = 1; + csp.tisn = c->priv->tisn[0]; /* tc = 0 */ + csp.cqn = sq->cq.mcq.cqn; + csp.wq_ctrl = &sq->wq_ctrl; + csp.min_inline_mode = sq->min_inline_mode; + set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + if (err) + goto err_free_xdpsq; + + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { + inline_hdr_sz = MLX5E_XDP_MIN_INLINE; + ds_cnt++; + } + + /* Pre initialize fixed WQE fields */ + for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg; + + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); + + dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1); + dseg->lkey = sq->mkey_be; + } + + return 0; + +err_free_xdpsq: + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + mlx5e_free_xdpsq(sq); + + return err; +} + +static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) +{ + struct mlx5e_channel *c = sq->channel; + + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + napi_synchronize(&c->napi); + + mlx5e_destroy_sq(c->mdev, sq->sqn); + mlx5e_free_xdpsq_descs(sq); + mlx5e_free_xdpsq(sq); +} + +static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq) { - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = 
&cq->mcq; int eqn_not_used; unsigned int irqn; int err; u32 i; - param->wq.buf_numa_node = cpu_to_node(c->cpu); - param->wq.db_numa_node = cpu_to_node(c->cpu); - param->eq_ix = c->ix; - err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, &cq->wq_ctrl); if (err) @@ -1254,8 +1437,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); - cq->napi = &c->napi; - mcq->cqe_sz = 64; mcq->set_ci_db = cq->wq_ctrl.db.db; mcq->arm_db = cq->wq_ctrl.db.db + 1; @@ -1272,21 +1453,38 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, cqe->op_own = 0xf1; } - cq->channel = c; - cq->priv = priv; + cq->mdev = mdev; return 0; } -static void mlx5e_destroy_cq(struct mlx5e_cq *cq) +static int mlx5e_alloc_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq) +{ + struct mlx5_core_dev *mdev = c->priv->mdev; + int err; + + param->wq.buf_numa_node = cpu_to_node(c->cpu); + param->wq.db_numa_node = cpu_to_node(c->cpu); + param->eq_ix = c->ix; + + err = mlx5e_alloc_cq_common(mdev, param, cq); + + cq->napi = &c->napi; + cq->channel = c; + + return err; +} + +static void mlx5e_free_cq(struct mlx5e_cq *cq) { mlx5_cqwq_destroy(&cq->wq_ctrl); } -static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) +static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { - struct mlx5e_priv *priv = cq->priv; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_dev *mdev = cq->mdev; struct mlx5_core_cq *mcq = &cq->mcq; void *in; @@ -1330,47 +1528,41 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) return 0; } -static void mlx5e_disable_cq(struct mlx5e_cq *cq) +static void mlx5e_destroy_cq(struct mlx5e_cq *cq) { - struct mlx5e_priv *priv = cq->priv; - struct mlx5_core_dev *mdev = priv->mdev; - - mlx5_core_destroy_cq(mdev, &cq->mcq); + mlx5_core_destroy_cq(cq->mdev, &cq->mcq); } static int mlx5e_open_cq(struct mlx5e_channel *c, + struct mlx5e_cq_moder moder, struct mlx5e_cq_param *param, - struct mlx5e_cq *cq, - struct mlx5e_cq_moder moderation) + struct mlx5e_cq *cq) { + struct mlx5_core_dev *mdev = c->mdev; int err; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; - err = mlx5e_create_cq(c, param, cq); + err = mlx5e_alloc_cq(c, param, cq); if (err) return err; - err = mlx5e_enable_cq(cq, param); + err = mlx5e_create_cq(cq, param); if (err) - goto err_destroy_cq; + goto err_free_cq; if (MLX5_CAP_GEN(mdev, cq_moderation)) - mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation.usec, - moderation.pkts); + mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts); return 0; -err_destroy_cq: - mlx5e_destroy_cq(cq); +err_free_cq: + mlx5e_free_cq(cq); return err; } static void mlx5e_close_cq(struct mlx5e_cq *cq) { - mlx5e_disable_cq(cq); mlx5e_destroy_cq(cq); + mlx5e_free_cq(cq); } static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) @@ -1379,15 +1571,15 @@ static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) } static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, + struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - struct mlx5e_priv *priv = c->priv; int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { - err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation); + err = mlx5e_open_cq(c, params->tx_cq_moderation, + &cparam->tx_cq, &c->sq[tc].cq); if (err) goto err_close_tx_cqs; } @@ -1410,13 +1602,17 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) }
static int mlx5e_open_sqs(struct mlx5e_channel *c, + struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { int err; int tc; - for (tc = 0; tc < c->num_tc; tc++) { - err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); + for (tc = 0; tc < params->num_tc; tc++) { + int txq_ix = c->ix + tc * params->num_channels; + + err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, + params, &cparam->sq, &c->sq[tc]); if (err) goto err_close_sqs; } @@ -1425,7 +1621,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, err_close_sqs: for (tc--; tc >= 0; tc--) - mlx5e_close_sq(&c->sq[tc]); + mlx5e_close_txqsq(&c->sq[tc]); return err; } @@ -1435,23 +1631,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c) int tc; for (tc = 0; tc < c->num_tc; tc++) - mlx5e_close_sq(&c->sq[tc]); -} - -static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix) -{ - int i; - - for (i = 0; i < priv->profile->max_tc; i++) - priv->channeltc_to_txq_map[ix][i] = - ix + i * priv->params.num_channels; + mlx5e_close_txqsq(&c->sq[tc]); } static int mlx5e_set_sq_maxrate(struct net_device *dev, - struct mlx5e_sq *sq, u32 rate) + struct mlx5e_txqsq *sq, u32 rate) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_modify_sq_param msp = {0}; u16 rl_index = 0; int err; @@ -1474,8 +1662,11 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev, } } - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, - MLX5_SQC_STATE_RDY, true, rl_index); + msp.curr_state = MLX5_SQC_STATE_RDY; + msp.next_state = MLX5_SQC_STATE_RDY; + msp.rl_index = rl_index; + msp.rl_update = true; + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); if (err) { netdev_err(dev, "Failed configuring rate %u: %d\n", rate, err); @@ -1493,7 +1684,7 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + struct mlx5e_txqsq *sq = priv->txq2sq[index]; int err = 0; if (!mlx5_rl_is_supported(mdev)) { @@ -1520,114 +1711,87 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } -static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) -{ - return is_kdump_kernel() ? 
- MLX5E_MIN_NUM_CHANNELS : - min_t(int, mdev->priv.eq_table.num_comp_vectors, - MLX5E_MAX_NUM_CHANNELS); -} - static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, + struct mlx5e_params *params, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { - struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; + struct mlx5e_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; - struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; - struct mlx5e_sq *sq; int err; - int i; c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; c->priv = priv; + c->mdev = priv->mdev; + c->tstamp = &priv->tstamp; c->ix = ix; c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); - c->num_tc = priv->params.num_tc; - c->xdp = !!priv->xdp_prog; - - if (priv->params.rx_am_enabled) - rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); - else - rx_cq_profile = priv->params.rx_cq_moderation; - - mlx5e_build_channeltc_to_txq_map(priv, ix); + c->num_tc = params->num_tc; + c->xdp = !!params->xdp_prog; netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); - err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); if (err) goto err_napi_del; - err = mlx5e_open_tx_cqs(c, cparam); + err = mlx5e_open_tx_cqs(c, params, cparam); if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - rx_cq_profile); + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); if (err) goto err_close_tx_cqs; /* XDP SQ CQ params are same as normal TXQ sq CQ params */ - err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, - priv->params.tx_cq_moderation) : 0; + err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, + &cparam->tx_cq, &c->rq.xdpsq.cq) : 0; if (err) goto err_close_rx_cq; napi_enable(&c->napi); - err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); if (err) goto err_disable_napi; - err = mlx5e_open_sqs(c, cparam); + err = mlx5e_open_sqs(c, params, cparam); if (err) goto err_close_icosq; - for (i = 0; i < priv->params.num_tc; i++) { - u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; - - if (priv->tx_rates[txq_ix]) { - sq = priv->txq_to_sq_map[txq_ix]; - mlx5e_set_sq_maxrate(priv->netdev, sq, - priv->tx_rates[txq_ix]); - } - } - - err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0; + err = c->xdp ? 
mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0; if (err) goto err_close_sqs; - err = mlx5e_open_rq(c, &cparam->rq, &c->rq); + err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq); if (err) goto err_close_xdp_sq; - netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix); *cp = c; return 0; err_close_xdp_sq: if (c->xdp) - mlx5e_close_sq(&c->xdp_sq); + mlx5e_close_xdpsq(&c->rq.xdpsq); err_close_sqs: mlx5e_close_sqs(c); err_close_icosq: - mlx5e_close_sq(&c->icosq); + mlx5e_close_icosq(&c->icosq); err_disable_napi: napi_disable(&c->napi); if (c->xdp) - mlx5e_close_cq(&c->xdp_sq.cq); + mlx5e_close_cq(&c->rq.xdpsq.cq); err_close_rx_cq: mlx5e_close_cq(&c->rq.cq); @@ -1645,16 +1809,35 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, return err; } +static void mlx5e_activate_channel(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_activate_txqsq(&c->sq[tc]); + mlx5e_activate_rq(&c->rq); + netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix); +} + +static void mlx5e_deactivate_channel(struct mlx5e_channel *c) +{ + int tc; + + mlx5e_deactivate_rq(&c->rq); + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_deactivate_txqsq(&c->sq[tc]); +} + static void mlx5e_close_channel(struct mlx5e_channel *c) { mlx5e_close_rq(&c->rq); if (c->xdp) - mlx5e_close_sq(&c->xdp_sq); + mlx5e_close_xdpsq(&c->rq.xdpsq); mlx5e_close_sqs(c); - mlx5e_close_sq(&c->icosq); + mlx5e_close_icosq(&c->icosq); napi_disable(&c->napi); if (c->xdp) - mlx5e_close_cq(&c->xdp_sq.cq); + mlx5e_close_cq(&c->rq.xdpsq.cq); mlx5e_close_cq(&c->rq.cq); mlx5e_close_tx_cqs(c); mlx5e_close_cq(&c->icosq.cq); @@ -1664,17 +1847,16 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) } static void mlx5e_build_rq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, struct mlx5e_rq_param *param) { void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - switch (priv->params.rq_wq_type) { + switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - MLX5_SET(wq, wq, log_wqe_num_of_strides, - priv->params.mpwqe_log_num_strides - 9); - MLX5_SET(wq, wq, log_wqe_stride_size, - priv->params.mpwqe_log_stride_sz - 6); + MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9); + MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ @@ -1683,14 +1865,14 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); - MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); + MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size); MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); + MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); + MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; - - param->am_enabled = priv->params.rx_am_enabled; } static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@ -1715,17 +1897,14 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, } static void mlx5e_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); mlx5e_build_sq_param_common(priv, param); - MLX5_SET(wq, wq, log_wq_sz, 
priv->params.log_sq_size); - - param->max_inline = priv->params.tx_max_inline; - param->min_inline_mode = priv->params.tx_min_inline_mode; - param->type = MLX5E_SQ_TXQ; + MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -1737,37 +1916,36 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, struct mlx5e_cq_param *param) { void *cqc = param->cqc; u8 log_cq_size; - switch (priv->params.rq_wq_type) { + switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - log_cq_size = priv->params.log_rq_size + - priv->params.mpwqe_log_num_strides; + log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ - log_cq_size = priv->params.log_rq_size; + log_cq_size = params->log_rq_size; } MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); - if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) { + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); MLX5_SET(cqc, cqc, cqe_comp_en, 1); } mlx5e_build_common_cq_param(priv, param); - - param->cq_period_mode = priv->params.rx_cq_period_mode; } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, struct mlx5e_cq_param *param) { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size); mlx5e_build_common_cq_param(priv, param); @@ -1775,8 +1953,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, } static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, - struct mlx5e_cq_param *param, - u8 log_wq_size) + u8 log_wq_size, + struct mlx5e_cq_param *param) { void *cqc = param->cqc; @@ -1788,8 +1966,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, } static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, - struct mlx5e_sq_param *param, - u8 log_wq_size) + u8 log_wq_size, + struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); @@ -1798,162 +1976,119 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, log_wq_size); MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); - - param->type = MLX5E_SQ_ICO; } static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); mlx5e_build_sq_param_common(priv, param); - MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); - - param->max_inline = priv->params.tx_max_inline; - param->min_inline_mode = priv->params.tx_min_inline_mode; - param->type = MLX5E_SQ_XDP; + MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); } -static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) +static void mlx5e_build_channel_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, + struct mlx5e_channel_param *cparam) { u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - mlx5e_build_rq_param(priv, &cparam->rq); - mlx5e_build_sq_param(priv, &cparam->sq); - mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq); - mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz); - mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); - mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); - mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, 
icosq_log_wq_sz); + mlx5e_build_rq_param(priv, params, &cparam->rq); + mlx5e_build_sq_param(priv, params, &cparam->sq); + mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); + mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); + mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq); + mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq); + mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq); } -static int mlx5e_open_channels(struct mlx5e_priv *priv) +int mlx5e_open_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *chs) { struct mlx5e_channel_param *cparam; - int nch = priv->params.num_channels; int err = -ENOMEM; int i; - int j; - - priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *), - GFP_KERNEL); - priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc, - sizeof(struct mlx5e_sq *), GFP_KERNEL); + chs->num = chs->params.num_channels; + chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + if (!chs->c || !cparam) + goto err_free; - if (!priv->channel || !priv->txq_to_sq_map || !cparam) - goto err_free_txq_to_sq_map; - - mlx5e_build_channel_param(priv, cparam); - - for (i = 0; i < nch; i++) { - err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]); - if (err) - goto err_close_channels; - } - - for (j = 0; j < nch; j++) { - err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); + mlx5e_build_channel_param(priv, &chs->params, cparam); + for (i = 0; i < chs->num; i++) { + err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]); if (err) goto err_close_channels; } - /* FIXME: This is a W/A for tx timeout watch dog false alarm when - * polling for inactive tx queues. - */ - netif_tx_start_all_queues(priv->netdev); - kfree(cparam); return 0; err_close_channels: for (i--; i >= 0; i--) - mlx5e_close_channel(priv->channel[i]); + mlx5e_close_channel(chs->c[i]); -err_free_txq_to_sq_map: - kfree(priv->txq_to_sq_map); - kfree(priv->channel); +err_free: + kfree(chs->c); kfree(cparam); - + chs->num = 0; return err; } -static void mlx5e_close_channels(struct mlx5e_priv *priv) +static void mlx5e_activate_channels(struct mlx5e_channels *chs) { int i; - /* FIXME: This is a W/A only for tx timeout watch dog false alarm when - * polling for inactive tx queues. - */ - netif_tx_stop_all_queues(priv->netdev); - netif_tx_disable(priv->netdev); - - for (i = 0; i < priv->params.num_channels; i++) - mlx5e_close_channel(priv->channel[i]); - - kfree(priv->txq_to_sq_map); - kfree(priv->channel); + for (i = 0; i < chs->num; i++) + mlx5e_activate_channel(chs->c[i]); } -static int mlx5e_rx_hash_fn(int hfunc) +static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) { - return (hfunc == ETH_RSS_HASH_TOP) ? - MLX5_RX_HASH_FN_TOEPLITZ : - MLX5_RX_HASH_FN_INVERTED_XOR8; -} - -static int mlx5e_bits_invert(unsigned long a, int size) -{ - int inv = 0; + int err = 0; int i; - for (i = 0; i < size; i++) - inv |= (test_bit(size - i - 1, &a) ? 
1 : 0) << i; + for (i = 0; i < chs->num; i++) { + err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq); + if (err) + break; + } - return inv; + return err; } -static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) +static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) { int i; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { - int ix = i; - u32 rqn; - - if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); - - ix = priv->params.indirection_rqt[ix]; - rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[ix]->rq.rqn : - priv->drop_rq.rqn; - MLX5_SET(rqtc, rqtc, rq_num[i], rqn); - } + for (i = 0; i < chs->num; i++) + mlx5e_deactivate_channel(chs->c[i]); } -static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc, - int ix) +void mlx5e_close_channels(struct mlx5e_channels *chs) { - u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[ix]->rq.rqn : - priv->drop_rq.rqn; + int i; - MLX5_SET(rqtc, rqtc, rq_num[0], rqn); + for (i = 0; i < chs->num; i++) + mlx5e_close_channel(chs->c[i]); + + kfree(chs->c); + chs->num = 0; } -static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, - int ix, struct mlx5e_rqt *rqt) +static int +mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt) { struct mlx5_core_dev *mdev = priv->mdev; void *rqtc; int inlen; int err; u32 *in; + int i; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1965,10 +2100,8 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - if (sz > 1) /* RSS */ - mlx5e_fill_indir_rqt_rqns(priv, rqtc); - else - mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); + for (i = 0; i < sz; i++) + MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); if (!err) @@ -1984,11 +2117,15 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); } -static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv) +int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) { struct mlx5e_rqt *rqt = &priv->indir_rqt; + int err; - return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt); + err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt); + if (err) + mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err); + return err; } int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) @@ -1999,7 +2136,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { rqt = &priv->direct_tir[ix].rqt; - err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt); + err = mlx5e_create_rqt(priv, 1 /*size */, rqt); if (err) goto err_destroy_rqts; } @@ -2007,13 +2144,64 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) return 0; err_destroy_rqts: + mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err); for (ix--; ix >= 0; ix--) mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt); return err; } -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) +void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); +} + +static int mlx5e_rx_hash_fn(int hfunc) +{ + return (hfunc == ETH_RSS_HASH_TOP) ? 
+ MLX5_RX_HASH_FN_TOEPLITZ : + MLX5_RX_HASH_FN_INVERTED_XOR8; +} + +static int mlx5e_bits_invert(unsigned long a, int size) +{ + int inv = 0; + int i; + + for (i = 0; i < size; i++) + inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; + + return inv; +} + +static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz, + struct mlx5e_redirect_rqt_param rrp, void *rqtc) +{ + int i; + + for (i = 0; i < sz; i++) { + u32 rqn; + + if (rrp.is_rss) { + int ix = i; + + if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, ilog2(sz)); + + ix = priv->channels.params.indirection_rqt[ix]; + rqn = rrp.rss.channels->c[ix]->rq.rqn; + } else { + rqn = rrp.rqn; + } + MLX5_SET(rqtc, rqtc, rq_num[i], rqn); + } +} + +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, + struct mlx5e_redirect_rqt_param rrp) { struct mlx5_core_dev *mdev = priv->mdev; void *rqtc; @@ -2029,41 +2217,86 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - if (sz > 1) /* RSS */ - mlx5e_fill_indir_rqt_rqns(priv, rqtc); - else - mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); - MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); - + mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc); err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); kvfree(in); - return err; } -static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) +static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix, + struct mlx5e_redirect_rqt_param rrp) +{ + if (!rrp.is_rss) + return rrp.rqn; + + if (ix >= rrp.rss.channels->num) + return priv->drop_rq.rqn; + + return rrp.rss.channels->c[ix]->rq.rqn; +} + +static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, + struct mlx5e_redirect_rqt_param rrp) { u32 rqtn; int ix; if (priv->indir_rqt.enabled) { + /* RSS RQ table */ rqtn = priv->indir_rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); } - for (ix = 0; ix < priv->params.num_channels; ix++) { + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + struct mlx5e_redirect_rqt_param direct_rrp = { + .is_rss = false, + { + .rqn = mlx5e_get_direct_rqn(priv, ix, rrp) + }, + }; + + /* Direct RQ Tables */ if (!priv->direct_tir[ix].rqt.enabled) continue; + rqtn = priv->direct_tir[ix].rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, 1, ix); + mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); } } -static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) +static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *chs) +{ + struct mlx5e_redirect_rqt_param rrp = { + .is_rss = true, + { + .rss = { + .channels = chs, + .hfunc = chs->params.rss_hfunc, + } + }, + }; + + mlx5e_redirect_rqts(priv, rrp); +} + +static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) +{ + struct mlx5e_redirect_rqt_param drop_rrp = { + .is_rss = false, + { + .rqn = priv->drop_rq.rqn, + }, + }; + + mlx5e_redirect_rqts(priv, drop_rrp); +} + +static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) { - if (!priv->params.lro_en) + if (!params->lro_en) return; #define ROUGH_MAX_L2_L3_HDR_SZ 256 @@ -2072,13 +2305,13 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (priv->params.lro_wqe_sz - - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, 
priv->params.lro_timeout); + (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); } -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc, - enum mlx5e_traffic_types tt) +void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, + enum mlx5e_traffic_types tt, + void *tirc) { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); @@ -2094,16 +2327,15 @@ void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc, MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) - MLX5_SET(tirc, tirc, rx_hash_fn, - mlx5e_rx_hash_fn(priv->params.rss_hfunc)); - if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { + MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc)); + if (params->rss_hfunc == ETH_RSS_HASH_TOP) { void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, priv->params.toeplitz_hash_key, len); + memcpy(rss_key, params->toeplitz_hash_key, len); } switch (tt) { @@ -2208,7 +2440,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) MLX5_SET(modify_tir_in, in, bitmask.lro, 1); tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); - mlx5e_build_tir_ctx_lro(tirc, priv); + mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, @@ -2258,9 +2490,9 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) *mtu = MLX5E_HW2SW_MTU(hw_mtu); } -static int mlx5e_set_dev_port_mtu(struct net_device *netdev) +static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) { - struct mlx5e_priv *priv = netdev_priv(netdev); + struct net_device *netdev = priv->netdev; u16 mtu; int err; @@ -2280,8 +2512,8 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev) static void mlx5e_netdev_set_tcs(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - int nch = priv->params.num_channels; - int ntc = priv->params.num_tc; + int nch = priv->channels.params.num_channels; + int ntc = priv->channels.params.num_tc; int tc; netdev_reset_tc(netdev); @@ -2298,53 +2530,116 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } +static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) +{ + struct mlx5e_channel *c; + struct mlx5e_txqsq *sq; + int i, tc; + + for (i = 0; i < priv->channels.num; i++) + for (tc = 0; tc < priv->profile->max_tc; tc++) + priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num; + + for (i = 0; i < priv->channels.num; i++) { + c = priv->channels.c[i]; + for (tc = 0; tc < c->num_tc; tc++) { + sq = &c->sq[tc]; + priv->txq2sq[sq->txq_ix] = sq; + } + } +} + +static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev) +{ + return (MLX5_CAP_GEN(mdev, vport_group_manager) && + MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH); +} + +void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) +{ + int num_txqs = priv->channels.num * priv->channels.params.num_tc; + struct net_device *netdev = priv->netdev; + + mlx5e_netdev_set_tcs(netdev); + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, priv->channels.num); + + mlx5e_build_channels_tx_maps(priv); + mlx5e_activate_channels(&priv->channels); + netif_tx_start_all_queues(priv->netdev); + + if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + 
mlx5e_add_sqs_fwd_rules(priv); + + mlx5e_wait_channels_min_rx_wqes(&priv->channels); + mlx5e_redirect_rqts_to_channels(priv, &priv->channels); +} + +void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) +{ + mlx5e_redirect_rqts_to_drop(priv); + + if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + mlx5e_remove_sqs_fwd_rules(priv); + + /* FIXME: This is a W/A only for tx timeout watch dog false alarm when + * polling for inactive tx queues. + */ + netif_tx_stop_all_queues(priv->netdev); + netif_tx_disable(priv->netdev); + mlx5e_deactivate_channels(&priv->channels); +} + +void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify) +{ + struct net_device *netdev = priv->netdev; + int new_num_txqs; + + new_num_txqs = new_chs->num * new_chs->params.num_tc; + + netif_carrier_off(netdev); + + if (new_num_txqs < netdev->real_num_tx_queues) + netif_set_real_num_tx_queues(netdev, new_num_txqs); + + mlx5e_deactivate_priv_channels(priv); + mlx5e_close_channels(&priv->channels); + + priv->channels = *new_chs; + + /* New channels are ready to roll, modify HW settings if needed */ + if (hw_modify) + hw_modify(priv); + + mlx5e_refresh_tirs(priv, false); + mlx5e_activate_priv_channels(priv); + + mlx5e_update_carrier(priv); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - int num_txqs; int err; set_bit(MLX5E_STATE_OPENED, &priv->state); - mlx5e_netdev_set_tcs(netdev); - - num_txqs = priv->params.num_channels * priv->params.num_tc; - netif_set_real_num_tx_queues(netdev, num_txqs); - netif_set_real_num_rx_queues(netdev, priv->params.num_channels); - - err = mlx5e_open_channels(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", - __func__, err); + err = mlx5e_open_channels(priv, &priv->channels); + if (err) goto err_clear_state_opened_flag; - } - - err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false); - if (err) { - netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", - __func__, err); - goto err_close_channels; - } - mlx5e_redirect_rqts(priv); + mlx5e_refresh_tirs(priv, false); + mlx5e_activate_priv_channels(priv); mlx5e_update_carrier(priv); mlx5e_timestamp_init(priv); -#ifdef CONFIG_RFS_ACCEL - priv->netdev->rx_cpu_rmap = priv->mdev->rmap; -#endif + if (priv->profile->update_stats) queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - err = mlx5e_add_sqs_fwd_rules(priv); - if (err) - goto err_close_channels; - } return 0; -err_close_channels: - mlx5e_close_channels(priv); err_clear_state_opened_flag: clear_bit(MLX5E_STATE_OPENED, &priv->state); return err; @@ -2365,7 +2660,6 @@ int mlx5e_open(struct net_device *netdev) int mlx5e_close_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; /* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves close&open failed. 
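Editor's annotation, not part of the patch: mlx5e_switch_priv_channels() above is the heart of the new update flow. Every runtime reconfiguration now builds a complete replacement channel set before touching the active one, then commits it together with an optional HW-modify callback; the LRO handler later in this patch is the canonical caller:

	new_channels.params = priv->channels.params;
	new_channels.params.lro_en = enable;

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);

Because the old channels stay active until mlx5e_open_channels() succeeds, a failed allocation leaves the device running on its previous configuration instead of down, which appears to be the main behavioral win over the old close/open cycle.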
@@ -2375,13 +2669,10 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) - mlx5e_remove_sqs_fwd_rules(priv); - mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); - mlx5e_redirect_rqts(priv); - mlx5e_close_channels(priv); + mlx5e_deactivate_priv_channels(priv); + mlx5e_close_channels(&priv->channels); return 0; } @@ -2401,11 +2692,10 @@ int mlx5e_close(struct net_device *netdev) return err; } -static int mlx5e_create_drop_rq(struct mlx5e_priv *priv, - struct mlx5e_rq *rq, - struct mlx5e_rq_param *param) +static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, + struct mlx5e_rq *rq, + struct mlx5e_rq_param *param) { - struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); int err; @@ -2417,111 +2707,85 @@ static int mlx5e_create_drop_rq(struct mlx5e_priv *priv, if (err) return err; - rq->priv = priv; + rq->mdev = mdev; return 0; } -static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, - struct mlx5e_cq *cq, - struct mlx5e_cq_param *param) +static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, + struct mlx5e_cq *cq, + struct mlx5e_cq_param *param) { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_core_cq *mcq = &cq->mcq; - int eqn_not_used; - unsigned int irqn; - int err; - - err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, - &cq->wq_ctrl); - if (err) - return err; - - mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); - - mcq->cqe_sz = 64; - mcq->set_ci_db = cq->wq_ctrl.db.db; - mcq->arm_db = cq->wq_ctrl.db.db + 1; - *mcq->set_ci_db = 0; - *mcq->arm_db = 0; - mcq->vector = param->eq_ix; - mcq->comp = mlx5e_completion_event; - mcq->event = mlx5e_cq_error_event; - mcq->irqn = irqn; - - cq->priv = priv; - - return 0; + return mlx5e_alloc_cq_common(mdev, param, cq); } -static int mlx5e_open_drop_rq(struct mlx5e_priv *priv) +static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, + struct mlx5e_rq *drop_rq) { - struct mlx5e_cq_param cq_param; - struct mlx5e_rq_param rq_param; - struct mlx5e_rq *rq = &priv->drop_rq; - struct mlx5e_cq *cq = &priv->drop_rq.cq; + struct mlx5e_cq_param cq_param = {}; + struct mlx5e_rq_param rq_param = {}; + struct mlx5e_cq *cq = &drop_rq->cq; int err; - memset(&cq_param, 0, sizeof(cq_param)); - memset(&rq_param, 0, sizeof(rq_param)); mlx5e_build_drop_rq_param(&rq_param); - err = mlx5e_create_drop_cq(priv, cq, &cq_param); + err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); if (err) return err; - err = mlx5e_enable_cq(cq, &cq_param); + err = mlx5e_create_cq(cq, &cq_param); if (err) - goto err_destroy_cq; + goto err_free_cq; - err = mlx5e_create_drop_rq(priv, rq, &rq_param); + err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param); if (err) - goto err_disable_cq; + goto err_destroy_cq; - err = mlx5e_enable_rq(rq, &rq_param); + err = mlx5e_create_rq(drop_rq, &rq_param); if (err) - goto err_destroy_rq; + goto err_free_rq; return 0; -err_destroy_rq: - mlx5e_destroy_rq(&priv->drop_rq); - -err_disable_cq: - mlx5e_disable_cq(&priv->drop_rq.cq); +err_free_rq: + mlx5e_free_rq(drop_rq); err_destroy_cq: - mlx5e_destroy_cq(&priv->drop_rq.cq); + mlx5e_destroy_cq(cq); + +err_free_cq: + mlx5e_free_cq(cq); return err; } -static void mlx5e_close_drop_rq(struct mlx5e_priv *priv) +static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { - mlx5e_disable_rq(&priv->drop_rq); - mlx5e_destroy_rq(&priv->drop_rq); - mlx5e_disable_cq(&priv->drop_rq.cq); - mlx5e_destroy_cq(&priv->drop_rq.cq); + 
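/* Editor's annotation, not part of the patch: the renames in these hunks
 * split SW resources from HW objects: mlx5e_alloc_*() now pairs with
 * mlx5e_free_*() for the SW state, and mlx5e_create_*() with
 * mlx5e_destroy_*() for the HW object (previously enable/disable). The
 * drop RQ teardown below accordingly runs destroy_rq -> free_rq ->
 * destroy_cq -> free_cq, the exact reverse of mlx5e_open_drop_rq() above.
 */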
mlx5e_destroy_rq(drop_rq); + mlx5e_free_rq(drop_rq); + mlx5e_destroy_cq(&drop_rq->cq); + mlx5e_free_cq(&drop_rq->cq); } -static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) +int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, + u32 underlay_qpn, u32 *tisn) { - struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); MLX5_SET(tisc, tisc, prio, tc << 1); + MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn); MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); if (mlx5_lag_is_lacp_owner(mdev)) MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); - return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); + return mlx5_core_create_tis(mdev, in, sizeof(in), tisn); } -static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc) +void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn) { - mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5_core_destroy_tis(mdev, tisn); } int mlx5e_create_tises(struct mlx5e_priv *priv) @@ -2530,7 +2794,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv) int tc; for (tc = 0; tc < priv->profile->max_tc; tc++) { - err = mlx5e_create_tis(priv, tc); + err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]); if (err) goto err_close_tises; } @@ -2539,7 +2803,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv) err_close_tises: for (tc--; tc >= 0; tc--) - mlx5e_destroy_tis(priv, tc); + mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); return err; } @@ -2549,34 +2813,34 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) int tc; for (tc = 0; tc < priv->profile->max_tc; tc++) - mlx5e_destroy_tis(priv, tc); + mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); } -static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, - enum mlx5e_traffic_types tt) +static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, + enum mlx5e_traffic_types tt, + u32 *tirc) { MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); - mlx5e_build_tir_ctx_lro(tirc, priv); + mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); - mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt); + mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc); } -static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, - u32 rqtn) +static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) { MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); - mlx5e_build_tir_ctx_lro(tirc, priv); + mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, rqtn); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } -static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) +int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) { struct mlx5e_tir *tir; void *tirc; @@ -2594,7 +2858,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) memset(in, 0, inlen); tir = &priv->indir_tir[tt]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_indir_tir_ctx(priv, tirc, tt); + mlx5e_build_indir_tir_ctx(priv, tt, tirc); err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_tirs; @@ -2605,6 +2869,7 @@ static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) return 0; err_destroy_tirs: + mlx5_core_warn(priv->mdev, "create indirect tirs failed, 
%d\n", err); for (tt--; tt >= 0; tt--) mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); @@ -2632,8 +2897,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) memset(in, 0, inlen); tir = &priv->direct_tir[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_direct_tir_ctx(priv, tirc, - priv->direct_tir[ix].rqt.rqtn); + mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc); err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_ch_tirs; @@ -2644,6 +2908,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) return 0; err_destroy_ch_tirs: + mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err); for (ix--; ix >= 0; ix--) mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]); @@ -2652,7 +2917,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) return err; } -static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) +void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { int i; @@ -2669,16 +2934,27 @@ void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]); } -int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) +static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) { int err = 0; int i; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return 0; + for (i = 0; i < chs->num; i++) { + err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); + if (err) + return err; + } + + return 0; +} + +static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) +{ + int err = 0; + int i; - for (i = 0; i < priv->params.num_channels; i++) { - err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd); + for (i = 0; i < chs->num; i++) { + err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); if (err) return err; } @@ -2689,7 +2965,7 @@ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool was_opened; + struct mlx5e_channels new_channels = {}; int err = 0; if (tc && tc != MLX5E_MAX_NUM_TC) @@ -2697,17 +2973,21 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) mutex_lock(&priv->state_lock); - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(priv->netdev); + new_channels.params = priv->channels.params; + new_channels.params.num_tc = tc ? tc : 1; - priv->params.num_tc = tc ? 
tc : 1; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + goto out; + } - if (was_opened) - err = mlx5e_open_locked(priv->netdev); + err = mlx5e_open_channels(priv, &new_channels); + if (err) + goto out; + mlx5e_switch_priv_channels(priv, &new_channels, NULL); +out: mutex_unlock(&priv->state_lock); - return err; } @@ -2737,7 +3017,9 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return mlx5e_setup_tc(dev, tc->tc); + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return mlx5e_setup_tc(dev, tc->mqprio->num_tc); } static void @@ -2822,26 +3104,31 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - int err; + struct mlx5e_channels new_channels = {}; + int err = 0; + bool reset; mutex_lock(&priv->state_lock); - if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)) - mlx5e_close_locked(priv->netdev); + reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST); + reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); - priv->params.lro_en = enable; - err = mlx5e_modify_tirs_lro(priv); - if (err) { - netdev_err(netdev, "lro modify failed, %d\n", err); - priv->params.lro_en = !enable; + new_channels.params = priv->channels.params; + new_channels.params.lro_en = enable; + + if (!reset) { + priv->channels.params = new_channels.params; + err = mlx5e_modify_tirs_lro(priv); + goto out; } - if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)) - mlx5e_open_locked(priv->netdev); + err = mlx5e_open_channels(priv, &new_channels); + if (err) + goto out; + mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro); +out: mutex_unlock(&priv->state_lock); - return err; } @@ -2878,18 +3165,39 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable) return mlx5_set_port_fcs(mdev, !enable); } -static int set_feature_rx_vlan(struct net_device *netdev, bool enable) +static int set_feature_rx_fcs(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; mutex_lock(&priv->state_lock); - priv->params.vlan_strip_disable = !enable; - err = mlx5e_modify_rqs_vsd(priv, !enable); + priv->channels.params.scatter_fcs_en = enable; + err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable); + if (err) + priv->channels.params.scatter_fcs_en = !enable; + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int set_feature_rx_vlan(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err = 0; + + mutex_lock(&priv->state_lock); + + priv->channels.params.vlan_strip_disable = !enable; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + err = mlx5e_modify_channels_vsd(&priv->channels, !enable); if (err) - priv->params.vlan_strip_disable = enable; + priv->channels.params.vlan_strip_disable = enable; +unlock: mutex_unlock(&priv->state_lock); return err; @@ -2947,6 +3255,8 @@ static int mlx5e_set_features(struct net_device *netdev, set_feature_tc_num_filters); err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, set_feature_rx_all); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS, + set_feature_rx_fcs); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, 
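/* Editor's annotation, not part of the patch: the feature handlers above
 * share one shape under priv->state_lock: record the new value in
 * channels.params first, and only touch live channels when
 * MLX5E_STATE_OPENED is set. set_feature_rx_vlan always stores
 * vlan_strip_disable but calls mlx5e_modify_channels_vsd() only on an open
 * device; set_feature_lro goes further and rebuilds the channels via
 * mlx5e_switch_priv_channels() when the RQs are of the linked-list type,
 * which apparently cannot re-negotiate LRO in place.
 */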
set_feature_rx_vlan); #ifdef CONFIG_RFS_ACCEL @@ -2960,28 +3270,38 @@ static int mlx5e_set_features(struct net_device *netdev, static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool was_opened; + struct mlx5e_channels new_channels = {}; + int curr_mtu; int err = 0; bool reset; mutex_lock(&priv->state_lock); - reset = !priv->params.lro_en && - (priv->params.rq_wq_type != + reset = !priv->channels.params.lro_en && + (priv->channels.params.rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened && reset) - mlx5e_close_locked(netdev); + reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state); + curr_mtu = netdev->mtu; netdev->mtu = new_mtu; - mlx5e_set_dev_port_mtu(netdev); - if (was_opened && reset) - err = mlx5e_open_locked(netdev); + if (!reset) { + mlx5e_set_dev_port_mtu(priv); + goto out; + } - mutex_unlock(&priv->state_lock); + new_channels.params = priv->channels.params; + err = mlx5e_open_channels(priv, &new_channels); + if (err) { + netdev->mtu = curr_mtu; + goto out; + } + mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + +out: + mutex_unlock(&priv->state_lock); return err; } @@ -3186,8 +3506,8 @@ static void mlx5e_tx_timeout(struct net_device *dev) netdev_err(dev, "TX timeout detected\n"); - for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) { - struct mlx5e_sq *sq = priv->txq_to_sq_map[i]; + for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { + struct mlx5e_txqsq *sq = priv->txq2sq[i]; if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; @@ -3219,7 +3539,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); /* no need for full reset when exchanging programs */ - reset = (!priv->xdp_prog || !prog); + reset = (!priv->channels.params.xdp_prog || !prog); if (was_opened && reset) mlx5e_close_locked(netdev); @@ -3227,7 +3547,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) /* num_channels is invariant here, so we can take the * batched reference right upfront. */ - prog = bpf_prog_add(prog, priv->params.num_channels); + prog = bpf_prog_add(prog, priv->channels.num); if (IS_ERR(prog)) { err = PTR_ERR(prog); goto unlock; @@ -3237,12 +3557,12 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) /* exchange programs, extra prog reference we got from caller * as long as we don't fail from this point onwards. */ - old_prog = xchg(&priv->xdp_prog, prog); + old_prog = xchg(&priv->channels.params.xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); if (reset) /* change RQ type according to priv->xdp_prog */ - mlx5e_set_rq_priv_params(priv); + mlx5e_set_rq_params(priv->mdev, &priv->channels.params); if (was_opened && reset) mlx5e_open_locked(netdev); @@ -3253,8 +3573,8 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) /* exchanging programs w/o reset, we update ref counts on behalf * of the channels RQs here. 
*/ - for (i = 0; i < priv->params.num_channels; i++) { - struct mlx5e_channel *c = priv->channel[i]; + for (i = 0; i < priv->channels.num; i++) { + struct mlx5e_channel *c = priv->channels.c[i]; clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); napi_synchronize(&c->napi); @@ -3280,7 +3600,7 @@ static bool mlx5e_xdp_attached(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - return !!priv->xdp_prog; + return !!priv->channels.params.xdp_prog; } static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp) @@ -3303,10 +3623,12 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp) static void mlx5e_netpoll(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_channels *chs = &priv->channels; + int i; - for (i = 0; i < priv->params.num_channels; i++) - napi_schedule(&priv->channel[i]->napi); + for (i = 0; i < chs->num; i++) + napi_schedule(&chs->c[i]->napi); } #endif @@ -3463,6 +3785,12 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) (pci_bw < 40000) && (pci_bw < link_speed)); } +static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw) +{ + return !(link_speed && pci_bw && + (pci_bw <= 16000) && (pci_bw < link_speed)); +} + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) { params->rx_cq_period_mode = cq_period_mode; @@ -3475,6 +3803,13 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) params->rx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + + if (params->rx_am_enabled) + params->rx_cq_moderation = + mlx5e_am_get_def_profile(params->rx_cq_period_mode); + + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, + params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); } u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) @@ -3489,75 +3824,81 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); } -static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + u16 max_channels) { - struct mlx5e_priv *priv = netdev_priv(netdev); + u8 cq_period_mode = 0; u32 link_speed = 0; u32 pci_bw = 0; - u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? - MLX5_CQ_PERIOD_MODE_START_FROM_CQE : - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - priv->mdev = mdev; - priv->netdev = netdev; - priv->params.num_channels = profile->max_nch(mdev); - priv->profile = profile; - priv->ppriv = ppriv; + params->num_channels = max_channels; + params->num_tc = 1; - priv->params.lro_timeout = - mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); + mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_get_pci_bw(mdev, &pci_bw); + mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", + link_speed, pci_bw); - priv->params.log_sq_size = is_kdump_kernel() ? + /* SQ */ + params->log_sq_size = is_kdump_kernel() ? 
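/* Editor's annotation, not part of the patch: hw_lro_heuristic() above only
 * vetoes the HW LRO default when the PCIe bandwidth is known, at most
 * 16000, and below the port speed; in every other case it returns true.
 * cqe_compress_heuristic() earlier in this file is the mirror image,
 * returning true only for the constrained case (pci_bw < 40000 &&
 * pci_bw < link_speed); presumably both exist because the extra work only
 * pays off when the bus is the bottleneck.
 */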
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ - priv->params.rx_cqe_compress_def = false; + params->rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && - MLX5_CAP_GEN(mdev, vport_group_manager)) { - mlx5e_get_max_linkspeed(mdev, &link_speed); - mlx5e_get_pci_bw(mdev, &pci_bw); - mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", - link_speed, pci_bw); - priv->params.rx_cqe_compress_def = - cqe_compress_heuristic(link_speed, pci_bw); - } - - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, - priv->params.rx_cqe_compress_def); - - mlx5e_set_rq_priv_params(priv); - if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - priv->params.lro_en = true; - - priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); - - priv->params.tx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation.pkts = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; - priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); - if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE && + MLX5_CAP_GEN(mdev, vport_group_manager)) + params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); + + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); + + /* RQ */ + mlx5e_set_rq_params(mdev, params); + + /* HW LRO */ + /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + params->lro_en = hw_lro_heuristic(link_speed, pci_bw); + params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); + + /* CQ moderation params */ + cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(params, cq_period_mode); + + params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + + /* TX inline */ + params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); + mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); + if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE && !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) - priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2; + params->tx_min_inline_mode = MLX5_INLINE_MODE_L2; - priv->params.num_tc = 1; - priv->params.rss_hfunc = ETH_RSS_HASH_XOR; + /* RSS */ + params->rss_hfunc = ETH_RSS_HASH_XOR; + netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); + mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt, + MLX5E_INDIR_RQT_SIZE, max_channels); +} - netdev_rss_key_fill(priv->params.toeplitz_hash_key, - sizeof(priv->params.toeplitz_hash_key)); +static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); - mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev)); + priv->mdev = mdev; + priv->netdev = netdev; + priv->profile = profile; + priv->ppriv = ppriv; - /* Initialize pflags */ - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, - priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); mutex_init(&priv->state_lock); @@ -3642,13 +3983,19 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (fcs_supported) netdev->hw_features |= NETIF_F_RXALL; + if (MLX5_CAP_ETH(mdev, scatter_fcs)) + netdev->hw_features |= NETIF_F_RXFCS; + netdev->features = netdev->hw_features; - if (!priv->params.lro_en) + if (!priv->channels.params.lro_en) netdev->features &= ~NETIF_F_LRO; if (fcs_enabled) netdev->features &= ~NETIF_F_RXALL; + if (!priv->channels.params.scatter_fcs_en) + netdev->features &= ~NETIF_F_RXFCS; + #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) if (FT_CAP(flow_modify_en) && FT_CAP(modify_root) && @@ -3708,39 +4055,30 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_vxlan_cleanup(priv); - if (priv->xdp_prog) - bpf_prog_put(priv->xdp_prog); + if (priv->channels.params.xdp_prog) + bpf_prog_put(priv->channels.params.xdp_prog); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; - int i; - err = mlx5e_create_indirect_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err); + err = mlx5e_create_indirect_rqt(priv); + if (err) return err; - } err = mlx5e_create_direct_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + if (err) goto err_destroy_indirect_rqts; - } err = mlx5e_create_indirect_tirs(priv); - if (err) { - mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err); + if (err) goto err_destroy_direct_rqts; - } err = mlx5e_create_direct_tirs(priv); - if (err) { - mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + if (err) goto err_destroy_indirect_tirs; - } err = mlx5e_create_flow_steering(priv); if (err) { @@ -3761,8 +4099,7 @@ static int 
mlx5e_init_nic_rx(struct mlx5e_priv *priv) err_destroy_indirect_tirs: mlx5e_destroy_indirect_tirs(priv); err_destroy_direct_rqts: - for (i = 0; i < priv->profile->max_nch(mdev); i++) - mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); return err; @@ -3770,14 +4107,11 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { - int i; - mlx5e_tc_cleanup(priv); mlx5e_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv); - for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) - mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); } @@ -3801,21 +4135,22 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5_eswitch_rep rep; + u16 max_mtu; + + mlx5e_init_l2_addr(priv); + + /* MTU range: 68 - hw-specific max */ + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu); + mlx5e_set_dev_port_mtu(priv); mlx5_lag_add(mdev, netdev); mlx5e_enable_async_events(priv); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); - rep.load = mlx5e_nic_rep_load; - rep.unload = mlx5e_nic_rep_unload; - rep.vport = FDB_UPLINK_VPORT; - rep.netdev = netdev; - mlx5_eswitch_register_vport_rep(esw, 0, &rep); - } + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) return; @@ -3828,16 +4163,29 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) } queue_work(priv->wq, &priv->set_rx_mode_work); + + rtnl_lock(); + if (netif_running(netdev)) + mlx5e_open(netdev); + netif_device_attach(netdev); + rtnl_unlock(); } static void mlx5e_nic_disable(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; + + rtnl_lock(); + if (netif_running(priv->netdev)) + mlx5e_close(priv->netdev); + netif_device_detach(priv->netdev); + rtnl_unlock(); queue_work(priv->wq, &priv->set_rx_mode_work); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) - mlx5_eswitch_unregister_vport_rep(esw, 0); + mlx5e_unregister_vport_reps(priv); + mlx5e_disable_async_events(priv); mlx5_lag_remove(mdev); } @@ -3853,9 +4201,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .disable = mlx5e_nic_disable, .update_stats = mlx5e_update_stats, .max_nch = mlx5e_get_max_num_channels, + .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, .max_tc = MLX5E_MAX_NUM_TC, }; +/* mlx5e generic netdev management API (move to en_common.c) */ + struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, void *ppriv) @@ -3872,6 +4224,10 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return NULL; } +#ifdef CONFIG_RFS_ACCEL + netdev->rx_cpu_rmap = mdev->rmap; +#endif + profile->init(mdev, netdev, profile, ppriv); netif_carrier_off(netdev); @@ -3891,14 +4247,12 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return NULL; } -int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) +int mlx5e_attach_netdev(struct mlx5e_priv *priv) 
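/* Editor's annotation, not part of the patch: mlx5e_attach_netdev() now
 * takes the priv directly and brings HW state up as profile->init_tx, the
 * drop RQ, profile->init_rx, the Q counter, then profile->enable, with the
 * error labels unwinding in reverse. The drop RQ is opened before init_rx
 * creates any RQTs, presumably so mlx5e_redirect_rqts_to_drop() always has
 * a valid RQN to park traffic on while no channels exist.
 */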
{ + struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; - struct mlx5e_priv *priv; - u16 max_mtu; int err; - priv = netdev_priv(netdev); profile = priv->profile; clear_bit(MLX5E_STATE_DESTROYING, &priv->state); @@ -3906,7 +4260,7 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) if (err) goto out; - err = mlx5e_open_drop_rq(priv); + err = mlx5e_open_drop_rq(mdev, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); goto err_cleanup_tx; @@ -3918,28 +4272,13 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) mlx5e_create_q_counter(priv); - mlx5e_init_l2_addr(priv); - - /* MTU range: 68 - hw-specific max */ - netdev->min_mtu = ETH_MIN_MTU; - mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu); - - mlx5e_set_dev_port_mtu(netdev); - if (profile->enable) profile->enable(priv); - rtnl_lock(); - if (netif_running(netdev)) - mlx5e_open(netdev); - netif_device_attach(netdev); - rtnl_unlock(); - return 0; err_close_drop_rq: - mlx5e_close_drop_rq(priv); + mlx5e_close_drop_rq(&priv->drop_rq); err_cleanup_tx: profile->cleanup_tx(priv); @@ -3948,66 +4287,34 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) return err; } -static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) -{ - struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - int vport; - u8 mac[ETH_ALEN]; - - if (!MLX5_CAP_GEN(mdev, vport_group_manager)) - return; - - mlx5_query_nic_vport_mac_address(mdev, 0, mac); - - for (vport = 1; vport < total_vfs; vport++) { - struct mlx5_eswitch_rep rep; - - rep.load = mlx5e_vport_rep_load; - rep.unload = mlx5e_vport_rep_unload; - rep.vport = vport; - ether_addr_copy(rep.hw_id, mac); - mlx5_eswitch_register_vport_rep(esw, vport, &rep); - } -} - -static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev) -{ - struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - int vport; - - if (!MLX5_CAP_GEN(mdev, vport_group_manager)) - return; - - for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); -} - -void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) +void mlx5e_detach_netdev(struct mlx5e_priv *priv) { - struct mlx5e_priv *priv = netdev_priv(netdev); const struct mlx5e_profile *profile = priv->profile; set_bit(MLX5E_STATE_DESTROYING, &priv->state); - rtnl_lock(); - if (netif_running(netdev)) - mlx5e_close(netdev); - netif_device_detach(netdev); - rtnl_unlock(); - if (profile->disable) profile->disable(priv); flush_workqueue(priv->wq); mlx5e_destroy_q_counter(priv); profile->cleanup_rx(priv); - mlx5e_close_drop_rq(priv); + mlx5e_close_drop_rq(&priv->drop_rq); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } +void mlx5e_destroy_netdev(struct mlx5e_priv *priv) +{ + const struct mlx5e_profile *profile = priv->profile; + struct net_device *netdev = priv->netdev; + + destroy_workqueue(priv->wq); + if (profile->cleanup) + profile->cleanup(priv); + free_netdev(netdev); +} + /* mlx5e_attach and mlx5e_detach scope should be only creating/destroying * hardware contexts and to connect it to the current netdev. 
*/ @@ -4024,13 +4331,12 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) if (err) return err; - err = mlx5e_attach_netdev(mdev, netdev); + err = mlx5e_attach_netdev(priv); if (err) { mlx5e_destroy_mdev_resources(mdev); return err; } - mlx5e_register_vport_rep(mdev); return 0; } @@ -4042,8 +4348,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) if (!netif_device_present(netdev)) return; - mlx5e_unregister_vport_rep(mdev); - mlx5e_detach_netdev(mdev, netdev); + mlx5e_detach_netdev(priv); mlx5e_destroy_mdev_resources(mdev); } @@ -4051,7 +4356,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; int total_vfs = MLX5_TOTAL_VPORTS(mdev); - void *ppriv = NULL; + struct mlx5e_rep_priv *rpriv = NULL; void *priv; int vport; int err; @@ -4061,10 +4366,17 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) if (err) return NULL; - if (MLX5_CAP_GEN(mdev, vport_group_manager)) - ppriv = &esw->offloads.vport_reps[0]; + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) { + mlx5_core_warn(mdev, + "Not creating net device, Failed to alloc rep priv data\n"); + return NULL; + } + rpriv->rep = &esw->offloads.vport_reps[0]; + } - netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv); + netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); goto err_unregister_reps; @@ -4090,33 +4402,25 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) mlx5e_detach(mdev, priv); err_destroy_netdev: - mlx5e_destroy_netdev(mdev, priv); + mlx5e_destroy_netdev(priv); err_unregister_reps: for (vport = 1; vport < total_vfs; vport++) mlx5_eswitch_unregister_vport_rep(esw, vport); + kfree(rpriv); return NULL; } -void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) -{ - const struct mlx5e_profile *profile = priv->profile; - struct net_device *netdev = priv->netdev; - - destroy_workqueue(priv->wq); - if (profile->cleanup) - profile->cleanup(priv); - free_netdev(netdev); -} - static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) { struct mlx5e_priv *priv = vpriv; + void *ppriv = priv->ppriv; unregister_netdev(priv->netdev); mlx5e_detach(mdev, vpriv); - mlx5e_destroy_netdev(mdev, priv); + mlx5e_destroy_netdev(priv); + kfree(ppriv); } static void *mlx5e_get_netdev(void *vpriv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f621373bd7a564aca3ddd1247117467daffc72df..79462c0368a0781ca7a38354726ed628097c0c89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -34,10 +34,14 @@ #include #include #include +#include +#include #include "eswitch.h" #include "en.h" +#include "en_rep.h" #include "en_tc.h" +#include "fs_core.h" static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; @@ -75,7 +79,8 @@ static void mlx5e_rep_get_strings(struct net_device *dev, static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct rtnl_link_stats64 *vport_stats; struct ifla_vf_stats vf_stats; int err; @@ -102,14 +107,16 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) int i, j; memset(s, 0, sizeof(*s)); - for (i = 0; i < 
priv->params.num_channels; i++) { - rq_stats = &priv->channel[i]->rq.stats; + for (i = 0; i < priv->channels.num; i++) { + struct mlx5e_channel *c = priv->channels.c[i]; + + rq_stats = &c->rq.stats; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; - for (j = 0; j < priv->params.num_tc; j++) { - sq_stats = &priv->channel[i]->sq[j].stats; + for (j = 0; j < priv->channels.params.num_tc; j++) { + sq_stats = &c->sq[j].stats; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -163,7 +170,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = { int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; if (esw->mode == SRIOV_NONE) @@ -182,66 +190,426 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) } int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) - { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5e_channel *c; - int n, tc, err, num_sqs = 0; + int n, tc, num_sqs = 0; + int err = -ENOMEM; u16 *sqs; - sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL); + sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL); if (!sqs) - return -ENOMEM; + goto out; - for (n = 0; n < priv->params.num_channels; n++) { - c = priv->channel[n]; + for (n = 0; n < priv->channels.num; n++) { + c = priv->channels.c[n]; for (tc = 0; tc < c->num_tc; tc++) sqs[num_sqs++] = c->sq[tc].sqn; } err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs); - kfree(sqs); + +out: + if (err) + netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err); return err; } -int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) { - struct net_device *netdev = rep->netdev; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + + mlx5_eswitch_sqs2vport_stop(esw, rep); +} + +static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) +{ +#if IS_ENABLED(CONFIG_IPV6) + unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms, + DELAY_PROBE_TIME); +#else + unsigned long ipv6_interval = ~0UL; +#endif + unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, + DELAY_PROBE_TIME); + struct net_device *netdev = rpriv->rep->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - return mlx5e_add_sqs_fwd_rules(priv); - return 0; + rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval); + mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval); } -void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) +void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - mlx5_eswitch_sqs2vport_stop(esw, rep); + mlx5_fc_queue_stats_work(priv->mdev, + &neigh_update->neigh_stats_work, + 
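/* Editor's annotation, not part of the patch: the min_interval fed to
 * mlx5_fc_queue_stats_work() here is computed in
 * mlx5e_rep_neigh_update_init_interval() above as the smaller of the IPv4
 * and IPv6 DELAY_PROBE_TIME neigh-table parameters (the IPv6 value decays
 * to ~0UL without CONFIG_IPV6, so IPv4 wins), and the same interval is
 * mirrored into the flow-counter sampling period via
 * mlx5_fc_update_sampling_interval().
 */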
neigh_update->min_interval); } -void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep) +static void mlx5e_rep_neigh_stats_work(struct work_struct *work) { - struct net_device *netdev = rep->netdev; + struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv, + neigh_update.neigh_stats_work.work); + struct net_device *netdev = rpriv->rep->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_neigh_hash_entry *nhe; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_remove_sqs_fwd_rules(priv); + rtnl_lock(); + if (!list_empty(&rpriv->neigh_update.neigh_list)) + mlx5e_rep_queue_neigh_stats_work(priv); - /* clean (and re-init) existing uplink offloaded TC rules */ - mlx5e_tc_cleanup(priv); - mlx5e_tc_init(priv); + list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list) + mlx5e_tc_update_neigh_used_value(nhe); + + rtnl_unlock(); +} + +static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe) +{ + refcount_inc(&nhe->refcnt); +} + +static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe) +{ + if (refcount_dec_and_test(&nhe->refcnt)) + kfree(nhe); +} + +static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + bool neigh_connected, + unsigned char ha[ETH_ALEN]) +{ + struct ethhdr *eth = (struct ethhdr *)e->encap_header; + + ASSERT_RTNL(); + + if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) || + !ether_addr_equal(e->h_dest, ha)) + mlx5e_tc_encap_flows_del(priv, e); + + if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { + ether_addr_copy(e->h_dest, ha); + ether_addr_copy(eth->h_dest, ha); + + mlx5e_tc_encap_flows_add(priv, e); + } +} + +static void mlx5e_rep_neigh_update(struct work_struct *work) +{ + struct mlx5e_neigh_hash_entry *nhe = + container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work); + struct neighbour *n = nhe->n; + struct mlx5e_encap_entry *e; + unsigned char ha[ETH_ALEN]; + struct mlx5e_priv *priv; + bool neigh_connected; + bool encap_connected; + u8 nud_state, dead; + + rtnl_lock(); + + /* If these parameters are changed after we release the lock, + * we'll receive another event letting us know about it. + * We use this lock to avoid inconsistency between the neigh validity + * and its hw address.
+ */ + read_lock_bh(&n->lock); + memcpy(ha, n->ha, ETH_ALEN); + nud_state = n->nud_state; + dead = n->dead; + read_unlock_bh(&n->lock); + + neigh_connected = (nud_state & NUD_VALID) && !dead; + + list_for_each_entry(e, &nhe->encap_list, encap_list) { + encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); + priv = netdev_priv(e->out_dev); + + if (encap_connected != neigh_connected || + !ether_addr_equal(e->h_dest, ha)) + mlx5e_rep_update_flows(priv, e, neigh_connected, ha); + } + mlx5e_rep_neigh_entry_release(nhe); + rtnl_unlock(); + neigh_release(n); +} + +static struct mlx5e_neigh_hash_entry * +mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, + struct mlx5e_neigh *m_neigh); + +static int mlx5e_rep_netevent_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, + neigh_update.netevent_nb); + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + struct net_device *netdev = rpriv->rep->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_neigh_hash_entry *nhe = NULL; + struct mlx5e_neigh m_neigh = {}; + struct neigh_parms *p; + struct neighbour *n; + bool found = false; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + n = ptr; +#if IS_ENABLED(CONFIG_IPV6) + if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) +#else + if (n->tbl != &arp_tbl) +#endif + return NOTIFY_DONE; + + m_neigh.dev = n->dev; + m_neigh.family = n->ops->family; + memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len); + + /* We are in atomic context and can't take RTNL mutex, so use + * spin_lock_bh to look up the neigh table. bh is used since + * netevent can be called from a softirq context. + */ + spin_lock_bh(&neigh_update->encap_lock); + nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh); + if (!nhe) { + spin_unlock_bh(&neigh_update->encap_lock); + return NOTIFY_DONE; + } + + /* This assignment is valid as long as the neigh reference + * is taken + */ + nhe->n = n; + + /* Take a reference to ensure the neighbour and mlx5 encap + * entry won't be destroyed until we drop the reference in + * delayed work. + */ + neigh_hold(n); + mlx5e_rep_neigh_entry_hold(nhe); + + if (!queue_work(priv->wq, &nhe->neigh_update_work)) { + mlx5e_rep_neigh_entry_release(nhe); + neigh_release(n); + } + spin_unlock_bh(&neigh_update->encap_lock); + break; + + case NETEVENT_DELAY_PROBE_TIME_UPDATE: + p = ptr; + + /* We check the device is present since we don't care about + * changes in the default table, we only care about changes + * done to the per-device delay probe time parameter. + */ +#if IS_ENABLED(CONFIG_IPV6) + if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) +#else + if (!p->dev || p->tbl != &arp_tbl) +#endif + return NOTIFY_DONE; + + /* We are in atomic context and can't take RTNL mutex, + * so use spin_lock_bh to walk the neigh list and look for + * the relevant device. bh is used since netevent can be + * called from a softirq context.
+ */ + spin_lock_bh(&neigh_update->encap_lock); + list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) { + if (p->dev == nhe->m_neigh.dev) { + found = true; + break; + } + } + spin_unlock_bh(&neigh_update->encap_lock); + if (!found) + return NOTIFY_DONE; + + neigh_update->min_interval = min_t(unsigned long, + NEIGH_VAR(p, DELAY_PROBE_TIME), + neigh_update->min_interval); + mlx5_fc_update_sampling_interval(priv->mdev, + neigh_update->min_interval); + break; + } + return NOTIFY_DONE; +} + +static const struct rhashtable_params mlx5e_neigh_ht_params = { + .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node), + .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh), + .key_len = sizeof(struct mlx5e_neigh), + .automatic_shrinking = true, +}; + +static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + int err; + + err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params); + if (err) + return err; + + INIT_LIST_HEAD(&neigh_update->neigh_list); + spin_lock_init(&neigh_update->encap_lock); + INIT_DELAYED_WORK(&neigh_update->neigh_stats_work, + mlx5e_rep_neigh_stats_work); + mlx5e_rep_neigh_update_init_interval(rpriv); + + rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event; + err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb); + if (err) + goto out_err; + return 0; + +out_err: + rhashtable_destroy(&neigh_update->neigh_ht); + return err; +} + +static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev); + + unregister_netevent_notifier(&neigh_update->netevent_nb); + + flush_workqueue(priv->wq); /* flush neigh update works */ + + cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work); + + rhashtable_destroy(&neigh_update->neigh_ht); +} + +static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv, + struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + int err; + + err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht, + &nhe->rhash_node, + mlx5e_neigh_ht_params); + if (err) + return err; + + list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); + + return err; +} + +static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv, + struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + spin_lock_bh(&rpriv->neigh_update.encap_lock); + + list_del(&nhe->neigh_list); + + rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht, + &nhe->rhash_node, + mlx5e_neigh_ht_params); + spin_unlock_bh(&rpriv->neigh_update.encap_lock); +} + +/* This function must only be called under RTNL lock or under the + * representor's encap_lock in case RTNL mutex can't be held. 
+ */ +static struct mlx5e_neigh_hash_entry * +mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, + struct mlx5e_neigh *m_neigh) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; + + return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh, + mlx5e_neigh_ht_params); +} + +static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct mlx5e_neigh_hash_entry **nhe) +{ + int err; + + *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL); + if (!*nhe) + return -ENOMEM; + + memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); + INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update); + INIT_LIST_HEAD(&(*nhe)->encap_list); + refcount_set(&(*nhe)->refcnt, 1); + + err = mlx5e_rep_neigh_entry_insert(priv, *nhe); + if (err) + goto out_free; + return 0; + +out_free: + kfree(*nhe); + return err; +} + +static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv, + struct mlx5e_neigh_hash_entry *nhe) +{ + /* The neigh hash entry must be removed from the hash table regardless + * of the reference count value, so it won't be found by the next + * neigh notification call. The neigh hash entry reference count is + * incremented only during creation and neigh notification calls and + * protects from freeing the nhe struct. + */ + mlx5e_rep_neigh_entry_remove(priv, nhe); + mlx5e_rep_neigh_entry_release(nhe); +} + +int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_neigh_hash_entry *nhe; + int err; + + nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); + if (!nhe) { + err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); + if (err) + return err; + } + list_add(&e->encap_list, &nhe->encap_list); + return 0; +} + +void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_neigh_hash_entry *nhe; + + list_del(&e->encap_list); + nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); + + if (list_empty(&nhe->encap_list)) + mlx5e_rep_neigh_entry_destroy(priv, nhe); } static int mlx5e_rep_open(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int err; @@ -259,7 +627,8 @@ static int mlx5e_rep_open(struct net_device *dev) static int mlx5e_rep_close(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); @@ -271,7 +640,8 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; int ret; ret = snprintf(buf, len, "%d", rep->vport - 1); @@ -314,18 +684,25 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { - struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep; - if (rep && 
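/* Editor's annotation, not part of the patch: the neigh hash entry
 * lifecycle above is refcount driven. mlx5e_rep_neigh_entry_create()
 * starts at refcount 1; each queued netevent work takes one more reference
 * (plus a neigh_hold() on the neighbour itself) that
 * mlx5e_rep_neigh_update() drops, and mlx5e_rep_neigh_entry_destroy()
 * unhashes the entry first, so later notifications can no longer find it,
 * before dropping the creation reference.
 */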
rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS) + if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + return false; + + rep = rpriv->rep; + if (esw->mode == SRIOV_OFFLOADS && + rep && rep->vport == FDB_UPLINK_VPORT) return true; return false; } -bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) +static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) { - struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; if (rep && rep->vport != FDB_UPLINK_VPORT) return true; @@ -397,42 +774,23 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_get_offload_stats = mlx5e_get_offload_stats, }; -static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) { - struct mlx5e_priv *priv = netdev_priv(netdev); u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - priv->params.log_sq_size = - MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; - priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - - priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, - BIT(priv->params.log_rq_size)); - - priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); - - priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - priv->params.num_tc = 1; - - priv->params.lro_wqe_sz = - MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; - - priv->mdev = mdev; - priv->netdev = netdev; - priv->params.num_channels = profile->max_nch(mdev); - priv->profile = profile; - priv->ppriv = ppriv; + params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - mutex_init(&priv->state_lock); + params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); + params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); + params->num_tc = 1; + params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; } static void mlx5e_build_rep_netdev(struct net_device *netdev) @@ -458,30 +816,39 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, void *ppriv) { - mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv); + struct mlx5e_priv *priv = netdev_priv(netdev); + + priv->mdev = mdev; + priv->netdev = netdev; + priv->profile = profile; + priv->ppriv = ppriv; + + mutex_init(&priv->state_lock); + + INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); + + priv->channels.params.num_channels = profile->max_nch(mdev); + mlx5e_build_rep_params(mdev, &priv->channels.params); mlx5e_build_rep_netdev(netdev); } static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_eswitch_rep *rep = priv->ppriv; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_flow_handle *flow_rule; int err; - int i; + + mlx5e_init_l2_addr(priv); err = mlx5e_create_direct_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create direct rqts 
failed, %d\n", err); + if (err) return err; - } err = mlx5e_create_direct_tirs(priv); - if (err) { - mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + if (err) goto err_destroy_direct_rqts; - } flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, @@ -503,21 +870,19 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: - for (i = 0; i < priv->params.num_channels; i++) - mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_direct_rqts(priv); return err; } static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { - struct mlx5_eswitch_rep *rep = priv->ppriv; - int i; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; mlx5e_tc_cleanup(priv); mlx5_del_flow_rules(rep->vport_rx_rule); mlx5e_destroy_direct_tirs(priv); - for (i = 0; i < priv->params.num_channels; i++) - mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_direct_rqts(priv); } static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) @@ -546,56 +911,181 @@ static struct mlx5e_profile mlx5e_rep_profile = { .cleanup_tx = mlx5e_cleanup_nic_tx, .update_stats = mlx5e_rep_update_stats, .max_nch = mlx5e_get_rep_max_num_channels, + .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, + .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */, .max_tc = 1, }; -int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep) +/* e-Switch vport representors */ + +static int +mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = netdev_priv(rep->netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + int err; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + err = mlx5e_add_sqs_fwd_rules(priv); + if (err) + return err; + } + + err = mlx5e_rep_neigh_init(rpriv); + if (err) + goto err_remove_sqs; + + return 0; + +err_remove_sqs: + mlx5e_remove_sqs_fwd_rules(priv); + return err; +} + +static void +mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { + struct mlx5e_priv *priv = netdev_priv(rep->netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_remove_sqs_fwd_rules(priv); + + /* clean (and re-init) existing uplink offloaded TC rules */ + mlx5e_tc_cleanup(priv); + mlx5e_tc_init(priv); + + mlx5e_rep_neigh_cleanup(rpriv); +} + +static int +mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; struct net_device *netdev; int err; - netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep); + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) + return -ENOMEM; + + netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", rep->vport); + kfree(rpriv); return -EINVAL; } rep->netdev = netdev; + rpriv->rep = rep; - err = mlx5e_attach_netdev(esw->dev, netdev); + err = mlx5e_attach_netdev(netdev_priv(netdev)); if (err) { pr_warn("Failed to attach representor netdev for vport %d\n", rep->vport); goto err_destroy_netdev; } + err = mlx5e_rep_neigh_init(rpriv); + if (err) { + pr_warn("Failed to initialize neighbour handling for vport %d\n", + rep->vport); + goto err_detach_netdev; + } + err = register_netdev(netdev); if (err) { pr_warn("Failed to register representor netdev for vport %d\n", rep->vport); - goto err_detach_netdev; + goto err_neigh_cleanup; } return 0; +err_neigh_cleanup: + mlx5e_rep_neigh_cleanup(rpriv); + err_detach_netdev: - mlx5e_detach_netdev(esw->dev, netdev); + mlx5e_detach_netdev(netdev_priv(netdev)); err_destroy_netdev: - mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev)); - + mlx5e_destroy_netdev(netdev_priv(netdev)); + kfree(rpriv); return err; }
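/*
 * Editor's note: a minimal sketch, not part of the patch, of how the
 * eswitch core is assumed to drive the rep load/unload callbacks that are
 * registered further below when SRIOV offloads mode is entered. The exact
 * dispatch lives in eswitch_offloads.c; the 'valid' flag and the
 * vport_reps array layout are assumptions of this sketch.
 */
static int sketch_load_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	int vport, err;

	for (vport = 0; vport < nvports; vport++) {
		struct mlx5_eswitch_rep *rep = &esw->offloads.vport_reps[vport];

		if (!rep->valid)
			continue;
		err = rep->load(esw, rep); /* e.g. mlx5e_vport_rep_load() above */
		if (err)
			return err;
	}
	return 0;
}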
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep) +static void +mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { struct net_device *netdev = rep->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + void *ppriv = priv->ppriv; + + unregister_netdev(rep->netdev); + + mlx5e_rep_neigh_cleanup(rpriv); + mlx5e_detach_netdev(priv); + mlx5e_destroy_netdev(priv); + kfree(ppriv); /* mlx5e_rep_priv */ +} + +static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + u8 mac[ETH_ALEN]; + + mlx5_query_nic_vport_mac_address(mdev, 0, mac); + + for (vport = 1; vport < total_vfs; vport++) { + struct mlx5_eswitch_rep rep; + + rep.load = mlx5e_vport_rep_load; + rep.unload = mlx5e_vport_rep_unload; + rep.vport = vport; + ether_addr_copy(rep.hw_id, mac); + mlx5_eswitch_register_vport_rep(esw, vport, &rep); + } +} + +static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + + for (vport = 1; vport < total_vfs; vport++) + mlx5_eswitch_unregister_vport_rep(esw, vport); +} + +void mlx5e_register_vport_reps(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch_rep rep; + + mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); + rep.load = mlx5e_nic_rep_load; + rep.unload = mlx5e_nic_rep_unload; + rep.vport = FDB_UPLINK_VPORT; + rep.netdev = priv->netdev; + mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport */ + + mlx5e_rep_register_vf_vports(priv); /* VFs vports */ +} + +void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; - unregister_netdev(netdev); - mlx5e_detach_netdev(esw->dev, netdev); - mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev)); + mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ + mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF */ } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h new file mode 100644 index 0000000000000000000000000000000000000000..a0a1a7a1d6c0e835fbe284bb50a15047d33b961e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5E_REP_H__ +#define __MLX5E_REP_H__ + +#include <net/ip_tunnels.h> +#include <linux/rhashtable.h> +#include "eswitch.h" +#include "en.h" + +struct mlx5e_neigh_update_table { + struct rhashtable neigh_ht; + /* Save the neigh hash entries in a list in addition to the hash table + * (neigh_ht), in order to iterate easily over the neigh entries. + * Used for stats query. + */ + struct list_head neigh_list; + /* protect lookup/remove operations */ + spinlock_t encap_lock; + struct notifier_block netevent_nb; + struct delayed_work neigh_stats_work; + unsigned long min_interval; /* jiffies */ +}; + +struct mlx5e_rep_priv { + struct mlx5_eswitch_rep *rep; + struct mlx5e_neigh_update_table neigh_update; +}; + +struct mlx5e_neigh { + struct net_device *dev; + union { + __be32 v4; + struct in6_addr v6; + } dst_ip; + int family; +}; + +struct mlx5e_neigh_hash_entry { + struct rhash_head rhash_node; + struct mlx5e_neigh m_neigh; + + /* Save the neigh hash entry in a list on the representor in + * addition to the hash table, in order to iterate easily over the + * neighbour entries. Used for stats query. + */ + struct list_head neigh_list; + + /* encap list sharing the same neigh */ + struct list_head encap_list; + + /* valid only when the neigh reference is taken during + * neigh_update_work workqueue callback. + */ + struct neighbour *n; + struct work_struct neigh_update_work; + + /* neigh hash entry can be deleted only when the refcount is zero. + * refcount is needed to avoid neigh hash entry removal by TC, while + * it's used by the neigh notification call. + */ + refcount_t refcnt; + + /* Save the last time that offloaded traffic passed over one of the + * neigh hash entry flows. Use it to periodically update the neigh + * 'used' value and avoid neigh deletion by the kernel. + */ + unsigned long reported_lastuse; +};
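/*
 * Editor's note: neigh_ht above is keyed on struct mlx5e_neigh. A minimal
 * sketch of matching rhashtable parameters, assuming the usual setup (the
 * real definition, mlx5e_neigh_ht_params, lives in en_rep.c and is not
 * shown in this excerpt):
 */
static const struct rhashtable_params sketch_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};
/* With parameters like these, rhashtable_lookup_fast(&neigh_ht, m_neigh,
 * params) hashes the {dev, dst_ip, family} tuple, which is why
 * mlx5e_rep_neigh_entry_lookup() earlier in the patch takes a
 * struct mlx5e_neigh as the lookup key.
 */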
+enum { + /* set when the encap entry is successfully offloaded into HW */ + MLX5_ENCAP_ENTRY_VALID = BIT(0), +}; + +struct mlx5e_encap_entry { + /* neigh hash entry list of encaps sharing the same neigh */ + struct list_head encap_list; + struct mlx5e_neigh m_neigh; + /* a node of the eswitch encap hash table which keeps all the encap + * entries + */ + struct hlist_node encap_hlist; + struct list_head flows; + u32 encap_id; + struct ip_tunnel_info tun_info; + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ + + struct net_device *out_dev; + int tunnel_type; + u8 flags; + char *encap_header; + int encap_size; +}; + +void mlx5e_register_vport_reps(struct mlx5e_priv *priv); +void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); +bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); +int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); +void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); + +int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp); +bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id); + +int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); +void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); + +int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); +void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); + +void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); + +#endif /* __MLX5E_REP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index bafcb349a50c6d3809aa329d43382e51dbd3ffe4..7b1566f0ae58c7c64e3313ed4f6f3fb83bafee5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -39,6 +39,8 @@ #include "en.h" #include "en_tc.h" #include "eswitch.h" +#include "en_rep.h" +#include "ipoib.h" static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) { @@ -156,28 +158,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val) -{ - bool was_opened; - - if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) - return; - - if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) - return; - - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(priv->netdev); - - MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val); - mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type); - - if (was_opened) - mlx5e_open_locked(priv->netdev); - -} - #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, @@ -331,7 +311,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - struct mlx5e_sq *sq = &rq->channel->icosq; + struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *wqe; u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); @@ -341,7 +321,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; sq->db.ico_wqe[pi].num_wqebbs = 1; - mlx5e_send_nop(sq, false); + mlx5e_post_nop(wq, sq->sqn,
&sq->pc); } wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -353,7 +333,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs; sq->pc += num_wqebbs; - mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); } static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq, @@ -637,37 +617,36 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); } -static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq) +static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *wqe; - u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */ + u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */ wqe = mlx5_wq_cyc_get_wqe(wq, pi); - wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); } static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, const struct xdp_buff *xdp) { - struct mlx5e_sq *sq = &rq->channel->xdp_sq; + struct mlx5e_xdpsq *sq = &rq->xdpsq; struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; + u16 pi = sq->pc & wq->sz_m1; struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi]; struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; - u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT; ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; dma_addr_t dma_addr = di->addr + data_offset; unsigned int dma_len = xdp->data_end - xdp->data; + prefetchw(wqe); + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { rq->stats.xdp_drop++; @@ -675,48 +654,42 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, return false; } - if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { - if (sq->db.xdp.doorbell) { + if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { + if (sq->db.doorbell) { /* SQ is full, ring doorbell */ mlx5e_xmit_xdp_doorbell(sq); - sq->db.xdp.doorbell = false; + sq->db.doorbell = false; } rq->stats.xdp_tx_full++; mlx5e_page_release(rq, di, true); return false; } - dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); - memset(wqe, 0, sizeof(*wqe)); + cseg->fm_ce_se = 0; dseg = (struct mlx5_wqe_data_seg *)eseg + 1; + /* copy the inline part if required */ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); dma_len -= MLX5E_XDP_MIN_INLINE; dma_addr += MLX5E_XDP_MIN_INLINE; - - ds_cnt += MLX5E_XDP_IHS_DS_COUNT; dseg++; } /* write the dma part */ dseg->addr = cpu_to_be64(dma_addr); dseg->byte_count = cpu_to_be32(dma_len); - dseg->lkey = sq->mkey_be; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - sq->db.xdp.di[pi] = *di; - wi->opcode = MLX5_OPCODE_SEND; - wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS; - sq->pc += MLX5E_XDP_TX_WQEBBS; + sq->db.di[pi] = *di; + sq->pc++; - sq->db.xdp.doorbell = true; + sq->db.doorbell = true; rq->stats.xdp_tx++; return true; } @@ -837,7 +810,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct 
mlx5_cqe64 *cqe) { struct net_device *netdev = rq->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; __be16 wqe_counter_be; @@ -932,7 +906,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto mpwrq_cqe_out; } - prefetch(skb->data); + prefetchw(skb->data); cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb); @@ -950,7 +924,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq; + struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; int work_done = 0; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) @@ -977,9 +951,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) rq->handle_rx_cqe(rq, cqe); } - if (xdp_sq->db.xdp.doorbell) { - mlx5e_xmit_xdp_doorbell(xdp_sq); - xdp_sq->db.xdp.doorbell = false; + if (xdpsq->db.doorbell) { + mlx5e_xmit_xdp_doorbell(xdpsq); + xdpsq->db.doorbell = false; } mlx5_cqwq_update_db_record(&cq->wq); @@ -989,3 +963,152 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) return work_done; } + +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_xdpsq *sq; + struct mlx5e_rq *rq; + u16 sqcc; + int i; + + sq = container_of(cq, struct mlx5e_xdpsq, cq); + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return false; + + rq = container_of(sq, struct mlx5e_rq, xdpsq); + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { + struct mlx5_cqe64 *cqe; + u16 wqe_counter; + bool last_wqe; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + + do { + struct mlx5e_dma_info *di; + u16 ci; + + last_wqe = (sqcc == wqe_counter); + + ci = sqcc & sq->wq.sz_m1; + di = &sq->db.di[ci]; + + sqcc++; + /* Recycle RX page */ + mlx5e_page_release(rq, di, true); + } while (!last_wqe); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc = sqcc; + return (i == MLX5E_TX_CQ_POLL_BUDGET); +} + +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) +{ + struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); + struct mlx5e_dma_info *di; + u16 ci; + + while (sq->cc != sq->pc) { + ci = sq->cc & sq->wq.sz_m1; + di = &sq->db.di[ci]; + sq->cc++; + + mlx5e_page_release(rq, di, false); + } +} + +#ifdef CONFIG_MLX5_CORE_IPOIB + +#define MLX5_IB_GRH_DGID_OFFSET 24 +#define MLX5_IB_GRH_BYTES 40 +#define MLX5_IPOIB_ENCAP_LEN 4 +#define MLX5_GID_SIZE 16 + +static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, + struct sk_buff *skb) +{ + struct net_device *netdev = rq->netdev; + u8 *dgid; + u8 g; + + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; + dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; + if ((!g) || dgid[0] != 0xff) + skb->pkt_type = PACKET_HOST; + else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_MULTICAST; + + /* TODO: IB/ipoib: Allow mcast packets from other VFs + * 68996a6e760e5c74654723eeb57bf65628ae87f4 + */ + + 
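/*
 * Editor's note (annotation, not part of the patch): at this point in
 * mlx5i_complete_rx_cqe(), skb->data still points at the 40-byte IB GRH
 * that precedes the 4-byte IPoIB pseudo header (2 bytes protocol,
 * 2 bytes reserved). The skb_pull() just below strips the GRH, the
 * protocol is then read from the pseudo header, and a second skb_pull()
 * of MLX5_IPOIB_ENCAP_LEN leaves a plain L3 packet, so the assumed
 * on-wire layout is [GRH(40)][proto(2)][reserved(2)][payload].
 */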
skb_pull(skb, MLX5_IB_GRH_BYTES); + + skb->protocol = *((__be16 *)(skb->data)); + + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + + skb_record_rx_queue(skb, rq->ix); + + if (likely(netdev->features & NETIF_F_RXHASH)) + mlx5e_skb_set_hash(cqe, skb); + + skb_reset_mac_header(skb); + skb_pull(skb, MLX5_IPOIB_ENCAP_LEN); + + skb->dev = netdev; + + rq->stats.csum_complete++; + rq->stats.packets++; + rq->stats.bytes += cqe_bcnt; +} + +void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + struct mlx5e_rx_wqe *wqe; + __be16 wqe_counter_be; + struct sk_buff *skb; + u16 wqe_counter; + u32 cqe_bcnt; + + wqe_counter_be = cqe->wqe_counter; + wqe_counter = be16_to_cpu(wqe_counter_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + + skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt); + if (!skb) + goto wq_ll_pop; + + mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + napi_gro_receive(rq->cq.napi, skb); + +wq_ll_pop: + mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + &wqe->next.next_wqe_index); +} + +#endif /* CONFIG_MLX5_CORE_IPOIB */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index cbfac06b7ffd1d5140226ccb87331db57d4880d8..02dd3a95ed8f013d0d4795d5054bd79ae4ca1201 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -293,7 +293,7 @@ void mlx5e_rx_am_work(struct work_struct *work) struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am); struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix]; - mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq, + mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, cur_profile.usec, cur_profile.pkts); am->state = MLX5E_AM_START_MEASURE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5621dcfda4f1868c6bccdff9cd220b7a43d46dd3..5225f2226a67cc25761d265e3d5f69b510d0df3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -236,12 +236,9 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, { int err = 0; - err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true); - if (err) { - netdev_err(priv->netdev, - "\tFailed to enable UC loopback err(%d)\n", err); + err = mlx5e_refresh_tirs(priv, true); + if (err) return err; - } lbtp->loopback_ok = false; init_completion(&lbtp->comp); @@ -258,7 +255,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, struct mlx5e_lbt_priv *lbtp) { dev_remove_pack(&lbtp->pt); - mlx5e_refresh_tirs_self_loopback(priv->mdev, false); + mlx5e_refresh_tirs(priv, false); } #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 5436866798f447eef43dac9af242decae4ed6017..11c27e4fadf6e09400a432c5498749df73265477 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -42,14 +42,25 @@ #include #include #include +#include #include +#include #include "en.h" +#include "en_rep.h" #include "en_tc.h" #include "eswitch.h" #include "vxlan.h" +struct mlx5_nic_flow_attr { + u32 action; + u32 flow_tag; + u32 mod_hdr_id; +}; + enum { MLX5E_TC_FLOW_ESWITCH = BIT(0), + MLX5E_TC_FLOW_NIC = BIT(1), + MLX5E_TC_FLOW_OFFLOADED = BIT(2), }; struct 
mlx5e_tc_flow { @@ -58,7 +69,16 @@ struct mlx5e_tc_flow { u8 flags; struct mlx5_flow_handle *rule; struct list_head encap; /* flows sharing the same encap */ - struct mlx5_esw_flow_attr *attr; + union { + struct mlx5_esw_flow_attr esw_attr[0]; + struct mlx5_nic_flow_attr nic_attr[0]; + }; +}; + +struct mlx5e_tc_flow_parse_attr { + struct mlx5_flow_spec spec; + int num_mod_hdr_actions; + void *mod_hdr_actions; }; enum { @@ -71,24 +91,26 @@ enum { static struct mlx5_flow_handle * mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, - u32 action, u32 flow_tag) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow) { + struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_core_dev *dev = priv->mdev; - struct mlx5_flow_destination dest = { 0 }; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = { - .action = action, - .flow_tag = flow_tag, + .action = attr->action, + .flow_tag = attr->flow_tag, .encap_id = 0, }; struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; bool table_created = false; + int err; - if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = priv->fs.vlan.ft.t; - } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(dev, true); if (IS_ERR(counter)) return ERR_CAST(counter); @@ -97,6 +119,19 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, dest.counter = counter; } + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr->num_mod_hdr_actions, + parse_attr->mod_hdr_actions, + &attr->mod_hdr_id); + flow_act.modify_id = attr->mod_hdr_id; + kfree(parse_attr->mod_hdr_actions); + if (err) { + rule = ERR_PTR(err); + goto err_create_mod_hdr_id; + } + } + if (IS_ERR_OR_NULL(priv->fs.tc.t)) { priv->fs.tc.t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, @@ -114,8 +149,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, table_created = true; } - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1); + parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, + &flow_act, &dest, 1); if (IS_ERR(rule)) goto err_add_rule; @@ -128,6 +164,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, priv->fs.tc.t = NULL; } err_create_ft: + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + mlx5_modify_header_dealloc(priv->mdev, + attr->mod_hdr_id); +err_create_mod_hdr_id: mlx5_fc_destroy(dev, counter); return rule; @@ -138,47 +178,195 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, { struct mlx5_fc *counter = NULL; - if (!IS_ERR(flow->rule)) { - counter = mlx5_flow_rule_counter(flow->rule); - mlx5_del_flow_rules(flow->rule); - mlx5_fc_destroy(priv->mdev, counter); - } + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_del_flow_rules(flow->rule); + mlx5_fc_destroy(priv->mdev, counter); if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } + + if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + mlx5_modify_header_dealloc(priv->mdev, + flow->nic_attr->mod_hdr_id); } +static void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + static struct mlx5_flow_handle * 
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, - struct mlx5_esw_flow_attr *attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5_flow_handle *rule; int err; err = mlx5_eswitch_add_vlan_action(esw, attr); - if (err) - return ERR_PTR(err); + if (err) { + rule = ERR_PTR(err); + goto err_add_vlan; + } - return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); -} + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB, + parse_attr->num_mod_hdr_actions, + parse_attr->mod_hdr_actions, + &attr->mod_hdr_id); + kfree(parse_attr->mod_hdr_actions); + if (err) { + rule = ERR_PTR(err); + goto err_mod_hdr; + } + } -static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow); + rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); + if (IS_ERR(rule)) + goto err_add_rule; + + return rule; + +err_add_rule: + if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + mlx5_modify_header_dealloc(priv->mdev, + attr->mod_hdr_id); +err_mod_hdr: + mlx5_eswitch_del_vlan_action(esw, attr); +err_add_vlan: + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + mlx5e_detach_encap(priv, flow); + return rule; +} static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *attr = flow->esw_attr; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr); + if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; + mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr); + } - mlx5_eswitch_del_vlan_action(esw, flow->attr); + mlx5_eswitch_del_vlan_action(esw, flow->esw_attr); - if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { mlx5e_detach_encap(priv, flow); + kvfree(flow->esw_attr->parse_attr); + } + + if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + mlx5_modify_header_dealloc(priv->mdev, + attr->mod_hdr_id); +} + +void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_tc_flow *flow; + int err; + + err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, + e->encap_size, e->encap_header, + &e->encap_id); + if (err) { + mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n", + err); + return; + } + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(priv); + + list_for_each_entry(flow, &e->flows, encap) { + flow->esw_attr->encap_id = e->encap_id; + flow->rule = mlx5e_tc_add_fdb_flow(priv, + flow->esw_attr->parse_attr, + flow); + if (IS_ERR(flow->rule)) { + err = PTR_ERR(flow->rule); + mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", + err); + continue; + } + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + } +} + +void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_tc_flow *flow; + struct mlx5_fc *counter; + + list_for_each_entry(flow, &e->flows, encap) { + if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_del_flow_rules(flow->rule); + mlx5_fc_destroy(priv->mdev, counter); + } + } + + if (e->flags & MLX5_ENCAP_ENTRY_VALID) { + e->flags &= 
~MLX5_ENCAP_ENTRY_VALID; + mlx5_encap_dealloc(priv->mdev, e->encap_id); + } +} + +void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_neigh *m_neigh = &nhe->m_neigh; + u64 bytes, packets, lastuse = 0; + struct mlx5e_tc_flow *flow; + struct mlx5e_encap_entry *e; + struct mlx5_fc *counter; + struct neigh_table *tbl; + bool neigh_used = false; + struct neighbour *n; + + if (m_neigh->family == AF_INET) + tbl = &arp_tbl; +#if IS_ENABLED(CONFIG_IPV6) + else if (m_neigh->family == AF_INET6) + tbl = ipv6_stub->nd_tbl; +#endif + else + return; + + list_for_each_entry(e, &nhe->encap_list, encap_list) { + if (!(e->flags & MLX5_ENCAP_ENTRY_VALID)) + continue; + list_for_each_entry(flow, &e->flows, encap) { + if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { + neigh_used = true; + break; + } + } + } + } + + if (neigh_used) { + nhe->reported_lastuse = jiffies; + + /* find the relevant neigh according to the cached device and + * dst ip pair + */ + n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); + if (!n) { + WARN(1, "The neighbour was already freed\n"); + return; + } + + neigh_event_send(n, NULL); + neigh_release(n); + } } static void mlx5e_detach_encap(struct mlx5e_priv *priv, @@ -188,22 +376,20 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, list_del(&flow->encap); if (list_empty(next)) { - struct mlx5_encap_entry *e; + struct mlx5e_encap_entry *e; + + e = list_entry(next, struct mlx5e_encap_entry, flows); + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); - e = list_entry(next, struct mlx5_encap_entry, flows); - if (e->n) { + if (e->flags & MLX5_ENCAP_ENTRY_VALID) mlx5_encap_dealloc(priv->mdev, e->encap_id); - neigh_release(e->n); - } + hlist_del_rcu(&e->encap_hlist); + kfree(e->encap_header); kfree(e); } } -/* we get here also when setting rule to the FW failed, etc. It means that the - * flow rule itself might not exist, but some offloading related to the actions - * should be cleaned.
- */ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { @@ -631,16 +817,18 @@ static int parse_cls_flower(struct mlx5e_priv *priv, { struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; - struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep; u8 min_inline; int err; err = __parse_cls_flower(priv, spec, f, &min_inline); - if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) && - rep->vport != FDB_UPLINK_VPORT) { - if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && - esw->offloads.inline_mode < min_inline) { + if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { + rep = rpriv->rep; + if (rep->vport != FDB_UPLINK_VPORT && + (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && + esw->offloads.inline_mode < min_inline)) { netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", min_inline, esw->offloads.inline_mode); @@ -651,29 +839,313 @@ static int parse_cls_flower(struct mlx5e_priv *priv, return err; } +struct pedit_headers { + struct ethhdr eth; + struct iphdr ip4; + struct ipv6hdr ip6; + struct tcphdr tcp; + struct udphdr udp; +}; + +static int pedit_header_offsets[] = { + [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), + [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), + [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), + [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), + [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), +}; + +#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) + +static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, + struct pedit_headers *masks, + struct pedit_headers *vals) +{ + u32 *curr_pmask, *curr_pval; + + if (hdr_type >= __PEDIT_HDR_TYPE_MAX) + goto out_err; + + curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset); + curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset); + + if (*curr_pmask & mask) /* disallow acting twice on the same location */ + goto out_err; + + *curr_pmask |= mask; + *curr_pval |= (val & mask); + + return 0; + +out_err: + return -EOPNOTSUPP; +} + +struct mlx5_fields { + u8 field; + u8 size; + u32 offset; +}; + +static struct mlx5_fields fields[] = { + {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])}, + {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_dest[4])}, + {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])}, + {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, + {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, + + {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, + {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, + {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, + {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, + + {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])}, + {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])}, + {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])}, + {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0, 4, offsetof(struct pedit_headers, 
ip6.saddr.s6_addr32[3])}, + {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])}, + {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])}, + {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])}, + {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])}, + + {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)}, + {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)}, + {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5}, + + {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)}, + {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)}, +}; + +/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at + * max from the SW pedit action. On success, it says how many HW actions were + * actually parsed. + */ +static int offload_pedit_fields(struct pedit_headers *masks, + struct pedit_headers *vals, + struct mlx5e_tc_flow_parse_attr *parse_attr) +{ + struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; + int i, action_size, nactions, max_actions, first, last; + void *s_masks_p, *a_masks_p, *vals_p; + u32 s_mask, a_mask, val; + struct mlx5_fields *f; + u8 cmd, field_bsize; + unsigned long mask; + void *action; + + set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET]; + add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD]; + set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET]; + add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; + + action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); + action = parse_attr->mod_hdr_actions; + max_actions = parse_attr->num_mod_hdr_actions; + nactions = 0; + + for (i = 0; i < ARRAY_SIZE(fields); i++) { + f = &fields[i]; + /* avoid seeing bits set from previous iterations */ + s_mask = a_mask = mask = val = 0; + + s_masks_p = (void *)set_masks + f->offset; + a_masks_p = (void *)add_masks + f->offset; + + memcpy(&s_mask, s_masks_p, f->size); + memcpy(&a_mask, a_masks_p, f->size); + + if (!s_mask && !a_mask) /* nothing to offload here */ + continue; + + if (s_mask && a_mask) { + printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); + return -EOPNOTSUPP; + } + + if (nactions == max_actions) { + printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); + return -EOPNOTSUPP; + } + + if (s_mask) { + cmd = MLX5_ACTION_TYPE_SET; + mask = s_mask; + vals_p = (void *)set_vals + f->offset; + /* clear to denote we consumed this field */ + memset(s_masks_p, 0, f->size); + } else { + cmd = MLX5_ACTION_TYPE_ADD; + mask = a_mask; + vals_p = (void *)add_vals + f->offset; + /* clear to denote we consumed this field */ + memset(a_masks_p, 0, f->size); + } + + memcpy(&val, vals_p, f->size); + + field_bsize = f->size * BITS_PER_BYTE; + first = find_first_bit(&mask, field_bsize); + last = find_last_bit(&mask, field_bsize); + if (first > 0 || last != (field_bsize - 1)) { + printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", + mask); + return -EOPNOTSUPP; + } + + MLX5_SET(set_action_in, action, action_type, cmd); + MLX5_SET(set_action_in, action, field, f->field); + + if (cmd == MLX5_ACTION_TYPE_SET) { + MLX5_SET(set_action_in, action, offset, 0); + /* length is num of bits to be written, zero means length of 32 */ + MLX5_SET(set_action_in, action, length, 
field_bsize); + } + + if (field_bsize == 32) + MLX5_SET(set_action_in, action, data, ntohl(val)); + else if (field_bsize == 16) + MLX5_SET(set_action_in, action, data, ntohs(val)); + else if (field_bsize == 8) + MLX5_SET(set_action_in, action, data, val); + + action += action_size; + nactions++; + } + + parse_attr->num_mod_hdr_actions = nactions; + return 0; +} + +static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, + const struct tc_action *a, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr) +{ + int nkeys, action_size, max_actions; + + nkeys = tcf_pedit_nkeys(a); + action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); + + if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ + max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions); + else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */ + max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions); + + /* a single 32-bit pedit SW key can yield as many as 16 HW actions */ + max_actions = min(max_actions, nkeys * 16); + + parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL); + if (!parse_attr->mod_hdr_actions) + return -ENOMEM; + + parse_attr->num_mod_hdr_actions = max_actions; + return 0; +} + +static const struct pedit_headers zero_masks = {}; + +static int parse_tc_pedit_action(struct mlx5e_priv *priv, + const struct tc_action *a, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr) +{ + struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks; + int nkeys, i, err = -EOPNOTSUPP; + u32 mask, val, offset; + u8 cmd, htype; + + nkeys = tcf_pedit_nkeys(a); + + memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX); + memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX); + + for (i = 0; i < nkeys; i++) { + htype = tcf_pedit_htype(a, i); + cmd = tcf_pedit_cmd(a, i); + err = -EOPNOTSUPP; /* can't be all optimistic */ + + if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { + printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n"); + goto out_err; + } + + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { + printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd); + goto out_err; + } + + mask = tcf_pedit_mask(a, i); + val = tcf_pedit_val(a, i); + offset = tcf_pedit_offset(a, i); + + err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]); + if (err) + goto out_err; + } + + err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); + if (err) + goto out_err; + + err = offload_pedit_fields(masks, vals, parse_attr); + if (err < 0) + goto out_dealloc_parsed_actions; + + for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { + cmd_masks = &masks[cmd]; + if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { + printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n", + cmd); + print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, + 16, 1, cmd_masks, sizeof(zero_masks), true); + err = -EOPNOTSUPP; + goto out_dealloc_parsed_actions; + } + } + + return 0; + +out_dealloc_parsed_actions: + kfree(parse_attr->mod_hdr_actions); +out_err: + return err; +}
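/*
 * Editor's note: a worked example, not part of the patch. A rule such as
 * the (hypothetical) `tc filter add dev ens1f0 ... action pedit ex munge
 * ip ttl set 63` arrives as one extended pedit key with
 * cmd == TCA_PEDIT_KEY_EX_CMD_SET and htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4.
 * set_pedit_val() ORs its mask and value into masks[SET]/vals[SET] at
 * offsetof(struct pedit_headers, ip4.ttl), and offload_pedit_fields()
 * then emits one HW modify-header action equivalent to the sketch below.
 * Partial rewrites (a mask that does not cover the whole field) would be
 * rejected by the first/last bit check above.
 */
static void sketch_build_ttl_set_action(void *action)
{
	/* values assumed for illustration only */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 8); /* ttl is a 1-byte field */
	MLX5_SET(set_action_in, action, data, 63);
}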
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, - u32 *action, u32 *flow_tag) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow) { + struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; LIST_HEAD(actions); + int err; if (tc_no_actions(exts)) return -EINVAL; - *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - *action = 0; + attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + attr->action = 0; tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { /* Only support a single action per rule */ - if (*action) + if (attr->action) return -EINVAL; if (is_tcf_gact_shot(a)) { - *action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, flow_table_properties_nic_receive.flow_counter)) - *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + continue; + } + + if (is_tcf_pedit(a)) { + err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr); + if (err) + return err; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; continue; } @@ -686,8 +1158,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EINVAL; } - *flow_tag = mark; - *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + attr->flow_tag = mark; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; continue; } @@ -853,16 +1325,17 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev, static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *mirred_dev, - struct mlx5_encap_entry *e, - struct net_device **out_dev) + struct mlx5e_encap_entry *e) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN; struct ip_tunnel_key *tun_key = &e->tun_info.key; + struct net_device *out_dev; struct neighbour *n = NULL; struct flowi4 fl4 = {}; char *encap_header; int ttl, err; + u8 nud_state; if (max_encap_size < ipv4_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", @@ -887,25 +1360,36 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, fl4.daddr = tun_key->u.ipv4.dst; fl4.saddr = tun_key->u.ipv4.src; - err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, + err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &fl4, &n, &ttl); if (err) goto out; - if (!(n->nud_state & NUD_VALID)) { - pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); - err = -EOPNOTSUPP; + /* used by mlx5e_detach_encap to look up the neigh hash table + * entry when a user deletes a rule + */ + e->m_neigh.dev = n->dev; + e->m_neigh.family = n->ops->family; + memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); + e->out_dev = out_dev; + + /* It's important to add the neigh to the hash table before checking + * its validity state, so that if we get a notification while the + * neigh changes its validity state, we can find the relevant entry + * in the hash. + */ + err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + if (err) goto out; - } - e->n = n; - e->out_dev = *out_dev; - - neigh_ha_snapshot(e->h_dest, n, *out_dev); + read_lock_bh(&n->lock); + nud_state = n->nud_state; + ether_addr_copy(e->h_dest, n->ha); + read_unlock_bh(&n->lock); switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: - gen_vxlan_header_ipv4(*out_dev, encap_header, + gen_vxlan_header_ipv4(out_dev, encap_header, ipv4_encap_size, e->h_dest, ttl, fl4.daddr, fl4.saddr, tun_key->tp_dst, @@ -913,31 +1397,49 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, break; default: err = -EOPNOTSUPP; - goto out; + goto destroy_neigh_entry; + } + e->encap_size = ipv4_encap_size; + e->encap_header = encap_header; + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); + neigh_release(n); + return -EAGAIN; } err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, ipv4_encap_size, encap_header, &e->encap_id); + if (err) + goto destroy_neigh_entry; + + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); + neigh_release(n); + return err; + +destroy_neigh_entry: + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); out: - if (err && n) - neigh_release(n); kfree(encap_header); + if (n) + neigh_release(n); return err; }
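/*
 * Editor's note: when the neighbour is not yet NUD_VALID, the function
 * above returns -EAGAIN: the encap entry stays cached without
 * MLX5_ENCAP_ENTRY_VALID and neigh_event_send() kicks off resolution.
 * A rough sketch, assumed rather than quoted from the patch, of the
 * completion path that the netevent notifier is expected to drive once
 * the neighbour state settles (the real handler also re-snapshots the
 * destination MAC into e->h_dest):
 */
static void sketch_neigh_update_completion(struct mlx5e_priv *priv,
					    struct mlx5e_neigh_hash_entry *nhe,
					    bool neigh_connected)
{
	struct mlx5e_encap_entry *e;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID))
			mlx5e_tc_encap_flows_add(priv, e); /* offload cached flows */
		else if (!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID))
			mlx5e_tc_encap_flows_del(priv, e); /* tear down stale encap */
	}
}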
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, - struct mlx5_encap_entry *e, - struct net_device **out_dev) - + struct mlx5e_encap_entry *e) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; struct ip_tunnel_key *tun_key = &e->tun_info.key; + struct net_device *out_dev; struct neighbour *n = NULL; struct flowi6 fl6 = {}; char *encap_header; int err, ttl = 0; + u8 nud_state; if (max_encap_size < ipv6_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", @@ -963,25 +1465,36 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, fl6.daddr = tun_key->u.ipv6.dst; fl6.saddr = tun_key->u.ipv6.src; - err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev, + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &fl6, &n, &ttl); if (err) goto out; - if (!(n->nud_state & NUD_VALID)) { - pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr); - err = -EOPNOTSUPP; + /* used by mlx5e_detach_encap to look up the neigh hash table + * entry when a user deletes a rule + */ + e->m_neigh.dev = n->dev; + e->m_neigh.family = n->ops->family; + memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); + e->out_dev = out_dev; + + /* It's important to add the neigh to the hash table before checking + * its validity state, so that if we get a notification while the + * neigh changes its validity state, we can find the relevant entry + * in the hash.
+ */ + err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + if (err) goto out; - } - - e->n = n; - e->out_dev = *out_dev; - neigh_ha_snapshot(e->h_dest, n, *out_dev); + read_lock_bh(&n->lock); + nud_state = n->nud_state; + ether_addr_copy(e->h_dest, n->ha); + read_unlock_bh(&n->lock); switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: - gen_vxlan_header_ipv6(*out_dev, encap_header, + gen_vxlan_header_ipv6(out_dev, encap_header, ipv6_encap_size, e->h_dest, ttl, &fl6.daddr, &fl6.saddr, tun_key->tp_dst, @@ -989,31 +1502,51 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, break; default: err = -EOPNOTSUPP; - goto out; + goto destroy_neigh_entry; + } + + e->encap_size = ipv6_encap_size; + e->encap_header = encap_header; + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); + neigh_release(n); + return -EAGAIN; } err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, ipv6_encap_size, encap_header, &e->encap_id); + if (err) + goto destroy_neigh_entry; + + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); + neigh_release(n); + return err; + +destroy_neigh_entry: + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); out: - if (err && n) - neigh_release(n); kfree(encap_header); + if (n) + neigh_release(n); return err; } static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, - struct mlx5_esw_flow_attr *attr) + struct net_device **encap_dev, + struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); - struct mlx5e_priv *up_priv = netdev_priv(up_dev); unsigned short family = ip_tunnel_info_af(tun_info); + struct mlx5e_priv *up_priv = netdev_priv(up_dev); + struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct ip_tunnel_key *key = &tun_info->key; - struct mlx5_encap_entry *e; - struct net_device *out_dev; - int tunnel_type, err = -EOPNOTSUPP; + struct mlx5e_encap_entry *e; + int tunnel_type, err = 0; uintptr_t hash_key; bool found = false; @@ -1048,10 +1581,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, } } - if (found) { - attr->encap = e; - return 0; - } + if (found) + goto attach_flow; e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) @@ -1062,16 +1593,21 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, INIT_LIST_HEAD(&e->flows); if (family == AF_INET) - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e); else if (family == AF_INET6) - err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev); + err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e); - if (err) + if (err && err != -EAGAIN) goto out_err; - attr->encap = e; hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); +attach_flow: + list_add(&flow->encap, &e->flows); + *encap_dev = e->out_dev; + if (e->flags & MLX5_ENCAP_ENTRY_VALID) + attr->encap_id = e->encap_id; + return err; out_err: @@ -1080,20 +1616,22 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, } static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) { - struct mlx5_esw_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_rep_priv *rpriv = priv->ppriv; struct ip_tunnel_info *info = NULL; const struct tc_action *a; LIST_HEAD(actions); bool encap = false; - int err; + 
int err = 0; if (tc_no_actions(exts)) return -EINVAL; memset(attr, 0, sizeof(*attr)); - attr->in_rep = priv->ppriv; + attr->in_rep = rpriv->rep; tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { @@ -1103,9 +1641,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, continue; } + if (is_tcf_pedit(a)) { + err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, + parse_attr); + if (err) + return err; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + continue; + } + if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); - struct net_device *out_dev; + struct net_device *out_dev, *encap_dev = NULL; struct mlx5e_priv *out_priv; out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); @@ -1115,18 +1663,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; out_priv = netdev_priv(out_dev); - attr->out_rep = out_priv->ppriv; + rpriv = out_priv->ppriv; + attr->out_rep = rpriv->rep; } else if (encap) { err = mlx5e_attach_encap(priv, info, - out_dev, attr); - if (err) + out_dev, &encap_dev, flow); + if (err && err != -EAGAIN) return err; - list_add(&flow->encap, &attr->encap->flows); attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; - out_priv = netdev_priv(attr->encap->out_dev); - attr->out_rep = out_priv->ppriv; + out_priv = netdev_priv(encap_dev); + rpriv = out_priv->ppriv; + attr->out_rep = rpriv->rep; + attr->parse_attr = parse_attr; } else { pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); @@ -1166,28 +1716,30 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EINVAL; } - return 0; + return err; } int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, struct tc_cls_flower_offload *f) { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_table *tc = &priv->fs.tc; - int err, attr_size = 0; - u32 flow_tag, action; struct mlx5e_tc_flow *flow; - struct mlx5_flow_spec *spec; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int attr_size, err = 0; u8 flow_flags = 0; if (esw && esw->mode == SRIOV_OFFLOADS) { flow_flags = MLX5E_TC_FLOW_ESWITCH; attr_size = sizeof(struct mlx5_esw_flow_attr); + } else { + flow_flags = MLX5E_TC_FLOW_NIC; + attr_size = sizeof(struct mlx5_nic_flow_attr); } flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); - spec = mlx5_vzalloc(sizeof(*spec)); - if (!spec || !flow) { + parse_attr = mlx5_vzalloc(sizeof(*parse_attr)); + if (!parse_attr || !flow) { err = -ENOMEM; goto err_free; } @@ -1195,42 +1747,54 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, flow->cookie = f->cookie; flow->flags = flow_flags; - err = parse_cls_flower(priv, flow, spec, f); + err = parse_cls_flower(priv, flow, &parse_attr->spec, f); if (err < 0) goto err_free; if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1); - err = parse_tc_fdb_actions(priv, f->exts, flow); + err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); if (err < 0) - goto err_free; - flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr); + goto err_handle_encap_flow; + flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); } else { - err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag); 
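	/*
	 * Editor's note (annotation, not part of the patch): flow->esw_attr
	 * and flow->nic_attr are overlapping zero-length members of a union,
	 * so the kzalloc(sizeof(*flow) + attr_size, ...) above reserves room
	 * for whichever attribute struct matches the chosen offload path;
	 * both names alias the same trailing storage of struct mlx5e_tc_flow.
	 */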
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag); + flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); } if (IS_ERR(flow->rule)) { err = PTR_ERR(flow->rule); - goto err_del_rule; + goto err_free; } + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; err = rhashtable_insert_fast(&tc->ht, &flow->node, tc->ht_params); if (err) goto err_del_rule; - goto out; + if (flow->flags & MLX5E_TC_FLOW_ESWITCH && + !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) + kvfree(parse_attr); + return err; err_del_rule: mlx5e_tc_del_flow(priv, flow); +err_handle_encap_flow: + if (err == -EAGAIN) { + err = rhashtable_insert_fast(&tc->ht, &flow->node, + tc->ht_params); + if (err) + mlx5e_tc_del_flow(priv, flow); + else + return 0; + } + err_free: + kvfree(parse_attr); kfree(flow); -out: - kvfree(spec); return err; } @@ -1249,7 +1813,6 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, mlx5e_tc_del_flow(priv, flow); - kfree(flow); return 0; @@ -1272,6 +1835,9 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, if (!flow) return -EINVAL; + if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) + return 0; + counter = mlx5_flow_rule_counter(flow->rule); if (!counter) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 34bf903fc8863b14367afdb674d96ee5ec5468bc..ecbe30d808ae0c1712a31a0d8b1f425236595255 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -46,6 +46,15 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, int mlx5e_stats_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); +struct mlx5e_encap_entry; +void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); +void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); + +struct mlx5e_neigh_hash_entry; +void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); + static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return atomic_read(&priv->fs.tc.ht.nelems); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 57f5e2d7ebd1a91a1a1cf618e60caf1f015914a3..ab3bb026ff9e8a92632e607d998665f1d8bf03bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -33,34 +33,12 @@ #include #include #include "en.h" +#include "ipoib.h" #define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ MLX5E_SQ_NOPS_ROOM) -void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) -{ - struct mlx5_wq_cyc *wq = &sq->wq; - - u16 pi = sq->pc & wq->sz_m1; - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - - memset(cseg, 0, sizeof(*cseg)); - - cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP); - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01); - - sq->pc++; - sq->stats.nop++; - - if (notify_hw) { - cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); - } -} - static inline void mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) { @@ -76,25 +54,25 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev, } } -static inline void mlx5e_dma_push(struct mlx5e_sq *sq, +static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size, enum 
mlx5e_dma_map_type map_type) { u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask; - sq->db.txq.dma_fifo[i].addr = addr; - sq->db.txq.dma_fifo[i].size = size; - sq->db.txq.dma_fifo[i].type = map_type; + sq->db.dma_fifo[i].addr = addr; + sq->db.dma_fifo[i].size = size; + sq->db.dma_fifo[i].type = map_type; sq->dma_fifo_pc++; } -static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i) +static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) { - return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask]; + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; } -static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma) +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) { int i; @@ -111,6 +89,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, { struct mlx5e_priv *priv = netdev_priv(dev); int channel_ix = fallback(dev, skb); + u16 num_channels; int up = 0; if (!netdev_get_num_tc(dev)) @@ -122,11 +101,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, /* channel_ix can be larger than num_channels since * dev->num_real_tx_queues = num_channels * num_tc */ - if (channel_ix >= priv->params.num_channels) - channel_ix = reciprocal_scale(channel_ix, - priv->params.num_channels); + num_channels = priv->channels.params.num_channels; + if (channel_ix >= num_channels) + channel_ix = reciprocal_scale(channel_ix, num_channels); - return priv->channeltc_to_txq_map[channel_ix][up]; + return priv->channel_tc2txq[channel_ix][up]; } static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) @@ -175,25 +154,6 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, } } -static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, - struct sk_buff *skb, bool bf) -{ - /* Some NIC TX decisions, e.g loopback, are based on the packet - * headers and occur before the data gather. 
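/*
 * Aside on mlx5e_select_queue() above (an illustrative sketch, not driver
 * code): when the fallback picks an index beyond num_channels,
 * reciprocal_scale() maps it back into [0, num_channels) with a multiply
 * and a shift rather than a modulo. Self-contained version of that
 * arithmetic:
 */
#include <stdint.h>

static uint32_t reciprocal_scale_sketch(uint32_t val, uint32_t n)
{
	/* maps a full-range 32-bit value into [0, n) without dividing */
	return (uint32_t)(((uint64_t)val * n) >> 32);
}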
- * Therefore these headers must be copied into the WQE - */ - if (bf) { - u16 ihs = skb_headlen(skb); - - if (skb_vlan_tag_present(skb)) - ihs += VLAN_HLEN; - - if (ihs <= sq->max_inline) - return skb_headlen(skb); - } - return mlx5e_calc_min_inline(sq->min_inline_mode, skb); -} - static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, unsigned int *skb_len, unsigned int len) @@ -218,31 +178,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs, mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz); } -static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) +static inline void +mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) { - struct mlx5_wq_cyc *wq = &sq->wq; - - u16 pi = sq->pc & wq->sz_m1; - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi]; - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - struct mlx5_wqe_data_seg *dseg; - - unsigned char *skb_data = skb->data; - unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - dma_addr_t dma_addr = 0; - unsigned int num_bytes; - bool bf = false; - u16 headlen; - u16 ds_cnt; - u16 ihs; - int i; - - memset(wqe, 0, sizeof(*wqe)); - if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; if (skb->encapsulation) { @@ -254,74 +192,51 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) } } else sq->stats.csum_none++; +} - if (sq->cc != sq->prev_cc) { - sq->prev_cc = sq->cc; - sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0; - } - - if (skb_is_gso(skb)) { - eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); - opcode = MLX5_OPCODE_LSO; +static inline u16 +mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes) +{ + u16 ihs; - if (skb->encapsulation) { - ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - sq->stats.tso_inner_packets++; - sq->stats.tso_inner_bytes += skb->len - ihs; - } else { - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - sq->stats.tso_packets++; - sq->stats.tso_bytes += skb->len - ihs; - } + eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); - sq->stats.packets += skb_shinfo(skb)->gso_segs; - num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + if (skb->encapsulation) { + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + sq->stats.tso_inner_packets++; + sq->stats.tso_inner_bytes += skb->len - ihs; } else { - bf = sq->bf_budget && - !skb->xmit_more && - !skb_shinfo(skb)->nr_frags; - ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); - sq->stats.packets++; - num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - } - - sq->stats.bytes += num_bytes; - wi->num_bytes = num_bytes; - - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - if (ihs) { - if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); - ihs += VLAN_HLEN; - } else { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); - } - eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); - } else if (skb_vlan_tag_present(skb)) { - eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); - eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + 
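/*
 * Illustration of the accounting below (not part of the patch): for LSO
 * the device replicates the ihs-byte header once per extra segment, so
 * the wire-byte count is skb->len + (gso_segs - 1) * ihs. For example,
 * a TSO skb carrying three segments behind a 54-byte header accounts
 * skb->len + 2 * 54 bytes.
 */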
sq->stats.tso_packets++; + sq->stats.tso_bytes += skb->len - ihs; } - dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; + *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + return ihs; +} - wi->num_dma = 0; +static inline int +mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, + unsigned char *skb_data, u16 headlen, + struct mlx5_wqe_data_seg *dseg) +{ + dma_addr_t dma_addr = 0; + u8 num_dma = 0; + int i; - headlen = skb_len - skb->data_len; if (headlen) { dma_addr = dma_map_single(sq->pdev, skb_data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - goto dma_unmap_wqe_err; + return -ENOMEM; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; dseg->byte_count = cpu_to_be32(headlen); mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE); - wi->num_dma++; - + num_dma++; dseg++; } @@ -330,59 +245,120 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) int fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - goto dma_unmap_wqe_err; + return -ENOMEM; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); - wi->num_dma++; - + num_dma++; dseg++; } - ds_cnt += wi->num_dma; - - cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + return num_dma; +} - sq->db.txq.skb[pi] = skb; +static inline void +mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, + u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma, + struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + u16 pi; + wi->num_bytes = num_bytes; + wi->num_dma = num_dma; wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); - sq->pc += wi->num_wqebbs; + wi->skb = skb; + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - netdev_tx_sent_queue(sq->txq, wi->num_bytes); + netdev_tx_sent_queue(sq->txq, num_bytes); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) { + sq->pc += wi->num_wqebbs; + if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); sq->stats.stopped++; } - sq->stats.xmit_more += skb->xmit_more; - if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { - int bf_sz = 0; + if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); - if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state)) - bf_sz = wi->num_wqebbs << 3; + /* fill sq edge with nops to avoid wqe wrap around */ + while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + sq->db.wqe_info[pi].skb = NULL; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + sq->stats.nop++; + } +} - cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz); +static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + + unsigned char *skb_data = skb->data; + unsigned int 
skb_len = skb->len; + u8 opcode = MLX5_OPCODE_SEND; + unsigned int num_bytes; + int num_dma; + u16 headlen; + u16 ds_cnt; + u16 ihs; + + memset(wqe, 0, sizeof(*wqe)); + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + if (skb_is_gso(skb)) { + opcode = MLX5_OPCODE_LSO; + ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); + sq->stats.packets += skb_shinfo(skb)->gso_segs; + } else { + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + sq->stats.packets++; } + sq->stats.bytes += num_bytes; + sq->stats.xmit_more += skb->xmit_more; - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.txq.skb[pi] = NULL; - mlx5e_send_nop(sq, false); + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + if (ihs) { + if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); + ihs += VLAN_HLEN; + } else { + memcpy(eseg->inline_hdr.start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + } + eseg->inline_hdr.sz = cpu_to_be16(ihs); + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + } else if (skb_vlan_tag_present(skb)) { + eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); + eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); } - if (bf) - sq->bf_budget--; + headlen = skb_len - skb->data_len; + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, + (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + if (unlikely(num_dma < 0)) + goto dma_unmap_wqe_err; + + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, + num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; @@ -398,21 +374,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)]; + struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)]; return mlx5e_sq_xmit(sq, skb); } bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { - struct mlx5e_sq *sq; + struct mlx5e_txqsq *sq; u32 dma_fifo_cc; u32 nbytes; u16 npkts; u16 sqcc; int i; - sq = container_of(cq, struct mlx5e_sq, cq); + sq = container_of(cq, struct mlx5e_txqsq, cq); if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; @@ -450,8 +426,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) last_wqe = (sqcc == wqe_counter); ci = sqcc & sq->wq.sz_m1; - skb = sq->db.txq.skb[ci]; - wi = &sq->db.txq.wqe_info[ci]; + wi = &sq->db.wqe_info[ci]; + skb = wi->skb; if (unlikely(!skb)) { /* nop */ sqcc++; @@ -492,7 +468,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); if (netif_tx_queue_stopped(sq->txq) && - mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) { + mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) { netif_tx_wake_queue(sq->txq); sq->stats.wake++; } @@ -500,7 +476,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) return (i == MLX5E_TX_CQ_POLL_BUDGET); } -static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq) +void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) { struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; @@ -509,8 +485,8 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq) while (sq->cc != sq->pc) { ci = sq->cc & sq->wq.sz_m1; - skb = sq->db.txq.skb[ci]; - wi = &sq->db.txq.wqe_info[ci]; + wi = 
&sq->db.wqe_info[ci]; + skb = wi->skb; if (!skb) { /* nop */ sq->cc++; @@ -529,36 +505,89 @@ static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq) } } -static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq) +#ifdef CONFIG_MLX5_CORE_IPOIB + +struct mlx5_wqe_eth_pad { + u8 rsvd0[16]; +}; + +struct mlx5i_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_datagram_seg datagram; + struct mlx5_wqe_eth_pad pad; + struct mlx5_wqe_eth_seg eth; +}; + +static inline void +mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, + struct mlx5_wqe_datagram_seg *dseg) { - struct mlx5e_sq_wqe_info *wi; - struct mlx5e_dma_info *di; - u16 ci; + memcpy(&dseg->av, av, sizeof(struct mlx5_av)); + dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV); + dseg->av.key.qkey.qkey = cpu_to_be32(dqkey); +} - while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; - di = &sq->db.xdp.di[ci]; - wi = &sq->db.xdp.wqe_info[ci]; +netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_av *av, u32 dqpn, u32 dqkey) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + u16 pi = sq->pc & wq->sz_m1; + struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; - if (wi->opcode == MLX5_OPCODE_NOP) { - sq->cc++; - continue; - } + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - sq->cc += wi->num_wqebbs; + unsigned char *skb_data = skb->data; + unsigned int skb_len = skb->len; + u8 opcode = MLX5_OPCODE_SEND; + unsigned int num_bytes; + int num_dma; + u16 headlen; + u16 ds_cnt; + u16 ihs; - mlx5e_page_release(&sq->channel->rq, di, false); + memset(wqe, 0, sizeof(*wqe)); + + mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + if (skb_is_gso(skb)) { + opcode = MLX5_OPCODE_LSO; + ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); + } else { + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } -} -void mlx5e_free_sq_descs(struct mlx5e_sq *sq) -{ - switch (sq->type) { - case MLX5E_SQ_TXQ: - mlx5e_free_txq_sq_descs(sq); - break; - case MLX5E_SQ_XDP: - mlx5e_free_xdp_sq_descs(sq); - break; + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + if (ihs) { + memcpy(eseg->inline_hdr.start, skb_data, ihs); + mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + eseg->inline_hdr.sz = cpu_to_be16(ihs); + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); } + + headlen = skb_len - skb->data_len; + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, + (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + if (unlikely(num_dma < 0)) + goto dma_unmap_wqe_err; + + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, + num_bytes, num_dma, wi, cseg); + + return NETDEV_TX_OK; + +dma_unmap_wqe_err: + sq->stats.dropped++; + mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); + + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; } + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index e5c12a732aa1212274943183ed83696ce2606639..5ca6714e3e02bd976515de96fa010d348b349458 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -37,124 +37,69 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) struct mlx5_cqwq *wq = &cq->wq; u32 ci = mlx5_cqwq_get_ci(wq); struct mlx5_cqe64 *cqe = 
mlx5_cqwq_get_wqe(wq, ci); - int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; - int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; + u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; + u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; if (cqe_ownership_bit != sw_ownership_val) return NULL; /* ensure cqe content is read after cqe ownership bit */ - rmb(); + dma_rmb(); return cqe; } -static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) +static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, + struct mlx5e_icosq *sq, + struct mlx5_cqe64 *cqe, + u16 *sqcc) { - struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq); - struct mlx5_wq_cyc *wq; - struct mlx5_cqe64 *cqe; - u16 sqcc; - - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + struct mlx5_wq_cyc *wq = &sq->wq; + u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; + struct mlx5e_rq *rq = &sq->channel->rq; + + prefetch(rq); + mlx5_cqwq_pop(&cq->wq); + *sqcc += icowi->num_wqebbs; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { + WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", + cqe->op_own); return; + } - cqe = mlx5e_get_cqe(cq); - if (likely(!cqe)) + if (likely(icowi->opcode == MLX5_OPCODE_UMR)) { + mlx5e_post_rx_mpwqe(rq); return; + } - wq = &sq->wq; - - /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), - * otherwise a cq overrun may occur - */ - sqcc = sq->cc; - - do { - u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; - struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; - - mlx5_cqwq_pop(&cq->wq); - sqcc += icowi->num_wqebbs; - - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { - WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", - cqe->op_own); - break; - } - - switch (icowi->opcode) { - case MLX5_OPCODE_NOP: - break; - case MLX5_OPCODE_UMR: - mlx5e_post_rx_mpwqe(&sq->channel->rq); - break; - default: - WARN_ONCE(true, - "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", - icowi->opcode); - } - - } while ((cqe = mlx5e_get_cqe(cq))); - - mlx5_cqwq_update_db_record(&cq->wq); - - /* ensure cq space is freed before enabling more cqes */ - wmb(); - - sq->cc = sqcc; + if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) + WARN_ONCE(true, + "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", + icowi->opcode); } -static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq) +static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { - struct mlx5e_sq *sq; + struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); + struct mlx5_cqe64 *cqe; u16 sqcc; - int i; - - sq = container_of(cq, struct mlx5e_sq, cq); if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return false; + return; + + cqe = mlx5e_get_cqe(cq); + if (likely(!cqe)) + return; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), * otherwise a cq overrun may occur */ sqcc = sq->cc; - for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { - struct mlx5_cqe64 *cqe; - u16 wqe_counter; - bool last_wqe; - - cqe = mlx5e_get_cqe(cq); - if (!cqe) - break; - - mlx5_cqwq_pop(&cq->wq); - - wqe_counter = be16_to_cpu(cqe->wqe_counter); - - do { - struct mlx5e_sq_wqe_info *wi; - struct mlx5e_dma_info *di; - u16 ci; - - last_wqe = (sqcc == wqe_counter); - - ci = sqcc & sq->wq.sz_m1; - di = &sq->db.xdp.di[ci]; - wi = &sq->db.xdp.wqe_info[ci]; - - if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) { - sqcc++; - continue; - } - - sqcc += wi->num_wqebbs; - /* Recycle RX page */ - mlx5e_page_release(&sq->channel->rq, di, true); - } while (!last_wqe); - } 
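/*
 * Background sketch for the mlx5e_get_cqe() ownership check above
 * (illustrative, user-space C): hardware toggles the CQE ownership bit on
 * every pass around the ring, so a CQE is valid only when that bit matches
 * the parity of the consumer's wrap count; the dma_rmb() then orders the
 * ownership check before the rest of the CQE is read.
 */
#include <stdint.h>

struct cqe_sketch { uint8_t op_own; };	/* bit 0 carries ownership */

static int cqe_is_valid_sketch(const struct cqe_sketch *cqe, uint32_t wrap_cnt)
{
	uint8_t hw_own = cqe->op_own & 1;	/* written by hardware */
	uint8_t sw_own = wrap_cnt & 1;		/* wrap-count parity */

	return hw_own == sw_own;	/* a read barrier must follow */
}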
+ /* by design, there's only a single cqe */ + mlx5e_poll_ico_single_cqe(cq, sq, cqe, &sqcc); mlx5_cqwq_update_db_record(&cq->wq); @@ -162,7 +107,6 @@ static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq) wmb(); sq->cc = sqcc; - return (i == MLX5E_TX_CQ_POLL_BUDGET); } int mlx5e_napi_poll(struct napi_struct *napi, int budget) @@ -178,12 +122,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); + if (c->xdp) + busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); + work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); busy |= work_done == budget; - if (c->xdp) - busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq); - mlx5e_poll_ico_cq(&c->icosq.cq); busy |= mlx5e_post_rx_wqes(&c->rq); @@ -224,8 +168,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); struct mlx5e_channel *c = cq->channel; - struct mlx5e_priv *priv = c->priv; - struct net_device *netdev = priv->netdev; + struct net_device *netdev = c->netdev; netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", __func__, mcq->cqn, event); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index fcd5bc7e31db5432bb8da33bc335694412e737a9..2e34d95ea7761c548dc84ba03cb087e479ec909b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -53,13 +53,6 @@ struct esw_uc_addr { u32 vport; }; -/* E-Switch MC FDB table hash node */ -struct esw_mc_addr { /* SRIOV only */ - struct l2addr_node node; - struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */ - u32 refcnt; -}; - /* Vport UC/MC hash node */ struct vport_addr { struct l2addr_node node; @@ -337,6 +330,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb; @@ -362,7 +356,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) memset(flow_group_in, 0, inlen); table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0); + + ft_attr.max_fte = table_size; + fdb = mlx5_create_flow_table(root_ns, &ft_attr); if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create FDB Table err %d\n", err); @@ -814,7 +810,7 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, bool promisc, bool mc_promisc) { - struct esw_mc_addr *allmulti_addr = esw->mc_promisc; + struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; struct mlx5_vport *vport = &esw->vports[vport_num]; if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) @@ -1685,7 +1681,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", esw->enabled_vports, esw->mode); - mc_promisc = esw->mc_promisc; + mc_promisc = &esw->mc_promisc; nvports = esw->enabled_vports; for (i = 0; i < esw->total_vports; i++) @@ -1729,7 +1725,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); int total_vports = MLX5_TOTAL_VPORTS(dev); - 
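/*
 * The mlx5_create_flow_table() call sites in this patch (above and in the
 * eswitch_offloads/fs_core hunks below) move from positional arguments to
 * an attribute struct. A sketch of that API pattern with invented names;
 * zero-initialized fields act as defaults, so callers set only what they
 * need:
 */
struct table_attr_sketch {
	unsigned int prio;
	unsigned int max_fte;
	unsigned int level;
	unsigned int flags;
};

struct table_sketch *table_create_sketch(const struct table_attr_sketch *attr);

static struct table_sketch *make_fdb_sketch(unsigned int table_size)
{
	struct table_attr_sketch attr = {0};	/* unset fields stay zero */

	attr.max_fte = table_size;
	return table_create_sketch(&attr);
}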
struct esw_mc_addr *mc_promisc; struct mlx5_eswitch *esw; int vport_num; int err; @@ -1758,13 +1753,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) } esw->l2_table.size = l2_table_size; - mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL); - if (!mc_promisc) { - err = -ENOMEM; - goto abort; - } - esw->mc_promisc = mc_promisc; - esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { err = -ENOMEM; @@ -1803,6 +1791,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->enabled_vports = 0; esw->mode = SRIOV_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; + else + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; dev->priv.eswitch = esw; return 0; @@ -1827,7 +1820,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); - kfree(esw->mc_promisc); kfree(esw->offloads.vport_reps); kfree(esw->vports); kfree(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index ad329b1680b455ddd4cdfda56bf512bfbea37529..b746f62c8c79b17a64c0923665917170a075092a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -36,7 +36,6 @@ #include #include #include -#include #include #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -210,6 +209,14 @@ struct mlx5_esw_offload { DECLARE_HASHTABLE(encap_tbl, 8); u8 inline_mode; u64 num_flows; + u8 encap; +}; + +/* E-Switch MC FDB table hash node */ +struct esw_mc_addr { /* SRIOV only */ + struct l2addr_node node; + struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */ + u32 refcnt; }; struct mlx5_eswitch { @@ -225,7 +232,7 @@ struct mlx5_eswitch { * and async SRIOV admin state changes */ struct mutex state_lock; - struct esw_mc_addr *mc_promisc; + struct esw_mc_addr mc_promisc; struct { bool enabled; @@ -285,20 +292,8 @@ enum { SET_VLAN_INSERT = BIT(1) }; -#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 -#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 - -struct mlx5_encap_entry { - struct hlist_node encap_hlist; - struct list_head flows; - u32 encap_id; - struct neighbour *n; - struct ip_tunnel_info tun_info; - unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ - - struct net_device *out_dev; - int tunnel_type; -}; +#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000 +#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000 struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; @@ -307,7 +302,9 @@ struct mlx5_esw_flow_attr { int action; u16 vlan; bool vlan_handled; - struct mlx5_encap_entry *encap; + u32 encap_id; + u32 mod_hdr_id; + struct mlx5e_tc_flow_parse_attr *parse_attr; }; int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, @@ -321,6 +318,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); +int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, struct mlx5_eswitch_rep *rep); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d111cebca9f1ea57d57a70b108a436af1adcc6aa..f991f669047e5df2531f043c3934d248bf3099b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -68,8 +68,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(esw->dev, true); - if (IS_ERR(counter)) - return ERR_CAST(counter); + if (IS_ERR(counter)) { + rule = ERR_CAST(counter); + goto err_counter_alloc; + } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; dest[i].counter = counter; i++; @@ -86,17 +88,25 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - if (attr->encap) - flow_act.encap_id = attr->encap->encap_id; + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + flow_act.modify_id = attr->mod_hdr_id; + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + flow_act.encap_id = attr->encap_id; rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, spec, &flow_act, dest, i); if (IS_ERR(rule)) - mlx5_fc_destroy(esw->dev, counter); + goto err_add_rule; else esw->offloads.num_flows++; return rule; + +err_add_rule: + mlx5_fc_destroy(esw->dev, counter); +err_counter_alloc: + return rule; } void @@ -106,12 +116,10 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, { struct mlx5_fc *counter = NULL; - if (!IS_ERR(rule)) { - counter = mlx5_flow_rule_counter(rule); - mlx5_del_flow_rules(rule); - mlx5_fc_destroy(esw->dev, counter); - esw->offloads.num_flows--; - } + counter = mlx5_flow_rule_counter(rule); + mlx5_del_flow_rules(rule); + mlx5_fc_destroy(esw->dev, counter); + esw->offloads.num_flows--; } static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) @@ -418,30 +426,21 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) return err; } -#define MAX_PF_SQ 256 #define ESW_OFFLOADS_NUM_GROUPS 4 -static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) +static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) { - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int table_size, ix, esw_size, err = 0; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *fdb = NULL; - struct mlx5_flow_group *g; - u32 *flow_group_in; - void *match_criteria; + int esw_size, err = 0; u32 flags = 0; - flow_group_in = mlx5_vzalloc(inlen); - if (!flow_group_in) - return -ENOMEM; - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); err = -EOPNOTSUPP; - goto ns_err; + goto out; } esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", @@ -451,8 +450,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, @@ -462,12 +460,55 @@ static int esw_create_offloads_fdb_table(struct 
mlx5_eswitch *esw, int nvports) if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); - goto fast_fdb_err; + goto out; } esw->fdb_table.fdb = fdb; +out: + return err; +} + +static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) +{ + mlx5_destroy_flow_table(esw->fdb_table.fdb); +} + +#define MAX_PF_SQ 256 + +static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb = NULL; + int table_size, ix, err = 0; + struct mlx5_flow_group *g; + void *match_criteria; + u32 *flow_group_in; + + esw_debug(esw->dev, "Create offloads FDB Tables\n"); + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + err = -EOPNOTSUPP; + goto ns_err; + } + + err = esw_create_offloads_fast_fdb_table(esw); + if (err) + goto fast_fdb_err; + table_size = nvports + MAX_PF_SQ + 1; - fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0); + + ft_attr.max_fte = table_size; + ft_attr.prio = FDB_SLOW_PATH; + + fdb = mlx5_create_flow_table(root_ns, &ft_attr); if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); @@ -532,25 +573,26 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) return err; } -static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw) +static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) { if (!esw->fdb_table.fdb) return; - esw_debug(esw->dev, "Destroy offloads FDB Table\n"); + esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); - mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw_destroy_offloads_fast_fdb_table(esw); } static int esw_create_offloads_table(struct mlx5_eswitch *esw) { - struct mlx5_flow_namespace *ns; - struct mlx5_flow_table *ft_offloads; + struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_table *ft_offloads; + struct mlx5_flow_namespace *ns; int err = 0; ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); @@ -559,7 +601,9 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) return -EOPNOTSUPP; } - ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); + ft_attr.max_fte = dev->priv.sriov.num_vfs + 2; + + ft_offloads = mlx5_create_flow_table(ns, &ft_attr); if (IS_ERR(ft_offloads)) { err = PTR_ERR(ft_offloads); esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); @@ -700,7 +744,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_dev_list_unlock(); - err = esw_create_offloads_fdb_table(esw, nvports); + err = esw_create_offloads_fdb_tables(esw, nvports); if (err) goto create_fdb_err; @@ -737,7 +781,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) esw_destroy_offloads_table(esw); create_ft_err: - esw_destroy_offloads_fdb_table(esw); + 
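/*
 * The devlink encap setter below relies on the fast/slow FDB split made
 * above: it can tear down and rebuild just the fast-path table around a
 * settings change, rolling the setting back if the rebuild fails. Generic
 * sketch of that pattern (names invented for illustration):
 */
struct eswitch_sketch { int encap; };

int rebuild_fast_fdb_sketch(struct eswitch_sketch *esw);
void destroy_fast_fdb_sketch(struct eswitch_sketch *esw);

static int set_encap_sketch(struct eswitch_sketch *esw, int encap)
{
	int old = esw->encap;
	int err;

	if (old == encap)
		return 0;

	destroy_fast_fdb_sketch(esw);
	esw->encap = encap;
	err = rebuild_fast_fdb_sketch(esw);
	if (err) {
		esw->encap = old;	/* revert and restore the old table */
		(void)rebuild_fast_fdb_sketch(esw);
	}
	return err;
}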
esw_destroy_offloads_fdb_tables(esw); create_fdb_err: /* enable back PF RoCE */ @@ -783,7 +827,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) esw_destroy_vport_rx_group(esw); esw_destroy_offloads_table(esw); - esw_destroy_offloads_fdb_table(esw); + esw_destroy_offloads_fdb_tables(esw); } static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) @@ -1012,6 +1056,66 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) return 0; } +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (esw->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && + (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || + !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) + return -EOPNOTSUPP; + + if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) + return -EOPNOTSUPP; + + if (esw->mode == SRIOV_LEGACY) { + esw->offloads.encap = encap; + return 0; + } + + if (esw->offloads.encap == encap) + return 0; + + if (esw->offloads.num_flows > 0) { + esw_warn(dev, "Can't set encapsulation when flows are configured\n"); + return -EOPNOTSUPP; + } + + esw_destroy_offloads_fast_fdb_table(esw); + + esw->offloads.encap = encap; + err = esw_create_offloads_fast_fdb_table(esw); + if (err) { + esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err); + esw->offloads.encap = !encap; + (void) esw_create_offloads_fast_fdb_table(esw); + } + return err; +} + +int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (esw->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + *encap = esw->offloads.encap; + return 0; +} + void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, struct mlx5_eswitch_rep *__rep) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index b64a781c7e855fd1d38cb7303d26a27073626435..19e3d2fc2099e09a987d0d475629580c72cd37c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -45,6 +45,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; + if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && + ft->underlay_qpn == 0) + return 0; + MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); @@ -54,6 +58,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, MLX5_SET(set_flow_table_root_in, in, other_vport, 1); } + if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && + ft->underlay_qpn != 0) + MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -249,6 +257,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); MLX5_SET(flow_context, in_flow_context, action, fte->action); MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id); + MLX5_SET(flow_context, in_flow_context, modify_header_id, 
fte->modify_id); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); @@ -515,3 +524,69 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id) mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } + +int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, + u8 namespace, u8 num_actions, + void *modify_actions, u32 *modify_header_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)]; + int max_actions, actions_size, inlen, err; + void *actions_in; + u8 table_type; + u32 *in; + + switch (namespace) { + case MLX5_FLOW_NAMESPACE_FDB: + max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions); + table_type = FS_FT_FDB; + break; + case MLX5_FLOW_NAMESPACE_KERNEL: + max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions); + table_type = FS_FT_NIC_RX; + break; + default: + return -EOPNOTSUPP; + } + + if (num_actions > max_actions) { + mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n", + num_actions, max_actions); + return -EOPNOTSUPP; + } + + actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions; + inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(alloc_modify_header_context_in, in, opcode, + MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT); + MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type); + MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions); + + actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions); + memcpy(actions_in, modify_actions, actions_size); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + + *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id); + kfree(in); + return err; +} + +void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(dealloc_modify_header_context_in, in, opcode, + MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT); + MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id, + modify_header_id); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index ded27bb9a3b6049ff4bad1606443dbeff53be8f1..b8a176503d384f863de75cacf7bd81d1f40ad9d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -476,6 +476,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act, fte->index = index; fte->action = flow_act->action; fte->encap_id = flow_act->encap_id; + fte->modify_id = flow_act->modify_id; return fte; } @@ -777,18 +778,16 @@ static void list_add_flow_table(struct mlx5_flow_table *ft, } static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + struct mlx5_flow_table_attr *ft_attr, enum fs_flow_table_op_mod op_mod, - u16 vport, int prio, - int max_fte, u32 level, - u32 flags) + u16 vport) { + struct mlx5_flow_root_namespace *root = find_root(&ns->node); struct mlx5_flow_table *next_ft = NULL; + struct fs_prio *fs_prio = NULL; struct mlx5_flow_table *ft; - int err; int log_table_sz; - struct mlx5_flow_root_namespace *root = - 
find_root(&ns->node); - struct fs_prio *fs_prio = NULL; + int err; if (!root) { pr_err("mlx5: flow steering failed to find root of namespace\n"); @@ -796,29 +795,31 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa } mutex_lock(&root->chain_lock); - fs_prio = find_prio(ns, prio); + fs_prio = find_prio(ns, ft_attr->prio); if (!fs_prio) { err = -EINVAL; goto unlock_root; } - if (level >= fs_prio->num_levels) { + if (ft_attr->level >= fs_prio->num_levels) { err = -ENOSPC; goto unlock_root; } /* The level is related to the * priority level range. */ - level += fs_prio->start_level; - ft = alloc_flow_table(level, + ft_attr->level += fs_prio->start_level; + ft = alloc_flow_table(ft_attr->level, vport, - max_fte ? roundup_pow_of_two(max_fte) : 0, + ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0, root->table_type, - op_mod, flags); + op_mod, ft_attr->flags); if (!ft) { err = -ENOMEM; goto unlock_root; } + ft->underlay_qpn = ft_attr->underlay_qpn; + tree_init_node(&ft->node, 1, del_flow_table); log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); @@ -848,44 +849,56 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa } struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, - int prio, int max_fte, - u32 level, - u32 flags) + struct mlx5_flow_table_attr *ft_attr) { - return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio, - max_fte, level, flags); + return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0); } struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, u32 level, u16 vport) { - return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio, - max_fte, level, 0); + struct mlx5_flow_table_attr ft_attr = {}; + + ft_attr.max_fte = max_fte; + ft_attr.level = level; + ft_attr.prio = prio; + + return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); } -struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( - struct mlx5_flow_namespace *ns, - int prio, u32 level) +struct mlx5_flow_table* +mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns, + int prio, u32 level) { - return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0, - level, 0); + struct mlx5_flow_table_attr ft_attr = {}; + + ft_attr.level = level; + ft_attr.prio = prio; + return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0); } EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table); -struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, - int prio, - int num_flow_table_entries, - int max_num_groups, - u32 level, - u32 flags) +struct mlx5_flow_table* +mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups, + u32 level, + u32 flags) { + struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table *ft; if (max_num_groups > num_flow_table_entries) return ERR_PTR(-EINVAL); - ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags); + ft_attr.max_fte = num_flow_table_entries; + ft_attr.prio = prio; + ft_attr.level = level; + ft_attr.flags = flags; + + ft = mlx5_create_flow_table(ns, &ft_attr); if (IS_ERR(ft)) return ft; @@ -1827,12 +1840,18 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) static int create_anchor_flow_table(struct mlx5_flow_steering *steering) { struct mlx5_flow_namespace *ns = NULL; + struct 
mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table *ft; ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); if (WARN_ON(!ns)) return -EINVAL; - ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); + + ft_attr.max_fte = ANCHOR_SIZE; + ft_attr.level = ANCHOR_LEVEL; + ft_attr.prio = ANCHOR_PRIO; + + ft = mlx5_create_flow_table(ns, &ft_attr); if (IS_ERR(ft)) { mlx5_core_err(steering->dev, "Failed to create last anchor flow table"); return PTR_ERR(ft); @@ -1886,9 +1905,6 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering = dev->priv.steering; - if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return; - cleanup_root_ns(steering->root_ns); cleanup_root_ns(steering->esw_egress_root_ns); cleanup_root_ns(steering->esw_ingress_root_ns); @@ -1991,9 +2007,6 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) struct mlx5_flow_steering *steering; int err = 0; - if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return 0; - err = mlx5_init_fc_stats(dev); if (err) return err; @@ -2004,7 +2017,10 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) steering->dev = dev; dev->priv.steering = steering; - if (MLX5_CAP_GEN(dev, nic_flow_table) && + if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && + (MLX5_CAP_GEN(dev, nic_flow_table))) || + ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && + MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) && MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { err = init_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 8e668c63f69ec4afefb197f1f4c0a32ca3760179..81eafc7b9dd93fc4c8411aa8ad473299ca6c623e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -118,6 +118,7 @@ struct mlx5_flow_table { /* FWD rules that point on this flow table */ struct list_head fwd_rules; u32 flags; + u32 underlay_qpn; }; struct mlx5_fc_cache { @@ -152,6 +153,7 @@ struct fs_fte { u32 index; u32 action; u32 encap_id; + u32 modify_id; enum fs_fte_status status; struct mlx5_fc *counter; }; @@ -197,6 +199,11 @@ struct mlx5_flow_root_namespace { int mlx5_init_fc_stats(struct mlx5_core_dev *dev); void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev); +void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, + struct delayed_work *dwork, + unsigned long delay); +void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, + unsigned long interval); int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 7431f633de3135f5ccee6a9f506892d5f13dff35..6507d8acc54d460163717dc9a58a719c6f761671 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -165,7 +165,8 @@ static void mlx5_fc_stats_work(struct work_struct *work) list_splice_tail_init(&fc_stats->addlist, &tmplist); if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters)) - queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); + queue_delayed_work(fc_stats->wq, &fc_stats->work, + fc_stats->sampling_interval); spin_unlock(&fc_stats->addlist_lock); @@ -200,7 +201,7 @@ static void mlx5_fc_stats_work(struct work_struct *work) node = mlx5_fc_stats_query(dev, counter, last->id); } - fc_stats->next_query = now + 
MLX5_FC_STATS_PERIOD; + fc_stats->next_query = now + fc_stats->sampling_interval; } struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) @@ -265,6 +266,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) if (!fc_stats->wq) return -ENOMEM; + fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD; INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); return 0; @@ -317,3 +319,21 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter, counter->lastbytes = c.bytes; counter->lastpackets = c.packets; } + +void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, + struct delayed_work *dwork, + unsigned long delay) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + queue_delayed_work(fc_stats->wq, dwork, delay); +} + +void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, + unsigned long interval) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + fc_stats->sampling_interval = min_t(unsigned long, interval, + fc_stats->sampling_interval); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index d0bbefa08af78cac0b5b095ea283ab6e50ecd7e4..1bc14d0fded858e8e7f2b23fa5c3b748958d1c72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -137,7 +137,8 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (MLX5_CAP_GEN(dev, nic_flow_table)) { + if (MLX5_CAP_GEN(dev, nic_flow_table) || + MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c new file mode 100644 index 0000000000000000000000000000000000000000..3c84e36af0186bea101736500cf0b20bca57ba6a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c @@ -0,0 +1,498 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include "en.h" +#include "ipoib.h" + +#define IB_DEFAULT_Q_KEY 0xb1b + +static int mlx5i_open(struct net_device *netdev); +static int mlx5i_close(struct net_device *netdev); +static int mlx5i_dev_init(struct net_device *dev); +static void mlx5i_dev_cleanup(struct net_device *dev); + +static const struct net_device_ops mlx5i_netdev_ops = { + .ndo_open = mlx5i_open, + .ndo_stop = mlx5i_close, + .ndo_init = mlx5i_dev_init, + .ndo_uninit = mlx5i_dev_cleanup, +}; + +/* IPoIB mlx5 netdev profile */ + +/* Called directly after IPoIB netdevice was created to initialize SW structs */ +static void mlx5i_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + + priv->mdev = mdev; + priv->netdev = netdev; + priv->profile = profile; + priv->ppriv = ppriv; + + mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); + + mutex_init(&priv->state_lock); + + netdev->hw_features |= NETIF_F_SG; + netdev->hw_features |= NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + netdev->hw_features |= NETIF_F_GRO; + netdev->hw_features |= NETIF_F_TSO; + netdev->hw_features |= NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_RXHASH; + + netdev->netdev_ops = &mlx5i_netdev_ops; +} + +/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ +static void mlx5i_cleanup(struct mlx5e_priv *priv) +{ + /* Do nothing .. */ +} + +#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2 + +static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) +{ + struct mlx5_qp_context *context = NULL; + u32 *in = NULL; + void *addr_path; + int ret = 0; + int inlen; + void *qpc; + + inlen = MLX5_ST_SZ_BYTES(create_qp_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); + MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, + MLX5_QP_ENHANCED_ULP_STATELESS_MODE); + + addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path); + MLX5_SET(ads, addr_path, port, 1); + MLX5_SET(ads, addr_path, grh, 1); + + ret = mlx5_core_create_qp(mdev, qp, in, inlen); + if (ret) { + mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret); + goto out; + } + + /* QP states */ + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) { + ret = -ENOMEM; + goto out; + } + + context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); + context->pri_path.port = 1; + context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); + + ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); + if (ret) { + mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret); + goto out; + } + memset(context, 0, sizeof(*context)); + + ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp); + if (ret) { + mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret); + goto out; + } + + ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp); + if (ret) { + mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret); + goto out; + } + +out: + kfree(context); + kvfree(in); + return ret; +} + +static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) +{ + mlx5_core_destroy_qp(mdev, qp); +} + +static int mlx5i_init_tx(struct mlx5e_priv *priv) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + int err; + + err 
= mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp); + if (err) { + mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); + return err; + } + + err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); + if (err) { + mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); + return err; + } + + return 0; +} + +static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + + mlx5e_destroy_tis(priv->mdev, priv->tisn[0]); + mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); +} + +static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + int err; + + priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + + if (!priv->fs.ns) + return -EINVAL; + + err = mlx5e_arfs_create_tables(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", + err); + priv->netdev->hw_features &= ~NETIF_F_NTUPLE; + } + + err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); + if (err) { + netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", + err); + goto err_destroy_arfs_tables; + } + + return 0; + +err_destroy_arfs_tables: + mlx5e_arfs_destroy_tables(priv); + + return err; +} + +static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) +{ + mlx5e_destroy_ttc_table(priv); + mlx5e_arfs_destroy_tables(priv); +} + +static int mlx5i_init_rx(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_create_indirect_rqt(priv); + if (err) + return err; + + err = mlx5e_create_direct_rqts(priv); + if (err) + goto err_destroy_indirect_rqts; + + err = mlx5e_create_indirect_tirs(priv); + if (err) + goto err_destroy_direct_rqts; + + err = mlx5e_create_direct_tirs(priv); + if (err) + goto err_destroy_indirect_tirs; + + err = mlx5i_create_flow_steering(priv); + if (err) + goto err_destroy_direct_tirs; + + return 0; + +err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); +err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv); +err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv); +err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + return err; +} + +static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) +{ + mlx5i_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); +} + +static const struct mlx5e_profile mlx5i_nic_profile = { + .init = mlx5i_init, + .cleanup = mlx5i_cleanup, + .init_tx = mlx5i_init_tx, + .cleanup_tx = mlx5i_cleanup_tx, + .init_rx = mlx5i_init_rx, + .cleanup_rx = mlx5i_cleanup_rx, + .enable = NULL, /* mlx5i_enable */ + .disable = NULL, /* mlx5i_disable */ + .update_stats = NULL, /* mlx5i_update_stats */ + .max_nch = mlx5e_get_max_num_channels, + .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, + .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ + .max_tc = MLX5I_MAX_NUM_TC, +}; + +/* mlx5i netdev NDOs */ + +static int mlx5i_dev_init(struct net_device *dev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5i_priv *ipriv = priv->ppriv; + + /* Set dev address using underlay QP */ + dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff; + dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff; + dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff; + + return 0; +} + +static void mlx5i_dev_cleanup(struct net_device *dev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5i_priv *ipriv = priv->ppriv; +
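/*
 * Illustration of the ndo_init address setup above: the 24-bit underlay
 * QPN is split across three hardware-address bytes. Self-contained
 * sketch (user-space C, not part of the patch):
 */
#include <stdint.h>

static void qpn_to_addr_bytes_sketch(uint32_t qpn, uint8_t addr[4])
{
	addr[1] = (qpn >> 16) & 0xff;
	addr[2] = (qpn >> 8) & 0xff;
	addr[3] = qpn & 0xff;	/* addr[0] stays free for flag bits */
}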
struct mlx5_qp_context context; + + /* detach qp from flow-steering by resetting it */ + mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp); +} + +static int mlx5i_open(struct net_device *netdev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + set_bit(MLX5E_STATE_OPENED, &priv->state); + + err = mlx5e_open_channels(priv, &priv->channels); + if (err) + goto err_clear_state_opened_flag; + + mlx5e_refresh_tirs(priv, false); + mlx5e_activate_priv_channels(priv); + mutex_unlock(&priv->state_lock); + return 0; + +err_clear_state_opened_flag: + clear_bit(MLX5E_STATE_OPENED, &priv->state); + mutex_unlock(&priv->state_lock); + return err; +} + +static int mlx5i_close(struct net_device *netdev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + + /* May already be CLOSED in case a previous configuration operation + * (e.g. RX/TX queue size change) that involves close & open failed. + */ + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + clear_bit(MLX5E_STATE_OPENED, &priv->state); + + netif_carrier_off(priv->netdev); + mlx5e_deactivate_priv_channels(priv); + mlx5e_close_channels(&priv->channels); +unlock: + mutex_unlock(&priv->state_lock); + return 0; +} + +#ifdef notusedyet +/* IPoIB RDMA netdev callbacks */ +static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca, + union ib_gid *gid, u16 lid, int set_qkey) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = epriv->mdev; + struct mlx5i_priv *ipriv = epriv->ppriv; + int err; + + mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); + err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn); + if (err) + mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n", + ipriv->qp.qpn, gid->raw); + + return err; +} + +static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca, + union ib_gid *gid, u16 lid) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = epriv->mdev; + struct mlx5i_priv *ipriv = epriv->ppriv; + int err; + + mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); + + err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn); + if (err) + mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n", + ipriv->qp.qpn, gid->raw); + + return err; +} + +static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb, + struct ib_ah *address, u32 dqpn, u32 dqkey) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(dev); + struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)]; + struct mlx5_ib_ah *mah = to_mah(address); + + return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, dqkey); +} +#endif + +static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) +{ + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB) + return -EOPNOTSUPP; + + if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { + mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n"); + return -ENOTSUPP; + } + + return 0; +} + +static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, + struct ib_device *ibdev, + const char *name, + void (*setup)(struct net_device *)) +{ + const struct mlx5e_profile *profile = &mlx5i_nic_profile; + int nch = profile->max_nch(mdev); + struct net_device *netdev; + struct mlx5i_priv *ipriv; + struct mlx5e_priv *epriv; + int err; + + if (mlx5i_check_required_hca_cap(mdev)) { + mlx5_core_warn(mdev, "Accelerated mode is not 
supported\n"); + return ERR_PTR(-EOPNOTSUPP); + } + + /* This function should only be called once per mdev */ + err = mlx5e_create_mdev_resources(mdev); + if (err) + return NULL; + + netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv), + name, NET_NAME_UNKNOWN, + setup, + nch * MLX5E_MAX_NUM_TC, + nch); + if (!netdev) { + mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n"); + goto free_mdev_resources; + } + + ipriv = netdev_priv(netdev); + epriv = mlx5i_epriv(netdev); + + epriv->wq = create_singlethread_workqueue("mlx5i"); + if (!epriv->wq) + goto err_free_netdev; + + profile->init(mdev, netdev, profile, ipriv); + + mlx5e_attach_netdev(epriv); + netif_carrier_off(netdev); + + /* TODO: set rdma_netdev func pointers + * rn = &ipriv->rn; + * rn->hca = ibdev; + * rn->send = mlx5i_xmit; + * rn->attach_mcast = mlx5i_attach_mcast; + * rn->detach_mcast = mlx5i_detach_mcast; + */ + return netdev; + +err_free_netdev: + free_netdev(netdev); +free_mdev_resources: + mlx5e_destroy_mdev_resources(mdev); + + return NULL; +} +EXPORT_SYMBOL(mlx5_rdma_netdev_alloc); + +static void mlx5_rdma_netdev_free(struct net_device *netdev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + const struct mlx5e_profile *profile = priv->profile; + + mlx5e_detach_netdev(priv); + profile->cleanup(priv); + destroy_workqueue(priv->wq); + free_netdev(netdev); + + mlx5e_destroy_mdev_resources(priv->mdev); +} +EXPORT_SYMBOL(mlx5_rdma_netdev_free); + diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h similarity index 60% rename from drivers/infiniband/hw/qedr/qedr_hsi.h rename to drivers/net/ethernet/mellanox/mlx5/core/ipoib.h index 66d27521373f293ae66ba34d9b9287770d1f8972..bae0a5cbc8ad396c846c27848f717ebc6d68a613 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h @@ -1,5 +1,5 @@ -/* QLogic qedr NIC Driver - * Copyright (c) 2015-2016 QLogic Corporation +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -17,7 +17,7 @@ * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials + * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, @@ -29,28 +29,26 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef __QED_HSI_ROCE__ -#define __QED_HSI_ROCE__ -#include -#include -#include "qedr_hsi_rdma.h" +#ifndef __MLX5E_IPOB_H__ +#define __MLX5E_IPOB_H__ -/* Affiliated asynchronous events / errors enumeration */ -enum roce_async_events_type { - ROCE_ASYNC_EVENT_NONE = 0, - ROCE_ASYNC_EVENT_COMM_EST = 1, - ROCE_ASYNC_EVENT_SQ_DRAINED, - ROCE_ASYNC_EVENT_SRQ_LIMIT, - ROCE_ASYNC_EVENT_LAST_WQE_REACHED, - ROCE_ASYNC_EVENT_CQ_ERR, - ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, - ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, - ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, - ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, - ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, - ROCE_ASYNC_EVENT_SRQ_EMPTY, - MAX_ROCE_ASYNC_EVENTS_TYPE +#include +#include "en.h" + +#define MLX5I_MAX_NUM_TC 1 + +/* ipoib rdma netdev's private data structure */ +struct mlx5i_priv { + struct mlx5_core_qp qp; + char *mlx5e_priv[0]; }; -#endif /* __QED_HSI_ROCE__ */ +/* Extract mlx5e_priv from IPoIB netdev */ +#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv)) + +netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5_av *av, u32 dqpn, u32 dqkey); +void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); + +#endif /* __MLX5E_IPOB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0ad66324247f71c212f44f98679c80f05a2650d3..0c123d571b4cf52a0eb3b833208156766b5de6be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1280,6 +1280,8 @@ static const struct devlink_ops mlx5_devlink_ops = { .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set, .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get, + .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set, + .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, #endif }; @@ -1514,8 +1516,10 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ - { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */ - { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5, PCIe 4.0 VF */ + { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5 Ex */ + { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */ + { PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */ + { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b3dabe6e88366133fd07dab68f059d4f5d7e5e3a..fbc6e9e9e3053a7527cf49e7d00c54104acf50af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -141,6 +141,11 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev, u32 *encap_id); void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); +int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, + u8 namespace, u8 num_actions, + void *modify_actions, u32 *modify_header_id); +void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id); + bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); 
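A rough usage sketch for the modify-header API declared in the mlx5_core.h hunk above: mlx5_modify_header_alloc() takes a packed array of modify actions and returns an id that flow rules can later reference, and mlx5_modify_header_dealloc() releases it. The sketch below is an illustration only and is not part of this patch; the set_action_in encoding, the MLX5_ACTION_TYPE_SET opcode, the SMAC field id and the namespace constant are assumptions drawn from general mlx5 IFC conventions.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

/* Hypothetical caller: allocate a modify-header context that rewrites
 * the upper 32 bits of the source MAC, then release it again.
 */
static int example_modify_header(struct mlx5_core_dev *dev)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	u32 modify_header_id;
	int err;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16); /* assumed field id */
	MLX5_SET(set_action_in, action, data, 0x00112233);

	err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
				       1 /* num_actions */, action,
				       &modify_header_id);
	if (err)
		return err;

	/* ... attach modify_header_id to a flow rule via flow steering ... */

	mlx5_modify_header_dealloc(dev, modify_header_id);
	return 0;
}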
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 6b6c30deee83ca289ba0bfcb924678efe55e65e7..2fb8c6585ac711c748d18355e9d4877f779ccbf5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ spectrum_kvdl.o spectrum_acl_tcam.o \ - spectrum_acl.o spectrum_flower.o + spectrum_acl.o spectrum_flower.o \ + spectrum_cnt.o spectrum_dpipe.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index a1b48421648a3c11e25e7a5c148a25bf07d322c0..479511cf79bc1ca3f02ec3dd1b8cb578416f1cc8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4); */ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1); -/* cmd_mbox_sw2hw_cq_oi - * When set, overrun ignore is enabled. When set, updates of - * CQ consumer counter (poll for completion) or Request completion - * notifications (Arm CQ) DoorBells should not be rung on that CQ. - */ -MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1); - /* cmd_mbox_sw2hw_cq_st * Event delivery state machine * 0x0 - FIRED @@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); -/* cmd_mbox_sw2hw_eq_oi - * When set, overrun ignore is enabled. - */ -MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); - /* cmd_mbox_sw2hw_eq_st * Event delivery state machine * 0x0 - FIRED diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a4c07841aaf6254c844eb8d8512687b447928ba8..affe84eb4bff5717e5ddba4835395a8a8989f8ca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -40,9 +40,6 @@ #include #include #include -#include -#include -#include #include #include #include @@ -74,23 +71,9 @@ static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); static const char mlxsw_core_driver_name[] = "mlxsw_core"; -static struct dentry *mlxsw_core_dbg_root; - static struct workqueue_struct *mlxsw_wq; static struct workqueue_struct *mlxsw_owq; -struct mlxsw_core_pcpu_stats { - u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; - u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX]; - u64 port_rx_packets[MLXSW_PORT_MAX_PORTS]; - u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS]; - struct u64_stats_sync syncp; - u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX]; - u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS]; - u32 trap_rx_invalid; - u32 port_rx_invalid; -}; - struct mlxsw_core_port { struct devlink_port devlink_port; void *port_driver_priv; @@ -121,23 +104,48 @@ struct mlxsw_core { spinlock_t trans_list_lock; /* protects trans_list writes */ bool use_emad; } emad; - struct mlxsw_core_pcpu_stats __percpu *pcpu_stats; - struct dentry *dbg_dir; - struct { - struct debugfs_blob_wrapper vsd_blob; - struct debugfs_blob_wrapper psid_blob; - } dbg; struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ } lag; struct mlxsw_res res; struct mlxsw_hwmon *hwmon; struct mlxsw_thermal *thermal; - struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS]; + struct mlxsw_core_port *ports; + unsigned int 
max_ports; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; +#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40 + +static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core) +{ + /* Switch ports are numbered from 1 to queried value */ + if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT)) + mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core, + MAX_SYSTEM_PORT) + 1; + else + mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1; + + mlxsw_core->ports = kcalloc(mlxsw_core->max_ports, + sizeof(struct mlxsw_core_port), GFP_KERNEL); + if (!mlxsw_core->ports) + return -ENOMEM; + + return 0; +} + +static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core) +{ + kfree(mlxsw_core->ports); +} + +unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->max_ports; +} +EXPORT_SYMBOL(mlxsw_core_max_ports); + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) { return mlxsw_core->driver_priv; @@ -703,91 +711,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, * Core functions *****************/ -static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_core *mlxsw_core = file->private; - struct mlxsw_core_pcpu_stats *p; - u64 rx_packets, rx_bytes; - u64 tmp_rx_packets, tmp_rx_bytes; - u32 rx_dropped, rx_invalid; - unsigned int start; - int i; - int j; - static const char hdr[] = - " NUM RX_PACKETS RX_BYTES RX_DROPPED\n"; - - seq_printf(file, hdr); - for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) { - rx_packets = 0; - rx_bytes = 0; - rx_dropped = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - do { - start = u64_stats_fetch_begin(&p->syncp); - tmp_rx_packets = p->trap_rx_packets[i]; - tmp_rx_bytes = p->trap_rx_bytes[i]; - } while (u64_stats_fetch_retry(&p->syncp, start)); - - rx_packets += tmp_rx_packets; - rx_bytes += tmp_rx_bytes; - rx_dropped += p->trap_rx_dropped[i]; - } - seq_printf(file, "trap %3d %12llu %12llu %10u\n", - i, rx_packets, rx_bytes, rx_dropped); - } - rx_invalid = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - rx_invalid += p->trap_rx_invalid; - } - seq_printf(file, "trap INV %10u\n", - rx_invalid); - - for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) { - rx_packets = 0; - rx_bytes = 0; - rx_dropped = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - do { - start = u64_stats_fetch_begin(&p->syncp); - tmp_rx_packets = p->port_rx_packets[i]; - tmp_rx_bytes = p->port_rx_bytes[i]; - } while (u64_stats_fetch_retry(&p->syncp, start)); - - rx_packets += tmp_rx_packets; - rx_bytes += tmp_rx_bytes; - rx_dropped += p->port_rx_dropped[i]; - } - seq_printf(file, "port %3d %12llu %12llu %10u\n", - i, rx_packets, rx_bytes, rx_dropped); - } - rx_invalid = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - rx_invalid += p->port_rx_invalid; - } - seq_printf(file, "port INV %10u\n", - rx_invalid); - return 0; -} - -static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f) -{ - struct mlxsw_core *mlxsw_core = inode->i_private; - - return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core); -} - -static const struct file_operations mlxsw_core_rx_stats_dbg_ops = { - .owner = THIS_MODULE, - .open = mlxsw_core_rx_stats_dbg_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek -}; - int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) { spin_lock(&mlxsw_core_driver_list_lock); @@ -835,39 +758,13 @@ static void 
mlxsw_core_driver_put(const char *kind) spin_unlock(&mlxsw_core_driver_list_lock); } -static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core) -{ - const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; - - mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name, - mlxsw_core_dbg_root); - if (!mlxsw_core->dbg_dir) - return -ENOMEM; - debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir, - mlxsw_core, &mlxsw_core_rx_stats_dbg_ops); - mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd; - mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd); - debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir, - &mlxsw_core->dbg.vsd_blob); - mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid; - mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid); - debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir, - &mlxsw_core->dbg.psid_blob); - return 0; -} - -static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core) -{ - debugfs_remove_recursive(mlxsw_core->dbg_dir); -} - static int mlxsw_devlink_port_split(struct devlink *devlink, unsigned int port_index, unsigned int count) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= MLXSW_PORT_MAX_PORTS) + if (port_index >= mlxsw_core->max_ports) return -EINVAL; if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; @@ -879,7 +776,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink, { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= MLXSW_PORT_MAX_PORTS) + if (port_index >= mlxsw_core->max_ports) return -EINVAL; if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; @@ -1101,18 +998,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, mlxsw_core->bus_priv = bus_priv; mlxsw_core->bus_info = mlxsw_bus_info; - mlxsw_core->pcpu_stats = - netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats); - if (!mlxsw_core->pcpu_stats) { - err = -ENOMEM; - goto err_alloc_stats; - } - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, &mlxsw_core->res); if (err) goto err_bus_init; + err = mlxsw_ports_init(mlxsw_core); + if (err) + goto err_ports_init; + if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { alloc_size = sizeof(u8) * @@ -1148,15 +1042,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_driver_init; } - err = mlxsw_core_debugfs_init(mlxsw_core); - if (err) - goto err_debugfs_init; - return 0; -err_debugfs_init: - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); err_driver_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: @@ -1167,10 +1054,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_emad_init: kfree(mlxsw_core->lag.mapping); err_alloc_lag_mapping: + mlxsw_ports_fini(mlxsw_core); +err_ports_init: mlxsw_bus->fini(bus_priv); err_bus_init: - free_percpu(mlxsw_core->pcpu_stats); -err_alloc_stats: devlink_free(devlink); err_devlink_alloc: mlxsw_core_driver_put(device_kind); @@ -1183,15 +1070,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) const char *device_kind = mlxsw_core->bus_info->device_kind; struct devlink *devlink = priv_to_devlink(mlxsw_core); - mlxsw_core_debugfs_fini(mlxsw_core); if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); kfree(mlxsw_core->lag.mapping); + 
mlxsw_ports_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); - free_percpu(mlxsw_core->pcpu_stats); devlink_free(devlink); mlxsw_core_driver_put(device_kind); } @@ -1639,7 +1525,6 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, { struct mlxsw_rx_listener_item *rxl_item; const struct mlxsw_rx_listener *rxl; - struct mlxsw_core_pcpu_stats *pcpu_stats; u8 local_port; bool found = false; @@ -1661,7 +1546,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, __func__, local_port, rx_info->trap_id); if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || - (local_port >= MLXSW_PORT_MAX_PORTS)) + (local_port >= mlxsw_core->max_ports)) goto drop; rcu_read_lock(); @@ -1678,26 +1563,10 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, if (!found) goto drop; - pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats); - u64_stats_update_begin(&pcpu_stats->syncp); - pcpu_stats->port_rx_packets[local_port]++; - pcpu_stats->port_rx_bytes[local_port] += skb->len; - pcpu_stats->trap_rx_packets[rx_info->trap_id]++; - pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len; - u64_stats_update_end(&pcpu_stats->syncp); - rxl->func(skb, local_port, rxl_item->priv); return; drop: - if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX) - this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid); - else - this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]); - if (local_port >= MLXSW_PORT_MAX_PORTS) - this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid); - else - this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]); dev_kfree_skb(skb); } EXPORT_SYMBOL(mlxsw_core_skb_receive); @@ -1926,15 +1795,8 @@ static int __init mlxsw_core_module_init(void) err = -ENOMEM; goto err_alloc_ordered_workqueue; } - mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); - if (!mlxsw_core_dbg_root) { - err = -ENOMEM; - goto err_debugfs_create_dir; - } return 0; -err_debugfs_create_dir: - destroy_workqueue(mlxsw_owq); err_alloc_ordered_workqueue: destroy_workqueue(mlxsw_wq); return err; @@ -1942,7 +1804,6 @@ static int __init mlxsw_core_module_init(void) static void __exit mlxsw_core_module_exit(void) { - debugfs_remove_recursive(mlxsw_core_dbg_root); destroy_workqueue(mlxsw_owq); destroy_workqueue(mlxsw_wq); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index cf38cf9027f80a95a4f8a744de7551cb0810bf51..7fb35395adf52076ead77cbcbe0381317c197627 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -57,6 +57,8 @@ struct mlxsw_driver; struct mlxsw_bus; struct mlxsw_bus_info; +unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 5f337715a4da64dcd94178bd627189648d6f775b..46304ffb94491d26bc08b85e38249088f7f8338a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, return oneact + MLXSW_AFA_PAYLOAD_OFFSET; } +/* VLAN Action + * ----------- + * VLAN action is used for manipulating VLANs. 
It can be used to implement QinQ, + * VLAN translation, change of PCP bits of the VLAN tag, push, pop or swap VLANs + * and more. + */ + +#define MLXSW_AFA_VLAN_CODE 0x02 +#define MLXSW_AFA_VLAN_SIZE 1 + +enum mlxsw_afa_vlan_vlan_tag_cmd { + MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, + MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG, + MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG, +}; + +enum mlxsw_afa_vlan_cmd { + MLXSW_AFA_VLAN_CMD_NOP, + MLXSW_AFA_VLAN_CMD_SET_OUTER, + MLXSW_AFA_VLAN_CMD_SET_INNER, + MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER, + MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER, + MLXSW_AFA_VLAN_CMD_SWAP, +}; + +/* afa_vlan_vlan_tag_cmd + * Tag command: push, pop, nop VLAN header. + */ +MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3); + +/* afa_vlan_vid_cmd */ +MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3); + +/* afa_vlan_vid */ +MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12); + +/* afa_vlan_ethertype_cmd */ +MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3); + +/* afa_vlan_ethertype + * Index to EtherTypes in Switch VLAN EtherType Register (SVER). + */ +MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3); + +/* afa_vlan_pcp_cmd */ +MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3); + +/* afa_vlan_pcp */ +MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3); + +static inline void +mlxsw_afa_vlan_pack(char *payload, + enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd, + enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid, + enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp, + enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype) +{ + mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd); + mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd); + mlxsw_afa_vlan_vid_set(payload, vid); + mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd); + mlxsw_afa_vlan_pcp_set(payload, pcp); + mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd); + mlxsw_afa_vlan_ethertype_set(payload, ethertype); +} + +int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, + u16 vid, u8 pcp, u8 et) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_VLAN_CODE, + MLXSW_AFA_VLAN_SIZE); + + if (!act) + return -ENOBUFS; + mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, + MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, + MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, + MLXSW_AFA_VLAN_CMD_SET_OUTER, et); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify); + /* Trap / Discard Action + * --------------------- + * The Trap / Discard action enables trapping / mirroring packets to the CPU @@ -677,3 +760,98 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, return err; } EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); + +/* Policing and Counting Action + * ---------------------------- + * Policing and Counting action is used for binding policer and counter + * to ACL rules. + */ + +#define MLXSW_AFA_POLCNT_CODE 0x08 +#define MLXSW_AFA_POLCNT_SIZE 1 + +enum mlxsw_afa_polcnt_counter_set_type { + /* No count */ + MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Count packets and bytes */ + MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, + /* Count only packets */ + MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05, +}; + +/* afa_polcnt_counter_set_type + * Counter set type for flow counters. + */ +MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8); + +/* afa_polcnt_counter_index + * Counter index for flow counters. 
+ */ +MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24); + +static inline void +mlxsw_afa_polcnt_pack(char *payload, + enum mlxsw_afa_polcnt_counter_set_type set_type, + u32 counter_index) +{ + mlxsw_afa_polcnt_counter_set_type_set(payload, set_type); + mlxsw_afa_polcnt_counter_index_set(payload, counter_index); +} + +int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, + u32 counter_index) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_POLCNT_CODE, + MLXSW_AFA_POLCNT_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES, + counter_index); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_counter); + +/* Virtual Router and Forwarding Domain Action + * ------------------------------------------- + * Virtual Switch action is used to manipulate the Virtual Router (VR), + * MPLS label space and the Forwarding Identifier (FID). + */ + +#define MLXSW_AFA_VIRFWD_CODE 0x0E +#define MLXSW_AFA_VIRFWD_SIZE 1 + +enum mlxsw_afa_virfwd_fid_cmd { + /* Do nothing */ + MLXSW_AFA_VIRFWD_FID_CMD_NOOP, + /* Set the Forwarding Identifier (FID) to fid */ + MLXSW_AFA_VIRFWD_FID_CMD_SET, +}; + +/* afa_virfwd_fid_cmd */ +MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3); + +/* afa_virfwd_fid + * The FID value. + */ +MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16); + +static inline void mlxsw_afa_virfwd_pack(char *payload, + enum mlxsw_afa_virfwd_fid_cmd fid_cmd, + u16 fid) +{ + mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd); + mlxsw_afa_virfwd_fid_set(payload, fid); +} + +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_VIRFWD_CODE, + MLXSW_AFA_VIRFWD_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 43f78dcfe3942b87c5167054eff93f4f45e19a52..bd8b91d02880af893065c374da51965a4350d0f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -62,5 +62,10 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); +int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, + u16 vid, u8 pcp, u8 et); +int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, + u32 counter_index); +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index e4fcba7c2af202002e9382cee5a0a83857707f6b..c75e9141e3ec57b9ca47f1b35cc717c4dae14c83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -54,6 +54,8 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_DST_IP6_LO, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + MLXSW_AFK_ELEMENT_VID, + MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_MAX, }; @@ -88,7 +90,7 @@ struct mlxsw_afk_element_info { MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \ _element, _offset, 0, _size) -/* For the purpose of the driver, define a internal storage scratchpad 
+/* For the purpose of the driver, define an internal storage scratchpad * that will be used to store key/mask values. For each defined element type * define an internal storage geometry. */ @@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), + MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index a223c85dfde064eee873eb6ffd6aae818a4f46ba..23f7d828cf676244766e582674a2946ea467bb6b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -44,8 +44,6 @@ #include #include #include -#include -#include #include #include "pci_hw.h" @@ -57,8 +55,6 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; -static struct dentry *mlxsw_pci_dbg_root; - #define mlxsw_pci_write32(mlxsw_pci, reg, val) \ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) #define mlxsw_pci_read32(mlxsw_pci, reg) \ @@ -71,21 +67,6 @@ enum mlxsw_pci_queue_type { MLXSW_PCI_QUEUE_TYPE_EQ, }; -static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type) -{ - switch (q_type) { - case MLXSW_PCI_QUEUE_TYPE_SDQ: - return "sdq"; - case MLXSW_PCI_QUEUE_TYPE_RDQ: - return "rdq"; - case MLXSW_PCI_QUEUE_TYPE_CQ: - return "cq"; - case MLXSW_PCI_QUEUE_TYPE_EQ: - return "eq"; - } - BUG(); -} - #define MLXSW_PCI_QUEUE_TYPE_COUNT 4 static const u16 mlxsw_pci_doorbell_type_offset[] = { @@ -155,7 +136,6 @@ struct mlxsw_pci { u8 __iomem *hw_addr; struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; u32 doorbell_offset; - struct msix_entry msix_entry; struct mlxsw_core *core; struct { struct mlxsw_pci_mem_item *items; @@ -174,7 +154,6 @@ struct mlxsw_pci { } comp; } cmd; struct mlxsw_bus_info bus_info; - struct dentry *dbg_dir; }; static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) @@ -261,21 +240,11 @@ static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci) return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ); } -static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ); -} - static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci) { return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ); } -static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ); -} - static struct mlxsw_pci_queue * __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_queue_type q_type, u8 q_num) @@ -390,26 +359,6 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); } -static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM PROD_COUNT CONS_COUNT COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) { - q = mlxsw_pci_sdq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %5d\n", - i, q->producer_counter, q->consumer_counter, - q->count); 
- spin_unlock_bh(&q->lock); - } - return 0; -} - static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, int index, char *frag_data, size_t frag_len, int direction) @@ -544,26 +493,6 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, } } -static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM PROD_COUNT CONS_COUNT COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) { - q = mlxsw_pci_rdq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %5d\n", - i, q->producer_counter, q->consumer_counter, - q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { @@ -580,7 +509,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); - mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -602,27 +530,6 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); } -static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) { - q = mlxsw_pci_cq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %10d %5d\n", - i, q->consumer_counter, q->u.cq.comp_sdq_count, - q->u.cq.comp_rdq_count, q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, @@ -755,7 +662,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, } mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ - mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -777,27 +683,6 @@ static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); } -static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) { - q = mlxsw_pci_eq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %10d %10d %5d\n", - i, q->consumer_counter, q->u.eq.ev_cmd_count, - q->u.eq.ev_comp_count, q->u.eq.ev_other_count, - q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) { mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); @@ -868,7 +753,6 @@ struct mlxsw_pci_queue_ops { void (*fini)(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q); void (*tasklet)(unsigned long data); - int 
(*dbg_read)(struct seq_file *s, void *data); u16 elem_count; u8 elem_size; }; @@ -877,7 +761,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_SDQ, .init = mlxsw_pci_sdq_init, .fini = mlxsw_pci_sdq_fini, - .dbg_read = mlxsw_pci_sdq_dbg_read, .elem_count = MLXSW_PCI_WQE_COUNT, .elem_size = MLXSW_PCI_WQE_SIZE, }; @@ -886,7 +769,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_RDQ, .init = mlxsw_pci_rdq_init, .fini = mlxsw_pci_rdq_fini, - .dbg_read = mlxsw_pci_rdq_dbg_read, .elem_count = MLXSW_PCI_WQE_COUNT, .elem_size = MLXSW_PCI_WQE_SIZE }; @@ -896,7 +778,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, .tasklet = mlxsw_pci_cq_tasklet, - .dbg_read = mlxsw_pci_cq_dbg_read, .elem_count = MLXSW_PCI_CQE_COUNT, .elem_size = MLXSW_PCI_CQE_SIZE }; @@ -906,7 +787,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { .init = mlxsw_pci_eq_init, .fini = mlxsw_pci_eq_fini, .tasklet = mlxsw_pci_eq_tasklet, - .dbg_read = mlxsw_pci_eq_dbg_read, .elem_count = MLXSW_PCI_EQE_COUNT, .elem_size = MLXSW_PCI_EQE_SIZE }; @@ -984,9 +864,7 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_pci_queue_ops *q_ops, u8 num_qs) { - struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_type_group *queue_group; - char tmp[16]; int i; int err; @@ -1003,10 +881,6 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, } queue_group->count = num_qs; - sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type)); - debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir, - q_ops->dbg_read); - return 0; err_queue_init: @@ -1534,7 +1408,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_aqs_init; - err = request_irq(mlxsw_pci->msix_entry.vector, + err = request_irq(pci_irq_vector(pdev, 0), mlxsw_pci_eq_irq_handler, 0, mlxsw_pci->bus_info.device_kind, mlxsw_pci); if (err) { @@ -1567,7 +1441,7 @@ static void mlxsw_pci_fini(void *bus_priv) { struct mlxsw_pci *mlxsw_pci = bus_priv; - free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci); + free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); @@ -1842,8 +1716,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_sw_reset; } - err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1); - if (err) { + err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (err < 0) { dev_err(&pdev->dev, "MSI-X init failed\n"); goto err_msix_init; } @@ -1852,14 +1726,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; - mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name, - mlxsw_pci_dbg_root); - if (!mlxsw_pci->dbg_dir) { - dev_err(&pdev->dev, "Failed to create debugfs dir\n"); - err = -ENOMEM; - goto err_dbg_create_dir; - } - err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus, mlxsw_pci); if (err) { @@ -1870,9 +1736,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: - debugfs_remove_recursive(mlxsw_pci->dbg_dir); -err_dbg_create_dir: - pci_disable_msix(mlxsw_pci->pdev); + 
pci_free_irq_vectors(mlxsw_pci->pdev); err_msix_init: err_sw_reset: iounmap(mlxsw_pci->hw_addr); @@ -1892,8 +1756,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core); - debugfs_remove_recursive(mlxsw_pci->dbg_dir); - pci_disable_msix(mlxsw_pci->pdev); + pci_free_irq_vectors(mlxsw_pci->pdev); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); @@ -1916,15 +1779,11 @@ EXPORT_SYMBOL(mlxsw_pci_driver_unregister); static int __init mlxsw_pci_module_init(void) { - mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL); - if (!mlxsw_pci_dbg_root) - return -ENOMEM; return 0; } static void __exit mlxsw_pci_module_exit(void) { - debugfs_remove_recursive(mlxsw_pci_dbg_root); } module_init(mlxsw_pci_module_init); diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index 3d42146473b30a786629ec06091eb4364cdfde37..c580abba8d342b844b8f776e32fd8d683722da18 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -49,20 +49,12 @@ #define MLXSW_PORT_MID 0xd000 -#define MLXSW_PORT_MAX_PHY_PORTS 0x40 -#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1) - #define MLXSW_PORT_MAX_IB_PHY_PORTS 36 #define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1) -#define MLXSW_PORT_DEVID_BITS_OFFSET 10 -#define MLXSW_PORT_PHY_BITS_OFFSET 4 -#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) - #define MLXSW_PORT_CPU_PORT 0x0 -#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2) -#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) +#define MLXSW_PORT_DONT_CARE 0xFF #define MLXSW_PORT_MODULE_MAX_WIDTH 4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index d9616daf8a705645eb5b14e5af889e0b5020ab0d..83b277c8090e3d16a4b32e380eee387aac95a1a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4125,6 +4125,60 @@ MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16); */ MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12); +/* Shared between ingress/egress */ +enum mlxsw_reg_ritr_counter_set_type { + /* No Count. */ + MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT = 0x0, + /* Basic. Used for router interfaces, counting the following: + * - Error and Discard counters. + * - Unicast, Multicast and Broadcast counters. Sharing the + * same set of counters for the different types of traffic + * (IPv4, IPv6 and MPLS). + */ + MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC = 0x9, +}; + +/* reg_ritr_ingress_counter_index + * Counter Index for flow counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24); + +/* reg_ritr_ingress_counter_set_type + * Ingress Counter Set Type for router interface counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ingress_counter_set_type, 0x38, 24, 8); + +/* reg_ritr_egress_counter_index + * Counter Index for flow counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, egress_counter_index, 0x3C, 0, 24); + +/* reg_ritr_egress_counter_set_type + * Egress Counter Set Type for router interface counter. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, ritr, egress_counter_set_type, 0x3C, 24, 8); + +static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index, + bool enable, bool egress) +{ + enum mlxsw_reg_ritr_counter_set_type set_type; + + if (enable) + set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC; + else + set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT; + mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type); + + if (egress) + mlxsw_reg_ritr_egress_counter_index_set(payload, index); + else + mlxsw_reg_ritr_ingress_counter_index_set(payload, index); +} + static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif) { MLXSW_REG_ZERO(ritr, payload); @@ -4141,7 +4195,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag, static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, enum mlxsw_reg_ritr_if_type type, - u16 rif, u16 mtu, const char *mac) + u16 rif, u16 vr_id, u16 mtu, + const char *mac) { bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; @@ -4153,6 +4208,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); + mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); } @@ -4285,6 +4341,129 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac); } +/* RICNT - Router Interface Counter Register + * ----------------------------------------- + * The RICNT register retrieves per-port performance counters. + */ +#define MLXSW_REG_RICNT_ID 0x800B +#define MLXSW_REG_RICNT_LEN 0x100 + +MLXSW_REG_DEFINE(ricnt, MLXSW_REG_RICNT_ID, MLXSW_REG_RICNT_LEN); + +/* reg_ricnt_counter_index + * Counter index. + * Access: RW + */ +MLXSW_ITEM32(reg, ricnt, counter_index, 0x04, 0, 24); + +enum mlxsw_reg_ricnt_counter_set_type { + /* No Count. */ + MLXSW_REG_RICNT_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Basic. Used for router interfaces, counting the following: + * - Error and Discard counters. + * - Unicast, Multicast and Broadcast counters. Sharing the + * same set of counters for the different types of traffic + * (IPv4, IPv6 and MPLS). + */ + MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC = 0x09, +}; + +/* reg_ricnt_counter_set_type + * Counter Set Type for router interface counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ricnt, counter_set_type, 0x04, 24, 8); + +enum mlxsw_reg_ricnt_opcode { + /* Nop. Supported only for read access */ + MLXSW_REG_RICNT_OPCODE_NOP = 0x00, + /* Clear. Setting the clr bit will reset the counter value for + * all counters of the specified Router Interface. + */ + MLXSW_REG_RICNT_OPCODE_CLEAR = 0x08, +}; + +/* reg_ricnt_opcode + * Opcode. + * Access: RW + */ +MLXSW_ITEM32(reg, ricnt, op, 0x00, 28, 4); + +/* reg_ricnt_good_unicast_packets + * good unicast packets. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_unicast_packets, 0x08, 0, 64); + +/* reg_ricnt_good_multicast_packets + * good multicast packets. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_multicast_packets, 0x10, 0, 64); + +/* reg_ricnt_good_broadcast_packets + * good broadcast packets + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_broadcast_packets, 0x18, 0, 64); + +/* reg_ricnt_good_unicast_bytes + * A count of L3 data and padding octets not including L2 headers + * for good unicast frames. 
+ * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_unicast_bytes, 0x20, 0, 64); + +/* reg_ricnt_good_multicast_bytes + * A count of L3 data and padding octets not including L2 headers + * for good multicast frames. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_multicast_bytes, 0x28, 0, 64); + +/* reg_ricnt_good_broadcast_bytes + * A count of L3 data and padding octets not including L2 headers + * for good broadcast frames. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_broadcast_bytes, 0x30, 0, 64); + +/* reg_ricnt_error_packets + * A count of errored frames that do not pass the router checks. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, error_packets, 0x38, 0, 64); + +/* reg_ricnt_discard_packets + * A count of non-errored frames that do not pass the router checks. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, discard_packets, 0x40, 0, 64); + +/* reg_ricnt_error_bytes + * A count of L3 data and padding octets not including L2 headers + * for errored frames. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, error_bytes, 0x48, 0, 64); + +/* reg_ricnt_discard_bytes + * A count of L3 data and padding octets not including L2 headers + * for non-errored frames that do not pass the router checks. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, discard_bytes, 0x50, 0, 64); + +static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index, + enum mlxsw_reg_ricnt_opcode op) +{ + MLXSW_REG_ZERO(ricnt, payload); + mlxsw_reg_ricnt_op_set(payload, op); + mlxsw_reg_ricnt_counter_index_set(payload, index); + mlxsw_reg_ricnt_counter_set_type_set(payload, + MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC); +} + /* RALTA - Router Algorithmic LPM Tree Allocation Register * ------------------------------------------------------- * RALTA is used to allocate the LPM trees of the SHSPM method. @@ -5504,6 +5683,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, mlxsw_reg_mpsc_rate_set(payload, rate); } +/* MGPC - Monitoring General Purpose Counter Set Register + * The MGPC register retrieves and sets the General Purpose Counter Set. + */ +#define MLXSW_REG_MGPC_ID 0x9081 +#define MLXSW_REG_MGPC_LEN 0x18 + +MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN); + +enum mlxsw_reg_mgpc_counter_set_type { + /* No count */ + MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Count packets and bytes */ + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, + /* Count only packets */ + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05, +}; + +/* reg_mgpc_counter_set_type + * Counter set type. + * Access: OP + */ +MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8); + +/* reg_mgpc_counter_index + * Counter index. + * Access: Index + */ +MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24); + +enum mlxsw_reg_mgpc_opcode { + /* Nop */ + MLXSW_REG_MGPC_OPCODE_NOP = 0x00, + /* Clear counters */ + MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08, +}; + +/* reg_mgpc_opcode + * Opcode. + * Access: OP + */ +MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4); + +/* reg_mgpc_byte_counter + * Byte counter value. + * Access: RW + */ +MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64); + +/* reg_mgpc_packet_counter + * Packet counter value. 
+ * Access: RW + */ +MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64); + +static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, + enum mlxsw_reg_mgpc_opcode opcode, + enum mlxsw_reg_mgpc_counter_set_type set_type) +{ + MLXSW_REG_ZERO(mgpc, payload); + mlxsw_reg_mgpc_counter_index_set(payload, counter_index); + mlxsw_reg_mgpc_counter_set_type_set(payload, set_type); + mlxsw_reg_mgpc_opcode_set(payload, opcode); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. @@ -5960,6 +6203,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rgcr), MLXSW_REG(ritr), MLXSW_REG(ratr), + MLXSW_REG(ricnt), MLXSW_REG(ralta), MLXSW_REG(ralst), MLXSW_REG(raltb), @@ -5977,6 +6221,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mpar), MLXSW_REG(mlcr), MLXSW_REG(mpsc), + MLXSW_REG(mgpc), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index bce8c2e006302db45ce4eaedf8b3a368ec2660c4..9556d934714b0871119d52258813a23f94227d09 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -43,11 +43,15 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, MLXSW_RES_ID_MAX_TRAP_GROUPS, + MLXSW_RES_ID_COUNTER_POOL_SIZE, MLXSW_RES_ID_MAX_SPAN, + MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES, + MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC, MLXSW_RES_ID_MAX_SYSTEM_PORT, MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, + MLXSW_RES_ID_CELL_SIZE, MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, MLXSW_RES_ID_ACL_MAX_TCAM_RULES, MLXSW_RES_ID_ACL_MAX_REGIONS, @@ -59,6 +63,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, + MLXSW_RES_ID_MAX_LPM_TREES, /* Internal resources. * Determined by the SW, not queried from the HW. 
@@ -75,11 +80,15 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, + [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410, [MLXSW_RES_ID_MAX_SPAN] = 0x2420, + [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443, + [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449, [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ + [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, @@ -91,6 +100,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, + [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, }; struct mlxsw_res { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 16484f24b7dbbaa2fe10170bd7cb46fee9832938..88357cee767986073ae02f9560454d1b538d72b5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -66,6 +66,8 @@ #include "port.h" #include "trap.h" #include "txheader.h" +#include "spectrum_cnt.h" +#include "spectrum_dpipe.h" static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp_driver_version[] = "1.0"; @@ -138,6 +140,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); +int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index, u64 *packets, + u64 *bytes) +{ + char mgpc_pl[MLXSW_REG_MGPC_LEN]; + int err; + + mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); + if (err) + return err; + *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); + *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); + return 0; +} + +static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index) +{ + char mgpc_pl[MLXSW_REG_MGPC_LEN]; + + mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); +} + +int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, + unsigned int *p_counter_index) +{ + int err; + + err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, + p_counter_index); + if (err) + return err; + err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); + if (err) + goto err_counter_clear; + return 0; + +err_counter_clear: + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, + *p_counter_index); + return err; +} + +void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index) +{ + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, + counter_index); +} + static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -304,9 +360,10 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) return false; } -static int mlxsw_sp_span_mtu_to_buffsize(int mtu) +static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp, + int mtu) { - return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1; + return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1; } 
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) @@ -319,8 +376,9 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) * updated according to the mtu value */ if (mlxsw_sp_span_is_egress_mirror(port)) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, - mlxsw_sp_span_mtu_to_buffsize(mtu)); + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); if (err) { netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); @@ -357,8 +415,10 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, /* if it is an egress SPAN, bind a shared buffer to it */ if (type == MLXSW_SP_SPAN_EGRESS) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, - mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu)); + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, + port->dev->mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); if (err) { netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); @@ -745,19 +805,47 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) return 0; } -static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu, - bool pause_en, bool pfc_en, u16 delay) +static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, + int mtu) { - u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); + return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); +} - delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) : - MLXSW_SP_PAUSE_DELAY; +#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ + +static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, + u16 delay) +{ + delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, + BITS_PER_BYTE)); + return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, + mtu); +} + +/* Maximum delay buffer needed in case of PAUSE frames, in bytes. + * Assumes 100m cable and maximum MTU. 
+ */ +#define MLXSW_SP_PAUSE_DELAY 58752 - if (pause_en || pfc_en) - mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index, - pg_size + delay, pg_size); +static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, + u16 delay, bool pfc, bool pause) +{ + if (pfc) + return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); + else if (pause) + return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); + else + return 0; +} + +static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, + bool lossy) +{ + if (lossy) + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); else - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); + mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, + thres); } int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, @@ -778,6 +866,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + bool lossy; + u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -789,7 +879,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, if (!configure) continue; - mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay); + + lossy = !(pfc || pause_en); + thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, + pause_en); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); @@ -966,8 +1061,9 @@ mlxsw_sp_port_get_stats64(struct net_device *dev, memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); } -int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, - u16 vid_end, bool is_member, bool untagged) +static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool is_member, bool untagged) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *spvm_pl; @@ -984,6 +1080,26 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, return err; } +int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, + u16 vid_end, bool is_member, bool untagged) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { + vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), + vid_end); + + err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, + is_member, untagged); + if (err) + return err; + } + + return 0; +} + static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) { enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; @@ -1368,7 +1484,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, tc->cls_mall); return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } case TC_SETUP_CLSFLOWER: switch (tc->cls_flower->command) { @@ -1379,6 +1495,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, tc->cls_flower); return 0; + case TC_CLSFLOWER_STATS: + return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, + tc->cls_flower); default: return -EOPNOTSUPP; } @@ -1492,6 +1611,7 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, struct mlxsw_sp_port_hw_stats { char str[ETH_GSTRING_LEN]; u64 (*getter)(const char *payload); + bool cells_bytes; }; static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] 
= { @@ -1612,17 +1732,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) -static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl) -{ - u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); - - return MLXSW_SP_CELLS_TO_BYTES(transmit_queue); -} - static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { { .str = "tc_transmit_queue_tc", - .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, + .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, + .cells_bytes = true, }, { .str = "tc_no_buffer_discard_uc_tc", @@ -1734,6 +1848,8 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, enum mlxsw_reg_ppcnt_grp grp, int prio, u64 *data, int data_index) { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port_hw_stats *hw_stats; char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; int i, len; @@ -1743,8 +1859,13 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, if (err) return; mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); - for (i = 0; i < len; i++) + for (i = 0; i < len; i++) { data[data_index + i] = hw_stats[i].getter(ppcnt_pl); + if (!hw_stats[i].cells_bytes) + continue; + data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, + data[data_index + i]); + } } static void mlxsw_sp_port_get_stats(struct net_device *dev, @@ -2537,25 +2658,33 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) { int i; - for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->port_to_module); kfree(mlxsw_sp->ports); } static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); u8 module, width, lane; size_t alloc_size; int i; int err; - alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; + alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); if (!mlxsw_sp->ports) return -ENOMEM; - for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); + if (!mlxsw_sp->port_to_module) { + err = -ENOMEM; + goto err_port_to_module_alloc; + } + + for (i = 1; i < max_ports; i++) { err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, &width, &lane); if (err) @@ -2575,6 +2704,8 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) for (i--; i >= 1; i--) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->port_to_module); +err_port_to_module_alloc: kfree(mlxsw_sp->ports); return err; } @@ -2877,6 +3008,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), + MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), /* L3 traps */ MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), @@ -3158,6 +3290,18 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); } +static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create); + +static int 
mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true); +} + +static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -3224,6 +3368,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_acl_init; } + err = mlxsw_sp_counter_pool_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); + goto err_counter_pool_init; + } + + err = mlxsw_sp_dpipe_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); + goto err_dpipe_init; + } + + err = mlxsw_sp_dummy_fid_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n"); + goto err_dummy_fid_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -3233,6 +3395,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + mlxsw_sp_dummy_fid_fini(mlxsw_sp); +err_dummy_fid_init: + mlxsw_sp_dpipe_fini(mlxsw_sp); +err_dpipe_init: + mlxsw_sp_counter_pool_fini(mlxsw_sp); +err_counter_pool_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); @@ -3255,6 +3423,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_dummy_fid_fini(mlxsw_sp); + mlxsw_sp_dpipe_fini(mlxsw_sp); + mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); @@ -3326,13 +3497,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } -static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) +static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) { - struct mlxsw_sp_port **port = data; + struct mlxsw_sp_port **p_mlxsw_sp_port = data; int ret = 0; if (mlxsw_sp_port_dev_check(lower_dev)) { - *port = netdev_priv(lower_dev); + *p_mlxsw_sp_port = netdev_priv(lower_dev); ret = 1; } @@ -3341,18 +3512,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) { - struct mlxsw_sp_port *port; + struct mlxsw_sp_port *mlxsw_sp_port; if (mlxsw_sp_port_dev_check(dev)) return netdev_priv(dev); - port = NULL; - netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port); + mlxsw_sp_port = NULL; + netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); - return port; + return mlxsw_sp_port; } -static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) +struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port; @@ -3362,15 +3533,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) { - struct mlxsw_sp_port *port; + struct mlxsw_sp_port *mlxsw_sp_port; if (mlxsw_sp_port_dev_check(dev)) return netdev_priv(dev); - port = NULL; - netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port); + mlxsw_sp_port = NULL; + netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, + &mlxsw_sp_port); - return port; + return mlxsw_sp_port; } struct mlxsw_sp_port 
*mlxsw_sp_port_lower_dev_hold(struct net_device *dev) @@ -3390,546 +3562,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) dev_put(mlxsw_sp_port->dev); } -static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, - unsigned long event) -{ - switch (event) { - case NETDEV_UP: - if (!r) - return true; - r->ref_count++; - return false; - case NETDEV_DOWN: - if (r && --r->ref_count == 0) - return true; - /* It is possible we already removed the RIF ourselves - * if it was assigned to a netdev that is now a bridge - * or LAG slave. - */ - return false; - } - - return false; -} - -static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) -{ - int i; - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) - if (!mlxsw_sp->rifs[i]) - return i; - - return MLXSW_SP_INVALID_RIF; -} - -static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, - bool *p_lagged, u16 *p_system_port) -{ - u8 local_port = mlxsw_sp_vport->local_port; - - *p_lagged = mlxsw_sp_vport->lagged; - *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port; -} - -static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *l3_dev, u16 rif, - bool create) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - bool lagged = mlxsw_sp_vport->lagged; - char ritr_pl[MLXSW_REG_RITR_LEN]; - u16 system_port; - - mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, - l3_dev->mtu, l3_dev->dev_addr); - - mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); - mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, - mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); - -static struct mlxsw_sp_fid * -mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) -{ - struct mlxsw_sp_fid *f; - - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - return NULL; - - f->leave = mlxsw_sp_vport_rif_sp_leave; - f->ref_count = 0; - f->dev = l3_dev; - f->fid = fid; - - return f; -} - -static struct mlxsw_sp_rif * -mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) -{ - struct mlxsw_sp_rif *r; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (!r) - return NULL; - - INIT_LIST_HEAD(&r->nexthop_list); - INIT_LIST_HEAD(&r->neigh_list); - ether_addr_copy(r->addr, l3_dev->dev_addr); - r->mtu = l3_dev->mtu; - r->ref_count = 1; - r->dev = l3_dev; - r->rif = rif; - r->f = f; - - return r; -} - -static struct mlxsw_sp_rif * -mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *l3_dev) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - struct mlxsw_sp_fid *f; - struct mlxsw_sp_rif *r; - u16 fid, rif; - int err; - - rif = mlxsw_sp_avail_rif_get(mlxsw_sp); - if (rif == MLXSW_SP_INVALID_RIF) - return ERR_PTR(-ERANGE); - - err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); - if (err) - return ERR_PTR(err); - - fid = mlxsw_sp_rif_sp_to_fid(rif); - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); - if (err) - goto err_rif_fdb_op; - - f = mlxsw_sp_rfid_alloc(fid, l3_dev); - if (!f) { - err = -ENOMEM; - goto err_rfid_alloc; - } - - r = mlxsw_sp_rif_alloc(rif, l3_dev, f); - if (!r) { - err = -ENOMEM; - goto err_rif_alloc; - } - - f->r = r; - mlxsw_sp->rifs[rif] = r; - - return r; - -err_rif_alloc: - kfree(f); -err_rfid_alloc: - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); -err_rif_fdb_op: 
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); - return ERR_PTR(err); -} - -static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, - struct mlxsw_sp_rif *r) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - struct net_device *l3_dev = r->dev; - struct mlxsw_sp_fid *f = r->f; - u16 fid = f->fid; - u16 rif = r->rif; - - mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); - - mlxsw_sp->rifs[rif] = NULL; - f->r = NULL; - - kfree(r); - - kfree(f); - - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); - - mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); -} - -static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *l3_dev) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - struct mlxsw_sp_rif *r; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); - if (!r) { - r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); - if (IS_ERR(r)) - return PTR_ERR(r); - } - - mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); - r->f->ref_count++; - - netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); - - return 0; -} - -static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) -{ - struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); - - netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); - - mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); - if (--f->ref_count == 0) - mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); -} - -static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, - struct net_device *port_dev, - unsigned long event, u16 vid) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); - struct mlxsw_sp_port *mlxsw_sp_vport; - - mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (WARN_ON(!mlxsw_sp_vport)) - return -EINVAL; - - switch (event) { - case NETDEV_UP: - return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); - case NETDEV_DOWN: - mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); - break; - } - - return 0; -} - -static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, - unsigned long event) -{ - if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) - return 0; - - return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); -} - -static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, - struct net_device *lag_dev, - unsigned long event, u16 vid) -{ - struct net_device *port_dev; - struct list_head *iter; - int err; - - netdev_for_each_lower_dev(lag_dev, port_dev, iter) { - if (mlxsw_sp_port_dev_check(port_dev)) { - err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, - event, vid); - if (err) - return err; - } - } - - return 0; -} - -static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, - unsigned long event) -{ - if (netif_is_bridge_port(lag_dev)) - return 0; - - return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); -} - -static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev) -{ - u16 fid; - - if (is_vlan_dev(l3_dev)) - fid = vlan_dev_vlan_id(l3_dev); - else if (mlxsw_sp->master_bridge.dev == l3_dev) - fid = 1; - else - return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); - - return mlxsw_sp_fid_find(mlxsw_sp, fid); -} - -static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) -{ - return mlxsw_sp_fid_is_vfid(fid) ? 
MLXSW_REG_SFGC_TABLE_TYPE_FID : - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; -} - -static u16 mlxsw_sp_flood_table_index_get(u16 fid) -{ - return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid; -} - -static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, - bool set) -{ - enum mlxsw_flood_table_type table_type; - char *sftr_pl; - u16 index; - int err; - - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) - return -ENOMEM; - - table_type = mlxsw_sp_flood_table_type_get(fid); - index = mlxsw_sp_flood_table_index_get(fid); - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type, - 1, MLXSW_PORT_ROUTER_PORT, set); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); - - kfree(sftr_pl); - return err; -} - -static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) -{ - if (mlxsw_sp_fid_is_vfid(fid)) - return MLXSW_REG_RITR_FID_IF; - else - return MLXSW_REG_RITR_VLAN_IF; -} - -static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - u16 fid, u16 rif, - bool create) -{ - enum mlxsw_reg_ritr_if_type rif_type; - char ritr_pl[MLXSW_REG_RITR_LEN]; - - rif_type = mlxsw_sp_rif_type_get(fid); - mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, - l3_dev->dev_addr); - mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - struct mlxsw_sp_fid *f) -{ - struct mlxsw_sp_rif *r; - u16 rif; - int err; - - rif = mlxsw_sp_avail_rif_get(mlxsw_sp); - if (rif == MLXSW_SP_INVALID_RIF) - return -ERANGE; - - err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); - if (err) - return err; - - err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); - if (err) - goto err_rif_bridge_op; - - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); - if (err) - goto err_rif_fdb_op; - - r = mlxsw_sp_rif_alloc(rif, l3_dev, f); - if (!r) { - err = -ENOMEM; - goto err_rif_alloc; - } - - f->r = r; - mlxsw_sp->rifs[rif] = r; - - netdev_dbg(l3_dev, "RIF=%d created\n", rif); - - return 0; - -err_rif_alloc: - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); -err_rif_fdb_op: - mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); -err_rif_bridge_op: - mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); - return err; -} - -void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r) -{ - struct net_device *l3_dev = r->dev; - struct mlxsw_sp_fid *f = r->f; - u16 rif = r->rif; - - mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); - - mlxsw_sp->rifs[rif] = NULL; - f->r = NULL; - - kfree(r); - - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); - - mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); - - mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); - - netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); -} - -static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, - struct net_device *br_dev, - unsigned long event) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); - struct mlxsw_sp_fid *f; - - /* FID can either be an actual FID if the L3 device is the - * VLAN-aware bridge or a VLAN device on top. Otherwise, the - * L3 device is a VLAN-unaware bridge and we get a vFID. 
- */ - f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); - if (WARN_ON(!f)) - return -EINVAL; - - switch (event) { - case NETDEV_UP: - return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); - case NETDEV_DOWN: - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); - break; - } - - return 0; -} - -static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, - unsigned long event) -{ - struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); - u16 vid = vlan_dev_vlan_id(vlan_dev); - - if (mlxsw_sp_port_dev_check(real_dev)) - return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, - vid); - else if (netif_is_lag_master(real_dev)) - return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, - vid); - else if (netif_is_bridge_master(real_dev) && - mlxsw_sp->master_bridge.dev == real_dev) - return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, - event); - - return 0; -} - -static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; - struct net_device *dev = ifa->ifa_dev->dev; - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_rif *r; - int err = 0; - - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - goto out; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!mlxsw_sp_rif_should_config(r, event)) - goto out; - - if (mlxsw_sp_port_dev_check(dev)) - err = mlxsw_sp_inetaddr_port_event(dev, event); - else if (netif_is_lag_master(dev)) - err = mlxsw_sp_inetaddr_lag_event(dev, event); - else if (netif_is_bridge_master(dev)) - err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); - else if (is_vlan_dev(dev)) - err = mlxsw_sp_inetaddr_vlan_event(dev, event); - -out: - return notifier_from_errno(err); -} - -static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, - const char *mac, int mtu) -{ - char ritr_pl[MLXSW_REG_RITR_LEN]; - int err; - - mlxsw_reg_ritr_rif_pack(ritr_pl, rif); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); - if (err) - return err; - - mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); - mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); - mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) -{ - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_rif *r; - int err; - - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - return 0; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!r) - return 0; - - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); - if (err) - return err; - - err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); - if (err) - goto err_rif_edit; - - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); - if (err) - goto err_rif_fdb_op; - - ether_addr_copy(r->addr, dev->dev_addr); - r->mtu = dev->mtu; - - netdev_dbg(dev, "Updated RIF=%d\n", r->rif); - - return 0; - -err_rif_fdb_op: - mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); -err_rif_edit: - mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); - return err; -} - static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, u16 fid) { @@ -4220,7 +3852,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, - u16 lag_id) + struct net_device *lag_dev, u16 lag_id) { struct mlxsw_sp_port *mlxsw_sp_vport; struct 
mlxsw_sp_fid *f; @@ -4238,6 +3870,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_vport->lag_id = lag_id; mlxsw_sp_vport->lagged = 1; + mlxsw_sp_vport->dev = lag_dev; } static void @@ -4254,6 +3887,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) if (f) f->leave(mlxsw_sp_vport); + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; mlxsw_sp_vport->lagged = 0; } @@ -4293,7 +3927,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->lagged = 1; lag->ref_count++; - mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); + mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id); return 0; @@ -4403,6 +4037,56 @@ static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_vport->dev = mlxsw_sp_port->dev; } +static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + enum mlxsw_reg_spms_state spms_state; + char *spms_pl; + u16 vid; + int err; + + spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : + MLXSW_REG_SPMS_STATE_DISCARDING; + + spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); + if (!spms_pl) + return -ENOMEM; + mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); + + for (vid = 0; vid < VLAN_N_VID; vid++) + mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); + kfree(spms_pl); + return err; +} + +static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); + if (err) + return err; + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, + true, false); + if (err) + goto err_port_vlan_set; + return 0; + +err_port_vlan_set: + mlxsw_sp_port_stp_set(mlxsw_sp_port, false); + return err; +} + +static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, + false, false); + mlxsw_sp_port_stp_set(mlxsw_sp_port, false); +} + static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, unsigned long event, void *ptr) { @@ -4421,7 +4105,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, upper_dev = info->upper_dev; if (!is_vlan_dev(upper_dev) && !netif_is_lag_master(upper_dev) && - !netif_is_bridge_master(upper_dev)) + !netif_is_bridge_master(upper_dev) && + !netif_is_ovs_master(upper_dev)) return -EINVAL; if (!info->linking) break; @@ -4438,6 +4123,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) return -EINVAL; + if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4446,8 +4135,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); else - mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { if (info->linking) err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, @@ -4461,6 +4150,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, else mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); + } else if (netif_is_ovs_master(upper_dev)) { 
+ if (info->linking) + err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); + else + mlxsw_sp_port_ovs_leave(mlxsw_sp_port); } else { err = -EINVAL; WARN_ON(1); @@ -4552,8 +4246,8 @@ static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f; f = mlxsw_sp_fid_find(mlxsw_sp, fid); - if (f && f->r) - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f && f->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); if (f && --f->ref_count == 0) mlxsw_sp_fid_destroy(mlxsw_sp, f); } @@ -4564,33 +4258,40 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, struct netdev_notifier_changeupper_info *info; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0; mlxsw_sp = mlxsw_sp_lower_get(br_dev); if (!mlxsw_sp) return 0; - if (br_dev != mlxsw_sp->master_bridge.dev) - return 0; info = ptr; switch (event) { - case NETDEV_CHANGEUPPER: + case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; if (!is_vlan_dev(upper_dev)) - break; - if (info->linking) { - err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, - upper_dev); - if (err) - return err; + return -EINVAL; + if (is_vlan_dev(upper_dev) && + br_dev != mlxsw_sp->master_bridge.dev) + return -EINVAL; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (is_vlan_dev(upper_dev)) { + if (info->linking) + err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, + upper_dev); + else + mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, + upper_dev); } else { - mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev); + err = -EINVAL; + WARN_ON(1); } break; } - return 0; + return err; } static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) @@ -4657,8 +4358,8 @@ static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, clear_bit(vfid, mlxsw_sp->vfids.mapped); list_del(&f->list); - if (f->r) - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); kfree(f); @@ -4810,6 +4511,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, int err = 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) + return 0; switch (event) { case NETDEV_PRECHANGEUPPER: @@ -4821,22 +4524,24 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. 
*/ - if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, + if (netif_is_bridge_master(upper_dev) && + !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (info->linking) { - if (WARN_ON(!mlxsw_sp_vport)) - return -EINVAL; - err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, - upper_dev); + if (netif_is_bridge_master(upper_dev)) { + if (info->linking) + err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, + upper_dev); + else + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } else { - if (!mlxsw_sp_vport) - return 0; - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); + err = -EINVAL; + WARN_ON(1); } + break; } return err; @@ -4878,6 +4583,15 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return 0; } +static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + + if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) + return false; + return netif_is_l3_master(info->upper_dev); +} + static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -4886,6 +4600,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) err = mlxsw_sp_netdevice_router_port_event(dev); + else if (mlxsw_sp_is_vrf_event(event, ptr)) + err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); else if (mlxsw_sp_port_dev_check(dev)) err = mlxsw_sp_netdevice_port_event(dev, event, ptr); else if (netif_is_lag_master(dev)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 13ec85e7c392f8941ecf6441333d416ba4609f3a..0c23bc1e946df52c729471be138eae3b8de783ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -57,41 +57,21 @@ #define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */ +#define MLXSW_SP_DUMMY_FID 15359 + #define MLXSW_SP_RFID_BASE 15360 -#define MLXSW_SP_INVALID_RIF 0xffff #define MLXSW_SP_MID_MAX 7000 #define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 -#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */ -#define MLXSW_SP_LPM_TREE_MAX 22 -#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN) - #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ -#define MLXSW_SP_BYTES_PER_CELL 96 - -#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) -#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) - #define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */ #define MLXSW_SP_KVD_GRANULARITY 128 -/* Maximum delay buffer needed in case of PAUSE frames, in cells. - * Assumes 100m cable and maximum MTU. 
- */ -#define MLXSW_SP_PAUSE_DELAY 612 - -#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ - -static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay) -{ - delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE)); - return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu); -} - struct mlxsw_sp_port; +struct mlxsw_sp_rif; struct mlxsw_sp_upper { struct net_device *dev; @@ -103,21 +83,10 @@ struct mlxsw_sp_fid { struct list_head list; unsigned int ref_count; struct net_device *dev; - struct mlxsw_sp_rif *r; + struct mlxsw_sp_rif *rif; u16 fid; }; -struct mlxsw_sp_rif { - struct list_head nexthop_list; - struct list_head neigh_list; - struct net_device *dev; - unsigned int ref_count; - struct mlxsw_sp_fid *f; - unsigned char addr[ETH_ALEN]; - int mtu; - u16 rif; -}; - struct mlxsw_sp_mid { struct list_head list; unsigned char addr[ETH_ALEN]; @@ -138,17 +107,7 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) static inline bool mlxsw_sp_fid_is_vfid(u16 fid) { - return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE; -} - -static inline bool mlxsw_sp_fid_is_rfid(u16 fid) -{ - return fid >= MLXSW_SP_RFID_BASE; -} - -static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif) -{ - return MLXSW_SP_RFID_BASE + rif; + return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID; } struct mlxsw_sp_sb_pr { @@ -177,12 +136,15 @@ struct mlxsw_sp_sb_pm { #define MLXSW_SP_SB_POOL_COUNT 4 #define MLXSW_SP_SB_TC_COUNT 8 +struct mlxsw_sp_sb_port { + struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; + struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; +}; + struct mlxsw_sp_sb { struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; - struct { - struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; - struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; - } ports[MLXSW_PORT_MAX_PORTS]; + struct mlxsw_sp_sb_port *ports; + u32 cell_size; }; #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) @@ -207,11 +169,9 @@ struct mlxsw_sp_fib; struct mlxsw_sp_vr { u16 id; /* virtual router ID */ - bool used; - enum mlxsw_sp_l3proto proto; u32 tb_id; /* kernel fib table id */ - struct mlxsw_sp_lpm_tree *lpm_tree; - struct mlxsw_sp_fib *fib; + unsigned int rif_count; + struct mlxsw_sp_fib *fib4; }; enum mlxsw_sp_span_type { @@ -253,11 +213,14 @@ struct mlxsw_sp_port_mall_tc_entry { }; struct mlxsw_sp_router { - struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; struct mlxsw_sp_vr *vrs; struct rhashtable neigh_ht; struct rhashtable nexthop_group_ht; struct rhashtable nexthop_ht; + struct { + struct mlxsw_sp_lpm_tree *trees; + unsigned int tree_count; + } lpm; struct { struct delayed_work dw; unsigned long interval; /* ms */ @@ -269,6 +232,7 @@ struct mlxsw_sp_router { }; struct mlxsw_sp_acl; +struct mlxsw_sp_counter_pool; struct mlxsw_sp { struct { @@ -296,7 +260,7 @@ struct mlxsw_sp { u32 ageing_time; struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper *lags; - u8 port_to_module[MLXSW_PORT_MAX_PORTS]; + u8 *port_to_module; struct mlxsw_sp_sb sb; struct mlxsw_sp_router router; struct mlxsw_sp_acl *acl; @@ -304,6 +268,7 @@ struct mlxsw_sp { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; + struct mlxsw_sp_counter_pool *counter_pool; struct { struct mlxsw_sp_span_entry *entries; int entries_count; @@ -317,6 +282,18 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) return &mlxsw_sp->lags[lag_id]; } +static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, + u32 cells) +{ + return mlxsw_sp->sb.cell_size * cells; 
+} + +static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, + u32 bytes) +{ + return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size); +} + struct mlxsw_sp_port_pcpu_stats { u64 rx_packets; u64 rx_bytes; @@ -386,6 +363,7 @@ struct mlxsw_sp_port { }; bool mlxsw_sp_port_dev_check(const struct net_device *dev); +struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); @@ -497,19 +475,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, return NULL; } -static inline struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev) -{ - int i; - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) - if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev) - return mlxsw_sp->rifs[i]; - - return NULL; -} - enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BC, @@ -570,8 +535,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, bool adding); struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid); void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f); -void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, bool dwrr, u8 dwrr_weight); @@ -608,10 +571,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr); -void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r); +int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); +int mlxsw_sp_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr); +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif); +int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, + struct netdev_notifier_changeupper_info *info); -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, + u32 *p_entry_index); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); @@ -620,6 +589,8 @@ struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; struct mlxsw_afa_block *act_block; + unsigned int counter_index; + bool counter_valid; }; enum mlxsw_sp_acl_profile { @@ -639,6 +610,8 @@ struct mlxsw_sp_acl_profile_ops { void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); + int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, + bool *activity); }; struct mlxsw_sp_acl_ops { @@ -679,6 +652,14 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct net_device *out_dev); +int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 action, u16 vid, u16 proto, u8 prio); +int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei); +int 
mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u16 fid); struct mlxsw_sp_acl_rule; @@ -698,6 +679,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, unsigned long cookie); struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); +int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use); int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); @@ -708,5 +692,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, __be16 protocol, struct tc_cls_flower_offload *f); void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); +int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f); +int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index, u64 *packets, + u64 *bytes); +int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, + unsigned int *p_counter_index); +void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 8a18b3aa70dc20d7464a14805e60cc838a87ce17..317f7b14627f83dac576ebfca48e30f4e3b458e0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -39,6 +39,7 @@ #include #include #include +#include <net/tc_act/tc_vlan.h> #include "reg.h" #include "core.h" @@ -49,10 +50,17 @@ #include "spectrum_acl_flex_keys.h" struct mlxsw_sp_acl { + struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; struct mlxsw_afa *afa; const struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; + struct list_head rules; + struct { + struct delayed_work dw; + unsigned long interval; /* ms */ +#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000 + } rule_activity_update; unsigned long priv[0]; /* priv has to be always the last item */ }; @@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset { struct mlxsw_sp_acl_rule { struct rhash_head ht_node; /* Member of rule HT */ + struct list_head list; unsigned long cookie; /* HT key */ struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_rule_info *rulei; + u64 last_used; + u64 last_packets; + u64 last_bytes; unsigned long priv[0]; /* priv has to be always the last item */ }; @@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); } +static int +mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) +{ + int err; + + err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index); + if (err) + return err; + rulei->counter_valid = true; + return 0; +} + +static void +mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) +{ + rulei->counter_valid = false; + mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index); +} + struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) { @@ -335,6 +368,48 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, local_port, in_port); } +int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 action, u16 vid, u16 proto, u8 prio) + { + u8 ethertype; + + if (action == TCA_VLAN_ACT_MODIFY) { + switch (proto) { + case
ETH_P_8021Q: + ethertype = 0; + break; + case ETH_P_8021AD: + ethertype = 1; + break; + default: + dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n", + proto); + return -EINVAL; + } + + return mlxsw_afa_block_append_vlan_modify(rulei->act_block, + vid, prio, ethertype); + } else { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n"); + return -EINVAL; + } +} + +int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_append_counter(rulei->act_block, + rulei->counter_index); +} + +int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u16 fid) +{ + return mlxsw_afa_block_append_fid_set(rulei->act_block, fid); +} + struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, @@ -358,8 +433,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, err = PTR_ERR(rule->rulei); goto err_rulei_create; } + + err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei); + if (err) + goto err_counter_alloc; return rule; +err_counter_alloc: + mlxsw_sp_acl_rulei_destroy(rule->rulei); err_rulei_create: kfree(rule); err_alloc: @@ -372,6 +453,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei); mlxsw_sp_acl_rulei_destroy(rule->rulei); kfree(rule); mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); @@ -393,6 +475,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_insert; + list_add_tail(&rule->list, &mlxsw_sp->acl->rules); return 0; err_rhashtable_insert: @@ -406,6 +489,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + list_del(&rule->list); rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, mlxsw_sp_acl_rule_ht_params); ops->rule_del(mlxsw_sp, rule->priv); @@ -426,6 +510,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule) return rule->rulei; } +static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + bool active; + int err; + + err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active); + if (err) + return err; + if (active) + rule->last_used = jiffies; + return 0; +} + +static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl) +{ + struct mlxsw_sp_acl_rule *rule; + int err; + + /* Protect internal structures from changes */ + rtnl_lock(); + list_for_each_entry(rule, &acl->rules, list) { + err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp, + rule); + if (err) + goto err_rule_update; + } + rtnl_unlock(); + return 0; + +err_rule_update: + rtnl_unlock(); + return err; +} + +static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl) +{ + unsigned long interval = acl->rule_activity_update.interval; + + mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, + msecs_to_jiffies(interval)); +} + +static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work) +{ + struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl, + rule_activity_update.dw.work); + int err; + + err = mlxsw_sp_acl_rules_activity_update(acl); + if (err) + dev_err(acl->mlxsw_sp->bus_info->dev, "Could not 
update acl activity"); + + mlxsw_sp_acl_rule_activity_work_schedule(acl); +} + +int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use) + +{ + struct mlxsw_sp_acl_rule_info *rulei; + u64 current_packets; + u64 current_bytes; + int err; + + rulei = mlxsw_sp_acl_rule_rulei(rule); + err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index, + ¤t_packets, ¤t_bytes); + if (err) + return err; + + *packets = current_packets - rule->last_packets; + *bytes = current_bytes - rule->last_bytes; + *last_use = rule->last_used; + + rule->last_bytes = current_bytes; + rule->last_packets = current_packets; + + return 0; +} + #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, @@ -434,7 +602,6 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, struct mlxsw_sp *mlxsw_sp = priv; char pefa_pl[MLXSW_REG_PEFA_LEN]; u32 kvdl_index; - int ret; int err; /* The first action set of a TCAM entry is stored directly in TCAM, @@ -443,10 +610,10 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, if (is_first) return 0; - ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE); - if (ret < 0) - return ret; - kvdl_index = ret; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE, + &kvdl_index); + if (err) + return err; mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); if (err) @@ -475,13 +642,11 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, struct mlxsw_sp *mlxsw_sp = priv; char ppbs_pl[MLXSW_REG_PPBS_LEN]; u32 kvdl_index; - int ret; int err; - ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1); - if (ret < 0) - return ret; - kvdl_index = ret; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + if (err) + return err; mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); if (err) @@ -518,7 +683,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) if (!acl) return -ENOMEM; mlxsw_sp->acl = acl; - + acl->mlxsw_sp = mlxsw_sp; acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_FLEX_KEYS), mlxsw_sp_afk_blocks, @@ -541,11 +706,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_rhashtable_init; + INIT_LIST_HEAD(&acl->rules); err = acl_ops->init(mlxsw_sp, acl->priv); if (err) goto err_acl_ops_init; acl->ops = acl_ops; + + /* Create the delayed work for the rule activity_update */ + INIT_DELAYED_WORK(&acl->rule_activity_update.dw, + mlxsw_sp_acl_rul_activity_update_work); + acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS; + mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0); return 0; err_acl_ops_init: @@ -564,7 +736,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_acl *acl = mlxsw_sp->acl; const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; + cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); acl_ops->fini(mlxsw_sp, acl->priv); + WARN_ON(!list_empty(&acl->rules)); rhashtable_destroy(&acl->ruleset_ht); mlxsw_afa_destroy(acl->afa); mlxsw_afk_destroy(acl->afk); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 82b81cf7f4a7de875191c1b7dcf69b0fd27854d1..af7b7bad48df7746946c9d0dab44cb8bdf13f6b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -39,11 +39,15 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; @@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 7382832215faa0d2211625a53ee6d7f328686ba2..3a24289979d9a0bf41ec87d008e0e55c404bfba6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); } +static int +mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + bool *activity) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + int err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, + region->tcam_region_info, offset); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + if (err) + return err; + *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); + return 0; +} + #define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) static int @@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); } +static int +mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry, + bool *activity) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region, + entry->parman_item.index, + activity); +} + static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, MLXSW_AFK_ELEMENT_DMAC, @@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_DST_IP4, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + MLXSW_AFK_ELEMENT_VID, + MLXSW_AFK_ELEMENT_PCP, }; static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { @@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); } +static int +mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp, + void *rule_priv, bool *activity) +{ + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, + activity); +} + static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_priv_size = sizeof(struct 
mlxsw_sp_acl_tcam_flower_ruleset), .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add, @@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, + .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, }; static const struct mlxsw_sp_acl_profile_ops * diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index a7468262f118979c8914e8acea042361b838f69e..997189cfe7fd58c7e93419d69cf609420eb1633f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -162,8 +162,8 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, } static const u16 mlxsw_sp_pbs[] = { - [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), - [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), + [0] = 2 * ETH_FRAME_LEN, + [9] = 2 * MLXSW_PORT_MAX_MTU, }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) @@ -171,20 +171,22 @@ static const u16 mlxsw_sp_pbs[] = { static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pbmc_pl[MLXSW_REG_PBMC_LEN]; int i; mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { + u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]); + if (i == MLXSW_SP_PB_UNUSED) continue; - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]); + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size); } mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0); - return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, - MLXSW_REG(pbmc), pbmc_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port) @@ -209,11 +211,25 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } -#define MLXSW_SP_SB_PR_INGRESS_SIZE \ - (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) +static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp) +{ + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + + mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port), + GFP_KERNEL); + if (!mlxsw_sp->sb.ports) + return -ENOMEM; + return 0; +} + +static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) +{ + kfree(mlxsw_sp->sb.ports); +} + +#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000 #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) -#define MLXSW_SP_SB_PR_EGRESS_SIZE \ - (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) +#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000 #define MLXSW_SP_SB_PR(_mode, _size) \ { \ @@ -223,18 +239,17 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)), + MLXSW_SP_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)), + MLXSW_SP_SB_PR_INGRESS_MNG_SIZE), }; #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) static 
const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), @@ -251,11 +266,9 @@ static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, int err; for (i = 0; i < prs_len; i++) { - const struct mlxsw_sp_sb_pr *pr; + u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size); - pr = &prs[i]; - err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, - pr->mode, pr->size); + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size); if (err) return err; } @@ -284,7 +297,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) } static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0), + MLXSW_SP_SB_CM(10000, 8, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), @@ -293,20 +306,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3), + MLXSW_SP_SB_CM(20000, 1, 3), }; #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), MLXSW_SP_SB_CM(0, 0, 0), MLXSW_SP_SB_CM(0, 0, 0), MLXSW_SP_SB_CM(0, 0, 0), @@ -330,7 +343,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), + MLXSW_SP_SB_CM(10000, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -370,13 +383,17 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, for (i = 0; i < cms_len; i++) { const struct mlxsw_sp_sb_cm *cm; + u32 min_buff; if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; + /* All pools are initialized using dynamic thresholds, + * therefore 'max_buff' isn't specified in cells. 
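+	 * (With a dynamic threshold, 'max_buff' encodes an alpha factor
+	 * rather than a byte count -- see
+	 * MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET below -- so only
+	 * 'min_buff' needs the byte-to-cell conversion.)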
+ */ + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff); err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, - cm->min_buff, cm->max_buff, - cm->pool); + min_buff, cm->max_buff, cm->pool); if (err) return err; } @@ -484,21 +501,21 @@ struct mlxsw_sp_sb_mm { } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) @@ -511,10 +528,15 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { const struct mlxsw_sp_sb_mm *mc; + u32 min_buff; mc = &mlxsw_sp_sb_mms[i]; - mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff, - mc->max_buff, mc->pool); + /* All pools are initialized using dynamic thresholds, + * therefore 'max_buff' isn't specified in cells. 
+ */ + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff); + mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff, + mc->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); if (err) return err; @@ -522,32 +544,53 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } -#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024) - int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { + u64 sb_size; int err; - err = mlxsw_sp_sb_prs_init(mlxsw_sp); + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE)) + return -EIO; + mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE); + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE)) + return -EIO; + sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE); + + err = mlxsw_sp_sb_ports_init(mlxsw_sp); if (err) return err; + err = mlxsw_sp_sb_prs_init(mlxsw_sp); + if (err) + goto err_sb_prs_init; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); if (err) - return err; + goto err_sb_cpu_port_sb_cms_init; err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) - return err; - return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, - MLXSW_SP_SB_SIZE, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_TC_COUNT, - MLXSW_SP_SB_TC_COUNT); + goto err_sb_mms_init; + err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_TC_COUNT, + MLXSW_SP_SB_TC_COUNT); + if (err) + goto err_devlink_sb_register; + + return 0; + +err_devlink_sb_register: +err_sb_mms_init: +err_sb_cpu_port_sb_cms_init: +err_sb_prs_init: + mlxsw_sp_sb_ports_fini(mlxsw_sp); + return err; } void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp) { devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0); + mlxsw_sp_sb_ports_fini(mlxsw_sp); } int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) @@ -596,7 +639,7 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); pool_info->pool_type = (enum devlink_sb_pool_type) dir; - pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size); + pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode; return 0; } @@ -606,9 +649,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, enum devlink_sb_threshold_type threshold_type) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size); u8 pool = pool_get(pool_index); enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); - u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size); enum mlxsw_reg_sbpr_mode mode; if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) @@ -627,7 +670,7 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; - return MLXSW_SP_CELLS_TO_BYTES(max_buff); + return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff); } static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, @@ -645,7 +688,7 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, return -EINVAL; *p_max_buff = val; } else { - *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold); + *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold); } return 0; } @@ -761,7 +804,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, masked_count = 0; for (local_port = cb_ctx.local_port_1; - local_port < 
MLXSW_PORT_MAX_PORTS; local_port++) { + local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { @@ -775,7 +818,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, } masked_count = 0; for (local_port = cb_ctx.local_port_1; - local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { @@ -817,7 +860,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); } - for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); @@ -847,7 +890,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, cb_priv); if (err) goto out; - if (local_port < MLXSW_PORT_MAX_PORTS) + if (local_port < mlxsw_core_max_ports(mlxsw_core)) goto next_batch; out: @@ -882,7 +925,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); } - for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); @@ -908,7 +951,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, &bulk_list, NULL, 0); if (err) goto out; - if (local_port < MLXSW_PORT_MAX_PORTS) + if (local_port < mlxsw_core_max_ports(mlxsw_core)) goto next_batch; out: @@ -932,8 +975,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); - *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur); - *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max); + *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur); + *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max); return 0; } @@ -951,7 +994,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); - *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur); - *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max); + *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur); + *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c new file mode 100644 index 0000000000000000000000000000000000000000..0f46775e030706e0d6d5e72c85e00c393cd17659 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -0,0 +1,207 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum_cnt.h"
+
+#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
+
+struct mlxsw_sp_counter_sub_pool {
+	unsigned int base_index;
+	unsigned int size;
+	unsigned int entry_size;
+	unsigned int bank_count;
+};
+
+struct mlxsw_sp_counter_pool {
+	unsigned int pool_size;
+	unsigned long *usage; /* Usage bitmap */
+	struct mlxsw_sp_counter_sub_pool *sub_pools;
+};
+
+static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+		.bank_count = 6,
+	},
+	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+		.bank_count = 2,
+	}
+};
+
+static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+{
+	unsigned int total_bank_config = 0;
+	unsigned int pool_size;
+	int i;
+
+	pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+	/* Check config is valid, no bank over-subscription */
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
+		total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+	if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
+		return -EINVAL;
+	return 0;
+}
+
+static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_counter_sub_pool *sub_pool;
+
+	/* Prepare generic flow pool */
+	sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
+		return -EIO;
+	sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+						  COUNTER_SIZE_PACKETS_BYTES);
+	/* Prepare erif pool */
+	sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
+	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
+		return -EIO;
+	sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+						  COUNTER_SIZE_ROUTER_BASIC);
+	return 0;
+}
+
+int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_counter_sub_pool *sub_pool;
+	struct mlxsw_sp_counter_pool *pool;
+	unsigned int base_index;
+	unsigned int map_size;
+	int i;
+	int err;
+
+	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
+		return -EIO;
+
+	err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
+	if (err)
+		return err;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return -ENOMEM;
+
+	pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
+	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
+
+	pool->usage = kzalloc(map_size, GFP_KERNEL);
+	if (!pool->usage) {
+		err = -ENOMEM;
+		goto err_usage_alloc;
+	}
+
+	pool->sub_pools = mlxsw_sp_counter_sub_pools;
+	/* Allocation is based on bank count, which should be
+	 * specified for each sub-pool statically.
+	 */
+	base_index = 0;
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+		sub_pool = &pool->sub_pools[i];
+		sub_pool->size = sub_pool->bank_count *
+				 MLXSW_SP_COUNTER_POOL_BANK_SIZE;
+		sub_pool->base_index = base_index;
+		base_index += sub_pool->size;
+		/* The last bank can't be fully used */
+		if (sub_pool->base_index + sub_pool->size > pool->pool_size)
+			sub_pool->size = pool->pool_size - sub_pool->base_index;
+	}
+
+	mlxsw_sp->counter_pool = pool;
+	return 0;
+
+err_usage_alloc:
+	kfree(pool);
+	return err;
+}
+
+void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+
+	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
+		pool->pool_size);
+	kfree(pool->usage);
+	kfree(pool);
+}
+
+int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+			   unsigned int *p_counter_index)
+{
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+	struct mlxsw_sp_counter_sub_pool *sub_pool;
+	unsigned int entry_index;
+	unsigned int stop_index;
+	int i;
+
+	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+	stop_index = sub_pool->base_index + sub_pool->size;
+	entry_index = sub_pool->base_index;
+
+	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
+	if (entry_index == stop_index)
+		return -ENOBUFS;
+	/* The sub-pools can contain a non-integer number of entries,
+	 * so we must check for overflow.
+	 */
+	if (entry_index + sub_pool->entry_size > stop_index)
+		return -ENOBUFS;
+	for (i = 0; i < sub_pool->entry_size; i++)
+		__set_bit(entry_index + i, pool->usage);
+
+	*p_counter_index = entry_index;
+	return 0;
+}
+
+void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
+			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
+			   unsigned int counter_index)
+{
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+	struct mlxsw_sp_counter_sub_pool *sub_pool;
+	int i;
+
+	if (WARN_ON(counter_index >= pool->pool_size))
+		return;
+	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+	for (i = 0; i < sub_pool->entry_size; i++)
+		__clear_bit(counter_index + i, pool->usage);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd34d0a0107352c1ca6a767257c7d0e89279b547
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -0,0 +1,54 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1.
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_CNT_H +#define _MLXSW_SPECTRUM_CNT_H + +#include "spectrum.h" + +enum mlxsw_sp_counter_sub_pool_id { + MLXSW_SP_COUNTER_SUB_POOL_FLOW, + MLXSW_SP_COUNTER_SUB_POOL_RIF, +}; + +int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_counter_sub_pool_id sub_pool_id, + unsigned int *p_counter_index); +void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_counter_sub_pool_id sub_pool_id, + unsigned int counter_index); +int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c new file mode 100644 index 0000000000000000000000000000000000000000..ea56f6ade6b429585f35a2f72dae20f8d51d4a0f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -0,0 +1,351 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "spectrum.h" +#include "spectrum_dpipe.h" +#include "spectrum_router.h" + +enum mlxsw_sp_field_metadata_id { + MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, + MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, + MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, +}; + +static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = { + { .name = "erif_port", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, + .bitwidth = 32, + .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX, + }, + { .name = "l3_forward", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, + .bitwidth = 1, + }, + { .name = "l3_drop", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, + .bitwidth = 1, + }, +}; + +enum mlxsw_sp_dpipe_header_id { + MLXSW_SP_DPIPE_HEADER_METADATA, +}; + +static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = { + .name = "mlxsw_meta", + .id = MLXSW_SP_DPIPE_HEADER_METADATA, + .fields = mlxsw_sp_dpipe_fields_metadata, + .fields_count = ARRAY_SIZE(mlxsw_sp_dpipe_fields_metadata), +}; + +static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = { + &mlxsw_sp_dpipe_header_metadata, +}; + +static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = { + .headers = mlxsw_dpipe_headers, + .headers_count = ARRAY_SIZE(mlxsw_dpipe_headers), +}; + +static int mlxsw_sp_dpipe_table_erif_actions_dump(void *priv, + struct sk_buff *skb) +{ + struct devlink_dpipe_action action = {0}; + int err; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &mlxsw_sp_dpipe_header_metadata; + action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD; + + err = devlink_dpipe_action_put(skb, &action); + if (err) + return err; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &mlxsw_sp_dpipe_header_metadata; + action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP; + + return devlink_dpipe_action_put(skb, &action); +} + +static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv, + struct sk_buff *skb) +{ + struct devlink_dpipe_match match = {0}; + + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &mlxsw_sp_dpipe_header_metadata; + match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; + + return devlink_dpipe_match_put(skb, &match); +} + +static void mlxsw_sp_erif_entry_clear(struct devlink_dpipe_entry *entry) +{ + unsigned int value_count, value_index; + struct devlink_dpipe_value *value; + + value = entry->action_values; + value_count = entry->action_values_count; + for (value_index = 0; value_index < value_count; value_index++) { + kfree(value[value_index].value); + kfree(value[value_index].mask); + } + + value = entry->match_values; + value_count = entry->match_values_count; + for (value_index = 0; value_index < value_count; 
value_index++) {
+		kfree(value[value_index].value);
+		kfree(value[value_index].mask);
+	}
+}
+
+static void
+mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match,
+				   struct devlink_dpipe_action *action)
+{
+	action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+	action->header = &mlxsw_sp_dpipe_header_metadata;
+	action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD;
+
+	match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+	match->header = &mlxsw_sp_dpipe_header_metadata;
+	match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int mlxsw_sp_erif_entry_prepare(struct devlink_dpipe_entry *entry,
+				       struct devlink_dpipe_value *match_value,
+				       struct devlink_dpipe_match *match,
+				       struct devlink_dpipe_value *action_value,
+				       struct devlink_dpipe_action *action)
+{
+	entry->match_values = match_value;
+	entry->match_values_count = 1;
+
+	entry->action_values = action_value;
+	entry->action_values_count = 1;
+
+	match_value->match = match;
+	match_value->value_size = sizeof(u32);
+	match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+	if (!match_value->value)
+		return -ENOMEM;
+
+	action_value->action = action;
+	action_value->value_size = sizeof(u32);
+	action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+	if (!action_value->value)
+		goto err_action_alloc;
+	return 0;
+
+err_action_alloc:
+	kfree(match_value->value);
+	return -ENOMEM;
+}
+
+static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
+				   struct devlink_dpipe_entry *entry,
+				   struct mlxsw_sp_rif *rif,
+				   bool counters_enabled)
+{
+	u32 *action_value;
+	u32 *rif_value;
+	u64 cnt;
+	int err;
+
+	/* Set Match RIF index */
+	rif_value = entry->match_values->value;
+	*rif_value = mlxsw_sp_rif_index(rif);
+	entry->match_values->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+	entry->match_values->mapping_valid = true;
+
+	/* Set Action Forwarding */
+	action_value = entry->action_values->value;
+	*action_value = 1;
+
+	entry->counter_valid = false;
+	entry->counter = 0;
+	if (!counters_enabled)
+		return 0;
+
+	entry->index = mlxsw_sp_rif_index(rif);
+	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
+					     MLXSW_SP_RIF_COUNTER_EGRESS,
+					     &cnt);
+	if (!err) {
+		entry->counter = cnt;
+		entry->counter_valid = true;
+	}
+	return 0;
+}
+
+static int
+mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
+				 struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+	struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+	struct devlink_dpipe_action action = {0};
+	struct devlink_dpipe_match match = {0};
+	struct devlink_dpipe_entry entry = {0};
+	struct mlxsw_sp *mlxsw_sp = priv;
+	unsigned int rif_count;
+	int i, j;
+	int err;
+
+	mlxsw_sp_erif_match_action_prepare(&match, &action);
+	err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
+					  &action_value, &action);
+	if (err)
+		return err;
+
+	rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+	rtnl_lock();
+	i = 0;
+start_again:
+	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+	if (err) {
+		rtnl_unlock();
+		mlxsw_sp_erif_entry_clear(&entry);
+		return err;
+	}
+	j = 0;
+	for (; i < rif_count; i++) {
+		if (!mlxsw_sp->rifs[i])
+			continue;
+		err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
+					      mlxsw_sp->rifs[i],
+					      counters_enabled);
+		if (err)
+			goto err_entry_get;
+		err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
+		if (err) {
+			if (err == -EMSGSIZE) {
+				if (!j)
+					goto err_entry_append;
+				break;
+			}
+			goto err_entry_append;
+		}
+		j++;
+	}
+
+	devlink_dpipe_entry_ctx_close(dump_ctx);
+	if (i != rif_count)
+		goto start_again;
+	rtnl_unlock();
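+	/* The match/action value buffers allocated by
+	 * mlxsw_sp_erif_entry_prepare() are freed on both the success
+	 * path below and the error paths.
+	 */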
mlxsw_sp_erif_entry_clear(&entry); + return 0; +err_entry_append: +err_entry_get: + rtnl_unlock(); + mlxsw_sp_erif_entry_clear(&entry); + return err; +} + +static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable) +{ + struct mlxsw_sp *mlxsw_sp = priv; + int i; + + rtnl_lock(); + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { + if (!mlxsw_sp->rifs[i]) + continue; + if (enable) + mlxsw_sp_rif_counter_alloc(mlxsw_sp, + mlxsw_sp->rifs[i], + MLXSW_SP_RIF_COUNTER_EGRESS); + else + mlxsw_sp_rif_counter_free(mlxsw_sp, + mlxsw_sp->rifs[i], + MLXSW_SP_RIF_COUNTER_EGRESS); + } + rtnl_unlock(); + return 0; +} + +static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { + .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump, + .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump, + .entries_dump = mlxsw_sp_table_erif_entries_dump, + .counters_set_update = mlxsw_sp_table_erif_counters_update, +}; + +static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + u64 table_size; + + table_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + return devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ERIF, + &mlxsw_sp_erif_ops, + mlxsw_sp, table_size, + false); +} + +static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF); +} + +int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + int err; + + err = devlink_dpipe_headers_register(devlink, + &mlxsw_sp_dpipe_headers); + if (err) + return err; + err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp); + if (err) + goto err_erif_register; + return 0; + +err_erif_register: + devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core)); + return err; +} + +void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp); + devlink_dpipe_headers_unregister(devlink); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h new file mode 100644 index 0000000000000000000000000000000000000000..d2089298cba3b91efeddac46579a324ffbfcb8e0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h @@ -0,0 +1,43 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_PIPELINE_H_ +#define _MLXSW_PIPELINE_H_ + +int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp); + +#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif" + +#endif /* _MLXSW_PIPELINE_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ae6cccc666e4619bbb1a8360b5cc090d303da60b..7d87e23578a3a305f334906456dd4fda55b0182e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "spectrum.h" #include "core_acl_flex_keys.h" @@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (tc_no_actions(exts)) return 0; + /* Count action is inserted first */ + err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei); + if (err) + return err; + tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_shot(a)) { @@ -65,6 +71,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; + err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, + MLXSW_SP_DUMMY_FID); + if (err) + return err; + out_dev = __dev_get_by_index(dev_net(dev), ifindex); if (out_dev == dev) out_dev = NULL; @@ -73,6 +84,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, out_dev); if (err) return err; + } else if (is_tcf_vlan(a)) { + u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); + u32 action = tcf_vlan_action(a); + u8 prio = tcf_vlan_push_prio(a); + u16 vid = tcf_vlan_push_vid(a); + + return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, + action, vid, + proto, prio); } else { dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; @@ -173,7 +193,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS))) { + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); return -EOPNOTSUPP; } @@ -234,6 +255,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, sizeof(key->src)); } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->key); + struct flow_dissector_key_vlan *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->mask); + if (mask->vlan_id != 0) + mlxsw_sp_acl_rulei_keymask_u32(rulei, + 
MLXSW_AFK_ELEMENT_VID, + key->vlan_id, + mask->vlan_id); + if (mask->vlan_priority != 0) + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_PCP, + key->vlan_priority, + mask->vlan_priority); + } + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) mlxsw_sp_flower_parse_ipv4(rulei, f); @@ -314,3 +356,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); } + +int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + struct tc_action *a; + LIST_HEAD(actions); + u64 packets; + u64 lastuse; + u64 bytes; + int err; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, + ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (WARN_ON(IS_ERR(ruleset))) + return -EINVAL; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); + if (!rule) + return -EINVAL; + + err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes, + &lastuse); + if (err) + goto err_rule_get_stats; + + preempt_disable(); + + tcf_exts_to_list(f->exts, &actions); + list_for_each_entry(a, &actions, list) + tcf_action_stats_update(a, bytes, packets, lastuse); + + preempt_enable(); + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return 0; + +err_rule_get_stats: + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index ac321e8e5c1ac4cb85ce90b53b7907cddd3e859b..26c26cd30c3d4038948fcd79028a3b853d3651c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -45,7 +45,8 @@ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE) #define MLXSW_SP_CHUNK_MAX 32 -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count) +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, + u32 *p_entry_index) { int entry_index; int size; @@ -72,7 +73,8 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count) for (i = 0; i < type_entries; i++) set_bit(entry_index + i, mlxsw_sp->kvdl.usage); - return entry_index; + *p_entry_index = entry_index; + return 0; } return -ENOBUFS; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index bd8de6b9be718f967ca6967a06c00be21d2e3b6c..33cec1cc164259ad9d7dd022a811e957447a61ff 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -41,14 +41,184 @@ #include #include #include +#include #include #include #include #include +#include +#include #include "spectrum.h" #include "core.h" #include "reg.h" +#include "spectrum_cnt.h" +#include "spectrum_dpipe.h" +#include "spectrum_router.h" + +struct mlxsw_sp_rif { + struct list_head nexthop_list; + struct list_head neigh_list; + struct net_device *dev; + struct mlxsw_sp_fid *f; + unsigned char addr[ETH_ALEN]; + int mtu; + u16 rif_index; + u16 vr_id; + unsigned int counter_ingress; + bool counter_ingress_valid; + unsigned int counter_egress; + bool counter_egress_valid; +}; + +static unsigned int * +mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + switch (dir) { + case MLXSW_SP_RIF_COUNTER_EGRESS: + return &rif->counter_egress; + case 
MLXSW_SP_RIF_COUNTER_INGRESS: + return &rif->counter_ingress; + } + return NULL; +} + +static bool +mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + switch (dir) { + case MLXSW_SP_RIF_COUNTER_EGRESS: + return rif->counter_egress_valid; + case MLXSW_SP_RIF_COUNTER_INGRESS: + return rif->counter_ingress_valid; + } + return false; +} + +static void +mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir, + bool valid) +{ + switch (dir) { + case MLXSW_SP_RIF_COUNTER_EGRESS: + rif->counter_egress_valid = valid; + break; + case MLXSW_SP_RIF_COUNTER_INGRESS: + rif->counter_ingress_valid = valid; + break; + } +} + +static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + unsigned int counter_index, bool enable, + enum mlxsw_sp_rif_counter_dir dir) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + bool is_egress = false; + int err; + + if (dir == MLXSW_SP_RIF_COUNTER_EGRESS) + is_egress = true; + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable, + is_egress); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir, u64 *cnt) +{ + char ricnt_pl[MLXSW_REG_RICNT_LEN]; + unsigned int *p_counter_index; + bool valid; + int err; + + valid = mlxsw_sp_rif_counter_valid_get(rif, dir); + if (!valid) + return -EINVAL; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (!p_counter_index) + return -EINVAL; + mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index, + MLXSW_REG_RICNT_OPCODE_NOP); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl); + if (err) + return err; + *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl); + return 0; +} + +static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index) +{ + char ricnt_pl[MLXSW_REG_RICNT_LEN]; + + mlxsw_reg_ricnt_pack(ricnt_pl, counter_index, + MLXSW_REG_RICNT_OPCODE_CLEAR); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl); +} + +int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + unsigned int *p_counter_index; + int err; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (!p_counter_index) + return -EINVAL; + err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, + p_counter_index); + if (err) + return err; + + err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index); + if (err) + goto err_counter_clear; + + err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index, + *p_counter_index, true, dir); + if (err) + goto err_counter_edit; + mlxsw_sp_rif_counter_valid_set(rif, dir, true); + return 0; + +err_counter_edit: +err_counter_clear: + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, + *p_counter_index); + return err; +} + +void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + unsigned int *p_counter_index; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (WARN_ON(!p_counter_index)) + return; + mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index, + *p_counter_index, false, dir); + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, + *p_counter_index); + 
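+	/* The counter has already been unbound from the RIF and returned
+	 * to the pool above; only now is the direction marked as invalid.
+	 */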
mlxsw_sp_rif_counter_valid_set(rif, dir, false); +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT) @@ -88,12 +258,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1, memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1)); } -static void -mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage) -{ - memset(prefix_usage, 0, sizeof(*prefix_usage)); -} - static void mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage, unsigned char prefix_len) @@ -125,7 +289,7 @@ struct mlxsw_sp_fib_node { struct list_head entry_list; struct list_head list; struct rhash_head ht_node; - struct mlxsw_sp_vr *vr; + struct mlxsw_sp_fib *fib; struct mlxsw_sp_fib_key key; }; @@ -149,13 +313,17 @@ struct mlxsw_sp_fib_entry { struct mlxsw_sp_fib { struct rhashtable ht; struct list_head node_list; + struct mlxsw_sp_vr *vr; + struct mlxsw_sp_lpm_tree *lpm_tree; unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; struct mlxsw_sp_prefix_usage prefix_usage; + enum mlxsw_sp_l3proto proto; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; -static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) +static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_fib *fib; int err; @@ -167,6 +335,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) if (err) goto err_rhashtable_init; INIT_LIST_HEAD(&fib->node_list); + fib->proto = proto; + fib->vr = vr; return fib; err_rhashtable_init: @@ -177,24 +347,21 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) { WARN_ON(!list_empty(&fib->node_list)); + WARN_ON(fib->lpm_tree); rhashtable_destroy(&fib->ht); kfree(fib); } static struct mlxsw_sp_lpm_tree * -mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved) +mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp) { static struct mlxsw_sp_lpm_tree *lpm_tree; int i; - for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { - lpm_tree = &mlxsw_sp->router.lpm_trees[i]; - if (lpm_tree->ref_count == 0) { - if (one_reserved) - one_reserved = false; - else - return lpm_tree; - } + for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) { + lpm_tree = &mlxsw_sp->router.lpm.trees[i]; + if (lpm_tree->ref_count == 0) + return lpm_tree; } return NULL; } @@ -248,12 +415,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_lpm_tree * mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_prefix_usage *prefix_usage, - enum mlxsw_sp_l3proto proto, bool one_reserved) + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_lpm_tree *lpm_tree; int err; - lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved); + lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp); if (!lpm_tree) return ERR_PTR(-EBUSY); lpm_tree->proto = proto; @@ -283,13 +450,13 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_lpm_tree * mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_prefix_usage *prefix_usage, - enum mlxsw_sp_l3proto proto, bool one_reserved) + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_lpm_tree *lpm_tree; int i; - for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { - lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + for (i = 0; i < 
mlxsw_sp->router.lpm.tree_count; i++) { + lpm_tree = &mlxsw_sp->router.lpm.trees[i]; if (lpm_tree->ref_count != 0 && lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, @@ -297,7 +464,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, goto inc_ref_count; } lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, - proto, one_reserved); + proto); if (IS_ERR(lpm_tree)) return lpm_tree; @@ -314,15 +481,41 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) +#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */ + +static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_lpm_tree *lpm_tree; + u64 max_trees; int i; - for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { - lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES)) + return -EIO; + + max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES); + mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN; + mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count, + sizeof(struct mlxsw_sp_lpm_tree), + GFP_KERNEL); + if (!mlxsw_sp->router.lpm.trees) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) { + lpm_tree = &mlxsw_sp->router.lpm.trees[i]; lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN; } + + return 0; +} + +static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) +{ + kfree(mlxsw_sp->router.lpm.trees); +} + +static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) +{ + return !!vr->fib4; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -332,31 +525,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; - if (!vr->used) + if (!mlxsw_sp_vr_is_used(vr)) return vr; } return NULL; } static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr) + const struct mlxsw_sp_fib *fib) { char raltb_pl[MLXSW_REG_RALTB_LEN]; - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - (enum mlxsw_reg_ralxx_protocol) vr->proto, - vr->lpm_tree->id); + mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, + (enum mlxsw_reg_ralxx_protocol) fib->proto, + fib->lpm_tree->id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); } static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr) + const struct mlxsw_sp_fib *fib) { char raltb_pl[MLXSW_REG_RALTB_LEN]; /* Bind to tree 0 which is default */ - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - (enum mlxsw_reg_ralxx_protocol) vr->proto, 0); + mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, + (enum mlxsw_reg_ralxx_protocol) fib->proto, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); } @@ -369,8 +562,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id) } static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, - u32 tb_id, - enum mlxsw_sp_l3proto proto) + u32 tb_id) { struct mlxsw_sp_vr *vr; int i; @@ -379,69 +571,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; - if (vr->used && vr->proto == proto && vr->tb_id == tb_id) + if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id) return vr; } return NULL; } +static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto) +{ + switch (proto) { + 
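+	/* Only IPv4 FIBs are instantiated for a VR at this stage;
+	 * reaching the IPv6 case indicates a driver bug, hence the
+	 * BUG_ON() below.
+	 */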
case MLXSW_SP_L3_PROTO_IPV4: + return vr->fib4; + case MLXSW_SP_L3_PROTO_IPV6: + BUG_ON(1); + } + return NULL; +} + static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, - unsigned char prefix_len, - u32 tb_id, - enum mlxsw_sp_l3proto proto) + u32 tb_id) { - struct mlxsw_sp_prefix_usage req_prefix_usage; - struct mlxsw_sp_lpm_tree *lpm_tree; struct mlxsw_sp_vr *vr; - int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) return ERR_PTR(-EBUSY); - vr->fib = mlxsw_sp_fib_create(); - if (IS_ERR(vr->fib)) - return ERR_CAST(vr->fib); - - vr->proto = proto; + vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(vr->fib4)) + return ERR_CAST(vr->fib4); vr->tb_id = tb_id; - mlxsw_sp_prefix_usage_zero(&req_prefix_usage); - mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len); - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, - proto, true); - if (IS_ERR(lpm_tree)) { - err = PTR_ERR(lpm_tree); - goto err_tree_get; - } - vr->lpm_tree = lpm_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); - if (err) - goto err_tree_bind; - - vr->used = true; return vr; - -err_tree_bind: - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); -err_tree_get: - mlxsw_sp_fib_destroy(vr->fib); - - return ERR_PTR(err); } -static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr) +static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) { - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); - mlxsw_sp_fib_destroy(vr->fib); - vr->used = false; + mlxsw_sp_fib_destroy(vr->fib4); + vr->fib4 = NULL; } static int -mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, +mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib, struct mlxsw_sp_prefix_usage *req_prefix_usage) { - struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; + struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree; struct mlxsw_sp_lpm_tree *new_tree; int err; @@ -449,7 +622,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, return 0; new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, - vr->proto, false); + fib->proto); if (IS_ERR(new_tree)) { /* We failed to get a tree according to the required * prefix usage. 
 	 * However, the current tree might be still good
@@ -463,8 +636,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
 	}
 
 	/* Prevent packet loss by overwriting existing binding */
-	vr->lpm_tree = new_tree;
-	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+	fib->lpm_tree = new_tree;
+	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
 	if (err)
 		goto err_tree_bind;
 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
@@ -472,53 +645,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
 	return 0;
 
 err_tree_bind:
-	vr->lpm_tree = lpm_tree;
+	fib->lpm_tree = lpm_tree;
 	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
 	return err;
 }
 
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
-					   unsigned char prefix_len,
-					   u32 tb_id,
-					   enum mlxsw_sp_l3proto proto)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
 {
 	struct mlxsw_sp_vr *vr;
-	int err;
 
 	tb_id = mlxsw_sp_fix_tb_id(tb_id);
-	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
-	if (!vr) {
-		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
-		if (IS_ERR(vr))
-			return vr;
-	} else {
-		struct mlxsw_sp_prefix_usage req_prefix_usage;
-
-		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
-					  &vr->fib->prefix_usage);
-		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
-		/* Need to replace LPM tree in case new prefix is required. */
-		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
-						 &req_prefix_usage);
-		if (err)
-			return ERR_PTR(err);
-	}
+	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+	if (!vr)
+		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
 	return vr;
 }
 
-static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
 {
-	/* Destroy virtual router entity in case the associated FIB is empty
-	 * and allow it to be used for other tables in future. Otherwise,
-	 * check if some prefix usage did not disappear and change tree if
-	 * that is the case. Note that in case new, smaller tree cannot be
-	 * allocated, the original one will be kept being used.
-	 */
-	if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
-		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
-	else
-		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
-					   &vr->fib->prefix_usage);
+	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
+		mlxsw_sp_vr_destroy(vr);
 }
 
 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -627,14 +773,14 @@ static struct mlxsw_sp_neigh_entry *
 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry;
-	struct mlxsw_sp_rif *r;
+	struct mlxsw_sp_rif *rif;
 	int err;
 
-	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
-	if (!r)
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+	if (!rif)
 		return ERR_PTR(-EINVAL);
 
-	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
+	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
 	if (!neigh_entry)
 		return ERR_PTR(-ENOMEM);
 
@@ -642,7 +788,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
 	if (err)
 		goto err_neigh_entry_insert;
 
-	list_add(&neigh_entry->rif_list_node, &r->neigh_list);
+	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
 
 	return neigh_entry;
 
@@ -1050,22 +1196,22 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 }
 
 static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-				    const struct mlxsw_sp_rif *r)
+				    const struct mlxsw_sp_rif *rif)
 {
 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
 
 	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-			     r->rif, r->addr);
+			     rif->rif_index, rif->addr);
 
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
 }
 
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-					 struct mlxsw_sp_rif *r)
+					 struct mlxsw_sp_rif *rif)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-	mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
-	list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
+	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
+	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
 				 rif_list_node)
 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
 }
@@ -1082,7 +1228,7 @@ struct mlxsw_sp_nexthop {
 						*/
 	struct rhash_head ht_node;
 	struct mlxsw_sp_nexthop_key key;
-	struct mlxsw_sp_rif *r;
+	struct mlxsw_sp_rif *rif;
 	u8 should_offload:1, /* set indicates this neigh is connected and
 			      * should be put to KVD linear area of this group.
 			      */
@@ -1109,7 +1255,7 @@ struct mlxsw_sp_nexthop_group {
 	u16 ecmp_size;
 	u16 count;
 	struct mlxsw_sp_nexthop nexthops[0];
-#define nh_rif nexthops[0].r
+#define nh_rif nexthops[0].rif
 };
 
 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
@@ -1171,7 +1317,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
-					     struct mlxsw_sp_vr *vr,
+					     const struct mlxsw_sp_fib *fib,
 					     u32 adj_index, u16 ecmp_size,
 					     u32 new_adj_index,
 					     u16 new_ecmp_size)
@@ -1179,8 +1325,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
 	char raleu_pl[MLXSW_REG_RALEU_LEN];
 
 	mlxsw_reg_raleu_pack(raleu_pl,
-			     (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
-			     adj_index, ecmp_size, new_adj_index,
+			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
+			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
 			     new_ecmp_size);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
 }
@@ -1190,14 +1336,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
 					  u32 old_adj_index, u16 old_ecmp_size)
 {
 	struct mlxsw_sp_fib_entry *fib_entry;
-	struct mlxsw_sp_vr *vr = NULL;
+	struct mlxsw_sp_fib *fib = NULL;
 	int err;
 
 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
-		if (vr == fib_entry->fib_node->vr)
+		if (fib == fib_entry->fib_node->fib)
 			continue;
-		vr = fib_entry->fib_node->vr;
-		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+		fib = fib_entry->fib_node->fib;
+		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
 							old_adj_index,
 							old_ecmp_size,
 							nh_grp->adj_index,
@@ -1280,7 +1426,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 	bool old_adj_index_valid;
 	u32 old_adj_index;
 	u16 old_ecmp_size;
-	int ret;
 	int i;
 	int err;
 
@@ -1318,15 +1463,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 		 */
 		goto set_trap;
 
-	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
-	if (ret < 0) {
+	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+	if (err) {
 		/* We ran out of KVD linear space, just set the
 		 * trap and let everything flow through kernel.
 		 */
 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
 		goto set_trap;
 	}
-	adj_index = ret;
 	old_adj_index_valid = nh_grp->adj_index_valid;
 	old_adj_index = nh_grp->adj_index;
 	old_ecmp_size = nh_grp->ecmp_size;
@@ -1399,22 +1543,22 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
 }
 
 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
-				      struct mlxsw_sp_rif *r)
+				      struct mlxsw_sp_rif *rif)
 {
-	if (nh->r)
+	if (nh->rif)
 		return;
-	nh->r = r;
-	list_add(&nh->rif_list_node, &r->nexthop_list);
+	nh->rif = rif;
+	list_add(&nh->rif_list_node, &rif->nexthop_list);
 }
 
 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
 {
-	if (!nh->r)
+	if (!nh->rif)
 		return;
 	list_del(&nh->rif_list_node);
-	nh->r = NULL;
+	nh->rif = NULL;
 }
 
 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
@@ -1505,7 +1649,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
 {
 	struct net_device *dev = fib_nh->nh_dev;
 	struct in_device *in_dev;
-	struct mlxsw_sp_rif *r;
+	struct mlxsw_sp_rif *rif;
 	int err;
 
 	nh->nh_grp = nh_grp;
@@ -1514,15 +1658,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
+	if (!dev)
+		return 0;
+
 	in_dev = __in_dev_get_rtnl(dev);
 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
 	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
 		return 0;
 
-	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
-	if (!r)
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!rif)
 		return 0;
-	mlxsw_sp_nexthop_rif_init(nh, r);
+	mlxsw_sp_nexthop_rif_init(nh, rif);
 
 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
 	if (err)
@@ -1548,7 +1695,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_nexthop_key key;
 	struct mlxsw_sp_nexthop *nh;
-	struct mlxsw_sp_rif *r;
+	struct mlxsw_sp_rif *rif;
 
 	if (mlxsw_sp->router.aborted)
 		return;
@@ -1558,13 +1705,13 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
 	if (WARN_ON_ONCE(!nh))
 		return;
 
-	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
-	if (!r)
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
+	if (!rif)
 		return;
 
 	switch (event) {
 	case FIB_EVENT_NH_ADD:
-		mlxsw_sp_nexthop_rif_init(nh, r);
+		mlxsw_sp_nexthop_rif_init(nh, rif);
 		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
 		break;
 	case FIB_EVENT_NH_DEL:
@@ -1577,11 +1724,11 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
 }
 
 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-					   struct mlxsw_sp_rif *r)
+					   struct mlxsw_sp_rif *rif)
 {
 	struct mlxsw_sp_nexthop *nh, *tmp;
 
-	list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
+	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
 		mlxsw_sp_nexthop_rif_fini(nh);
 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
@@ -1699,7 +1846,7 @@ static void
 mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
 {
 	fib_entry->offloaded = true;
 
-	switch (fib_entry->fib_node->vr->proto) {
+	switch (fib_entry->fib_node->fib->proto) {
 	case MLXSW_SP_L3_PROTO_IPV4:
 		fib_info_offload_inc(fib_entry->nh_group->key.fi);
 		break;
@@ -1711,7 +1858,7 @@ static void
 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
 {
-	switch (fib_entry->fib_node->vr->proto) {
+	switch (fib_entry->fib_node->fib->proto) {
 	case MLXSW_SP_L3_PROTO_IPV4:
 		fib_info_offload_dec(fib_entry->nh_group->key.fi);
 		break;
@@ -1751,8 +1898,8 @@ static int
 mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
 			      struct mlxsw_sp_fib_entry *fib_entry,
 			      enum mlxsw_reg_ralue_op op)
 {
 	char ralue_pl[MLXSW_REG_RALUE_LEN];
+	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
 	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
-	struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
 	enum mlxsw_reg_ralue_trap_action trap_action;
 	u16 trap_id = 0;
 	u32 adjacency_index = 0;
@@ -1772,8 +1919,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
 	}
 
 	mlxsw_reg_ralue_pack4(ralue_pl,
-			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-			      vr->id, fib_entry->fib_node->key.prefix_len,
+			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
 			      *p_dip);
 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
 					adjacency_index, ecmp_size);
@@ -1784,27 +1931,28 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
 					struct mlxsw_sp_fib_entry *fib_entry,
 					enum mlxsw_reg_ralue_op op)
 {
-	struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
 	enum mlxsw_reg_ralue_trap_action trap_action;
 	char ralue_pl[MLXSW_REG_RALUE_LEN];
 	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
-	struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
 	u16 trap_id = 0;
-	u16 rif = 0;
+	u16 rif_index = 0;
 
 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
-		rif = r->rif;
+		rif_index = rif->rif_index;
 	} else {
 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
 	}
 
 	mlxsw_reg_ralue_pack4(ralue_pl,
-			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-			      vr->id, fib_entry->fib_node->key.prefix_len,
+			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
 			      *p_dip);
-	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
+	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+				       rif_index);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
 
@@ -1812,13 +1960,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
 				       struct mlxsw_sp_fib_entry *fib_entry,
 				       enum mlxsw_reg_ralue_op op)
 {
+	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
 	char ralue_pl[MLXSW_REG_RALUE_LEN];
 	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
-	struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
 
 	mlxsw_reg_ralue_pack4(ralue_pl,
-			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-			      vr->id, fib_entry->fib_node->key.prefix_len,
+			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
+			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
 			      *p_dip);
 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1845,7 +1993,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
 {
 	int err = -EINVAL;
 
-	switch (fib_entry->fib_node->vr->proto) {
+	switch (fib_entry->fib_node->fib->proto) {
 	case MLXSW_SP_L3_PROTO_IPV4:
 		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
 		break;
@@ -1877,17 +2025,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
 {
 	struct fib_info *fi = fen_info->fi;
 
-	if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
+	switch (fen_info->type) {
+	case RTN_BROADCAST: /* fall through */
+	case RTN_LOCAL:
 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
 		return 0;
-	}
-	if (fen_info->type != RTN_UNICAST)
-		return -EINVAL;
-	if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+	case RTN_UNREACHABLE: /* fall through */
+	case RTN_BLACKHOLE: /* fall through */
+	case RTN_PROHIBIT:
+		/* Packets hitting these routes need to be trapped, but
+		 * can do so with a lower priority than packets directed
+		 * at the host, so use action type local instead of trap.
+		 */
 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
-	else
-		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
-	return 0;
+		return 0;
+	case RTN_UNICAST:
+		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+		else
+			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static struct mlxsw_sp_fib_entry *
@@ -1996,7 +2156,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
 }
 
 static struct mlxsw_sp_fib_node *
-mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
 			 size_t addr_len, unsigned char prefix_len)
 {
 	struct mlxsw_sp_fib_node *fib_node;
@@ -2006,18 +2166,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
 		return NULL;
 
 	INIT_LIST_HEAD(&fib_node->entry_list);
-	list_add(&fib_node->list, &vr->fib->node_list);
+	list_add(&fib_node->list, &fib->node_list);
 	memcpy(fib_node->key.addr, addr, addr_len);
 	fib_node->key.prefix_len = prefix_len;
-	mlxsw_sp_fib_node_insert(vr->fib, fib_node);
-	fib_node->vr = vr;
 
 	return fib_node;
 }
 
 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
 {
-	mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
 	list_del(&fib_node->list);
 	WARN_ON(!list_empty(&fib_node->entry_list));
 	kfree(fib_node);
@@ -2034,7 +2191,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
 static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
 {
 	unsigned char prefix_len = fib_node->key.prefix_len;
-	struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+	struct mlxsw_sp_fib *fib = fib_node->fib;
 
 	if (fib->prefix_ref_count[prefix_len]++ == 0)
 		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
@@ -2043,32 +2200,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
 static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
 {
 	unsigned char prefix_len = fib_node->key.prefix_len;
-	struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+	struct mlxsw_sp_fib *fib = fib_node->fib;
 
 	if (--fib->prefix_ref_count[prefix_len] == 0)
 		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
 }
 
+static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_fib_node *fib_node,
+				  struct mlxsw_sp_fib *fib)
+{
+	struct mlxsw_sp_prefix_usage req_prefix_usage;
+	struct mlxsw_sp_lpm_tree *lpm_tree;
+	int err;
+
+	err = mlxsw_sp_fib_node_insert(fib, fib_node);
+	if (err)
+		return err;
+	fib_node->fib = fib;
+
+	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
+	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+
+	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
+						 &req_prefix_usage);
+		if (err)
+			goto err_tree_check;
+	} else {
+		lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+						 fib->proto);
+		if (IS_ERR(lpm_tree))
+			return PTR_ERR(lpm_tree);
+		fib->lpm_tree = lpm_tree;
+		err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
+		if (err)
+			goto err_tree_bind;
+	}
+
+	mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+	return 0;
+
+err_tree_bind:
+	fib->lpm_tree = NULL;
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_tree_check:
+	fib_node->fib = NULL;
+	mlxsw_sp_fib_node_remove(fib, fib_node);
+	return err;
+}
+
+static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
+				   struct mlxsw_sp_fib_node *fib_node)
+{
+	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+	struct mlxsw_sp_fib *fib = fib_node->fib;
+
+	mlxsw_sp_fib_node_prefix_dec(fib_node);
+
+	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
+		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+		fib->lpm_tree = NULL;
+		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+	} else {
+		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
+	}
+
+	fib_node->fib = NULL;
+	mlxsw_sp_fib_node_remove(fib, fib_node);
+}
+
 static struct mlxsw_sp_fib_node *
 mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
 		       const struct fib_entry_notifier_info *fen_info)
 {
 	struct mlxsw_sp_fib_node *fib_node;
+	struct mlxsw_sp_fib *fib;
 	struct mlxsw_sp_vr *vr;
 	int err;
 
-	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
-			     MLXSW_SP_L3_PROTO_IPV4);
+	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
 	if (IS_ERR(vr))
 		return ERR_CAST(vr);
+	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
 
-	fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
 					    sizeof(fen_info->dst),
 					    fen_info->dst_len);
 	if (fib_node)
 		return fib_node;
 
-	fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
 					    sizeof(fen_info->dst),
 					    fen_info->dst_len);
 	if (!fib_node) {
@@ -2076,22 +2299,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
 		goto err_fib_node_create;
 	}
 
+	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
+	if (err)
+		goto err_fib_node_init;
+
 	return fib_node;
 
+err_fib_node_init:
+	mlxsw_sp_fib_node_destroy(fib_node);
 err_fib_node_create:
-	mlxsw_sp_vr_put(mlxsw_sp, vr);
+	mlxsw_sp_vr_put(vr);
 	return ERR_PTR(err);
 }
 
 static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
 				   struct mlxsw_sp_fib_node *fib_node)
 {
-	struct mlxsw_sp_vr *vr = fib_node->vr;
+	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
 
 	if (!list_empty(&fib_node->entry_list))
 		return;
+	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
 	mlxsw_sp_fib_node_destroy(fib_node);
-	mlxsw_sp_vr_put(mlxsw_sp, vr);
+	mlxsw_sp_vr_put(vr);
 }
 
 static struct mlxsw_sp_fib_entry *
@@ -2236,8 +2466,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_fib4_node_entry_add;
 
-	mlxsw_sp_fib_node_prefix_inc(fib_node);
-
 	return 0;
 
 err_fib4_node_entry_add:
@@ -2251,7 +2479,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
 
-	mlxsw_sp_fib_node_prefix_dec(fib_node);
 	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
 	mlxsw_sp_fib4_node_list_remove(fib_entry);
 }
@@ -2340,9 +2567,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
 {
 	char ralta_pl[MLXSW_REG_RALTA_LEN];
 	char ralst_pl[MLXSW_REG_RALST_LEN];
-	char raltb_pl[MLXSW_REG_RALTB_LEN];
-	char ralue_pl[MLXSW_REG_RALUE_LEN];
-	int err;
+	int i, err;
 
 	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
 			     MLXSW_SP_LPM_TREE_MIN);
@@ -2355,16 +2580,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
 	if (err)
 		return err;
 
-	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
-			     MLXSW_SP_LPM_TREE_MIN);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
-	if (err)
-		return err;
+	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+		char raltb_pl[MLXSW_REG_RALTB_LEN];
+		char ralue_pl[MLXSW_REG_RALUE_LEN];
 
-	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
-			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
-	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+		if (!mlxsw_sp_vr_is_used(vr))
+			continue;
+
+		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
+				     MLXSW_SP_LPM_TREE_MIN);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+				      raltb_pl);
+		if (err)
+			return err;
+
+		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
+				      0);
+		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+				      ralue_pl);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
@@ -2390,7 +2632,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
 				    struct mlxsw_sp_fib_node *fib_node)
 {
-	switch (fib_node->vr->proto) {
+	switch (fib_node->fib->proto) {
 	case MLXSW_SP_L3_PROTO_IPV4:
 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
 		break;
@@ -2400,26 +2642,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
 	}
 }
 
-static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_vr *vr,
+				  enum mlxsw_sp_l3proto proto)
 {
+	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
 	struct mlxsw_sp_fib_node *fib_node, *tmp;
-	struct mlxsw_sp_vr *vr;
+
+	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
+		bool do_break = &tmp->list == &fib->node_list;
+
+		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
+		if (do_break)
+			break;
+	}
+}
+
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
+{
 	int i;
 
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
-		vr = &mlxsw_sp->router.vrs[i];
+		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
 
-		if (!vr->used)
+		if (!mlxsw_sp_vr_is_used(vr))
 			continue;
-
-		list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
-					 list) {
-			bool do_break = &tmp->list == &vr->fib->node_list;
-
-			mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
-			if (do_break)
-				break;
-		}
+		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
 	}
 }
 
@@ -2437,86 +2685,24 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
 }
 
-static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
-{
-	char ritr_pl[MLXSW_REG_RITR_LEN];
-	int err;
-
-	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
-	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-	if (WARN_ON_ONCE(err))
-		return err;
-
-	mlxsw_reg_ritr_enable_set(ritr_pl, false);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
-				   struct mlxsw_sp_rif *r)
-{
-	mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
-	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
-	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
-}
+struct mlxsw_sp_fib_event_work {
+	struct work_struct work;
+	union {
+		struct fib_entry_notifier_info fen_info;
+		struct fib_rule_notifier_info fr_info;
+		struct fib_nh_notifier_info fnh_info;
+	};
+	struct mlxsw_sp *mlxsw_sp;
+	unsigned long event;
+};
 
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
 {
-	char rgcr_pl[MLXSW_REG_RGCR_LEN];
-	u64 max_rifs;
-	int err;
-
-	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
-		return -EIO;
-
-	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
-	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
-				 GFP_KERNEL);
-	if (!mlxsw_sp->rifs)
-		return -ENOMEM;
-
-	mlxsw_reg_rgcr_pack(rgcr_pl, true);
-	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-	if (err)
-		goto err_rgcr_fail;
-
-	return 0;
-
-err_rgcr_fail:
-	kfree(mlxsw_sp->rifs);
-	return err;
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
-	char rgcr_pl[MLXSW_REG_RGCR_LEN];
-	int i;
-
-	mlxsw_reg_rgcr_pack(rgcr_pl, false);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
-	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
-		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
-	kfree(mlxsw_sp->rifs);
-}
-
-struct mlxsw_sp_fib_event_work {
-	struct work_struct work;
-	union {
-		struct fib_entry_notifier_info fen_info;
-		struct fib_nh_notifier_info fnh_info;
-	};
-	struct mlxsw_sp *mlxsw_sp;
-	unsigned long event;
-};
-
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
-	struct mlxsw_sp_fib_event_work *fib_work =
-		container_of(work, struct mlxsw_sp_fib_event_work, work);
-	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-	bool replace, append;
+	struct mlxsw_sp_fib_event_work *fib_work =
+		container_of(work, struct mlxsw_sp_fib_event_work, work);
+	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+	struct fib_rule *rule;
+	bool replace, append;
 	int err;
 
 	/* Protect internal structures from changes */
@@ -2539,7 +2725,10 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
 		break;
 	case FIB_EVENT_RULE_ADD: /* fall through */
 	case FIB_EVENT_RULE_DEL:
-		mlxsw_sp_router_fib4_abort(mlxsw_sp);
+		rule = fib_work->fr_info.rule;
+		if (!fib4_rule_default(rule) && !rule->l3mdev)
+			mlxsw_sp_router_fib4_abort(mlxsw_sp);
+		fib_rule_put(rule);
 		break;
 	case FIB_EVENT_NH_ADD: /* fall through */
 	case FIB_EVENT_NH_DEL:
@@ -2582,6 +2771,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
 		 */
 		fib_info_hold(fib_work->fen_info.fi);
 		break;
+	case FIB_EVENT_RULE_ADD: /* fall through */
+	case FIB_EVENT_RULE_DEL:
+		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+		fib_rule_get(fib_work->fr_info.rule);
+		break;
 	case FIB_EVENT_NH_ADD: /* fall through */
 	case FIB_EVENT_NH_DEL:
 		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
@@ -2594,6 +2788,707 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+			 const struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+		if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+			return mlxsw_sp->rifs[i];
+
+	return NULL;
+}
+
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+	int err;
+
+	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+	if (WARN_ON_ONCE(err))
+		return err;
+
+	mlxsw_reg_ritr_enable_set(ritr_pl, false);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+					  struct mlxsw_sp_rif *rif)
+{
+	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
+	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
+	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
+				       const struct in_device *in_dev,
+				       unsigned long event)
+{
+	switch (event) {
+	case NETDEV_UP:
+		if (!rif)
+			return true;
+		return false;
+	case NETDEV_DOWN:
+		if (rif && !in_dev->ifa_list &&
+		    !netif_is_l3_slave(rif->dev))
+			return true;
+		/* It is possible we already removed the RIF ourselves
+		 * if it was assigned to a netdev that is now a bridge
+		 * or LAG slave.
+		 */
+		return false;
+	}
+
+	return false;
+}
+
+#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+	int i;
+
+	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+		if (!mlxsw_sp->rifs[i])
+			return i;
+
+	return MLXSW_SP_INVALID_INDEX_RIF;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+					   bool *p_lagged, u16 *p_system_port)
+{
+	u8 local_port = mlxsw_sp_vport->local_port;
+
+	*p_lagged = mlxsw_sp_vport->lagged;
+	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+				    u16 vr_id, struct net_device *l3_dev,
+				    u16 rif_index, bool create)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	bool lagged = mlxsw_sp_vport->lagged;
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+	u16 system_port;
+
+	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
+			    vr_id, l3_dev->mtu, l3_dev->dev_addr);
+
+	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
+{
+	return MLXSW_SP_RFID_BASE + rif_index;
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+	struct mlxsw_sp_fid *f;
+
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	f->leave = mlxsw_sp_vport_rif_sp_leave;
+	f->ref_count = 0;
+	f->dev = l3_dev;
+	f->fid = fid;
+
+	return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
+		   struct mlxsw_sp_fid *f)
+{
+	struct mlxsw_sp_rif *rif;
+
+	rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+	if (!rif)
+		return NULL;
+
+	INIT_LIST_HEAD(&rif->nexthop_list);
+	INIT_LIST_HEAD(&rif->neigh_list);
+	ether_addr_copy(rif->addr, l3_dev->dev_addr);
+	rif->mtu = l3_dev->mtu;
+	rif->vr_id = vr_id;
+	rif->dev = l3_dev;
+	rif->rif_index = rif_index;
+	rif->f = f;
+
+	return rif;
+}
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
+{
+	return rif->rif_index;
+}
+
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
+{
+	return rif->dev->ifindex;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+			     struct net_device *l3_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	u32 tb_id = l3mdev_fib_table(l3_dev);
+	struct mlxsw_sp_vr *vr;
+	struct mlxsw_sp_fid *f;
+	struct mlxsw_sp_rif *rif;
+	u16 fid, rif_index;
+	int err;
+
+	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+		return ERR_PTR(-ERANGE);
+
+	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+	if (IS_ERR(vr))
+		return ERR_CAST(vr);
+
+	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
+				       rif_index, true);
+	if (err)
+		goto err_vport_rif_sp_op;
+
+	fid = mlxsw_sp_rif_sp_to_fid(rif_index);
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+	if (err)
+		goto err_rif_fdb_op;
+
+	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+	if (!f) {
+		err = -ENOMEM;
+		goto err_rfid_alloc;
+	}
+
+	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+	if (!rif) {
+		err = -ENOMEM;
+		goto err_rif_alloc;
+	}
+
+	if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
+						MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
+		err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+						 MLXSW_SP_RIF_COUNTER_EGRESS);
+		if (err)
+			netdev_dbg(mlxsw_sp_vport->dev,
+				   "Counter alloc failed err=%d\n", err);
+	}
+
+	f->rif = rif;
+	mlxsw_sp->rifs[rif_index] = rif;
+	vr->rif_count++;
+
+	return rif;
+
+err_rif_alloc:
+	kfree(f);
+err_rfid_alloc:
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+				 false);
+err_vport_rif_sp_op:
+	mlxsw_sp_vr_put(vr);
+	return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+					  struct mlxsw_sp_rif *rif)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+	struct net_device *l3_dev = rif->dev;
+	struct mlxsw_sp_fid *f = rif->f;
+	u16 rif_index = rif->rif_index;
+	u16 fid = f->fid;
+
+	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+
+	vr->rif_count--;
+	mlxsw_sp->rifs[rif_index] = NULL;
+	f->rif = NULL;
+
+	kfree(rif);
+
+	kfree(f);
+
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
+				 false);
+	mlxsw_sp_vr_put(vr);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+				      struct net_device *l3_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	struct mlxsw_sp_rif *rif;
+
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+	if (!rif) {
+		rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+		if (IS_ERR(rif))
+			return PTR_ERR(rif);
+	}
+
+	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
+	rif->f->ref_count++;
+
+	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+
+	return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+	if (--f->ref_count == 0)
+		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+					 struct net_device *port_dev,
+					 unsigned long event, u16 vid)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+
+	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+	if (WARN_ON(!mlxsw_sp_vport))
+		return -EINVAL;
+
+	switch (event) {
+	case NETDEV_UP:
+		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+	case NETDEV_DOWN:
+		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+		break;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+					unsigned long event)
+{
+	if (netif_is_bridge_port(port_dev) ||
+	    netif_is_lag_port(port_dev) ||
+	    netif_is_ovs_port(port_dev))
+		return 0;
+
+	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+					 struct net_device *lag_dev,
+					 unsigned long event, u16 vid)
+{
+	struct net_device *port_dev;
+	struct list_head *iter;
+	int err;
+
+	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+		if (mlxsw_sp_port_dev_check(port_dev)) {
+			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+							    event, vid);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+				       unsigned long event)
+{
+	if (netif_is_bridge_port(lag_dev))
+		return 0;
+
+	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+						    struct net_device *l3_dev)
+{
+	u16 fid;
+
+	if (is_vlan_dev(l3_dev))
+		fid = vlan_dev_vlan_id(l3_dev);
+	else if (mlxsw_sp->master_bridge.dev == l3_dev)
+		fid = 1;
+	else
+		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+	return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
+	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+	return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+	       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+	return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+					  bool set)
+{
+	u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
+	enum mlxsw_flood_table_type table_type;
+	char *sftr_pl;
+	u16 index;
+	int err;
+
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+
+	table_type = mlxsw_sp_flood_table_type_get(fid);
+	index = mlxsw_sp_flood_table_index_get(fid);
+	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
+			    1, router_port, set);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+	kfree(sftr_pl);
+	return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+	if (mlxsw_sp_fid_is_vfid(fid))
+		return MLXSW_REG_RITR_FID_IF;
+	else
+		return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
+				  struct net_device *l3_dev,
+				  u16 fid, u16 rif,
+				  bool create)
+{
+	enum mlxsw_reg_ritr_if_type rif_type;
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+
+	rif_type = mlxsw_sp_rif_type_get(fid);
+	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
+			    l3_dev->dev_addr);
+	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+				      struct net_device *l3_dev,
+				      struct mlxsw_sp_fid *f)
+{
+	u32 tb_id = l3mdev_fib_table(l3_dev);
+	struct mlxsw_sp_rif *rif;
+	struct mlxsw_sp_vr *vr;
+	u16 rif_index;
+	int err;
+
+	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
+		return -ERANGE;
+
+	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+	if (IS_ERR(vr))
+		return PTR_ERR(vr);
+
+	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+	if (err)
+		goto err_port_flood_set;
+
+	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
+				     rif_index, true);
+	if (err)
+		goto err_rif_bridge_op;
+
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+	if (err)
+		goto err_rif_fdb_op;
+
+	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+	if (!rif) {
+		err = -ENOMEM;
+		goto err_rif_alloc;
+	}
+
+	f->rif = rif;
+	mlxsw_sp->rifs[rif_index] = rif;
+	vr->rif_count++;
+
+	netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
+
+	return 0;
+
+err_rif_alloc:
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+			       false);
+err_rif_bridge_op:
+	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+err_port_flood_set:
+	mlxsw_sp_vr_put(vr);
+	return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_rif *rif)
+{
+	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
+	struct net_device *l3_dev = rif->dev;
+	struct mlxsw_sp_fid *f = rif->f;
+	u16 rif_index = rif->rif_index;
+
+	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+
+	vr->rif_count--;
+	mlxsw_sp->rifs[rif_index] = NULL;
+	f->rif = NULL;
+
+	kfree(rif);
+
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
+			       false);
+
+	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+	mlxsw_sp_vr_put(vr);
+
+	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+					  struct net_device *br_dev,
+					  unsigned long event)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+	struct mlxsw_sp_fid *f;
+
+	/* FID can either be an actual FID if the L3 device is the
+	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+	 * L3 device is a VLAN-unaware bridge and we get a vFID.
+	 */
+	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+	if (WARN_ON(!f))
+		return -EINVAL;
+
+	switch (event) {
+	case NETDEV_UP:
+		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+	case NETDEV_DOWN:
+		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+		break;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+					unsigned long event)
+{
+	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+	u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+	if (mlxsw_sp_port_dev_check(real_dev))
+		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+						     vid);
+	else if (netif_is_lag_master(real_dev))
+		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+						     vid);
+	else if (netif_is_bridge_master(real_dev) &&
+		 mlxsw_sp->master_bridge.dev == real_dev)
+		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+						      event);
+
+	return 0;
+}
+
+static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
+				     unsigned long event)
+{
+	if (mlxsw_sp_port_dev_check(dev))
+		return mlxsw_sp_inetaddr_port_event(dev, event);
+	else if (netif_is_lag_master(dev))
+		return mlxsw_sp_inetaddr_lag_event(dev, event);
+	else if (netif_is_bridge_master(dev))
+		return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+	else if (is_vlan_dev(dev))
+		return mlxsw_sp_inetaddr_vlan_event(dev, event);
+	else
+		return 0;
+}
+
+int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+	struct mlxsw_sp *mlxsw_sp;
+	struct mlxsw_sp_rif *rif;
+	int err = 0;
+
+	mlxsw_sp = mlxsw_sp_lower_get(dev);
+	if (!mlxsw_sp)
+		goto out;
+
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
+		goto out;
+
+	err = __mlxsw_sp_inetaddr_event(dev, event);
+out:
+	return notifier_from_errno(err);
+}
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+			     const char *mac, int mtu)
+{
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+	int err;
+
+	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+	struct mlxsw_sp *mlxsw_sp;
+	struct mlxsw_sp_rif *rif;
+	int err;
+
+	mlxsw_sp = mlxsw_sp_lower_get(dev);
+	if (!mlxsw_sp)
+		return 0;
+
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!rif)
+		return 0;
+
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
+				dev->mtu);
+	if (err)
+		goto err_rif_edit;
+
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+	if (err)
+		goto err_rif_fdb_op;
+
+	ether_addr_copy(rif->addr, dev->dev_addr);
+	rif->mtu = dev->mtu;
+
+	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
+
+	return 0;
+
+err_rif_fdb_op:
+	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
+err_rif_edit:
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+	return err;
+}
+
+static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
+				  struct net_device *l3_dev)
+{
+	struct mlxsw_sp_rif *rif;
+
+	/* If netdev is already associated with a RIF, then we need to
+	 * destroy it and create a new one with the new virtual router ID.
+	 */
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+	if (rif)
+		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+
+	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
+}
+
+static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
+				    struct net_device *l3_dev)
+{
+	struct mlxsw_sp_rif *rif;
+
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+	if (!rif)
+		return;
+	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+}
+
+int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
+				 struct netdev_notifier_changeupper_info *info)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+	int err = 0;
+
+	if (!mlxsw_sp)
+		return 0;
+
+	switch (event) {
+	case NETDEV_PRECHANGEUPPER:
+		return 0;
+	case NETDEV_CHANGEUPPER:
+		if (info->linking)
+			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
+		else
+			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
+		break;
+	}
+
+	return err;
+}
+
 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
 {
 	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
@@ -2606,6 +3501,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
 	mlxsw_sp_router_fib_flush(mlxsw_sp);
 }
 
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char rgcr_pl[MLXSW_REG_RGCR_LEN];
+	u64 max_rifs;
+	int err;
+
+	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
+		return -EIO;
+
+	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
+				 GFP_KERNEL);
+	if (!mlxsw_sp->rifs)
+		return -ENOMEM;
+
+	mlxsw_reg_rgcr_pack(rgcr_pl, true);
+	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+	if (err)
+		goto err_rgcr_fail;
+
+	return 0;
+
+err_rgcr_fail:
+	kfree(mlxsw_sp->rifs);
+	return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	char rgcr_pl[MLXSW_REG_RGCR_LEN];
+	int i;
+
+	mlxsw_reg_rgcr_pack(rgcr_pl, false);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+	kfree(mlxsw_sp->rifs);
+}
+
 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 {
 	int err;
@@ -2625,7 +3562,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 	if (err)
 		goto err_nexthop_group_ht_init;
 
-	mlxsw_sp_lpm_init(mlxsw_sp);
+	err = mlxsw_sp_lpm_init(mlxsw_sp);
+	if (err)
+		goto err_lpm_init;
+
 	err = mlxsw_sp_vrs_init(mlxsw_sp);
 	if (err)
 		goto err_vrs_init;
@@ -2647,6 +3587,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 err_neigh_init:
 	mlxsw_sp_vrs_fini(mlxsw_sp);
 err_vrs_init:
+	mlxsw_sp_lpm_fini(mlxsw_sp);
+err_lpm_init:
 	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
 err_nexthop_group_ht_init:
 	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
@@ -2660,6 +3602,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
 	unregister_fib_notifier(&mlxsw_sp->fib_nb);
 	mlxsw_sp_neigh_fini(mlxsw_sp);
 	mlxsw_sp_vrs_fini(mlxsw_sp);
+	mlxsw_sp_lpm_fini(mlxsw_sp);
 	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
 	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
 	__mlxsw_sp_router_fini(mlxsw_sp);
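The spectrum_router.c changes above invert the old ownership model: LPM trees and prefix usage now hang off a per-protocol FIB (with a fib->vr back-pointer), and a virtual router stays allocated only while it still has RIFs or FIB nodes. A minimal stand-alone C sketch of that lifetime rule follows; the types and names here are simplified hypotheticals for illustration, not driver code:

#include <stdio.h>

enum l3proto { L3_IPV4, L3_MAX };

struct vr;

struct fib {
	struct vr *vr;			/* back-pointer, as in fib->vr->id */
	enum l3proto proto;
	unsigned int node_count;	/* stands in for the FIB node list */
};

struct vr {
	unsigned int id;
	unsigned int rif_count;
	struct fib *fibs[L3_MAX];
};

/* Mirrors the new rule in mlxsw_sp_vr_put(): a VR is in use while it
 * has RIFs or any FIB nodes, instead of tracking prefix usage. */
static int vr_is_used(const struct vr *vr)
{
	return vr->rif_count ||
	       (vr->fibs[L3_IPV4] && vr->fibs[L3_IPV4]->node_count);
}

int main(void)
{
	struct vr vr = { .id = 1 };
	struct fib fib4 = { .vr = &vr, .proto = L3_IPV4 };

	vr.fibs[L3_IPV4] = &fib4;
	fib4.node_count++;				/* route added */
	printf("vr used: %d\n", vr_is_used(&vr));	/* 1 */
	fib4.node_count--;				/* route removed */
	printf("vr used: %d\n", vr_is_used(&vr));	/* 0: may be destroyed */
	return 0;
}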
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3095fef669774b13dcb5bce6b7b617d568bf7c3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -0,0 +1,58 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Arkadi Sharshevsky
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ROUTER_H_
+#define _MLXSW_ROUTER_H_
+
+#include "spectrum.h"
+
+enum mlxsw_sp_rif_counter_dir {
+	MLXSW_SP_RIF_COUNTER_INGRESS,
+	MLXSW_SP_RIF_COUNTER_EGRESS,
+};
+
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
+				   struct mlxsw_sp_rif *rif,
+				   enum mlxsw_sp_rif_counter_dir dir,
+				   u64 *cnt);
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
+			       struct mlxsw_sp_rif *rif,
+			       enum mlxsw_sp_rif_counter_dir dir);
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+			       struct mlxsw_sp_rif *rif,
+			       enum mlxsw_sp_rif_counter_dir dir);
+
+#endif /* _MLXSW_ROUTER_H_ */
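The new header exports the RIF counter API with an explicit ingress/egress direction enum. A hypothetical caller-side sketch, built only from the signatures declared above (error handling simplified; this helper is not part of the patch):

static int rif_egress_packets_show(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif)
{
	u64 cnt;
	int err;

	/* Counters are allocated per direction and must be freed again. */
	err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
					 MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
					     MLXSW_SP_RIF_COUNTER_EGRESS,
					     &cnt);
	if (!err)
		pr_info("RIF %u egress: %llu packets\n",
			mlxsw_sp_rif_index(rif), (unsigned long long)cnt);

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
				  MLXSW_SP_RIF_COUNTER_EGRESS);
	return err;
}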
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 598727d578c16e924ac5b25a98a7d622e02dc06a..0d8411f1f954c5668bc3bd26f721589f2ccf205d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -568,8 +568,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 
 	list_del(&f->list);
 
-	if (f->r)
-		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+	if (f->rif)
+		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
 
 	kfree(f);
 
@@ -745,27 +745,6 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 	return err;
 }
 
-static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
-				     u16 vid_begin, u16 vid_end, bool is_member,
-				     bool untagged)
-{
-	u16 vid, vid_e;
-	int err;
-
-	for (vid = vid_begin; vid <= vid_end;
-	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
-		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
-			    vid_end);
-
-		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
-					     is_member, untagged);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
 					  u16 vid_begin, u16 vid_end,
 					  bool learn_enable)
@@ -804,8 +783,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 		return err;
 	}
 
-	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
-					true, flag_untagged);
+	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+				     true, flag_untagged);
 	if (err) {
 		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
 			   vid_end);
@@ -863,8 +842,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (old_pvid != mlxsw_sp_port->pvid)
 		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
 err_port_pvid_set:
-	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
-				  false);
+	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+			       false, false);
err_port_vlans_set:
 	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
 	return err;
@@ -1012,7 +991,7 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
 
 	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
 	if (clear_all_ports) {
-		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+		for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
 			if (mlxsw_sp->ports[i])
 				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
 	}
@@ -1171,8 +1150,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (pvid >= vid_begin && pvid <= vid_end)
 		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
 
-	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
-				  false);
+	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
+			       false, false);
 
 	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index ec1e886d4566fb098aefc6e4d82d6f69ea62173b..3b0f72455681663514d4725b50ffebe696ada240 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1321,7 +1321,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 {
 	int i;
 
-	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
 		if (mlxsw_sx_port_created(mlxsw_sx, i))
 			mlxsw_sx_port_remove(mlxsw_sx, i);
 	kfree(mlxsw_sx->ports);
@@ -1329,17 +1329,18 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 
 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
 {
+	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
 	size_t alloc_size;
 	u8 module, width;
 	int i;
 	int err;
 
-	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
 	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
 	if (!mlxsw_sx->ports)
 		return -ENOMEM;
 
-	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+	for (i = 1; i < max_ports; i++) {
 		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
 						    &width);
 		if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 02ea48b15eb57f7533a838806107732b7ea97c26..e008fdbed20fd34e13ef3dbd357f663b681cfaa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -55,6 +55,7 @@ enum {
 	MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
 	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
 	MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+	MLXSW_TRAP_ID_FID_MISS = 0x3D,
 	MLXSW_TRAP_ID_ARPBC = 0x50,
 	MLXSW_TRAP_ID_ARPUC = 0x51,
 	MLXSW_TRAP_ID_MTUERROR = 0x52,
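Both mlxsw hunks above replace the compile-time MLXSW_PORT_MAX_PORTS bound with the runtime mlxsw_core_max_ports() query, so per-port arrays are sized by what the device actually reports. The shape of that pattern, as a small self-contained C sketch (hypothetical names, not driver code):

#include <stdlib.h>

struct port;

/* Stand-in for a runtime capability query such as mlxsw_core_max_ports();
 * in the driver this comes from device resources, not a constant. */
static unsigned int core_max_ports(void)
{
	return 64;
}

static struct port **ports_create(void)
{
	unsigned int max_ports = core_max_ports();
	struct port **ports;

	/* Array sized at probe time; index 0 stays unused, as in mlxsw,
	 * and individual ports are instantiated on demand later. */
	ports = calloc(max_ports, sizeof(*ports));
	return ports; /* NULL on allocation failure */
}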
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 279ee4612981b0af8d482674c6f1f5525988a6f7..20358f87de57053b6e8a0cdd5078334260e1be6b 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -211,25 +211,6 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
 		netdev_err(ks->netdev, "spi_sync() failed\n");
 }
 
-/**
- * ks8851_rx_1msg - select whether to use one or two messages for spi read
- * @ks: The device structure
- *
- * Return whether to generate a single message with a tx and rx buffer
- * supplied to spi_sync(), or alternatively send the tx and rx buffers
- * as separate messages.
- *
- * Depending on the hardware in use, a single message may be more efficient
- * on interrupts or work done by the driver.
- *
- * This currently always returns true until we add some per-device data passed
- * from the platform code to specify which mode is better.
- */
-static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
-{
-	return true;
-}
-
 /**
  * ks8851_rdreg - issue read register command and return the data
  * @ks: The device state
@@ -251,14 +232,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
 
 	txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
 
-	if (ks8851_rx_1msg(ks)) {
-		msg = &ks->spi_msg1;
-		xfer = &ks->spi_xfer1;
-
-		xfer->tx_buf = txb;
-		xfer->rx_buf = trx;
-		xfer->len = rxl + 2;
-	} else {
+	if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
 		msg = &ks->spi_msg2;
 		xfer = ks->spi_xfer2;
 
@@ -270,15 +244,22 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
 		xfer->tx_buf = NULL;
 		xfer->rx_buf = trx;
 		xfer->len = rxl;
+	} else {
+		msg = &ks->spi_msg1;
+		xfer = &ks->spi_xfer1;
+
+		xfer->tx_buf = txb;
+		xfer->rx_buf = trx;
+		xfer->len = rxl + 2;
 	}
 
 	ret = spi_sync(ks->spidev, msg);
 	if (ret < 0)
 		netdev_err(ks->netdev, "read: spi_sync() failed\n");
-	else if (ks8851_rx_1msg(ks))
-		memcpy(rxb, trx + 2, rxl);
-	else
+	else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
 		memcpy(rxb, trx, rxl);
+	else
+		memcpy(rxb, trx + 2, rxl);
 }
 
 /**
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 6ad44be08b3307ac147b6f25c8d04bc0706f65a5..c0d7d5eec7e72d4d7ea3f6b61e14523fed00a1d0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -228,8 +228,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
 			     RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
 			net_dbg_ratelimited("packet error\n");
-			priv->stats.rx_dropped++;
-			priv->stats.rx_errors++;
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
 			goto rx_next;
 		}
 
@@ -245,8 +245,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (unlikely(!skb)) {
 			net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
-			priv->stats.rx_dropped++;
-			priv->stats.rx_errors++;
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
 			goto rx_next;
 		}
 
@@ -256,10 +256,10 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		napi_gro_receive(&priv->napi, skb);
 		rx++;
 
-		priv->stats.rx_packets++;
-		priv->stats.rx_bytes += len;
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += len;
 		if (desc0 & RX_DESC0_MULTICAST)
-			priv->stats.multicast++;
+			ndev->stats.multicast++;
 
 rx_next:
 		wmb(); /* prevent setting ownership back too early */
@@ -296,8 +296,8 @@ static void moxart_tx_finished(struct net_device *ndev)
 		dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
 				 priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
-		priv->stats.tx_packets++;
-		priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+		ndev->stats.tx_packets++;
+		ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
 
 		dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
 		priv->tx_skb[tx_tail] = NULL;
@@ -349,7 +349,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
 		net_dbg_ratelimited("no TX space for packet\n");
-		priv->stats.tx_dropped++;
+		ndev->stats.tx_dropped++;
 		goto out_unlock;
 	}
 	rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
@@ -400,13 +400,6 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return ret;
 }
 
-static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
-{
-	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-
-	return &priv->stats;
-}
-
 static void moxart_mac_setmulticast(struct net_device *ndev)
 {
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -456,7 +449,6 @@ static const struct net_device_ops moxart_netdev_ops = {
 	.ndo_open		= moxart_mac_open,
 	.ndo_stop		= moxart_mac_stop,
 	.ndo_start_xmit		= moxart_mac_start_xmit,
-	.ndo_get_stats		= moxart_mac_get_stats,
 	.ndo_set_rx_mode	= moxart_mac_set_rx_mode,
 	.ndo_set_mac_address	= moxart_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index afc32ec998c043957f0b472080d22d01600edf2a..686b8957d5cf00edf786663abff93cf31e36d831 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -293,7 +293,6 @@ struct moxart_mac_priv_t {
 	void __iomem *base;
-	struct net_device_stats stats;
 	unsigned int reg_maccr;
 	unsigned int reg_imr;
 	struct napi_struct napi;
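The moxart conversion drops the driver-private statistics copy in favour of the net_device's own counters; once no private struct is left, .ndo_get_stats can go away entirely because the core's dev_get_stats() already falls back to reading dev->stats. A minimal illustration of the resulting update path (illustrative helper, not from the patch):

static void rx_ok_account(struct net_device *ndev, unsigned int len)
{
	/* Count directly into the core-owned stats block. */
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += len;
}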
*meta) @@ -1192,7 +1192,7 @@ static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { if (meta->insn.off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); return 0; @@ -1206,7 +1206,7 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (imm & ~0U) { tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); @@ -1245,7 +1245,7 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (!imm) { meta->skip = true; @@ -1276,7 +1276,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (!imm) { emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), @@ -1302,7 +1302,7 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), ALU_OP_XOR, reg_b(insn->src_reg * 2)); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index dedac720fb292c289edc8a7db50169852945bb5c..dde35dae35c502530efe32b15731d743981970b1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -48,7 +48,7 @@ #include "nfpcore/nfp.h" #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nffw.h" -#include "nfpcore/nfp_nsp_eth.h" +#include "nfpcore/nfp_nsp.h" #include "nfpcore/nfp6000_pcie.h" @@ -253,6 +253,7 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) { + struct nfp_nsp_identify *nspi; struct nfp_nsp *nsp; int err; @@ -269,6 +270,12 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + nspi = __nfp_nsp_identify(nsp); + if (nspi) { + dev_info(&pdev->dev, "BSP: %s\n", nspi->version); + kfree(nspi); + } + err = nfp_fw_load(pdev, pf, nsp); if (err < 0) { kfree(pf->eth_tbl); @@ -385,8 +392,7 @@ static void nfp_pci_remove(struct pci_dev *pdev) { struct nfp_pf *pf = pci_get_drvdata(pdev); - if (!list_empty(&pf->ports)) - nfp_net_pci_remove(pf); + nfp_net_pci_remove(pf); nfp_pcie_sriov_disable(pdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 39105d0435e9861f96a7ec8337a472d974ae69ab..b57de047b0029fa3cad3364f207600d8d5bda063 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -42,7 +42,9 @@ #include #include #include +#include #include +#include struct dentry; struct pci_dev; @@ -64,8 +66,11 @@ struct nfp_eth_table; * @fw_loaded: Is the firmware loaded? 
* @eth_tbl: NSP ETH table * @ddir: Per-device debugfs directory - * @num_ports: Number of adapter ports + * @num_ports: Number of adapter ports app firmware supports + * @num_netdevs: Number of netdevs spawned * @ports: Linked list of port structures (struct nfp_net) + * @port_lock: Protects @ports, @num_ports, @num_netdevs + * @port_refresh_work: Work entry for taking netdevs out */ struct nfp_pf { struct pci_dev *pdev; @@ -88,7 +93,11 @@ struct nfp_pf { struct dentry *ddir; unsigned int num_ports; + unsigned int num_netdevs; + struct list_head ports; + struct work_struct port_refresh_work; + struct mutex port_lock; }; extern struct pci_driver nfp_netvf_pci_driver; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index e614a376b595280148494e8a1029fb2328057ca4..fcf81b3be830de463e425338ad9ca92fde2af8e6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -50,14 +50,14 @@ #include "nfp_net_ctrl.h" -#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args) -#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args) -#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args) -#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args) -#define nn_warn_ratelimit(nn, fmt, args...) \ +#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args) +#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args) +#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args) +#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args) +#define nn_dp_warn(dp, fmt, args...) \ do { \ if (unlikely(net_ratelimit())) \ - netdev_warn((nn)->netdev, fmt, ## args); \ + netdev_warn((dp)->netdev, fmt, ## args); \ } while (0) /* Max time to wait for NFP to respond on updates (in seconds) */ @@ -112,6 +112,7 @@ /* Forward declarations */ struct nfp_cpp; +struct nfp_eth_table_port; struct nfp_net; struct nfp_net_r_vector; @@ -200,6 +201,7 @@ struct nfp_net_tx_buf { * @txds: Virtual address of TX ring in host memory * @dma: DMA address of the TX ring * @size: Size, in bytes, of the TX ring (needed to free) + * @is_xdp: Is this an XDP TX ring?
*/ struct nfp_net_tx_ring { struct nfp_net_r_vector *r_vec; @@ -220,6 +222,7 @@ struct nfp_net_tx_ring { dma_addr_t dma; unsigned int size; + bool is_xdp; } ____cacheline_aligned; /* RX and freelist descriptor format */ @@ -283,6 +286,12 @@ struct nfp_net_rx_desc { #define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0) +struct nfp_meta_parsed { + u32 hash_type; + u32 hash; + u32 mark; +}; + struct nfp_net_rx_hash { __be32 hash_type; __be32 hash; @@ -306,17 +315,13 @@ struct nfp_net_rx_buf { * @rd_p: FL/RX ring read pointer (free running) * @idx: Ring index from Linux's perspective * @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist - * @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue * @qcp_fl: Pointer to base of the QCP freelist queue - * @qcp_rx: Pointer to base of the QCP RX queue * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer * (used for free list batching) * @rxbufs: Array of allocated FL/RX buffers * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) - * @bufsz: Buffer allocation size for convenience of management routines - * (NOTE: this is in second cache line, do not use on fast path!) */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -325,20 +330,17 @@ struct nfp_net_rx_ring { u32 wr_p; u32 rd_p; - u16 idx; - u16 wr_ptr_add; + u32 idx; + u32 wr_ptr_add; int fl_qcidx; - int rx_qcidx; u8 __iomem *qcp_fl; - u8 __iomem *qcp_rx; struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_desc *rxds; dma_addr_t dma; unsigned int size; - unsigned int bufsz; } ____cacheline_aligned; /** @@ -433,19 +435,76 @@ struct nfp_stat_pair { }; /** - * struct nfp_net - NFP network device structure - * @pdev: Backpointer to PCI device - * @netdev: Backpointer to net_device structure - * @is_vf: Is the driver attached to a VF? + * struct nfp_net_dp - NFP network device datapath data structure + * @dev: Backpointer to struct device + * @netdev: Backpointer to net_device structure + * @is_vf: Is the driver attached to a VF? * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_xdp: Offloaded BPF program is XDP - * @ctrl: Local copy of the control register/word. - * @fl_bufsz: Currently configured size of the freelist buffers + * @chained_metadata_format: Firmware will use new metadata format + * @rx_dma_dir: Mapping direction for RX buffers + * @rx_dma_off: Offset at which DMA packets are mapped (for XDP headroom) * @rx_offset: Offset in the RX buffers where packet data starts + * @ctrl: Local copy of the control register/word.
+ * @fl_bufsz: Currently configured size of the freelist buffers * @xdp_prog: Installed XDP program - * @fw_ver: Firmware version + * @tx_rings: Array of pre-allocated TX ring structures + * @rx_rings: Array of pre-allocated RX ring structures + * @ctrl_bar: Pointer to mapped control BAR + * + * @txd_cnt: Size of the TX ring in number of descriptors + * @rxd_cnt: Size of the RX ring in number of descriptors + * @num_r_vecs: Number of used ring vectors + * @num_tx_rings: Currently configured number of TX rings + * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP) + * @num_rx_rings: Currently configured number of RX rings + * @mtu: Device MTU + */ +struct nfp_net_dp { + struct device *dev; + struct net_device *netdev; + + u8 is_vf:1; + u8 bpf_offload_skip_sw:1; + u8 bpf_offload_xdp:1; + u8 chained_metadata_format:1; + + u8 rx_dma_dir; + u8 rx_offset; + + u32 rx_dma_off; + + u32 ctrl; + u32 fl_bufsz; + + struct bpf_prog *xdp_prog; + + struct nfp_net_tx_ring *tx_rings; + struct nfp_net_rx_ring *rx_rings; + + u8 __iomem *ctrl_bar; + + /* Cold data follows */ + + unsigned int txd_cnt; + unsigned int rxd_cnt; + + unsigned int num_r_vecs; + + unsigned int num_tx_rings; + unsigned int num_stack_tx_rings; + unsigned int num_rx_rings; + + unsigned int mtu; +}; + +/** + * struct nfp_net - NFP network device structure + * @dp: Datapath structure + * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum supported MTU advertised by the Firmware + * @rss_hfunc: Selected RSS hash function * @rss_cfg: RSS configuration * @rss_key: RSS secret key * @rss_itbl: RSS indirection table @@ -454,17 +513,9 @@ struct nfp_stat_pair { * @rx_filter_change: Jiffies when statistics last changed * @rx_filter_stats_timer: Timer for polling filter offload statistics * @rx_filter_lock: Lock protecting timer state changes (teardown) + * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware - * @num_tx_rings: Currently configured number of TX rings - * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP) - * @num_rx_rings: Currently configured number of RX rings - * @txd_cnt: Size of the TX ring in number of descriptors - * @rxd_cnt: Size of the RX ring in number of descriptors - * @tx_rings: Array of pre-allocated TX ring structures - * @rx_rings: Array of pre-allocated RX ring structures - * @max_r_vecs: Number of allocated interrupt vectors for RX/TX - * @num_r_vecs: Number of used ring vectors * @r_vecs: Pre-allocated array of ring vectors * @irq_entries: Pre-allocated array of MSI-X entries * @lsc_handler: Handler for Link State Change interrupt @@ -480,7 +531,8 @@ struct nfp_stat_pair { * @reconfig_sync_present: Some thread is performing synchronous reconfig * @reconfig_timer: Timer for async reading of reconfig results * @link_up: Is the link up? - * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading + * @link_changed: Has link state changed since last port refresh?
+ * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter @@ -488,36 +540,24 @@ struct nfp_stat_pair { * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts * @qcp_cfg: Pointer to QCP queue used for configuration notification - * @ctrl_bar: Pointer to mapped control BAR * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs * @ethtool_dump_flag: Ethtool dump flag * @port_list: Entry on device port list + * @pdev: Backpointer to PCI device * @cpp: CPP device handle if available + * @eth_port: Translated ETH Table port entry */ struct nfp_net { - struct pci_dev *pdev; - struct net_device *netdev; - - unsigned is_vf:1; - unsigned bpf_offload_skip_sw:1; - unsigned bpf_offload_xdp:1; - - u32 ctrl; - u32 fl_bufsz; - - u32 rx_offset; - - struct bpf_prog *xdp_prog; - - struct nfp_net_tx_ring *tx_rings; - struct nfp_net_rx_ring *rx_rings; + struct nfp_net_dp dp; struct nfp_net_fw_version fw_ver; + u32 cap; u32 max_mtu; + u8 rss_hfunc; u32 rss_cfg; u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; @@ -530,18 +570,10 @@ struct nfp_net { unsigned int max_tx_rings; unsigned int max_rx_rings; - unsigned int num_tx_rings; - unsigned int num_stack_tx_rings; - unsigned int num_rx_rings; - int stride_tx; int stride_rx; - int txd_cnt; - int rxd_cnt; - unsigned int max_r_vecs; - unsigned int num_r_vecs; struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS]; struct msix_entry irq_entries[NFP_NET_MAX_IRQS]; @@ -557,6 +589,7 @@ struct nfp_net { u32 me_freq_mhz; bool link_up; + bool link_changed; spinlock_t link_status_lock; spinlock_t reconfig_lock; @@ -575,7 +608,6 @@ struct nfp_net { u8 __iomem *qcp_cfg; - u8 __iomem *ctrl_bar; u8 __iomem *tx_bar; u8 __iomem *rx_bar; @@ -584,14 +616,10 @@ struct nfp_net { struct list_head port_list; + struct pci_dev *pdev; struct nfp_cpp *cpp; -}; -struct nfp_net_ring_set { - unsigned int n_rings; - unsigned int mtu; - unsigned int dcnt; - void *rings; + struct nfp_eth_table_port *eth_port; }; /* Functions to read/write from/to a BAR @@ -599,42 +627,42 @@ struct nfp_net_ring_set { */ static inline u16 nn_readb(struct nfp_net *nn, int off) { - return readb(nn->ctrl_bar + off); + return readb(nn->dp.ctrl_bar + off); } static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) { - writeb(val, nn->ctrl_bar + off); + writeb(val, nn->dp.ctrl_bar + off); } static inline u16 nn_readw(struct nfp_net *nn, int off) { - return readw(nn->ctrl_bar + off); + return readw(nn->dp.ctrl_bar + off); } static inline void nn_writew(struct nfp_net *nn, int off, u16 val) { - writew(val, nn->ctrl_bar + off); + writew(val, nn->dp.ctrl_bar + off); } static inline u32 nn_readl(struct nfp_net *nn, int off) { - return readl(nn->ctrl_bar + off); + return readl(nn->dp.ctrl_bar + off); } static inline void nn_writel(struct nfp_net *nn, int off, u32 val) { - writel(val, nn->ctrl_bar + off); + writel(val, nn->dp.ctrl_bar + off); } static inline u64 nn_readq(struct nfp_net *nn, int off) { - return readq(nn->ctrl_bar + off); + return readq(nn->dp.ctrl_bar + off); } static inline void nn_writeq(struct nfp_net *nn, int off, u64 val) { - writeq(val, nn->ctrl_bar + off); + writeq(val, nn->dp.ctrl_bar + off); } /* Flush posted 
PCI writes by reading something without side effects */ @@ -776,6 +804,7 @@ void nfp_net_netdev_clean(struct net_device *netdev); void nfp_net_set_ethtool_ops(struct net_device *netdev); void nfp_net_info(struct nfp_net *nn); int nfp_net_reconfig(struct nfp_net *nn, u32 update); +unsigned int nfp_net_rss_key_sz(struct nfp_net *nn); void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); @@ -787,9 +816,14 @@ void nfp_net_irqs_disable(struct pci_dev *pdev); void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n); -int -nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); + +struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn); +int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new, + struct netlink_ext_ack *extack); + +bool nfp_net_link_changed_read_clear(struct nfp_net *nn); +int nfp_net_refresh_eth_port(struct nfp_net *nn); +void nfp_net_refresh_port_table(struct nfp_net *nn); #ifdef CONFIG_NFP_DEBUG void nfp_net_debugfs_create(void); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a41377e26c07d038aa6c8716ef52d7152e92bcf0..db20376260f526348a1447cd300ada57880ab303 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -41,6 +41,7 @@ * Chris Telfer */ +#include #include #include #include @@ -66,6 +67,7 @@ #include #include +#include "nfpcore/nfp_nsp.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" @@ -83,20 +85,33 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, put_unaligned_le32(reg, fw_ver); } -static dma_addr_t -nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz, - int direction) +static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag) { - return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM, - bufsz - NFP_NET_RX_BUF_NON_DATA, direction); + return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); } static void -nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr, - unsigned int bufsz, int direction) +nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr) { - dma_unmap_single(&nn->pdev->dev, dma_addr, - bufsz - NFP_NET_RX_BUF_NON_DATA, direction); + dma_sync_single_for_device(dp->dev, dma_addr, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir); +} + +static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr) +{ + dma_unmap_single_attrs(dp->dev, dma_addr, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr, + unsigned int len) +{ + dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM, + len, dp->rx_dma_dir); } /* Firmware reconfig @@ -327,19 +342,22 @@ void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n) { + struct nfp_net_dp *dp = &nn->dp; + nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; - nn->num_r_vecs = nn->max_r_vecs; + dp->num_r_vecs = nn->max_r_vecs; memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); - if (nn->num_rx_rings > nn->num_r_vecs || - nn->num_tx_rings > nn->num_r_vecs) - nn_warn(nn, "More rings (%d,%d) than 
vectors (%d).\n", - nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); + if (dp->num_rx_rings > dp->num_r_vecs || + dp->num_tx_rings > dp->num_r_vecs) + dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n", + dp->num_rx_rings, dp->num_tx_rings, + dp->num_r_vecs); - nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); - nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); - nn->num_stack_tx_rings = nn->num_tx_rings; + dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); + dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); + dp->num_stack_tx_rings = dp->num_tx_rings; } /** @@ -373,6 +391,19 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) return IRQ_HANDLED; } +bool nfp_net_link_changed_read_clear(struct nfp_net *nn) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&nn->link_status_lock, flags); + ret = nn->link_changed; + nn->link_changed = false; + spin_unlock_irqrestore(&nn->link_status_lock, flags); + + return ret; +} + /** * nfp_net_read_link_status() - Reread link status from control BAR * @nn: NFP Network structure @@ -392,13 +423,14 @@ static void nfp_net_read_link_status(struct nfp_net *nn) goto out; nn->link_up = link_up; + nn->link_changed = true; if (nn->link_up) { - netif_carrier_on(nn->netdev); - netdev_info(nn->netdev, "NIC Link is Up\n"); + netif_carrier_on(nn->dp.netdev); + netdev_info(nn->dp.netdev, "NIC Link is Up\n"); } else { - netif_carrier_off(nn->netdev); - netdev_info(nn->netdev, "NIC Link is Down\n"); + netif_carrier_off(nn->dp.netdev); + netdev_info(nn->dp.netdev, "NIC Link is Down\n"); } out: spin_unlock_irqrestore(&nn->link_status_lock, flags); @@ -446,15 +478,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data) * @tx_ring: TX ring structure * @r_vec: IRQ vector servicing this ring * @idx: Ring index + * @is_xdp: Is this an XDP TX ring? */ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, - struct nfp_net_r_vector *r_vec, unsigned int idx) + struct nfp_net_r_vector *r_vec, unsigned int idx, + bool is_xdp) { struct nfp_net *nn = r_vec->nfp_net; tx_ring->idx = idx; tx_ring->r_vec = r_vec; + tx_ring->is_xdp = is_xdp; tx_ring->qcidx = tx_ring->idx * nn->stride_tx; tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); @@ -476,10 +511,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, rx_ring->r_vec = r_vec; rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; - rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); - rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); - rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx); } /** @@ -530,7 +562,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, entry = &nn->irq_entries[vector_idx]; - snprintf(name, name_sz, format, netdev_name(nn->netdev)); + snprintf(name, name_sz, format, netdev_name(nn->dp.netdev)); err = request_irq(entry->vector, handler, 0, name, nn); if (err) { nn_err(nn, "Failed to request IRQ %d (err=%d).\n", @@ -617,7 +649,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, /** * nfp_net_tx_tso() - Set up Tx descriptor for LSO - * @nn: NFP Net device * @r_vec: per-ring structure * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to HW TX descriptor @@ -626,7 +657,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. * Return error on packet header greater than maximum supported LSO header size. 
*/ -static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_desc *txd, struct sk_buff *skb) { @@ -657,7 +688,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, /** * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor - * @nn: NFP Net device + * @dp: NFP Net data path struct * @r_vec: per-ring structure * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to TX descriptor @@ -666,7 +697,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, * This function sets the TX checksum flags in the TX descriptor based * on the configuration and the protocol of the packet to be transmitted. */ -static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_tx_csum(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_desc *txd, struct sk_buff *skb) { @@ -674,7 +706,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, struct iphdr *iph; u8 l4_hdr; - if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) + if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) return; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -693,8 +725,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, } else if (ipv6h->version == 6) { l4_hdr = ipv6h->nexthdr; } else { - nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n", - iph->version); + nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); return; } @@ -706,8 +737,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, txd->flags |= PCIE_DESC_TX_UDP_CSUM; break; default: - nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n", - l4_hdr); + nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr); return; } @@ -737,28 +767,31 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); const struct skb_frag_struct *frag; - struct nfp_net_r_vector *r_vec; struct nfp_net_tx_desc *txd, txdg; - struct nfp_net_tx_buf *txbuf; struct nfp_net_tx_ring *tx_ring; + struct nfp_net_r_vector *r_vec; + struct nfp_net_tx_buf *txbuf; struct netdev_queue *nd_q; + struct nfp_net_dp *dp; dma_addr_t dma_addr; unsigned int fsize; int f, nr_frags; int wr_idx; u16 qidx; + dp = &nn->dp; qidx = skb_get_queue_mapping(skb); - tx_ring = &nn->tx_rings[qidx]; + tx_ring = &dp->tx_rings[qidx]; r_vec = tx_ring->r_vec; - nd_q = netdev_get_tx_queue(nn->netdev, qidx); + nd_q = netdev_get_tx_queue(dp->netdev, qidx); nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { - nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", - qidx, tx_ring->wr_p, tx_ring->rd_p); + nn_dp_warn(dp, "TX ring %d busy. 
wrp=%u rdp=%u\n", + qidx, tx_ring->wr_p, tx_ring->rd_p); netif_tx_stop_queue(nd_q); + nfp_net_tx_xmit_more_flush(tx_ring); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_busy++; u64_stats_update_end(&r_vec->tx_sync); @@ -766,9 +799,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) } /* Start with the head skbuf */ - dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb), + dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + if (dma_mapping_error(dp->dev, dma_addr)) goto err_free; wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); @@ -792,11 +825,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) txd->mss = 0; txd->l4_offset = 0; - nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); + nfp_net_tx_tso(r_vec, txbuf, txd, skb); - nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); + nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); - if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { + if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { txd->flags |= PCIE_DESC_TX_VLAN; txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); } @@ -810,9 +843,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[f]; fsize = skb_frag_size(frag); - dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0, + dma_addr = skb_frag_dma_map(dp->dev, frag, 0, fsize, DMA_TO_DEVICE); - if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + if (dma_mapping_error(dp->dev, dma_addr)) goto err_unmap; wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1); @@ -851,8 +884,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) --f; while (f >= 0) { frag = &skb_shinfo(skb)->frags[f]; - dma_unmap_page(&nn->pdev->dev, - tx_ring->txbufs[wr_idx].dma_addr, + dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; @@ -861,13 +893,14 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) if (wr_idx < 0) wr_idx += tx_ring->cnt; } - dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, + dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; tx_ring->txbufs[wr_idx].fidx = -2; err_free: - nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); + nn_dp_warn(dp, "Failed to map DMA TX buffer\n"); + nfp_net_tx_xmit_more_flush(tx_ring); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_errors++; u64_stats_update_end(&r_vec->tx_sync); @@ -884,7 +917,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; const struct skb_frag_struct *frag; struct netdev_queue *nd_q; u32 done_pkts = 0, done_bytes = 0; @@ -894,6 +927,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) int fidx; int idx; + if (tx_ring->wr_p == tx_ring->rd_p) + return; + /* Work out how many descriptors have been transmitted */ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); @@ -918,8 +954,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) if (fidx == -1) { /* unmap head */ - dma_unmap_single(&nn->pdev->dev, - tx_ring->txbufs[idx].dma_addr, + dma_unmap_single(dp->dev, 
tx_ring->txbufs[idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); done_pkts += tx_ring->txbufs[idx].pkt_cnt; @@ -927,8 +962,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) } else { /* unmap fragment */ frag = &skb_shinfo(skb)->frags[fidx]; - dma_unmap_page(&nn->pdev->dev, - tx_ring->txbufs[idx].dma_addr, + dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); } @@ -948,7 +982,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) r_vec->tx_pkts += done_pkts; u64_stats_update_end(&r_vec->tx_sync); - nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); netdev_tx_completed_queue(nd_q, done_pkts, done_bytes); if (nfp_net_tx_ring_should_wake(tx_ring)) { /* Make sure TX thread will see updated tx_ring->rd_p */ @@ -966,11 +1000,13 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; u32 done_pkts = 0, done_bytes = 0; int idx, todo; u32 qcp_rd_p; + if (tx_ring->wr_p == tx_ring->rd_p) + return; + /* Work out how many descriptors have been transmitted */ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); @@ -982,23 +1018,12 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) else todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p; + done_pkts = todo; while (todo--) { idx = tx_ring->rd_p & (tx_ring->cnt - 1); tx_ring->rd_p++; - if (!tx_ring->txbufs[idx].frag) - continue; - - nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr, - nn->fl_bufsz, DMA_BIDIRECTIONAL); - __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); - - done_pkts++; done_bytes += tx_ring->txbufs[idx].real_len; - - tx_ring->txbufs[idx].dma_addr = 0; - tx_ring->txbufs[idx].frag = NULL; - tx_ring->txbufs[idx].fidx = -2; } tx_ring->qcp_rd_p = qcp_rd_p; @@ -1015,52 +1040,43 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers - * @nn: NFP Net device + * @dp: NFP Net data path struct * @tx_ring: TX ring structure * * Assumes that the device is stopped */ static void -nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) +nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { - struct nfp_net_r_vector *r_vec = tx_ring->r_vec; const struct skb_frag_struct *frag; - struct pci_dev *pdev = nn->pdev; struct netdev_queue *nd_q; - while (tx_ring->rd_p != tx_ring->wr_p) { + while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { struct nfp_net_tx_buf *tx_buf; - int idx; + struct sk_buff *skb; + int idx, nr_frags; idx = tx_ring->rd_p & (tx_ring->cnt - 1); tx_buf = &tx_ring->txbufs[idx]; - if (tx_ring == r_vec->xdp_ring) { - nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr, - nn->fl_bufsz, DMA_BIDIRECTIONAL); - __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); - } else { - struct sk_buff *skb = tx_ring->txbufs[idx].skb; - int nr_frags = skb_shinfo(skb)->nr_frags; - - if (tx_buf->fidx == -1) { - /* unmap head */ - dma_unmap_single(&pdev->dev, tx_buf->dma_addr, - skb_headlen(skb), - DMA_TO_DEVICE); - } else { - /* unmap fragment */ - frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; - dma_unmap_page(&pdev->dev, tx_buf->dma_addr, - skb_frag_size(frag), - DMA_TO_DEVICE); - } + skb = tx_ring->txbufs[idx].skb; + nr_frags = skb_shinfo(skb)->nr_frags; - /* check for last gather fragment */ - if 
(tx_buf->fidx == nr_frags - 1) - dev_kfree_skb_any(skb); + if (tx_buf->fidx == -1) { + /* unmap head */ + dma_unmap_single(dp->dev, tx_buf->dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; + dma_unmap_page(dp->dev, tx_buf->dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); } + /* check for last gather fragment */ + if (tx_buf->fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + tx_buf->dma_addr = 0; tx_buf->skb = NULL; tx_buf->fidx = -2; @@ -1075,10 +1091,10 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) tx_ring->qcp_rd_p = 0; tx_ring->wr_ptr_add = 0; - if (tx_ring == r_vec->xdp_ring) + if (tx_ring->is_xdp) return; - nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); netdev_tx_reset_queue(nd_q); } @@ -1087,7 +1103,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->netdev->real_num_tx_queues; i++) { + for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) continue; nn_warn(nn, "TX timeout on ring: %d\n", i); @@ -1098,16 +1114,17 @@ static void nfp_net_tx_timeout(struct net_device *netdev) /* Receive processing */ static unsigned int -nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu) +nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) { unsigned int fl_bufsz; fl_bufsz = NFP_NET_RX_BUF_HEADROOM; - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) + fl_bufsz += dp->rx_dma_off; + if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) fl_bufsz += NFP_NET_MAX_PREPEND; else - fl_bufsz += nn->rx_offset; - fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu; + fl_bufsz += dp->rx_offset; + fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; fl_bufsz = SKB_DATA_ALIGN(fl_bufsz); fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -1126,62 +1143,53 @@ nfp_net_free_frag(void *frag, bool xdp) /** * nfp_net_rx_alloc_one() - Allocate and map page frag for RX - * @rx_ring: RX ring structure of the skb + * @dp: NFP Net data path struct * @dma_addr: Pointer to storage for DMA address (output param) - * @fl_bufsz: size of freelist buffers - * @xdp: Whether XDP is enabled * * This function will allocate a new page frag and map it for DMA. * * Return: allocated page frag or NULL on failure. */ -static void * -nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, - unsigned int fl_bufsz, bool xdp) +static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { - struct nfp_net *nn = rx_ring->r_vec->nfp_net; - int direction; void *frag; - if (!xdp) - frag = netdev_alloc_frag(fl_bufsz); + if (!dp->xdp_prog) + frag = netdev_alloc_frag(dp->fl_bufsz); else frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); if (!frag) { - nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); + nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; } - direction = xdp ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - - *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction); - if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { - nfp_net_free_frag(frag, xdp); - nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + *dma_addr = nfp_net_dma_map_rx(dp, frag); + if (dma_mapping_error(dp->dev, *dma_addr)) { + nfp_net_free_frag(frag, dp->xdp_prog); + nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); return NULL; } return frag; } -static void * -nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr) +static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { void *frag; - if (!nn->xdp_prog) - frag = napi_alloc_frag(nn->fl_bufsz); + if (!dp->xdp_prog) + frag = napi_alloc_frag(dp->fl_bufsz); else frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); if (!frag) { - nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); + nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; } - *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction); - if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { - nfp_net_free_frag(frag, nn->xdp_prog); - nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + *dma_addr = nfp_net_dma_map_rx(dp, frag); + if (dma_mapping_error(dp->dev, *dma_addr)) { + nfp_net_free_frag(frag, dp->xdp_prog); + nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); return NULL; } @@ -1190,17 +1198,21 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr) /** * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings + * @dp: NFP Net data path struct * @rx_ring: RX ring structure * @frag: page fragment buffer * @dma_addr: DMA address of skb mapping */ -static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, +static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring, void *frag, dma_addr_t dma_addr) { unsigned int wr_idx; wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1); + nfp_net_dma_sync_dev_rx(dp, dma_addr); + /* Stash SKB and DMA address away */ rx_ring->rxbufs[wr_idx].frag = frag; rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; @@ -1208,7 +1220,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, /* Fill freelist descriptor */ rx_ring->rxds[wr_idx].fld.reserved = 0; rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; - nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr); + nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, + dma_addr + dp->rx_dma_off); rx_ring->wr_p++; rx_ring->wr_ptr_add++; @@ -1249,19 +1262,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring - * @nn: NFP Net device + * @dp: NFP Net data path struct * @rx_ring: RX ring to remove buffers from - * @xdp: Whether XDP is enabled * * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) * entries. After device is disabled nfp_net_rx_ring_reset() must be called * to restore required ring geometry. */ static void -nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, - bool xdp) +nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { - int direction = xdp ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; unsigned int i; for (i = 0; i < rx_ring->cnt - 1; i++) { @@ -1272,9 +1283,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (!rx_ring->rxbufs[i].frag) continue; - nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr, - rx_ring->bufsz, direction); - nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp); + nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); + nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); rx_ring->rxbufs[i].dma_addr = 0; rx_ring->rxbufs[i].frag = NULL; } @@ -1282,13 +1292,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, /** * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) - * @nn: NFP Net device + * @dp: NFP Net data path struct * @rx_ring: RX ring to allocate buffers for - * @xdp: Whether XDP is enabled */ static int -nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, - bool xdp) +nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { struct nfp_net_rx_buf *rxbufs; unsigned int i; @@ -1296,11 +1305,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, rxbufs = rx_ring->rxbufs; for (i = 0; i < rx_ring->cnt - 1; i++) { - rxbufs[i].frag = - nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, - rx_ring->bufsz, xdp); + rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); if (!rxbufs[i].frag) { - nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp); + nfp_net_rx_ring_bufs_free(dp, rx_ring); return -ENOMEM; } } @@ -1310,14 +1317,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, /** * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW + * @dp: NFP Net data path struct * @rx_ring: RX ring to fill */ -static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) +static void +nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { unsigned int i; for (i = 0; i < rx_ring->cnt - 1; i++) - nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag, + nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, rx_ring->rxbufs[i].dma_addr); } @@ -1337,17 +1347,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags) /** * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags - * @nn: NFP Net device + * @dp: NFP Net data path struct * @r_vec: per-ring structure * @rxd: Pointer to RX descriptor * @skb: Pointer to SKB */ -static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_rx_csum(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, struct nfp_net_rx_desc *rxd, struct sk_buff *skb) { skb_checksum_none_assert(skb); - if (!(nn->netdev->features & NETIF_F_RXCSUM)) + if (!(dp->netdev->features & NETIF_F_RXCSUM)) return; if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { @@ -1378,8 +1389,9 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, } } -static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, - unsigned int type, __be32 *hash) +static void +nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta, + unsigned int type, __be32 *hash) { if (!(netdev->features & NETIF_F_RXHASH)) return; @@ -1388,34 +1400,33 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, case NFP_NET_RSS_IPV4: case NFP_NET_RSS_IPV6: case NFP_NET_RSS_IPV6_EX: - skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3); +
meta->hash_type = PKT_HASH_TYPE_L3; break; default: - skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4); + meta->hash_type = PKT_HASH_TYPE_L4; break; } + + meta->hash = get_unaligned_be32(hash); } static void -nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb, - struct nfp_net_rx_desc *rxd) +nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta, + void *data, struct nfp_net_rx_desc *rxd) { - struct nfp_net_rx_hash *rx_hash; + struct nfp_net_rx_hash *rx_hash = data; if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) return; - rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash)); - - nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type), + nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type), &rx_hash->hash); } static void * -nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, - int meta_len) +nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, + void *data, int meta_len) { - u8 *data = skb->data - meta_len; u32 meta_info; meta_info = get_unaligned_be32(data); @@ -1425,13 +1436,13 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, switch (meta_info & NFP_NET_META_FIELD_MASK) { case NFP_NET_META_HASH: meta_info >>= NFP_NET_META_FIELD_SIZE; - nfp_net_set_hash(netdev, skb, + nfp_net_set_hash(netdev, meta, meta_info & NFP_NET_META_FIELD_MASK, (__be32 *)data); data += 4; break; case NFP_NET_META_MARK: - skb->mark = get_unaligned_be32(data); + meta->mark = get_unaligned_be32(data); data += 4; break; default: @@ -1445,8 +1456,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, } static void -nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, - struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb) +nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, + struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf, + struct sk_buff *skb) { u64_stats_update_begin(&r_vec->rx_sync); r_vec->rx_drops++; @@ -1458,53 +1470,47 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, if (skb && rxbuf && skb->head == rxbuf->frag) page_ref_inc(virt_to_head_page(rxbuf->frag)); if (rxbuf) - nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); if (skb) dev_kfree_skb_any(skb); } static bool -nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, +nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, - struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, + struct nfp_net_rx_buf *rxbuf, unsigned int dma_off, unsigned int pkt_len) { struct nfp_net_tx_buf *txbuf; struct nfp_net_tx_desc *txd; - dma_addr_t new_dma_addr; - void *new_frag; int wr_idx; if (unlikely(nfp_net_tx_full(tx_ring, 1))) { - nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return false; - } - - new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); - if (unlikely(!new_frag)) { - nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL); return false; } - nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); /* Stash the soft descriptor of the head then initialize it */ txbuf = &tx_ring->txbufs[wr_idx]; + + nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr); + txbuf->frag = rxbuf->frag; txbuf->dma_addr = rxbuf->dma_addr; 
txbuf->fidx = -1; txbuf->pkt_cnt = 1; txbuf->real_len = pkt_len; - dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, + dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off, pkt_len, DMA_BIDIRECTIONAL); /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; txd->offset_eop = PCIE_DESC_TX_EOP; txd->dma_len = cpu_to_le16(pkt_len); - nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off); + nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off); txd->data_len = cpu_to_le16(pkt_len); txd->flags = 0; @@ -1516,14 +1522,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, return true; } -static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) +static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start, + unsigned int *off, unsigned int *len) { struct xdp_buff xdp; + void *orig_data; + int ret; - xdp.data = data; - xdp.data_end = data + len; + xdp.data_hard_start = hard_start; + xdp.data = data + *off; + xdp.data_end = data + *off + *len; - return bpf_prog_run_xdp(prog, &xdp); + orig_data = xdp.data; + ret = bpf_prog_run_xdp(prog, &xdp); + + *len -= xdp.data - orig_data; + *off += xdp.data - orig_data; + + return ret; } /** @@ -1540,25 +1556,24 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; struct nfp_net_tx_ring *tx_ring; struct bpf_prog *xdp_prog; unsigned int true_bufsz; struct sk_buff *skb; int pkts_polled = 0; - int rx_dma_map_dir; int idx; rcu_read_lock(); - xdp_prog = READ_ONCE(nn->xdp_prog); - rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz; + xdp_prog = READ_ONCE(dp->xdp_prog); + true_bufsz = xdp_prog ? 
PAGE_SIZE : dp->fl_bufsz; tx_ring = r_vec->xdp_ring; while (pkts_polled < budget) { - unsigned int meta_len, data_len, data_off, pkt_len, pkt_off; + unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; struct nfp_net_rx_buf *rxbuf; struct nfp_net_rx_desc *rxd; + struct nfp_meta_parsed meta; dma_addr_t new_dma_addr; void *new_frag; @@ -1573,6 +1588,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) */ dma_rmb(); + memset(&meta, 0, sizeof(meta)); + rx_ring->rd_p++; pkts_polled++; @@ -1593,11 +1610,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) data_len = le16_to_cpu(rxd->rxd.data_len); pkt_len = data_len - meta_len; - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) - pkt_off = meta_len; + pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; + if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) + pkt_off += meta_len; else - pkt_off = nn->rx_offset; - data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off; + pkt_off += dp->rx_offset; + meta_off = pkt_off - meta_len; /* Stats update */ u64_stats_update_begin(&r_vec->rx_sync); @@ -1605,30 +1623,62 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) r_vec->rx_bytes += pkt_len; u64_stats_update_end(&r_vec->rx_sync); + if (unlikely(meta_len > NFP_NET_MAX_PREPEND || + (dp->rx_offset && meta_len > dp->rx_offset))) { + nn_dp_warn(dp, "oversized RX packet metadata %u\n", + meta_len); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); + continue; + } + + nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, + data_len); + + if (!dp->chained_metadata_format) { + nfp_net_set_hash_desc(dp->netdev, &meta, + rxbuf->frag + meta_off, rxd); + } else if (meta_len) { + void *end; + + end = nfp_net_parse_meta(dp->netdev, &meta, + rxbuf->frag + meta_off, + meta_len); + if (unlikely(end != rxbuf->frag + pkt_off)) { + nn_dp_warn(dp, "invalid RX packet metadata\n"); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, + NULL); + continue; + } + } + if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && - nn->bpf_offload_xdp)) { + dp->bpf_offload_xdp)) { + unsigned int dma_off; + void *hard_start; int act; - dma_sync_single_for_cpu(&nn->pdev->dev, - rxbuf->dma_addr + pkt_off, - pkt_len, DMA_BIDIRECTIONAL); - act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, - pkt_len); + hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; + + act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start, + &pkt_off, &pkt_len); switch (act) { case XDP_PASS: break; case XDP_TX: - if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM; + if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len))) - trace_xdp_exception(nn->netdev, xdp_prog, act); + dma_off, + pkt_len))) + trace_xdp_exception(dp->netdev, + xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(nn->netdev, xdp_prog, act); + trace_xdp_exception(dp->netdev, xdp_prog, act); case XDP_DROP: - nfp_net_rx_give_one(rx_ring, rxbuf->frag, + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); continue; } @@ -1636,41 +1686,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) skb = build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { - nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; } - new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir, - &new_dma_addr); + new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); if (unlikely(!new_frag)) { - nfp_net_rx_drop(r_vec, 
rx_ring, rxbuf, skb); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); continue; } - nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz, - rx_dma_map_dir); + nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); - nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); + nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); - skb_reserve(skb, data_off); + skb_reserve(skb, pkt_off); skb_put(skb, pkt_len); - if (nn->fw_ver.major <= 3) { - nfp_net_set_hash_desc(nn->netdev, skb, rxd); - } else if (meta_len) { - void *end; - - end = nfp_net_parse_meta(nn->netdev, skb, meta_len); - if (unlikely(end != skb->data)) { - nn_warn_ratelimit(nn, "invalid RX packet metadata\n"); - nfp_net_rx_drop(r_vec, rx_ring, NULL, skb); - continue; - } - } + skb->mark = meta.mark; + skb_set_hash(skb, meta.hash, meta.hash_type); skb_record_rx_queue(skb, rx_ring->idx); - skb->protocol = eth_type_trans(skb, nn->netdev); + skb->protocol = eth_type_trans(skb, dp->netdev); - nfp_net_rx_csum(nn, r_vec, rxd, skb); + nfp_net_rx_csum(dp, r_vec, rxd, skb); if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -1707,10 +1745,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) nfp_net_xdp_complete(r_vec->xdp_ring); } - if (pkts_polled < budget) { - napi_complete_done(napi, pkts_polled); - nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); - } + if (pkts_polled < budget) + if (napi_complete_done(napi, pkts_polled)) + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); return pkts_polled; } @@ -1725,13 +1762,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; kfree(tx_ring->txbufs); if (tx_ring->txds) - dma_free_coherent(&pdev->dev, tx_ring->size, + dma_free_coherent(dp->dev, tx_ring->size, tx_ring->txds, tx_ring->dma); tx_ring->cnt = 0; @@ -1743,24 +1779,21 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_alloc() - Allocate resources for a TX ring + * @dp: NFP Net data path struct * @tx_ring: TX Ring structure to allocate - * @cnt: Ring buffer count - * @is_xdp: True if ring will be used for XDP * * Return: 0 on success, negative errno otherwise. */ static int -nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) +nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; int sz; - tx_ring->cnt = cnt; + tx_ring->cnt = dp->txd_cnt; tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; - tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, + tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->txds) goto err_alloc; @@ -1770,15 +1803,10 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) if (!tx_ring->txbufs) goto err_alloc; - if (!is_xdp) - netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, + if (!tx_ring->is_xdp) + netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, tx_ring->idx); - nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n", - tx_ring->idx, tx_ring->qcidx, - tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds, - is_xdp ?
"XDP" : ""); - return 0; err_alloc: @@ -1786,62 +1814,92 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) return -ENOMEM; } -static struct nfp_net_tx_ring * -nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s, - unsigned int num_stack_tx_rings) +static void +nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp, + struct nfp_net_tx_ring *tx_ring) { - struct nfp_net_tx_ring *rings; - unsigned int r; + unsigned int i; - rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); - if (!rings) - return NULL; + if (!tx_ring->is_xdp) + return; - for (r = 0; r < s->n_rings; r++) { - int bias = 0; + for (i = 0; i < tx_ring->cnt; i++) { + if (!tx_ring->txbufs[i].frag) + return; - if (r >= num_stack_tx_rings) - bias = num_stack_tx_rings; + nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr); + __free_page(virt_to_page(tx_ring->txbufs[i].frag)); + } +} + +static int +nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp, + struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_tx_buf *txbufs = tx_ring->txbufs; + unsigned int i; - nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r); + if (!tx_ring->is_xdp) + return 0; - if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias)) - goto err_free_prev; + for (i = 0; i < tx_ring->cnt; i++) { + txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr); + if (!txbufs[i].frag) { + nfp_net_tx_ring_bufs_free(dp, tx_ring); + return -ENOMEM; + } } - return s->rings = rings; - -err_free_prev: - while (r--) - nfp_net_tx_ring_free(&rings[r]); - kfree(rings); - return NULL; + return 0; } -static void -nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s) +static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) { - struct nfp_net_ring_set new = *s; + unsigned int r; + + dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), + GFP_KERNEL); + if (!dp->tx_rings) + return -ENOMEM; - s->dcnt = nn->txd_cnt; - s->rings = nn->tx_rings; - s->n_rings = nn->num_tx_rings; + for (r = 0; r < dp->num_tx_rings; r++) { + int bias = 0; + + if (r >= dp->num_stack_tx_rings) + bias = dp->num_stack_tx_rings; - nn->txd_cnt = new.dcnt; - nn->tx_rings = new.rings; - nn->num_tx_rings = new.n_rings; + nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], + r, bias); + + if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) + goto err_free_prev; + + if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) + goto err_free_ring; + } + + return 0; + +err_free_prev: + while (r--) { + nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); +err_free_ring: + nfp_net_tx_ring_free(&dp->tx_rings[r]); + } + kfree(dp->tx_rings); + return -ENOMEM; } -static void -nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s) +static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) { - struct nfp_net_tx_ring *rings = s->rings; unsigned int r; - for (r = 0; r < s->n_rings; r++) - nfp_net_tx_ring_free(&rings[r]); + for (r = 0; r < dp->num_tx_rings; r++) { + nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); + nfp_net_tx_ring_free(&dp->tx_rings[r]); + } - kfree(rings); + kfree(dp->tx_rings); } /** @@ -1851,13 +1909,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s) static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; kfree(rx_ring->rxbufs); if (rx_ring->rxds) - dma_free_coherent(&pdev->dev, 
rx_ring->size, + dma_free_coherent(dp->dev, rx_ring->size, rx_ring->rxds, rx_ring->dma); rx_ring->cnt = 0; @@ -1869,26 +1926,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring + * @dp: NFP Net data path struct * @rx_ring: RX ring to allocate - * @fl_bufsz: Size of buffers to allocate - * @cnt: Ring buffer count * * Return: 0 on success, negative errno otherwise. */ static int -nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, - u32 cnt) +nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; int sz; - rx_ring->cnt = cnt; - rx_ring->bufsz = fl_bufsz; - + rx_ring->cnt = dp->rxd_cnt; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; - rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, + rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->rxds) goto err_alloc; @@ -1898,10 +1948,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, if (!rx_ring->rxbufs) goto err_alloc; - nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", - rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, - rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); - return 0; err_alloc: @@ -1909,82 +1955,59 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, return -ENOMEM; } -static struct nfp_net_rx_ring * -nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s, - bool xdp) +static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) { - unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu); - struct nfp_net_rx_ring *rings; unsigned int r; - rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); - if (!rings) - return NULL; + dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), + GFP_KERNEL); + if (!dp->rx_rings) + return -ENOMEM; - for (r = 0; r < s->n_rings; r++) { - nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r); + for (r = 0; r < dp->num_rx_rings; r++) { + nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); - if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt)) + if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) goto err_free_prev; - if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp)) + if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) goto err_free_ring; } - return s->rings = rings; + return 0; err_free_prev: while (r--) { - nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); + nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); err_free_ring: - nfp_net_rx_ring_free(&rings[r]); + nfp_net_rx_ring_free(&dp->rx_rings[r]); } - kfree(rings); - return NULL; -} - -static void -nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s) -{ - struct nfp_net_ring_set new = *s; - - s->mtu = nn->netdev->mtu; - s->dcnt = nn->rxd_cnt; - s->rings = nn->rx_rings; - s->n_rings = nn->num_rx_rings; - - nn->netdev->mtu = new.mtu; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu); - nn->rxd_cnt = new.dcnt; - nn->rx_rings = new.rings; - nn->num_rx_rings = new.n_rings; + kfree(dp->rx_rings); + return -ENOMEM; } -static void -nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s, - bool xdp) +static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) { - struct nfp_net_rx_ring *rings = s->rings; unsigned int r; - for (r = 0; r < 
s->n_rings; r++) { - nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); - nfp_net_rx_ring_free(&rings[r]); + for (r = 0; r < dp->num_rx_rings; r++) { + nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); + nfp_net_rx_ring_free(&dp->rx_rings[r]); } - kfree(rings); + kfree(dp->rx_rings); } static void -nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, - int idx) +nfp_net_vector_assign_rings(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, int idx) { - r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL; + r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; r_vec->tx_ring = - idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL; + idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; - r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ? - &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL; + r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? + &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; } static int @@ -1994,11 +2017,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, int err; /* Setup NAPI */ - netif_napi_add(nn->netdev, &r_vec->napi, + netif_napi_add(nn->dp.netdev, &r_vec->napi, nfp_net_poll, NAPI_POLL_WEIGHT); snprintf(r_vec->name, sizeof(r_vec->name), - "%s-rxtx-%d", nn->netdev->name, idx); + "%s-rxtx-%d", nn->dp.netdev->name, idx); err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, r_vec); if (err) { @@ -2045,7 +2068,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn) { int i; - for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4) + for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4) nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, get_unaligned_le32(nn->rss_key + i)); } @@ -2069,13 +2092,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) /* copy RX interrupt coalesce parameters */ value = (nn->rx_coalesce_max_frames << 16) | (factor * nn->rx_coalesce_usecs); - for (i = 0; i < nn->num_rx_rings; i++) + for (i = 0; i < nn->dp.num_rx_rings; i++) nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); /* copy TX interrupt coalesce parameters */ value = (nn->tx_coalesce_max_frames << 16) | (factor * nn->tx_coalesce_usecs); - for (i = 0; i < nn->num_tx_rings; i++) + for (i = 0; i < nn->dp.num_tx_rings; i++) nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); } @@ -2090,9 +2113,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) static void nfp_net_write_mac_addr(struct nfp_net *nn) { nn_writel(nn, NFP_NET_CFG_MACADDR + 0, - get_unaligned_be32(nn->netdev->dev_addr)); + get_unaligned_be32(nn->dp.netdev->dev_addr)); nn_writew(nn, NFP_NET_CFG_MACADDR + 6, - get_unaligned_be16(nn->netdev->dev_addr + 4)); + get_unaligned_be16(nn->dp.netdev->dev_addr + 4)); } static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) @@ -2116,7 +2139,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) unsigned int r; int err; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; update = NFP_NET_CFG_UPDATE_GEN; update |= NFP_NET_CFG_UPDATE_MSIX; @@ -2133,14 +2156,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) if (err) nn_err(nn, "Could not disable device: %d\n", err); - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_reset(&nn->rx_rings[r]); - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]); - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); + for (r = 0; r < 
nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); + for (r = 0; r < nn->dp.num_r_vecs; r++) nfp_net_vec_clear_ring_data(nn, r); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; } static void @@ -2162,13 +2185,17 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry); } -static int __nfp_net_set_config_and_enable(struct nfp_net *nn) +/** + * nfp_net_set_config_and_enable() - Write control BAR and enable NFP + * @nn: NFP Net device to reconfigure + */ +static int nfp_net_set_config_and_enable(struct nfp_net *nn) { - u32 new_ctrl, update = 0; + u32 bufsz, new_ctrl, update = 0; unsigned int r; int err; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (nn->cap & NFP_NET_CFG_CTRL_RSS) { nfp_net_rss_write_key(nn); @@ -2184,22 +2211,23 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) update |= NFP_NET_CFG_UPDATE_IRQMOD; } - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r); - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r); + for (r = 0; r < nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); - nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); - nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); nfp_net_write_mac_addr(nn); - nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, - nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); + nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu); + + bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz); /* Enable device */ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; @@ -2211,37 +2239,26 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); + if (err) { + nfp_net_clear_config_and_disable(nn); + return err; + } - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]); + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); /* Since reconfiguration requests while NFP is down are ignored we * have to wipe the entire VXLAN configuration and reinitialize it. 
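 * udp_tunnel_get_rx_info() below asks the core to replay every live UDP tunnel port through ndo_udp_tunnel_add (nfp_net_add_vxlan_port() in this driver), so the freshly zeroed vxlan_ports[] and vxlan_usecnt[] tables are rebuilt to match the stack's view before traffic resumes.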
*/ - if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - udp_tunnel_get_rx_info(nn->netdev); + udp_tunnel_get_rx_info(nn->dp.netdev); } - return err; -} - -/** - * nfp_net_set_config_and_enable() - Write control BAR and enable NFP - * @nn: NFP Net device to reconfigure - */ -static int nfp_net_set_config_and_enable(struct nfp_net *nn) -{ - int err; - - err = __nfp_net_set_config_and_enable(nn); - if (err) - nfp_net_clear_config_and_disable(nn); - - return err; + return 0; } /** @@ -2252,12 +2269,12 @@ static void nfp_net_open_stack(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { napi_enable(&nn->r_vecs[r].napi); enable_irq(nn->r_vecs[r].irq_vector); } - netif_tx_wake_all_queues(nn->netdev); + netif_tx_wake_all_queues(nn->dp.netdev); enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); nfp_net_read_link_status(nn); @@ -2266,22 +2283,8 @@ static void nfp_net_open_stack(struct nfp_net *nn) static int nfp_net_netdev_open(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = nn->txd_cnt, - }; int err, r; - if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { - nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); - return -EBUSY; - } - /* Step 1: Allocate resources for rings and the like * - Request interrupts * - Allocate RX and TX ring resources @@ -2299,33 +2302,28 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_exn; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) goto err_cleanup_vec_p; } - nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog); - if (!nn->rx_rings) { - err = -ENOMEM; + err = nfp_net_rx_rings_prepare(nn, &nn->dp); + if (err) goto err_cleanup_vec; - } - nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx, - nn->num_stack_tx_rings); - if (!nn->tx_rings) { - err = -ENOMEM; + err = nfp_net_tx_rings_prepare(nn, &nn->dp); + if (err) goto err_free_rx_rings; - } for (r = 0; r < nn->max_r_vecs; r++) - nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); + nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings); + err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); if (err) goto err_free_rings; - err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); + err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); if (err) goto err_free_rings; @@ -2351,11 +2349,11 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; err_free_rings: - nfp_net_tx_ring_set_free(nn, &tx); + nfp_net_tx_rings_free(&nn->dp); err_free_rx_rings: - nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog); + nfp_net_rx_rings_free(&nn->dp); err_cleanup_vec: - r = nn->num_r_vecs; + r = nn->dp.num_r_vecs; err_cleanup_vec_p: while (r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); @@ -2374,15 +2372,15 @@ static void nfp_net_close_stack(struct nfp_net *nn) unsigned int r; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); - netif_carrier_off(nn->netdev); + netif_carrier_off(nn->dp.netdev); 
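/* Ordering note: the link state change (LSC) interrupt is masked before the carrier is taken down so a racing link event cannot re-assert it, and each ring vector below has its IRQ disabled before napi_disable(), so no new poll can be scheduled while in-flight polls drain. */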
nn->link_up = false; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { disable_irq(nn->r_vecs[r].irq_vector); napi_disable(&nn->r_vecs[r].napi); } - netif_tx_disable(nn->netdev); + netif_tx_disable(nn->dp.netdev); } /** @@ -2393,17 +2391,19 @@ static void nfp_net_close_free_all(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_rx_rings; r++) { - nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog); - nfp_net_rx_ring_free(&nn->rx_rings[r]); + for (r = 0; r < nn->dp.num_rx_rings; r++) { + nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]); + nfp_net_rx_ring_free(&nn->dp.rx_rings[r]); } - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_free(&nn->tx_rings[r]); - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->dp.num_tx_rings; r++) { + nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]); + nfp_net_tx_ring_free(&nn->dp.tx_rings[r]); + } + for (r = 0; r < nn->dp.num_r_vecs; r++) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); - kfree(nn->rx_rings); - kfree(nn->tx_rings); + kfree(nn->dp.rx_rings); + kfree(nn->dp.tx_rings); nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); @@ -2417,11 +2417,6 @@ static int nfp_net_netdev_close(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { - nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); - return 0; - } - /* Step 1: Disable RX and TX rings from the Linux kernel perspective */ nfp_net_close_stack(nn); @@ -2443,7 +2438,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); u32 new_ctrl; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (netdev->flags & IFF_PROMISC) { if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) @@ -2454,13 +2449,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; } - if (new_ctrl == nn->ctrl) + if (new_ctrl == nn->dp.ctrl) return; nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; } static void nfp_net_rss_init_itbl(struct nfp_net *nn) @@ -2469,181 +2464,174 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn) for (i = 0; i < sizeof(nn->rss_itbl); i++) nn->rss_itbl[i] = - ethtool_rxfh_indir_default(i, nn->num_rx_rings); + ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); } -static int -nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs, - unsigned int *stack_tx_rings, - struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, - struct nfp_net_ring_set *tx) +static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) +{ + struct nfp_net_dp new_dp = *dp; + + *dp = nn->dp; + nn->dp = new_dp; + + nn->dp.netdev->mtu = new_dp.mtu; + + if (!netif_is_rxfh_configured(nn->dp.netdev)) + nfp_net_rss_init_itbl(nn); +} + +static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) { unsigned int r; int err; - if (rx) - nfp_net_rx_ring_set_swap(nn, rx); - if (tx) - nfp_net_tx_ring_set_swap(nn, tx); - - swap(*num_vecs, nn->num_r_vecs); - swap(*stack_tx_rings, nn->num_stack_tx_rings); - *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog); + nfp_net_dp_swap(nn, dp); for (r = 0; r < nn->max_r_vecs; r++) - nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); - - if (!netif_is_rxfh_configured(nn->netdev)) - nfp_net_rss_init_itbl(nn); + nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - err = 
netif_set_real_num_rx_queues(nn->netdev, - nn->num_rx_rings); + err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); if (err) return err; - if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) { - err = netif_set_real_num_tx_queues(nn->netdev, - nn->num_stack_tx_rings); + if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { + err = netif_set_real_num_tx_queues(nn->dp.netdev, + nn->dp.num_stack_tx_rings); if (err) return err; } - return __nfp_net_set_config_and_enable(nn); + return nfp_net_set_config_and_enable(nn); +} + +struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn) +{ + struct nfp_net_dp *new; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + *new = nn->dp; + + /* Clear things which need to be recomputed */ + new->fl_bufsz = 0; + new->tx_rings = NULL; + new->rx_rings = NULL; + new->num_r_vecs = 0; + new->num_stack_tx_rings = 0; + + return new; } static int -nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx) +nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, + struct netlink_ext_ack *extack) { /* XDP-enabled tests */ - if (!xdp_prog) + if (!dp->xdp_prog) return 0; - if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) { - nn_warn(nn, "MTU too large w/ XDP enabled\n"); + if (dp->fl_bufsz > PAGE_SIZE) { + NL_MOD_TRY_SET_ERR_MSG(extack, "MTU too large w/ XDP enabled"); return -EINVAL; } - if (tx && tx->n_rings > nn->max_tx_rings) { - nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n"); + if (dp->num_tx_rings > nn->max_tx_rings) { + NL_MOD_TRY_SET_ERR_MSG(extack, "Insufficient number of TX rings w/ XDP enabled"); return -EINVAL; } return 0; } -static void -nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, - struct nfp_net_ring_set *tx, - unsigned int stack_tx_rings, unsigned int num_vecs) -{ - nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu); - nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt; - nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt; - nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings; - nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings; - nn->num_stack_tx_rings = stack_tx_rings; - nn->num_r_vecs = num_vecs; - *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog); - - if (!netif_is_rxfh_configured(nn->netdev)) - nfp_net_rss_init_itbl(nn); -} - -int -nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx) +int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, + struct netlink_ext_ack *extack) { - unsigned int stack_tx_rings, num_vecs, r; - int err; + int r, err; + + dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); - stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings; - if (*xdp_prog) - stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings; + dp->num_stack_tx_rings = dp->num_tx_rings; + if (dp->xdp_prog) + dp->num_stack_tx_rings -= dp->num_rx_rings; - num_vecs = max(rx ? 
rx->n_rings : nn->num_rx_rings, stack_tx_rings); + dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); - err = nfp_net_check_config(nn, *xdp_prog, rx, tx); + err = nfp_net_check_config(nn, dp, extack); if (err) - return err; + goto exit_free_dp; - if (!netif_running(nn->netdev)) { - nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx, - stack_tx_rings, num_vecs); - return 0; + if (!netif_running(dp->netdev)) { + nfp_net_dp_swap(nn, dp); + err = 0; + goto exit_free_dp; } /* Prepare new rings */ - for (r = nn->num_r_vecs; r < num_vecs; r++) { + for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) { - num_vecs = r; - goto err_cleanup_vecs; - } - } - if (rx) { - if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) { - err = -ENOMEM; + dp->num_r_vecs = r; goto err_cleanup_vecs; } } - if (tx) { - if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) { - err = -ENOMEM; - goto err_free_rx; - } - } + + err = nfp_net_rx_rings_prepare(nn, dp); + if (err) + goto err_cleanup_vecs; + + err = nfp_net_tx_rings_prepare(nn, dp); + if (err) + goto err_free_rx; /* Stop device, swap in new rings, try to start the firmware */ nfp_net_close_stack(nn); nfp_net_clear_config_and_disable(nn); - err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings, - xdp_prog, rx, tx); + err = nfp_net_dp_swap_enable(nn, dp); if (err) { int err2; nfp_net_clear_config_and_disable(nn); /* Try with old configuration and old rings */ - err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings, - xdp_prog, rx, tx); + err2 = nfp_net_dp_swap_enable(nn, dp); if (err2) nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", err, err2); } - for (r = num_vecs - 1; r >= nn->num_r_vecs; r--) + for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); - if (rx) - nfp_net_rx_ring_set_free(nn, rx, *xdp_prog); - if (tx) - nfp_net_tx_ring_set_free(nn, tx); + nfp_net_rx_rings_free(dp); + nfp_net_tx_rings_free(dp); nfp_net_open_stack(nn); +exit_free_dp: + kfree(dp); return err; err_free_rx: - if (rx) - nfp_net_rx_ring_set_free(nn, rx, *xdp_prog); + nfp_net_rx_rings_free(dp); err_cleanup_vecs: - for (r = num_vecs - 1; r >= nn->num_r_vecs; r--) + for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); + kfree(dp); return err; } static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) { struct nfp_net *nn = netdev_priv(netdev); - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = new_mtu, - .dcnt = nn->rxd_cnt, - }; + struct nfp_net_dp *dp; - return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL); + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; + + dp->mtu = new_mtu; + + return nfp_net_ring_reconfig(nn, dp, NULL); } static void nfp_net_stat64(struct net_device *netdev, @@ -2652,7 +2640,7 @@ static void nfp_net_stat64(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); int r; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; unsigned int start; @@ -2694,12 +2682,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct nfp_net *nn = netdev_priv(netdev); if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (proto != htons(ETH_P_ALL)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { - if 
(!nn->bpf_offload_xdp) + if (!nn->dp.bpf_offload_xdp) return nfp_net_bpf_offload(nn, tc->cls_bpf); else return -EBUSY; @@ -2718,7 +2706,7 @@ static int nfp_net_set_features(struct net_device *netdev, /* Assume this is not called with features we have not advertised */ - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (changed & NETIF_F_RXCSUM) { if (features & NETIF_F_RXCSUM) @@ -2762,7 +2750,7 @@ static int nfp_net_set_features(struct net_device *netdev, new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; } - if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) { + if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { nn_err(nn, "Cannot disable HW TC offload while in use\n"); return -EBUSY; } @@ -2770,16 +2758,16 @@ static int nfp_net_set_features(struct net_device *netdev, nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", netdev->features, features, changed); - if (new_ctrl == nn->ctrl) + if (new_ctrl == nn->dp.ctrl) return 0; - nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl); + nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); if (err) return err; - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; return 0; } @@ -2830,6 +2818,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int +nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err; + + if (!nn->eth_port) + return -EOPNOTSUPP; + + if (!nn->eth_port->is_split) + err = snprintf(name, len, "p%d", nn->eth_port->label_port); + else + err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port, + nn->eth_port->label_subport); + if (err >= len) + return -EINVAL; + + return 0; +} + /** * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW * @nn: NFP Net device to reconfigure @@ -2842,7 +2850,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) nn->vxlan_ports[idx] = port; - if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN)) + if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN)) return; BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); @@ -2921,8 +2929,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog) if (!nfp_net_ebpf_capable(nn)) return -EINVAL; - if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) { - if (!nn->bpf_offload_xdp) + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { + if (!nn->dp.bpf_offload_xdp) return prog ? -EBUSY : 0; cmd.command = prog ? 
TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY; } else { @@ -2935,48 +2943,44 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog) /* Stop offload if replace not possible */ if (ret && cmd.command == TC_CLSBPF_REPLACE) nfp_net_xdp_offload(nn, NULL); - nn->bpf_offload_xdp = prog && !ret; + nn->dp.bpf_offload_xdp = prog && !ret; return ret; } -static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog) +static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp) { - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = nn->txd_cnt, - }; + struct bpf_prog *old_prog = nn->dp.xdp_prog; + struct bpf_prog *prog = xdp->prog; + struct nfp_net_dp *dp; int err; - if (prog && prog->xdp_adjust_head) { - nn_err(nn, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - if (!prog && !nn->xdp_prog) + if (!prog && !nn->dp.xdp_prog) return 0; - if (prog && nn->xdp_prog) { - prog = xchg(&nn->xdp_prog, prog); + if (prog && nn->dp.xdp_prog) { + prog = xchg(&nn->dp.xdp_prog, prog); bpf_prog_put(prog); - nfp_net_xdp_offload(nn, nn->xdp_prog); + nfp_net_xdp_offload(nn, nn->dp.xdp_prog); return 0; } - tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings; + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; + + dp->xdp_prog = prog; + dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; + dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ - err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx); + err = nfp_net_ring_reconfig(nn, dp, xdp->extack); if (err) return err; - /* @prog got swapped and is now the old one */ - if (prog) - bpf_prog_put(prog); + if (old_prog) + bpf_prog_put(old_prog); - nfp_net_xdp_offload(nn, nn->xdp_prog); + nfp_net_xdp_offload(nn, nn->dp.xdp_prog); return 0; } @@ -2987,9 +2991,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp) switch (xdp->command) { case XDP_SETUP_PROG: - return nfp_net_xdp_setup(nn, xdp->prog); + return nfp_net_xdp_setup(nn, xdp); case XDP_QUERY_PROG: - xdp->prog_attached = !!nn->xdp_prog; + xdp->prog_attached = !!nn->dp.xdp_prog; return 0; default: return -EINVAL; @@ -3008,6 +3012,7 @@ static const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, + .ndo_get_phys_port_name = nfp_net_get_phys_port_name, .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_xdp = nfp_net_xdp, @@ -3020,9 +3025,9 @@ static const struct net_device_ops nfp_net_netdev_ops = { void nfp_net_info(struct nfp_net *nn) { nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", - nn->is_vf ? "VF " : "", - nn->num_tx_rings, nn->max_tx_rings, - nn->num_rx_rings, nn->max_rx_rings); + nn->dp.is_vf ? 
"VF " : "", + nn->dp.num_tx_rings, nn->max_tx_rings, + nn->dp.num_rx_rings, nn->max_rx_rings); nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", nn->fw_ver.resv, nn->fw_ver.class, nn->fw_ver.major, nn->fw_ver.minor, @@ -3074,21 +3079,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); nn = netdev_priv(netdev); - nn->netdev = netdev; + nn->dp.netdev = netdev; + nn->dp.dev = &pdev->dev; nn->pdev = pdev; nn->max_tx_rings = max_tx_rings; nn->max_rx_rings = max_rx_rings; - nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus()); - nn->num_rx_rings = min_t(unsigned int, max_rx_rings, + nn->dp.num_tx_rings = min_t(unsigned int, + max_tx_rings, num_online_cpus()); + nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, netif_get_num_default_rss_queues()); - nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings); - nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus()); + nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); + nn->dp.num_r_vecs = min_t(unsigned int, + nn->dp.num_r_vecs, num_online_cpus()); - nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT; - nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; + nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; + nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->rx_filter_lock); @@ -3108,7 +3116,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, */ void nfp_net_netdev_free(struct nfp_net *nn) { - free_netdev(nn->netdev); + free_netdev(nn->dp.netdev); +} + +/** + * nfp_net_rss_key_sz() - Get current size of the RSS key + * @nn: NFP Net device instance + * + * Return: size of the RSS key for currently selected hash function. + */ +unsigned int nfp_net_rss_key_sz(struct nfp_net *nn) +{ + switch (nn->rss_hfunc) { + case ETH_RSS_HASH_TOP: + return NFP_NET_CFG_RSS_KEY_SZ; + case ETH_RSS_HASH_XOR: + return 0; + case ETH_RSS_HASH_CRC32: + return 4; + } + + nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc); + return 0; } /** @@ -3117,14 +3146,32 @@ void nfp_net_netdev_free(struct nfp_net *nn) */ static void nfp_net_rss_init(struct nfp_net *nn) { - netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); + unsigned long func_bit, rss_cap_hfunc; + u32 reg; + + /* Read the RSS function capability and select first supported func */ + reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP); + rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg); + if (!rss_cap_hfunc) + rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, + NFP_NET_CFG_RSS_TOEPLITZ); + + func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS); + if (func_bit == NFP_NET_CFG_RSS_HFUNCS) { + dev_warn(nn->dp.dev, + "Bad RSS config, defaulting to Toeplitz hash\n"); + func_bit = ETH_RSS_HASH_TOP_BIT; + } + nn->rss_hfunc = 1 << func_bit; + + netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn)); nfp_net_rss_init_itbl(nn); /* Enable IPv4/IPv6 TCP by default */ nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | NFP_NET_CFG_RSS_IPV6_TCP | - NFP_NET_CFG_RSS_TOEPLITZ | + FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) | NFP_NET_CFG_RSS_MASK; } @@ -3151,6 +3198,10 @@ int nfp_net_netdev_init(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); int err; + nn->dp.chained_metadata_format = nn->fw_ver.major > 3; + + nn->dp.rx_dma_dir = DMA_FROM_DEVICE; + /* Get some of the read-only fields from the BAR */ nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); @@ -3158,17 +3209,26 @@ int nfp_net_netdev_init(struct 
net_device *netdev) nfp_net_write_mac_addr(nn); /* Determine RX packet/metadata boundary offset */ - if (nn->fw_ver.major >= 2) - nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); - else - nn->rx_offset = NFP_NET_RX_OFFSET; + if (nn->fw_ver.major >= 2) { + u32 reg; + + reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); + if (reg > NFP_NET_MAX_PREPEND) { + nn_err(nn, "Invalid rx offset: %d\n", reg); + return -EINVAL; + } + nn->dp.rx_offset = reg; + } else { + nn->dp.rx_offset = NFP_NET_RX_OFFSET; + } /* Set default MTU and Freelist buffer size */ if (nn->max_mtu < NFP_NET_DEFAULT_MTU) netdev->mtu = nn->max_mtu; else netdev->mtu = NFP_NET_DEFAULT_MTU; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu); + nn->dp.mtu = netdev->mtu; + nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); /* Advertise/enable offloads based on capabilities * @@ -3179,31 +3239,31 @@ int nfp_net_netdev_init(struct net_device *netdev) netdev->hw_features = NETIF_F_HIGHDMA; if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) { netdev->hw_features |= NETIF_F_RXCSUM; - nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM; } if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; } if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { netdev->hw_features |= NETIF_F_SG; - nn->ctrl |= NFP_NET_CFG_CTRL_GATHER; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; } if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) { netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - nn->ctrl |= NFP_NET_CFG_CTRL_LSO; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO; } if (nn->cap & NFP_NET_CFG_CTRL_RSS) { netdev->hw_features |= NETIF_F_RXHASH; nfp_net_rss_init(nn); - nn->ctrl |= NFP_NET_CFG_CTRL_RSS; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS; } if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && nn->cap & NFP_NET_CFG_CTRL_NVGRE) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; - nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; netdev->hw_enc_features = netdev->hw_features; } @@ -3212,11 +3272,11 @@ int nfp_net_netdev_init(struct net_device *netdev) if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN; } if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; - nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN; } netdev->features = netdev->hw_features; @@ -3229,14 +3289,14 @@ int nfp_net_netdev_init(struct net_device *netdev) /* Allow L2 Broadcast and Multicast through by default, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_L2BC) - nn->ctrl |= NFP_NET_CFG_CTRL_L2BC; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; if (nn->cap & NFP_NET_CFG_CTRL_L2MC) - nn->ctrl |= NFP_NET_CFG_CTRL_L2MC; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC; /* Allow IRQ moderation, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { nfp_net_irqmod_init(nn); - nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; } /* Stash the re-configuration queue away. 
First odd queue in TX Bar */ @@ -3275,10 +3335,10 @@ void nfp_net_netdev_clean(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - unregister_netdev(nn->netdev); + unregister_netdev(nn->dp.netdev); - if (nn->xdp_prog) - bpf_prog_put(nn->xdp_prog); - if (nn->bpf_offload_xdp) + if (nn->dp.xdp_prog) + bpf_prog_put(nn->dp.xdp_prog); + if (nn->dp.bpf_offload_xdp) nfp_net_xdp_offload(nn, NULL); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 385ba355c965c35cf81ecd09f25e3c70c29b76e7..d04ccc9f61162c73bbe67f68cf5dfaed5ecb4a69 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -177,6 +177,19 @@ #define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) #define NFP_NET_CFG_STS 0x0034 #define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +/* Link rate */ +#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1 +#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF +#define NFP_NET_CFG_STS_LINK_RATE \ + (NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT) +#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0 +#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1 +#define NFP_NET_CFG_STS_LINK_RATE_1G 2 +#define NFP_NET_CFG_STS_LINK_RATE_10G 3 +#define NFP_NET_CFG_STS_LINK_RATE_25G 4 +#define NFP_NET_CFG_STS_LINK_RATE_40G 5 +#define NFP_NET_CFG_STS_LINK_RATE_50G 6 +#define NFP_NET_CFG_STS_LINK_RATE_100G 7 #define NFP_NET_CFG_CAP 0x0038 #define NFP_NET_CFG_MAX_TXRINGS 0x003c #define NFP_NET_CFG_MAX_RXRINGS 0x0040 @@ -191,6 +204,14 @@ #define NFP_NET_CFG_RX_OFFSET 0x0050 #define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ +/** + * RSS capabilities + * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as + * @NFP_NET_CFG_RSS_HFUNC) + */ +#define NFP_NET_CFG_RSS_CAP 0x0054 +#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000 + /** * VXLAN/UDP encap configuration * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports @@ -249,7 +270,11 @@ #define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ #define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ #define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_HFUNC 0xff000000 #define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */ +#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */ +#define NFP_NET_CFG_RSS_HFUNCS 3 #define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) #define NFP_NET_CFG_RSS_KEY_SZ 0x28 #define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 6e9372a1837579928bb24b5435e2062dc0c534b8..4077c59bf782ea595420c1b72ad86f72ebbac1a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -40,9 +40,9 @@ static struct dentry *nfp_dir; static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) { - int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; struct nfp_net_r_vector *r_vec = file->private; struct nfp_net_rx_ring *rx_ring; + int fl_rd_p, fl_wr_p, rxd_cnt; struct 
nfp_net_rx_desc *rxd; struct nfp_net *nn; void *frag; @@ -54,19 +54,18 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) goto out; nn = r_vec->nfp_net; rx_ring = r_vec->rx_ring; - if (!netif_running(nn->netdev)) + if (!netif_running(nn->dp.netdev)) goto out; rxd_cnt = rx_ring->cnt; fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); - rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx); - rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx); - seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n", - rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p, - fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p); + seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n", + rx_ring->idx, rx_ring->fl_qcidx, + rx_ring->cnt, &rx_ring->dma, rx_ring->rxds, + rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p); for (i = 0; i < rxd_cnt; i++) { rxd = &rx_ring->rxds[i]; @@ -89,10 +88,6 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) seq_puts(file, " FL_RD"); if (i == fl_wr_p % rxd_cnt) seq_puts(file, " FL_WR"); - if (i == rx_rd_p % rxd_cnt) - seq_puts(file, " RX_RD"); - if (i == rx_wr_p % rxd_cnt) - seq_puts(file, " RX_WR"); seq_putc(file, '\n'); } @@ -143,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) if (!r_vec->nfp_net || !tx_ring) goto out; nn = r_vec->nfp_net; - if (!netif_running(nn->netdev)) + if (!netif_running(nn->dp.netdev)) goto out; txd_cnt = tx_ring->cnt; @@ -151,8 +146,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); - seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n", - tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); + seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n", + tx_ring->idx, tx_ring->qcidx, + tx_ring == r_vec->tx_ring ? 
"" : "xdp", + tx_ring->cnt, &tx_ring->dma, tx_ring->txds, + tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); for (i = 0; i < txd_cnt; i++) { txd = &tx_ring->txds[i]; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 2649f7523c81f11ddbb9c0b9bdba78dd220d7c6a..abbb47e60cc37e5ba6f3ac477e1b178d16c86bd9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -40,6 +40,7 @@ * Brad Petrus */ +#include #include #include #include @@ -48,6 +49,7 @@ #include #include "nfpcore/nfp.h" +#include "nfpcore/nfp_nsp.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" @@ -126,9 +128,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = { }; #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) -#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3) +#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3) #define NN_ET_RVEC_GATHER_STATS 7 -#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2) +#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2) #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) @@ -172,6 +174,119 @@ static void nfp_net_get_drvinfo(struct net_device *netdev, drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ; } +/** + * nfp_net_get_link_ksettings - Get Link Speed settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed settings based on info in the BAR provided by the fw. + */ +static int +nfp_net_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + static const u32 ls_to_ethtool[] = { + [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0, + [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN, + [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000, + [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000, + [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000, + [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000, + [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000, + [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000, + }; + struct nfp_net *nn = netdev_priv(netdev); + u32 sts, ls; + + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + cmd->base.port = PORT_OTHER; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + + if (nn->eth_port) + cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (!netif_carrier_ok(netdev)) + return 0; + + /* Use link speed from ETH table if available, otherwise try the BAR */ + if (nn->eth_port) { + int err; + + if (nfp_net_link_changed_read_clear(nn)) { + err = nfp_net_refresh_eth_port(nn); + if (err) + return err; + } + + cmd->base.port = nn->eth_port->port_type; + cmd->base.speed = nn->eth_port->speed; + cmd->base.duplex = DUPLEX_FULL; + return 0; + } + + sts = nn_readl(nn, NFP_NET_CFG_STS); + + ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts); + if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED) + return -EOPNOTSUPP; + + if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN || + ls >= ARRAY_SIZE(ls_to_ethtool)) + return 0; + + cmd->base.speed = ls_to_ethtool[ls]; + cmd->base.duplex = DUPLEX_FULL; + + return 0; +} + +static int +nfp_net_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nfp_net *nn = netdev_priv(netdev); + struct nfp_nsp *nsp; + int err; + + if (!nn->eth_port) + return -EOPNOTSUPP; + + if (netif_running(netdev)) { + nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n"); + return -EBUSY; + } + + nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ? + NFP_ANEG_AUTO : NFP_ANEG_DISABLED); + if (err) + goto err_bad_set; + if (cmd->base.speed != SPEED_UNKNOWN) { + u32 speed = cmd->base.speed / nn->eth_port->lanes; + + err = __nfp_eth_set_speed(nsp, speed); + if (err) + goto err_bad_set; + } + + err = nfp_eth_config_commit_end(nsp); + if (err > 0) + return 0; /* no change */ + + nfp_net_refresh_port_table(nn); + + return err; + +err_bad_set: + nfp_eth_config_cleanup_end(nsp); + return err; +} + static void nfp_net_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { @@ -179,30 +294,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev, ring->rx_max_pending = NFP_NET_MAX_RX_DESCS; ring->tx_max_pending = NFP_NET_MAX_TX_DESCS; - ring->rx_pending = nn->rxd_cnt; - ring->tx_pending = nn->txd_cnt; + ring->rx_pending = nn->dp.rxd_cnt; + ring->tx_pending = nn->dp.txd_cnt; } static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) { - struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = txd_cnt, - }; + struct nfp_net_dp *dp; - if (nn->rxd_cnt != rxd_cnt) - reconfig_rx = &rx; - if (nn->txd_cnt != txd_cnt) - reconfig_tx = &tx; + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; - return nfp_net_ring_reconfig(nn, &nn->xdp_prog, - reconfig_rx, reconfig_tx); + dp->rxd_cnt = rxd_cnt; + dp->txd_cnt = txd_cnt; + + return nfp_net_ring_reconfig(nn, dp, NULL); } static int nfp_net_set_ringparam(struct net_device *netdev, @@ -223,11 +330,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev, txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) return -EINVAL; - if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) + if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt) return 0; nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", - nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt); return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } @@ -245,7 +352,7 @@ 
static void nfp_net_get_strings(struct net_device *netdev, memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < nn->num_r_vecs; i++) { + for (i = 0; i < nn->dp.num_r_vecs; i++) { sprintf(p, "rvec_%u_rx_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rvec_%u_tx_pkts", i); @@ -267,13 +374,13 @@ static void nfp_net_get_strings(struct net_device *netdev, p += ETH_GSTRING_LEN; strncpy(p, "tx_lso", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; - for (i = 0; i < nn->num_tx_rings; i++) { + for (i = 0; i < nn->dp.num_tx_rings; i++) { sprintf(p, "txq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "txq_%u_bytes", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < nn->num_rx_rings; i++) { + for (i = 0; i < nn->dp.num_rx_rings; i++) { sprintf(p, "rxq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rxq_%u_bytes", i); @@ -306,12 +413,12 @@ static void nfp_net_get_stats(struct net_device *netdev, break; case NFP_NET_DEV_ET_STATS: - io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; + io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; data[i] = readq(io_p); break; } } - for (j = 0; j < nn->num_r_vecs; j++) { + for (j = 0; j < nn->dp.num_r_vecs; j++) { unsigned int start; do { @@ -337,16 +444,16 @@ static void nfp_net_get_stats(struct net_device *netdev, } for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) data[i++] = gathered_stats[j]; - for (j = 0; j < nn->num_tx_rings; j++) { - io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); + for (j = 0; j < nn->dp.num_tx_rings; j++) { + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j); data[i++] = readq(io_p); - io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; data[i++] = readq(io_p); } - for (j = 0; j < nn->num_rx_rings; j++) { - io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); + for (j = 0; j < nn->dp.num_rx_rings; j++) { + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j); data[i++] = readq(io_p); - io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; data[i++] = readq(io_p); } } @@ -410,7 +517,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = nn->num_rx_rings; + cmd->data = nn->dp.num_rx_rings; return 0; case ETHTOOL_GRXFH: return nfp_net_get_rss_hash_opts(nn, cmd); @@ -454,13 +561,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn, return -EINVAL; } - new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ; + new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc); new_rss_cfg |= NFP_NET_CFG_RSS_MASK; if (new_rss_cfg == nn->rss_cfg) return 0; - writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL); + writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); if (err) return err; @@ -496,7 +603,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev) static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) { - return NFP_NET_CFG_RSS_KEY_SZ; + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + return nfp_net_rss_key_sz(nn); } static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, @@ -512,9 +624,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) indir[i] = nn->rss_itbl[i]; if (key) - memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn)); + if 
(hfunc) { + *hfunc = nn->rss_hfunc; + if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT) + *hfunc = ETH_RSS_HASH_UNKNOWN; + } return 0; } @@ -527,14 +642,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev, int i; if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) || - !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP)) + !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc)) return -EOPNOTSUPP; if (!key && !indir) return 0; if (key) { - memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ); + memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn)); nfp_net_rss_write_key(nn); } if (indir) { @@ -564,7 +679,7 @@ static void nfp_net_get_regs(struct net_device *netdev, regs->version = nn_readl(nn, NFP_NET_CFG_VERSION); for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++) - regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32))); + regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32))); } static int nfp_net_get_coalesce(struct net_device *netdev, @@ -676,7 +791,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, ec->tx_coalesce_usecs_high || ec->tx_max_coalesced_frames_high || ec->rate_sample_interval) - return -ENOTSUPP; + return -EOPNOTSUPP; /* Compute factor used to convert coalesce '_usecs' parameters to * ME timestamp ticks. There are 16 ME clock cycles for each timestamp @@ -736,16 +851,16 @@ static void nfp_net_get_channels(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); unsigned int num_tx_rings; - num_tx_rings = nn->num_tx_rings; - if (nn->xdp_prog) - num_tx_rings -= nn->num_rx_rings; + num_tx_rings = nn->dp.num_tx_rings; + if (nn->dp.xdp_prog) + num_tx_rings -= nn->dp.num_rx_rings; channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs); channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs); channel->max_combined = min(channel->max_rx, channel->max_tx); channel->max_other = NFP_NET_NON_Q_VECTORS; - channel->combined_count = min(nn->num_rx_rings, num_tx_rings); - channel->rx_count = nn->num_rx_rings - channel->combined_count; + channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings); + channel->rx_count = nn->dp.num_rx_rings - channel->combined_count; channel->tx_count = num_tx_rings - channel->combined_count; channel->other_count = NFP_NET_NON_Q_VECTORS; } @@ -753,29 +868,19 @@ static void nfp_net_get_channels(struct net_device *netdev, static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx, unsigned int total_tx) { - struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; - struct nfp_net_ring_set rx = { - .n_rings = total_rx, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = total_tx, - .dcnt = nn->txd_cnt, - }; + struct nfp_net_dp *dp; - if (nn->num_rx_rings != total_rx) - reconfig_rx = &rx; - if (nn->num_stack_tx_rings != total_tx || - (nn->xdp_prog && reconfig_rx)) - reconfig_tx = &tx; + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; - /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */ - if (nn->xdp_prog) - tx.n_rings += total_rx; + dp->num_rx_rings = total_rx; + dp->num_tx_rings = total_tx; + /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */ + if (dp->xdp_prog) + dp->num_tx_rings += total_rx; - return nfp_net_ring_reconfig(nn, &nn->xdp_prog, - reconfig_rx, reconfig_tx); + return nfp_net_ring_reconfig(nn, dp, NULL); } static int nfp_net_set_channels(struct net_device *netdev, @@ -823,6 +928,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_coalesce = nfp_net_set_coalesce, .get_channels = 
nfp_net_get_channels, .set_channels = nfp_net_set_channels, + .get_link_ksettings = nfp_net_get_link_ksettings, + .set_link_ksettings = nfp_net_set_link_ksettings, }; void nfp_net_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 3afcdc11480c82c7d19f2252cae29a40066cfef8..8cb87cbe1120c73855321c220f47aea16876b3ca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -47,11 +47,12 @@ #include #include #include +#include #include "nfpcore/nfp.h" #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nffw.h" -#include "nfpcore/nfp_nsp_eth.h" +#include "nfpcore/nfp_nsp.h" #include "nfpcore/nfp6000_pcie.h" #include "nfp_net_ctrl.h" @@ -129,61 +130,61 @@ static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp, return (u8 __iomem *)ERR_PTR(err); } +/** + * nfp_net_get_mac_addr() - Get the MAC address. + * @nn: NFP Network structure + * @cpp: NFP CPP handle + * @id: NFP port id + * + * First try to get the MAC address from NSP ETH table. If that + * fails try HWInfo. As a last resort generate a random address. + */ static void -nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp, - unsigned int id) +nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id) { + struct nfp_net_dp *dp = &nn->dp; u8 mac_addr[ETH_ALEN]; const char *mac_str; char name[32]; + if (nn->eth_port) { + ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr); + ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr); + return; + } + snprintf(name, sizeof(name), "eth%d.mac", id); mac_str = nfp_hwinfo_lookup(cpp, name); if (!mac_str) { - dev_warn(&nn->pdev->dev, - "Can't lookup MAC address. Generate\n"); - eth_hw_addr_random(nn->netdev); + dev_warn(dp->dev, "Can't lookup MAC address. Generate\n"); + eth_hw_addr_random(dp->netdev); return; } if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &mac_addr[0], &mac_addr[1], &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { - dev_warn(&nn->pdev->dev, + dev_warn(dp->dev, "Can't parse MAC address (%s). Generate.\n", mac_str); - eth_hw_addr_random(nn->netdev); + eth_hw_addr_random(dp->netdev); return; } - ether_addr_copy(nn->netdev->dev_addr, mac_addr); - ether_addr_copy(nn->netdev->perm_addr, mac_addr); + ether_addr_copy(dp->netdev->dev_addr, mac_addr); + ether_addr_copy(dp->netdev->perm_addr, mac_addr); } -/** - * nfp_net_get_mac_addr() - Get the MAC address. - * @nn: NFP Network structure - * @pf: NFP PF device structure - * @id: NFP port id - * - * First try to get the MAC address from NSP ETH table. If that - * fails try HWInfo. As a last resort generate a random address. 
- */ -static void -nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id) +static struct nfp_eth_table_port * +nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id) { int i; - for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++) - if (pf->eth_tbl->ports[i].eth_index == id) { - const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr; + for (i = 0; eth_tbl && i < eth_tbl->count; i++) + if (eth_tbl->ports[i].eth_index == id) + return &eth_tbl->ports[i]; - ether_addr_copy(nn->netdev->dev_addr, mac_addr); - ether_addr_copy(nn->netdev->perm_addr, mac_addr); - return; - } - - nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id); + return NULL; } static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf) @@ -282,6 +283,7 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf) while (!list_empty(&pf->ports)) { nn = list_first_entry(&pf->ports, struct nfp_net, port_list); list_del(&nn->port_list); + pf->num_netdevs--; nfp_net_netdev_free(nn); } @@ -290,7 +292,8 @@ static struct nfp_net * nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, void __iomem *tx_bar, void __iomem *rx_bar, - int stride, struct nfp_net_fw_version *fw_ver) + int stride, struct nfp_net_fw_version *fw_ver, + struct nfp_eth_table_port *eth_port) { u32 n_tx_rings, n_rx_rings; struct nfp_net *nn; @@ -305,12 +308,13 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, nn->cpp = pf->cpp; nn->fw_ver = *fw_ver; - nn->ctrl_bar = ctrl_bar; + nn->dp.ctrl_bar = ctrl_bar; nn->tx_bar = tx_bar; nn->rx_bar = rx_bar; - nn->is_vf = 0; + nn->dp.is_vf = 0; nn->stride_rx = stride; nn->stride_tx = stride; + nn->eth_port = eth_port; return nn; } @@ -322,7 +326,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn, int err; /* Get MAC address */ - nfp_net_get_mac_addr(nn, pf, id); + nfp_net_get_mac_addr(nn, pf->cpp, id); /* Get ME clock frequency from ctrl BAR * XXX for now frequency is hardcoded until we figure out how @@ -330,7 +334,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn, */ nn->me_freq_mhz = 1200; - err = nfp_net_netdev_init(nn->netdev); + err = nfp_net_netdev_init(nn->dp.netdev); if (err) return err; @@ -347,6 +351,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar, int stride, struct nfp_net_fw_version *fw_ver) { u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base; + struct nfp_eth_table_port *eth_port; struct nfp_net *nn; unsigned int i; int err; @@ -362,17 +367,27 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar, prev_tx_base = tgt_tx_base; prev_rx_base = tgt_rx_base; - nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar, - stride, fw_ver); - if (IS_ERR(nn)) { - err = PTR_ERR(nn); - goto err_free_prev; + eth_port = nfp_net_find_port(pf->eth_tbl, i); + if (eth_port && eth_port->override_changed) { + nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i); + } else { + nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, + rx_bar, stride, + fw_ver, eth_port); + if (IS_ERR(nn)) { + err = PTR_ERR(nn); + goto err_free_prev; + } + list_add_tail(&nn->port_list, &pf->ports); + pf->num_netdevs++; } - list_add_tail(&nn->port_list, &pf->ports); ctrl_bar += NFP_PF_CSR_SLICE_SIZE; } + if (list_empty(&pf->ports)) + return -ENODEV; + return 0; err_free_prev: @@ -399,7 +414,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, /* Get MSI-X vectors */ wanted_irqs = 0;
list_for_each_entry(nn, &pf->ports, port_list) - wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs; + wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs; pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries), GFP_KERNEL); if (!pf->irq_entries) { @@ -408,7 +423,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, } num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries, - NFP_NET_MIN_PORT_IRQS * pf->num_ports, + NFP_NET_MIN_PORT_IRQS * pf->num_netdevs, wanted_irqs); if (!num_irqs) { nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); @@ -418,7 +433,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, /* Distribute IRQs to ports */ irqs_left = num_irqs; - ports_left = pf->num_ports; + ports_left = pf->num_netdevs; list_for_each_entry(nn, &pf->ports, port_list) { unsigned int n; @@ -444,7 +459,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, err_prev_deinit: list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) { nfp_net_debugfs_dir_clean(&nn->debugfs_dir); - nfp_net_netdev_clean(nn->netdev); + nfp_net_netdev_clean(nn->dp.netdev); } nfp_net_irqs_disable(pf->pdev); err_vec_free: @@ -454,6 +469,108 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, return err; } +static void nfp_net_pci_remove_finish(struct nfp_pf *pf) +{ + nfp_net_debugfs_dir_clean(&pf->ddir); + + nfp_net_irqs_disable(pf->pdev); + kfree(pf->irq_entries); + + nfp_cpp_area_release_free(pf->rx_area); + nfp_cpp_area_release_free(pf->tx_area); + nfp_cpp_area_release_free(pf->ctrl_area); +} + +static void nfp_net_refresh_netdevs(struct work_struct *work) +{ + struct nfp_pf *pf = container_of(work, struct nfp_pf, + port_refresh_work); + struct nfp_eth_table *eth_table; + struct nfp_net *nn, *next; + + mutex_lock(&pf->port_lock); + + /* Check for nfp_net_pci_remove() racing against us */ + if (list_empty(&pf->ports)) + goto out; + + list_for_each_entry(nn, &pf->ports, port_list) + nfp_net_link_changed_read_clear(nn); + + eth_table = nfp_eth_read_ports(pf->cpp); + if (!eth_table) { + nfp_err(pf->cpp, "Error refreshing port config!\n"); + goto out; + } + + rtnl_lock(); + list_for_each_entry(nn, &pf->ports, port_list) { + if (!nn->eth_port) + continue; + nn->eth_port = nfp_net_find_port(eth_table, + nn->eth_port->eth_index); + } + rtnl_unlock(); + + kfree(pf->eth_tbl); + pf->eth_tbl = eth_table; + + list_for_each_entry_safe(nn, next, &pf->ports, port_list) { + if (!nn->eth_port) { + nfp_warn(pf->cpp, "Warning: port not present after reconfig\n"); + continue; + } + if (!nn->eth_port->override_changed) + continue; + + nn_warn(nn, "Port config changed, unregistering. 
Reboot required before port will be operational again.\n"); + + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + nfp_net_netdev_clean(nn->dp.netdev); + + list_del(&nn->port_list); + pf->num_netdevs--; + nfp_net_netdev_free(nn); + } + + if (list_empty(&pf->ports)) + nfp_net_pci_remove_finish(pf); +out: + mutex_unlock(&pf->port_lock); +} + +void nfp_net_refresh_port_table(struct nfp_net *nn) +{ + struct nfp_pf *pf = pci_get_drvdata(nn->pdev); + + schedule_work(&pf->port_refresh_work); +} + +int nfp_net_refresh_eth_port(struct nfp_net *nn) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_eth_table *eth_table; + + eth_table = nfp_eth_read_ports(nn->cpp); + if (!eth_table) { + nn_err(nn, "Error refreshing port state table!\n"); + return -EIO; + } + + eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index); + if (!eth_port) { + nn_err(nn, "Error finding state of the port!\n"); + kfree(eth_table); + return -EIO; + } + + memcpy(nn->eth_port, eth_port, sizeof(*eth_port)); + + kfree(eth_table); + + return 0; +} + /* * PCI device functions */ @@ -467,17 +584,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf) int stride; int err; + INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs); + mutex_init(&pf->port_lock); + /* Verify that the board has completed initialization */ if (!nfp_is_ready(pf->cpp)) { nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); return -EINVAL; } + mutex_lock(&pf->port_lock); pf->num_ports = nfp_net_pf_get_num_ports(pf); ctrl_bar = nfp_net_pf_map_ctrl_bar(pf); - if (!ctrl_bar) - return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; + if (!ctrl_bar) { + err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; + goto err_unlock; + } nfp_net_get_fw_version(&fw_ver, ctrl_bar); if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { @@ -551,6 +674,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_clean_ddir; + mutex_unlock(&pf->port_lock); + return 0; err_clean_ddir: @@ -560,6 +685,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf) nfp_cpp_area_release_free(pf->tx_area); err_ctrl_unmap: nfp_cpp_area_release_free(pf->ctrl_area); +err_unlock: + mutex_unlock(&pf->port_lock); return err; } @@ -567,20 +694,21 @@ void nfp_net_pci_remove(struct nfp_pf *pf) { struct nfp_net *nn; + mutex_lock(&pf->port_lock); + if (list_empty(&pf->ports)) + goto out; + list_for_each_entry(nn, &pf->ports, port_list) { nfp_net_debugfs_dir_clean(&nn->debugfs_dir); - nfp_net_netdev_clean(nn->netdev); + nfp_net_netdev_clean(nn->dp.netdev); } nfp_net_pf_free_netdevs(pf); - nfp_net_debugfs_dir_clean(&pf->ddir); - - nfp_net_irqs_disable(pf->pdev); - kfree(pf->irq_entries); + nfp_net_pci_remove_finish(pf); +out: + mutex_unlock(&pf->port_lock); - nfp_cpp_area_release_free(pf->rx_area); - nfp_cpp_area_release_free(pf->tx_area); - nfp_cpp_area_release_free(pf->ctrl_area); + cancel_work_sync(&pf->port_refresh_work); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c index 18a851eb35084397dd6fa003b3def76ed42a6960..cc823df12c8a83b3ef6f2f33f9c53a062d45be71 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c @@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data) spin_lock_bh(&nn->rx_filter_lock); - if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL); @@ -119,12 +119,12 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload 
*cls_bpf) if (tc_no_actions(cls_bpf->exts)) return NN_ACT_DIRECT; - return -ENOTSUPP; + return -EOPNOTSUPP; } /* TC legacy mode */ if (!tc_single_action(cls_bpf->exts)) - return -ENOTSUPP; + return -EOPNOTSUPP; tcf_exts_to_list(cls_bpf->exts, &actions); list_for_each_entry(a, &actions, list) { @@ -132,11 +132,11 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) return NN_ACT_TC_DROP; if (is_tcf_mirred_egress_redirect(a) && - tcf_mirred_ifindex(a) == nn->netdev->ifindex) + tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex) return NN_ACT_TC_REDIR; } - return -ENOTSUPP; + return -EOPNOTSUPP; } static int @@ -152,7 +152,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, int ret; if (!IS_ENABLED(CONFIG_BPF_SYSCALL)) - return -ENOTSUPP; + return -EOPNOTSUPP; ret = nfp_net_bpf_get_act(nn, cls_bpf); if (ret < 0) @@ -160,16 +160,15 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, act = ret; max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; - if (max_mtu < nn->netdev->mtu) { + if (max_mtu < nn->dp.netdev->mtu) { nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); - *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr, - GFP_KERNEL); + *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL); if (!*code) return -ENOMEM; @@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, return 0; out: - dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr); + dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr); return ret; } @@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, u64 bpf_addr = dma_addr; int err; - nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); + nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); if (dense_mode) bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX; @@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, nn_err(nn, "FW command error while loading BPF: %d\n", err); /* Enable passing packets through BPF function */ - nn->ctrl |= NFP_NET_CFG_CTRL_BPF; - nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl); + nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF; + nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); if (err) nn_err(nn, "FW command error while enabling BPF: %d\n", err); - dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr); + dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); nfp_net_bpf_stats_reset(nn); mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL); @@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, static int nfp_net_bpf_stop(struct nfp_net *nn) { - if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF)) + if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)) return 0; spin_lock_bh(&nn->rx_filter_lock); - nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF; + nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF; spin_unlock_bh(&nn->rx_filter_lock); - nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl); + nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); del_timer_sync(&nn->rx_filter_stats_timer); - nn->bpf_offload_skip_sw = 0; + nn->dp.bpf_offload_skip_sw = 0; return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); } @@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) * frames which didn't have BPF applied in the hardware should * be fine if software fallback is available, though. 
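One detail of nfp_net_bpf_offload_prepare() above worth spelling out: the code reads NFP_NET_CFG_BPF_INL_MTU in 64-byte units and subtracts a 32-byte reservation, so the boundary works out as below (the register value in the example is hypothetical, as is the helper name):

    /* Sketch of the max_mtu derivation above; inl_mtu is the byte read
     * via nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU).
     */
    static unsigned int bpf_offload_max_mtu(unsigned char inl_mtu)
    {
            return inl_mtu * 64 - 32;       /* e.g. inl_mtu == 64 -> 4064 bytes */
    }

A netdev MTU above that value makes the prepare step bail out with -EOPNOTSUPP, consistent with the error-code change in this patch.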
*/ - if (nn->bpf_offload_skip_sw) + if (nn->dp.bpf_offload_skip_sw) return -EBUSY; err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, @@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) return 0; case TC_CLSBPF_ADD: - if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) return -EBUSY; err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, @@ -290,6 +289,6 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) return nfp_net_bpf_stats_update(nn, cls_bpf); default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 39407f7cc586c948319b963aae3fb455a7a6cb8f..86e61be6f35c11f8c7b932844b15c989cd6db079 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn) put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]); if (!is_valid_ether_addr(mac_addr)) { - eth_hw_addr_random(nn->netdev); + eth_hw_addr_random(nn->dp.netdev); return; } - ether_addr_copy(nn->netdev->dev_addr, mac_addr); - ether_addr_copy(nn->netdev->perm_addr, mac_addr); + ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); } static int nfp_netvf_pci_probe(struct pci_dev *pdev, @@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, vf->nn = nn; nn->fw_ver = fw_ver; - nn->ctrl_bar = ctrl_bar; - nn->is_vf = 1; + nn->dp.ctrl_bar = ctrl_bar; + nn->dp.is_vf = 1; nn->stride_tx = stride; nn->stride_rx = stride; @@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, NFP_NET_MIN_PORT_IRQS, - NFP_NET_NON_Q_VECTORS + nn->num_r_vecs); + NFP_NET_NON_Q_VECTORS + + nn->dp.num_r_vecs); if (!num_irqs) { nn_warn(nn, "Unable to allocate MSI-X Vectors. 
Exiting\n"); err = -EIO; @@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, */ nn->me_freq_mhz = 1200; - err = nfp_net_netdev_init(nn->netdev); + err = nfp_net_netdev_init(nn->dp.netdev); if (err) goto err_irqs_disable; @@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev) nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_debugfs_dir_clean(&vf->ddir); - nfp_net_netdev_clean(nn->netdev); + nfp_net_netdev_clean(nn->dp.netdev); nfp_net_irqs_disable(pdev); @@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev) } else { iounmap(vf->q_bar); } - iounmap(nn->ctrl_bar); + iounmap(nn->dp.ctrl_bar); nfp_net_netdev_free(nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 42cb720b696d17b6dfb580a145237245a8217412..4df2ce261b3f786e4871b14f9090038fab91daae 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -48,32 +48,26 @@ const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup); -/* Implemented in nfp_nsp.c */ +/* Implemented in nfp_nsp.c, low level functions */ struct nfp_nsp; -struct firmware; - -struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); -void nfp_nsp_close(struct nfp_nsp *state); -u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); -u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); -int nfp_nsp_wait(struct nfp_nsp *state); -int nfp_nsp_device_soft_reset(struct nfp_nsp *state); -int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); + +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state); +bool nfp_nsp_config_modified(struct nfp_nsp *state); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified); +void *nfp_nsp_config_entries(struct nfp_nsp *state); +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); +void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, + unsigned int idx); +void nfp_nsp_config_clear_state(struct nfp_nsp *state); int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, unsigned int size); +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size); /* Implemented in nfp_resource.c */ -#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU -#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL - -/* NFP Resource Table self-identifier */ -#define NFP_RESOURCE_TBL_NAME "nfp.res" -#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ - -/* All other keys are CRC32-POSIX of the 8-byte identification string */ +/* All keys are CRC32-POSIX of the 8-byte identification string */ /* ARM/PCI vNIC Interfaces 0..3 */ #define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0" diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 15cc3e77cf6acddfec0afe8906e2a61f3f958aa5..43dc68e01274225c79fea7760ac303a8a2409a85 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -217,7 +217,7 @@ static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar) #define TARGET_WIDTH_64 8 static int -compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, +compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar, u32 *bar_config, u64 *bar_base, int tgt, int act, int tok, u64 offset, size_t size, int width) { @@ -410,35 +410,36 @@ find_matching_bar(struct 
nfp6000_pcie *nfp, /* Return EAGAIN if no resource is available */ static int -find_unused_bar_noblock(struct nfp6000_pcie *nfp, +find_unused_bar_noblock(const struct nfp6000_pcie *nfp, int tgt, int act, int tok, u64 offset, size_t size, int width) { - int n, invalid = 0; + int n, busy = 0; for (n = 0; n < nfp->bars; n++) { - struct nfp_bar *bar = &nfp->bar[n]; + const struct nfp_bar *bar = &nfp->bar[n]; int err; - if (bar->bitsize == 0) { - invalid++; - continue; - } - - if (atomic_read(&bar->refcnt) != 0) + if (!bar->bitsize) continue; /* Just check to see if we can make it fit... */ err = compute_bar(nfp, bar, NULL, NULL, tgt, act, tok, offset, size, width); + if (err) + continue; - if (err < 0) - invalid++; - else + if (!atomic_read(&bar->refcnt)) return n; + + busy++; } - return (n == invalid) ? -EINVAL : -EAGAIN; + if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n", + tgt, act, tok, offset, size, width)) + return -EINVAL; + + return -EAGAIN; } static int diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 40108e66c65480fcf8e002379bf68ec948cd22ee..e2abba4c3a3fd84225f443d82e61454d41093819 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -65,39 +65,49 @@ struct nfp_cpp_resource { u64 end; }; -struct nfp_cpp_mutex { - struct list_head list; - struct nfp_cpp *cpp; - int target; - u16 usage; - u16 depth; - unsigned long long address; - u32 key; -}; - +/** + * struct nfp_cpp - main nfpcore device structure + * Following fields are read-only after probe() exits or netdevs are spawned. + * @dev: embedded device structure + * @op: low-level implementation ops + * @priv: private data of the low-level implementation + * @model: chip model + * @interface: chip interface id we are using to reach it + * @serial: chip serial number + * @imb_cat_table: CPP Mapping Table + * + * Following fields can be used only in probe() or with rtnl held: + * @hwinfo: HWInfo database fetched from the device + * @rtsym: firmware run time symbols + * + * Following fields use explicit locking: + * @resource_list: NFP CPP resource list + * @resource_lock: protects @resource_list + * + * @area_cache_list: cached areas for cpp/xpb read/write speed up + * @area_cache_mutex: protects @area_cache_list + * + * @waitq: area wait queue + */ struct nfp_cpp { struct device dev; - void *priv; /* Private data of the low-level implementation */ + void *priv; u32 model; u16 interface; u8 serial[NFP_SERIAL_LEN]; const struct nfp_cpp_operations *op; - struct list_head resource_list; /* NFP CPP resource list */ - struct list_head mutex_cache; /* Mutex cache */ + struct list_head resource_list; rwlock_t resource_lock; wait_queue_head_t waitq; - /* NFP6000 CPP Mapping Table */ u32 imb_cat_table[16]; - /* Cached areas for cpp/xpb readl/writel speedups */ - struct mutex area_cache_mutex; /* Lock for the area cache */ + struct mutex area_cache_mutex; struct list_head area_cache_list; - /* Cached information */ void *hwinfo; void *rtsym; }; @@ -187,24 +197,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp) { struct nfp_cpp_area_cache *cache, *ctmp; struct nfp_cpp_resource *res, *rtmp; - struct nfp_cpp_mutex *mutex, *mtmp; - - /* There should be no mutexes in the cache at this point. */ - WARN_ON(!list_empty(&cpp->mutex_cache)); - /* .. but if there are, unlock them and complain. 
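Returning to find_unused_bar_noblock() above: it now reports -EAGAIN only when every BAR that could fit the request is currently busy, and -EINVAL (plus a WARN) when no BAR can ever fit it. A hedged sketch of the caller-side pattern this distinction enables; wait_for_bar() is a made-up stand-in for the driver's real waitqueue handling, and tgt/act/tok/off/size/width are as in compute_bar():

    int n;

    do {
            n = find_unused_bar_noblock(nfp, tgt, act, tok, off, size, width);
            if (n == -EAGAIN)
                    wait_for_bar(nfp);  /* hypothetical: sleep until a refcount drops */
    } while (n == -EAGAIN);
    /* n >= 0: index of a usable BAR; n == -EINVAL: the request can never fit */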
*/ - list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) { - dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n", - mutex->target, (unsigned long long)mutex->address, - mutex->depth, mutex->usage); - - /* Forcing an unlock */ - mutex->depth = 1; - nfp_cpp_mutex_unlock(mutex); - - /* Forcing a free */ - mutex->usage = 1; - nfp_cpp_mutex_free(mutex); - } /* Remove all caches */ list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) { @@ -419,9 +411,43 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest, */ void nfp_cpp_area_free(struct nfp_cpp_area *area) { + if (atomic_read(&area->refcount)) + nfp_warn(area->cpp, "Warning: freeing busy area\n"); nfp_cpp_area_put(area); } +static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status) +{ + *status = area->cpp->op->area_acquire(area); + + return *status != -EAGAIN; +} + +static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + int err, status; + + if (atomic_inc_return(&area->refcount) > 1) + return 0; + + if (!area->cpp->op->area_acquire) + return 0; + + err = wait_event_interruptible(area->cpp->waitq, + nfp_cpp_area_acquire_try(area, &status)); + if (!err) + err = status; + if (err) { + nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err); + atomic_dec(&area->refcount); + return err; + } + + nfp_cpp_area_get(area); + + return 0; +} + /** * nfp_cpp_area_acquire() - lock down a CPP area for access * @area: CPP area handle @@ -433,27 +459,13 @@ void nfp_cpp_area_free(struct nfp_cpp_area *area) */ int nfp_cpp_area_acquire(struct nfp_cpp_area *area) { - mutex_lock(&area->mutex); - if (atomic_inc_return(&area->refcount) == 1) { - int (*a_a)(struct nfp_cpp_area *); - - a_a = area->cpp->op->area_acquire; - if (a_a) { - int err; + int ret; - wait_event_interruptible(area->cpp->waitq, - (err = a_a(area)) != -EAGAIN); - if (err < 0) { - atomic_dec(&area->refcount); - mutex_unlock(&area->mutex); - return err; - } - } - } + mutex_lock(&area->mutex); + ret = __nfp_cpp_area_acquire(area); mutex_unlock(&area->mutex); - nfp_cpp_area_get(area); - return 0; + return ret; } /** @@ -829,10 +841,7 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, * the need for special case code below when * checking against available cache size. 
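The nfp_cpp_area_acquire_try()/__nfp_cpp_area_acquire() pair above is the usual idiom for wait_event_interruptible() when the wait condition also produces an error code: a helper stores the ops result through a pointer and returns true only when the wait should end. Reduced to its essentials, with ctx and do_op as generic placeholders:

    struct ctx { wait_queue_head_t waitq; /* ... */ };
    int do_op(struct ctx *c);           /* returns 0, -EAGAIN, or another -errno */

    static bool try_op(struct ctx *c, int *status)
    {
            *status = do_op(c);         /* capture the real result */
            return *status != -EAGAIN;  /* true terminates the wait */
    }

    /* caller */
    int err, status;

    err = wait_event_interruptible(c->waitq, try_op(c, &status));
    if (!err)
            err = status;               /* propagate do_op()'s final result */

Unlike the old open-coded "(err = a_a(area)) != -EAGAIN" condition, this also checks the return value of wait_event_interruptible() itself, so an interrupting signal is reported as -ERESTARTSYS rather than as whatever the last attempt happened to return.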
*/ - if (length == 0) - return NULL; - - if (list_empty(&cpp->area_cache_list) || id == 0) + if (length == 0 || id == 0) return NULL; /* Remap from cpp_island to cpp_target */ @@ -840,10 +849,15 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, if (err < 0) return NULL; - addr += *offset; - mutex_lock(&cpp->area_cache_mutex); + if (list_empty(&cpp->area_cache_list)) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + addr += *offset; + /* See if we have a match */ list_for_each_entry(cache, &cpp->area_cache_list, entry) { if (id == cache->id && @@ -937,12 +951,14 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination, return -ENOMEM; err = nfp_cpp_area_acquire(area); - if (err) - goto out; + if (err) { + nfp_cpp_area_free(area); + return err; + } } err = nfp_cpp_area_read(area, offset, kernel_vaddr, length); -out: + if (cache) area_cache_put(cpp, cache); else @@ -979,13 +995,14 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination, return -ENOMEM; err = nfp_cpp_area_acquire(area); - if (err) - goto out; + if (err) { + nfp_cpp_area_free(area); + return err; + } } err = nfp_cpp_area_write(area, offset, kernel_vaddr, length); -out: if (cache) area_cache_put(cpp, cache); else @@ -1127,7 +1144,6 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, rwlock_init(&cpp->resource_lock); init_waitqueue_head(&cpp->waitq); lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); - INIT_LIST_HEAD(&cpp->mutex_cache); INIT_LIST_HEAD(&cpp->resource_list); INIT_LIST_HEAD(&cpp->area_cache_list); mutex_init(&cpp->area_cache_mutex); @@ -1425,322 +1441,3 @@ void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit) { return &cpp_explicit[1]; } - -/* THIS FUNCTION IS NOT EXPORTED */ -static u32 nfp_mutex_locked(u16 interface) -{ - return (u32)interface << 16 | 0x000f; -} - -static u32 nfp_mutex_unlocked(u16 interface) -{ - return (u32)interface << 16 | 0x0000; -} - -static bool nfp_mutex_is_locked(u32 val) -{ - return (val & 0xffff) == 0x000f; -} - -static bool nfp_mutex_is_unlocked(u32 val) -{ - return (val & 0xffff) == 0000; -} - -/* If you need more than 65536 recursive locks, please rethink your code. */ -#define MUTEX_DEPTH_MAX 0xffff - -static int -nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) -{ - /* Not permitted on invalid interfaces */ - if (NFP_CPP_INTERFACE_TYPE_of(interface) == - NFP_CPP_INTERFACE_TYPE_INVALID) - return -EINVAL; - - /* Address must be 64-bit aligned */ - if (address & 7) - return -EINVAL; - - if (*target != NFP_CPP_TARGET_MU) - return -EINVAL; - - return 0; -} - -/** - * nfp_cpp_mutex_init() - Initialize a mutex location - * @cpp: NFP CPP handle - * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) - * @address: Offset into the address space of the NFP CPP target ID - * @key: Unique 32-bit value for this mutex - * - * The CPP target:address must point to a 64-bit aligned location, and - * will initialize 64 bits of data at the location. - * - * This creates the initial mutex state, as locked by this - * nfp_cpp_interface(). - * - * This function should only be called when setting up - * the initial lock state upon boot-up of the system. 
- * - * Return: 0 on success, or -errno on failure - */ -int nfp_cpp_mutex_init(struct nfp_cpp *cpp, - int target, unsigned long long address, u32 key) -{ - const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ - u16 interface = nfp_cpp_interface(cpp); - int err; - - err = nfp_cpp_mutex_validate(interface, &target, address); - if (err) - return err; - - err = nfp_cpp_writel(cpp, muw, address + 4, key); - if (err) - return err; - - err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); - if (err) - return err; - - return 0; -} - -/** - * nfp_cpp_mutex_alloc() - Create a mutex handle - * @cpp: NFP CPP handle - * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) - * @address: Offset into the address space of the NFP CPP target ID - * @key: 32-bit unique key (must match the key at this location) - * - * The CPP target:address must point to a 64-bit aligned location, and - * reserve 64 bits of data at the location for use by the handle. - * - * Only target/address pairs that point to entities that support the - * MU Atomic Engine's CmpAndSwap32 command are supported. - * - * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. - */ -struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, - unsigned long long address, u32 key) -{ - const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ - u16 interface = nfp_cpp_interface(cpp); - struct nfp_cpp_mutex *mutex; - int err; - u32 tmp; - - err = nfp_cpp_mutex_validate(interface, &target, address); - if (err) - return NULL; - - /* Look for mutex on cache list */ - list_for_each_entry(mutex, &cpp->mutex_cache, list) { - if (mutex->target == target && mutex->address == address) { - mutex->usage++; - return mutex; - } - } - - err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); - if (err < 0) - return NULL; - - if (tmp != key) - return NULL; - - mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); - if (!mutex) - return NULL; - - mutex->cpp = cpp; - mutex->target = target; - mutex->address = address; - mutex->key = key; - mutex->depth = 0; - mutex->usage = 1; - - /* Add mutex to cache list */ - list_add(&mutex->list, &cpp->mutex_cache); - - return mutex; -} - -/** - * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state - * @mutex: NFP CPP Mutex handle - */ -void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) -{ - if (--mutex->usage) - return; - - /* Remove mutex from cache */ - list_del(&mutex->list); - kfree(mutex); -} - -/** - * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine - * @mutex: NFP CPP Mutex handle - * - * Return: 0 on success, or -errno on failure - */ -int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) -{ - unsigned long warn_at = jiffies + 15 * HZ; - unsigned int timeout_ms = 1; - int err; - - /* We can't use a waitqueue here, because the unlocker - * might be on a separate CPU. - * - * So just wait for now. 
- */ - for (;;) { - err = nfp_cpp_mutex_trylock(mutex); - if (err != -EBUSY) - break; - - err = msleep_interruptible(timeout_ms); - if (err != 0) - return -ERESTARTSYS; - - if (time_is_before_eq_jiffies(warn_at)) { - warn_at = jiffies + 60 * HZ; - dev_warn(mutex->cpp->dev.parent, - "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n", - mutex->usage, mutex->depth, - mutex->target, mutex->address, mutex->key); - } - } - - return err; -} - -/** - * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine - * @mutex: NFP CPP Mutex handle - * - * Return: 0 on success, or -errno on failure - */ -int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) -{ - const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ - const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ - struct nfp_cpp *cpp = mutex->cpp; - u32 key, value; - u16 interface; - int err; - - interface = nfp_cpp_interface(cpp); - - if (mutex->depth > 1) { - mutex->depth--; - return 0; - } - - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); - if (err < 0) - return err; - - if (key != mutex->key) - return -EPERM; - - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); - if (err < 0) - return err; - - if (value != nfp_mutex_locked(interface)) - return -EACCES; - - err = nfp_cpp_writel(cpp, muw, mutex->address, - nfp_mutex_unlocked(interface)); - if (err < 0) - return err; - - mutex->depth = 0; - return 0; -} - -/** - * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle - * @mutex: NFP CPP Mutex handle - * - * Return: 0 if the lock succeeded, -errno on failure - */ -int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) -{ - const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ - const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ - const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ - struct nfp_cpp *cpp = mutex->cpp; - u32 key, value, tmp; - int err; - - if (mutex->depth > 0) { - if (mutex->depth == MUTEX_DEPTH_MAX) - return -E2BIG; - mutex->depth++; - return 0; - } - - /* Verify that the lock marker is not damaged */ - err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); - if (err < 0) - return err; - - if (key != mutex->key) - return -EPERM; - - /* Compare against the unlocked state, and if true, - * write the interface id into the top 16 bits, and - * mark as locked. - */ - value = nfp_mutex_locked(nfp_cpp_interface(cpp)); - - /* We use test_set_imm here, as it implies a read - * of the current state, and sets the bits in the - * bytemask of the command to 1s. Since the mutex - * is guaranteed to be 64-bit aligned, the bytemask - * of this 32-bit command is ensured to be 8'b00001111, - * which implies that the lower 4 bits will be set to - * ones regardless of the initial state. - * - * Since this is a 'Readback' operation, with no Pull - * data, we can treat this as a normal Push (read) - * atomic, which returns the original value. - */ - err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); - if (err < 0) - return err; - - /* Was it unlocked? */ - if (nfp_mutex_is_unlocked(tmp)) { - /* The read value can only be 0x....0000 in the unlocked state. - * If there was another contending for this lock, then - * the lock state would be 0x....000f - */ - - /* Write our owner ID into the lock - * While not strictly necessary, this helps with - * debug and bookkeeping. 
- */ - err = nfp_cpp_writel(cpp, muw, mutex->address, value); - if (err < 0) - return err; - - mutex->depth = 1; - return 0; - } - - /* Already locked by us? Success! */ - if (tmp == value) { - mutex->depth = 1; - return 0; - } - - return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; -} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c new file mode 100644 index 0000000000000000000000000000000000000000..8a99c189efa8c3165a12ead6a4d4e44e1eb29805 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +struct nfp_cpp_mutex { + struct nfp_cpp *cpp; + int target; + u16 depth; + unsigned long long address; + u32 key; +}; + +static u32 nfp_mutex_locked(u16 interface) +{ + return (u32)interface << 16 | 0x000f; +} + +static u32 nfp_mutex_unlocked(u16 interface) +{ + return (u32)interface << 16 | 0x0000; +} + +static bool nfp_mutex_is_locked(u32 val) +{ + return (val & 0xffff) == 0x000f; +} + +static bool nfp_mutex_is_unlocked(u32 val) +{ + return (val & 0xffff) == 0000; +} + +/* If you need more than 65536 recursive locks, please rethink your code. */ +#define NFP_MUTEX_DEPTH_MAX 0xffff + +static int +nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) +{ + /* Not permitted on invalid interfaces */ + if (NFP_CPP_INTERFACE_TYPE_of(interface) == + NFP_CPP_INTERFACE_TYPE_INVALID) + return -EINVAL; + + /* Address must be 64-bit aligned */ + if (address & 7) + return -EINVAL; + + if (*target != NFP_CPP_TARGET_MU) + return -EINVAL; + + return 0; +} + +/** + * nfp_cpp_mutex_init() - Initialize a mutex location + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: Unique 32-bit value for this mutex + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. 
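Concretely, the 64 bits mentioned here are two 32-bit words, a layout that init, alloc, trylock and unlock in this file all rely on. A descriptive sketch only; the driver does not actually define such a structure:

    /* The mutex cell at target:address, as used throughout this file */
    struct nfp_mutex_cell {                 /* hypothetical name */
            u32 lock;   /* (owner interface << 16) | 0x000f when locked,
                         * (interface << 16) | 0x0000 when unlocked */
            u32 key;    /* at address + 4; must match mutex->key on every op */
    };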
+ * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). + * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, + int target, unsigned long long address, u32 key) +{ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); + if (err) + return err; + + return 0; +} + +/** + * nfp_cpp_mutex_alloc() - Create a mutex handle + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: 32-bit unique key (must match the key at this location) + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine's CmpAndSwap32 command are supported. + * + * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + u16 interface = nfp_cpp_interface(cpp); + struct nfp_cpp_mutex *mutex; + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return NULL; + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NULL; + + mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + + return mutex; +} + +/** + * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state + * @mutex: NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + kfree(mutex); +} + +/** + * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + unsigned long warn_at = jiffies + 15 * HZ; + unsigned int timeout_ms = 1; + int err; + + /* We can't use a waitqueue here, because the unlocker + * might be on a separate CPU. + * + * So just wait for now. 
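For orientation, the life cycle of the handle API now owned by this file, as a minimal sketch (cpp, addr and key are assumed to come from the caller; error handling trimmed):

    struct nfp_cpp_mutex *m;
    int err;

    m = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, addr, key);
    if (!m)
            return -EINVAL;

    err = nfp_cpp_mutex_lock(m);    /* polls trylock with msleep backoff, below */
    if (!err) {
            /* ... section serialized against other NFP interfaces ... */
            err = nfp_cpp_mutex_unlock(m);
    }
    nfp_cpp_mutex_free(m);          /* frees the handle; lock state untouched */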
+ */ + for (;;) { + err = nfp_cpp_mutex_trylock(mutex); + if (err != -EBUSY) + break; + + err = msleep_interruptible(timeout_ms); + if (err != 0) + return -ERESTARTSYS; + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + 60 * HZ; + nfp_warn(mutex->cpp, + "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n", + mutex->depth, + mutex->target, mutex->address, mutex->key); + } + } + + return err; +} + +/** + * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value; + u16 interface; + int err; + + interface = nfp_cpp_interface(cpp); + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + if (value != nfp_mutex_locked(interface)) + return -EACCES; + + err = nfp_cpp_writel(cpp, muw, mutex->address, + nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + mutex->depth = 0; + return 0; +} + +/** + * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle + * @mutex: NFP CPP Mutex handle + * + * Return: 0 if the lock succeeded, -errno on failure + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value, tmp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == NFP_MUTEX_DEPTH_MAX) + return -E2BIG; + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + /* Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = nfp_mutex_locked(nfp_cpp_interface(cpp)); + + /* We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + return err; + + /* Was it unlocked? */ + if (nfp_mutex_is_unlocked(tmp)) { + /* The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + */ + + /* Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. 
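A worked example of the test_set_imm read described here, assuming our interface id is 0x0102 (a hypothetical value):

    /* value = nfp_mutex_locked(0x0102) = 0x0102000f
     *
     * tmp, the pre-command lock word, then selects the outcome below:
     *   0xXXXX0000  unlocked       -> write our id, depth = 1, return 0
     *   0xYYYY000f  held by YYYY   -> return -EBUSY
     *   anything else              -> damaged marker, return -EINVAL
     */

Note that the old "already locked by us? success!" shortcut did not survive the move into this file; re-entry is handled purely by the depth counter above.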
+ */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + return err; + + mutex->depth = 1; + return 0; + } + + return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 34c50987c377c6e721d82621b51702457a629781..2fa9247bb23db41e406c2243eee6d32fab06e372 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -49,6 +49,7 @@ #include "nfp.h" #include "nfp_cpp.h" +#include "nfp_nsp.h" /* Offsets relative to the CSR base */ #define NSP_STATUS 0x00 @@ -77,7 +78,7 @@ #define NSP_MAGIC 0xab10 #define NSP_MAJOR 0 -#define NSP_MINOR (__MAX_SPCODE - 1) +#define NSP_MINOR 8 #define NSP_CODE_MAJOR GENMASK(15, 12) #define NSP_CODE_MINOR GENMASK(11, 0) @@ -92,8 +93,18 @@ enum nfp_nsp_cmd { SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ +}; - __MAX_SPCODE, +static const struct { + int code; + const char *msg; +} nsp_errors[] = { + { 6010, "could not map to phy for port" }, + { 6011, "not an allowed rate/lanes for port" }, + { 6012, "not an allowed rate/lanes for port" }, + { 6013, "high/low error, change other port first" }, + { 6014, "config not found in flash" }, }; struct nfp_nsp { @@ -103,8 +114,63 @@ struct nfp_nsp { u16 major; u16 minor; } ver; + + /* Eth table config state */ + bool modified; + unsigned int idx; + void *entries; }; +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state) +{ + return state->cpp; +} + +bool nfp_nsp_config_modified(struct nfp_nsp *state) +{ + return state->modified; +} + +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified) +{ + state->modified = modified; +} + +void *nfp_nsp_config_entries(struct nfp_nsp *state) +{ + return state->entries; +} + +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state) +{ + return state->idx; +} + +void +nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) +{ + state->entries = entries; + state->idx = idx; +} + +void nfp_nsp_config_clear_state(struct nfp_nsp *state) +{ + state->entries = NULL; + state->idx = 0; +} + +static void nfp_nsp_print_extended_error(struct nfp_nsp *state, u32 ret_val) +{ + int i; + + if (!ret_val) + return; + + for (i = 0; i < ARRAY_SIZE(nsp_errors); i++) + if (ret_val == nsp_errors[i].code) + nfp_err(state->cpp, "err msg: %s\n", nsp_errors[i].msg); +} + static int nfp_nsp_check(struct nfp_nsp *state) { struct nfp_cpp *cpp = state->cpp; @@ -209,9 +275,8 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, if ((*reg & mask) == val) return 0; - err = msleep_interruptible(100); - if (err) - return err; + if (msleep_interruptible(25)) + return -ERESTARTSYS; if (time_after(start_time, wait_until)) return -ETIMEDOUT; @@ -228,7 +293,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, * * Return: 0 for success with no result * - * 1..255 for NSP completion with a result code + * positive value for NSP completion with a result code * * -EAGAIN if the NSP is not yet present * -ENODEV if the NSP is not a supported model @@ -239,7 +304,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, u64 buff_addr) { - u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command; + u64 reg, ret_val, nsp_base, 
nsp_buffer, nsp_status, nsp_command; struct nfp_cpp *cpp = state->cpp; u32 nsp_cpp; int err; @@ -292,18 +357,20 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, return err; } + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val); + if (err < 0) + return err; + ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val); + err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { - nfp_warn(cpp, "Result (error) code set: %d command: %d\n", - -err, code); + nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", + -err, (int)ret_val, code); + nfp_nsp_print_extended_error(state, ret_val); return -err; } - err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg); - if (err < 0) - return err; - - return FIELD_GET(NSP_COMMAND_OPTION, reg); + return ret_val; } static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, @@ -380,9 +447,10 @@ int nfp_nsp_wait(struct nfp_nsp *state) if (err != -EAGAIN) break; - err = msleep_interruptible(100); - if (err) + if (msleep_interruptible(25)) { + err = -ERESTARTSYS; break; + } if (time_after(start_time, wait_until)) { err = -ETIMEDOUT; @@ -424,3 +492,9 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state, return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, NULL, 0); } + +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, + buf, size); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h new file mode 100644 index 0000000000000000000000000000000000000000..36b21e4dc56d3fa8d48547b9f1ba1d3de5316d0f --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#ifndef NSP_NSP_H +#define NSP_NSP_H 1 + +#include +#include + +struct firmware; +struct nfp_cpp; +struct nfp_nsp; + +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); +void nfp_nsp_close(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); +int nfp_nsp_wait(struct nfp_nsp *state); +int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); + +enum nfp_eth_interface { + NFP_INTERFACE_NONE = 0, + NFP_INTERFACE_SFP = 1, + NFP_INTERFACE_SFPP = 10, + NFP_INTERFACE_SFP28 = 28, + NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_CXP = 100, + NFP_INTERFACE_QSFP28 = 112, +}; + +enum nfp_eth_media { + NFP_MEDIA_DAC_PASSIVE = 0, + NFP_MEDIA_DAC_ACTIVE, + NFP_MEDIA_FIBRE, +}; + +enum nfp_eth_aneg { + NFP_ANEG_AUTO = 0, + NFP_ANEG_SEARCH, + NFP_ANEG_25G_CONSORTIUM, + NFP_ANEG_25G_IEEE, + NFP_ANEG_DISABLED, +}; + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @interface: interface (module) plugged in + * @media: media type of the @interface + * @aneg: auto negotiation mode + * @mac_addr: interface MAC address + * @label_port: port id + * @label_subport: id of interface within port (for split ports) + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? + * @override_changed: is media reconfig pending? + * + * @port_type: one of %PORT_* defines for ethtool + * @is_split: is interface part of a split port + */ +struct nfp_eth_table { + unsigned int count; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + unsigned int interface; + enum nfp_eth_media media; + + enum nfp_eth_aneg aneg; + + u8 mac_addr[ETH_ALEN]; + + u8 label_port; + u8 label_subport; + + bool enabled; + bool tx_enabled; + bool rx_enabled; + + bool override_changed; + + /* Computed fields */ + u8 port_type; + + bool is_split; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); + +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, + bool configed); + +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx); +int nfp_eth_config_commit_end(struct nfp_nsp *nsp); +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp); + +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode); +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed); +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes); + +/** + * struct nfp_nsp_identify - NSP static information + * @version: opaque version string + * @flags: version flags + * @br_primary: branch id of primary bootloader + * @br_secondary: branch id of secondary bootloader + * @br_nsp: branch id of NSP + * @primary: version of primary bootloader + * @secondary: version id of secondary bootloader + * @nsp: version id of NSP + */ +struct nfp_nsp_identify { + char version[40]; + u8 flags; + u8 br_primary; + u8 br_secondary; + u8 br_nsp; + u16
primary; + u16 secondary; + u16 nsp; +}; + +struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp); + +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c similarity index 54% rename from drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h rename to drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c index edf703d319c8a9c7d98386b74237017b38d795cc..e7a263de3731db29d76ec3ba2141762a81ea4ac2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Netronome Systems, Inc. + * Copyright (C) 2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,51 +31,59 @@ * SOFTWARE. */ -#ifndef NSP_NSP_ETH_H -#define NSP_NSP_ETH_H 1 +#include +#include -#include -#include +#include "nfp.h" +#include "nfp_nsp.h" -/** - * struct nfp_eth_table - ETH table information - * @count: number of table entries - * @ports: table of ports - * - * @eth_index: port index according to legacy ethX numbering - * @index: chip-wide first channel index - * @nbi: NBI index - * @base: first channel index (within NBI) - * @lanes: number of channels - * @speed: interface speed (in Mbps) - * @mac_addr: interface MAC address - * @label: interface id string - * @enabled: is enabled? - * @tx_enabled: is TX enabled? - * @rx_enabled: is RX enabled? - */ -struct nfp_eth_table { - unsigned int count; - struct nfp_eth_table_port { - unsigned int eth_index; - unsigned int index; - unsigned int nbi; - unsigned int base; - unsigned int lanes; - unsigned int speed; +struct nsp_identify { + u8 version[40]; + u8 flags; + u8 br_primary; + u8 br_secondary; + u8 br_nsp; + __le16 primary; + __le16 secondary; + __le16 nsp; + __le16 reserved; +}; - u8 mac_addr[ETH_ALEN]; - char label[8]; +struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp) +{ + struct nfp_nsp_identify *nspi = NULL; + struct nsp_identify *ni; + int ret; - bool enabled; - bool tx_enabled; - bool rx_enabled; - } ports[0]; -}; + if (nfp_nsp_get_abi_ver_minor(nsp) < 15) + return NULL; + + ni = kzalloc(sizeof(*ni), GFP_KERNEL); + if (!ni) + return NULL; + + ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni)); + if (ret < 0) { + nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n", + ret); + goto exit_free; + } + + nspi = kzalloc(sizeof(*nspi), GFP_KERNEL); + if (!nspi) + goto exit_free; -struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); -struct nfp_eth_table * -__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); -int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); + memcpy(nspi->version, ni->version, sizeof(nspi->version)); + nspi->version[sizeof(nspi->version) - 1] = '\0'; + nspi->flags = ni->flags; + nspi->br_primary = ni->br_primary; + nspi->br_secondary = ni->br_secondary; + nspi->br_nsp = ni->br_nsp; + nspi->primary = le16_to_cpu(ni->primary); + nspi->secondary = le16_to_cpu(ni->secondary); + nspi->nsp = le16_to_cpu(ni->nsp); -#endif +exit_free: + kfree(ni); + return nspi; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 1ece1f8ae4b30c0c74a7f630487749d91d5b5620..639438d8313afc558d53233b6da7ab20c26417c0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ 
b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -43,13 +43,13 @@ #include #include "nfp.h" -#include "nfp_nsp_eth.h" +#include "nfp_nsp.h" #include "nfp6000/nfp6000.h" #define NSP_ETH_NBI_PORT_COUNT 24 #define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) #define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ - sizeof(struct eth_table_entry)) + sizeof(union eth_table_entry)) #define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) #define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) @@ -58,14 +58,32 @@ #define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES) +#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0) #define NSP_ETH_STATE_ENABLED BIT_ULL(1) #define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) #define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) #define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) +#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12) +#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) +#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) +#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) +#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) #define NSP_ETH_CTRL_ENABLED BIT_ULL(1) #define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) #define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) +#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) +#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) + +enum nfp_eth_raw { + NSP_ETH_RAW_PORT = 0, + NSP_ETH_RAW_STATE, + NSP_ETH_RAW_MAC, + NSP_ETH_RAW_CONTROL, + + NSP_ETH_NUM_RAW +}; enum nfp_eth_rate { RATE_INVALID = 0, @@ -76,29 +94,49 @@ enum nfp_eth_rate { RATE_25G, }; -struct eth_table_entry { - __le64 port; - __le64 state; - u8 mac_addr[6]; - u8 resv[2]; - __le64 control; +union eth_table_entry { + struct { + __le64 port; + __le64 state; + u8 mac_addr[6]; + u8 resv[2]; + __le64 control; + }; + __le64 raw[NSP_ETH_NUM_RAW]; +}; + +static const struct { + enum nfp_eth_rate rate; + unsigned int speed; +} nsp_eth_rate_tbl[] = { + { RATE_INVALID, 0, }, + { RATE_10M, SPEED_10, }, + { RATE_100M, SPEED_100, }, + { RATE_1G, SPEED_1000, }, + { RATE_10G, SPEED_10000, }, + { RATE_25G, SPEED_25000, }, }; -static unsigned int nfp_eth_rate(enum nfp_eth_rate rate) +static unsigned int nfp_eth_rate2speed(enum nfp_eth_rate rate) { - unsigned int rate_xlate[] = { - [RATE_INVALID] = 0, - [RATE_10M] = SPEED_10, - [RATE_100M] = SPEED_100, - [RATE_1G] = SPEED_1000, - [RATE_10G] = SPEED_10000, - [RATE_25G] = SPEED_25000, - }; + int i; - if (rate >= ARRAY_SIZE(rate_xlate)) - return 0; + for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].rate == rate) + return nsp_eth_rate_tbl[i].speed; + + return 0; +} + +static unsigned int nfp_eth_speed2rate(unsigned int speed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].speed == speed) + return nsp_eth_rate_tbl[i].rate; - return rate_xlate[rate]; + return RATE_INVALID; } static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src) @@ -110,8 +148,8 @@ static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src) } static void -nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index, - struct nfp_eth_table_port *dst) +nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, + unsigned int index, struct nfp_eth_table_port *dst) { unsigned int rate; u64 port, state; @@ -129,14 +167,60 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index, dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); - rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state)); + rate = 
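The rewritten rate helpers replace indexed lookup with one small table scanned in both directions, so a single array serves both nfp_eth_rate2speed() and nfp_eth_speed2rate(). A self-contained sketch, with plain integers standing in for the kernel's SPEED_* constants:

#include <stdio.h>

enum nfp_eth_rate { RATE_INVALID, RATE_10M, RATE_100M, RATE_1G, RATE_10G, RATE_25G };

/* one table, usable from either side of the mapping */
static const struct { enum nfp_eth_rate rate; unsigned int speed; } rate_tbl[] = {
        { RATE_INVALID, 0 },    { RATE_10M, 10 },    { RATE_100M, 100 },
        { RATE_1G, 1000 },      { RATE_10G, 10000 }, { RATE_25G, 25000 },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int rate2speed(enum nfp_eth_rate rate)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rate_tbl); i++)
                if (rate_tbl[i].rate == rate)
                        return rate_tbl[i].speed;
        return 0;
}

static enum nfp_eth_rate speed2rate(unsigned int speed)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rate_tbl); i++)
                if (rate_tbl[i].speed == speed)
                        return rate_tbl[i].rate;
        return RATE_INVALID;    /* callers treat this as an error */
}

int main(void)
{
        printf("%u %d\n", rate2speed(RATE_25G), speed2rate(10000));  /* 25000 4 */
        return 0;
}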
nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state)); dst->speed = dst->lanes * rate; + dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state); + dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state); + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); - snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu", - FIELD_GET(NSP_ETH_PORT_PHYLABEL, port), - FIELD_GET(NSP_ETH_PORT_LABEL, port)); + dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port); + dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) + return; + + dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); + dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); +} + +static void +nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table) +{ + unsigned int i, j; + + for (i = 0; i < table->count; i++) + for (j = 0; j < table->count; j++) { + if (i == j) + continue; + if (table->ports[i].label_port != + table->ports[j].label_port) + continue; + if (table->ports[i].label_subport == + table->ports[j].label_subport) + nfp_warn(cpp, + "Port %d subport %d is a duplicate\n", + table->ports[i].label_port, + table->ports[i].label_subport); + + table->ports[i].is_split = true; + break; + } +} + +static void +nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry) +{ + if (entry->interface == NFP_INTERFACE_NONE) { + entry->port_type = PORT_NONE; + return; + } + + if (entry->media == NFP_MEDIA_FIBRE) + entry->port_type = PORT_FIBRE; + else + entry->port_type = PORT_DA; } /** @@ -166,10 +250,9 @@ struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp) struct nfp_eth_table * __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) { - struct eth_table_entry *entries; + union eth_table_entry *entries; struct nfp_eth_table *table; - unsigned int cnt; - int i, j, ret; + int i, j, ret, cnt = 0; entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); if (!entries) @@ -178,93 +261,288 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { nfp_err(cpp, "reading port table failed %d\n", ret); - kfree(entries); - return NULL; + goto err; } - /* Some versions of flash will give us 0 instead of port count */ - cnt = ret; - if (!cnt) { - for (i = 0; i < NSP_ETH_MAX_COUNT; i++) - if (entries[i].port & NSP_ETH_PORT_LANES_MASK) - cnt++; + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + + /* Some versions of flash will give us 0 instead of port count. + * For those that give a port count, verify it against the value + * calculated above. 
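nfp_eth_mark_split_ports() is a simple pairwise scan: a port is marked split when any other table entry shares its physical label, and an entry with an equal subport is merely warned about as a duplicate. A compact model of that scan with simplified types:

#include <stdbool.h>
#include <stdio.h>

struct port {
        unsigned int label_port;
        unsigned int label_subport;
        bool is_split;
};

/* same pairwise scan as nfp_eth_mark_split_ports() */
static void mark_split_ports(struct port *p, unsigned int count)
{
        unsigned int i, j;

        for (i = 0; i < count; i++)
                for (j = 0; j < count; j++) {
                        if (i == j || p[i].label_port != p[j].label_port)
                                continue;
                        p[i].is_split = true;  /* driver also warns on equal subports */
                        break;
                }
}

int main(void)
{
        struct port ports[] = { { 0, 0 }, { 0, 1 }, { 1, 0 } };
        unsigned int i;

        mark_split_ports(ports, 3);
        for (i = 0; i < 3; i++)
                printf("port %u.%u split=%d\n", ports[i].label_port,
                       ports[i].label_subport, ports[i].is_split);
        return 0;
}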
+ */ + if (ret && ret != cnt) { + nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n", + ret, cnt); + goto err; } table = kzalloc(sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL); - if (!table) { - kfree(entries); - return NULL; - } + if (!table) + goto err; table->count = cnt; for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) if (entries[i].port & NSP_ETH_PORT_LANES_MASK) - nfp_eth_port_translate(&entries[i], i, + nfp_eth_port_translate(nsp, &entries[i], i, &table->ports[j++]); + nfp_eth_mark_split_ports(cpp, table); + for (i = 0; i < table->count; i++) + nfp_eth_calc_port_type(cpp, &table->ports[i]); + kfree(entries); return table; + +err: + kfree(entries); + return NULL; } -/** - * nfp_eth_set_mod_enable() - set PHY module enable control bit - * @cpp: NFP CPP handle - * @idx: NFP chip-wide port index - * @enable: Desired state - * - * Enable or disable PHY module (this usually means setting the TX lanes - * disable bits). - * - * Return: 0 or -ERRNO. - */ -int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) { - struct eth_table_entry *entries; + union eth_table_entry *entries; struct nfp_nsp *nsp; - u64 reg; int ret; entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); if (!entries) - return -ENOMEM; + return ERR_PTR(-ENOMEM); nsp = nfp_nsp_open(cpp); if (IS_ERR(nsp)) { kfree(entries); - return PTR_ERR(nsp); + return nsp; } ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { nfp_err(cpp, "reading port table failed %d\n", ret); - goto exit_close_nsp; + goto err; } if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { nfp_warn(cpp, "trying to set port state on disabled port %d\n", idx); - ret = -EINVAL; - goto exit_close_nsp; + goto err; + } + + nfp_nsp_config_set_state(nsp, entries, idx); + return nsp; + +err: + nfp_nsp_close(nsp); + kfree(entries); + return ERR_PTR(-EIO); +} + +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + + nfp_nsp_config_set_modified(nsp, false); + nfp_nsp_config_clear_state(nsp); + nfp_nsp_close(nsp); + kfree(entries); +} + +/** + * nfp_eth_config_commit_end() - perform recorded configuration changes + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * + * Perform the configuration which was requested with __nfp_eth_set_*() + * helpers and recorded in @nsp state. If device was already configured + * as requested or no __nfp_eth_set_*() operations were made no NSP command + * will be performed. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int nfp_eth_config_commit_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + int ret = 1; + + if (nfp_nsp_config_modified(nsp)) { + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + ret = ret < 0 ? ret : 0; + } + + nfp_eth_config_cleanup_end(nsp); + + return ret; +} + +/** + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: 0 or -ERRNO. 
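nfp_eth_config_start()/nfp_eth_config_commit_end() turn port reconfiguration into a small transaction: snapshot the table, record edits against the handle, and write back only when something was marked modified, returning 1 when no change was needed. A minimal sketch of the pattern, with a plain int standing in for the cached ETH table:

#include <stdbool.h>
#include <stdio.h>

struct cfg_handle {
        int shadow;             /* stands in for the cached table */
        bool modified;
};

static void cfg_start(struct cfg_handle *h, int current_val)
{
        h->shadow = current_val;
        h->modified = false;
}

static void cfg_set(struct cfg_handle *h, int val)
{
        if (h->shadow == val)
                return;         /* already in requested state */
        h->shadow = val;
        h->modified = true;
}

static int cfg_commit(struct cfg_handle *h, int *hw)
{
        if (!h->modified)
                return 1;       /* nothing to do, like commit_end() */
        *hw = h->shadow;        /* single write-back of all recorded edits */
        h->modified = false;
        return 0;
}

int main(void)
{
        struct cfg_handle h;
        int hw = 42;

        cfg_start(&h, hw);
        cfg_set(&h, 42);                                /* no-op */
        printf("commit -> %d\n", cfg_commit(&h, &hw));  /* 1 */
        cfg_set(&h, 7);
        printf("commit -> %d hw=%d\n", cfg_commit(&h, &hw), hw);  /* 0 hw=7 */
        return 0;
}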
+ */ +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].state); + if (enable != FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); } + return nfp_eth_config_commit_end(nsp); +} + +/** + * nfp_eth_set_configured() - set PHY module configured control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @configed: Desired state + * + * Set the ifup/ifdown state on the PHY. + * + * Return: 0 or -ERRNO. + */ +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + entries = nfp_nsp_config_entries(nsp); + /* Check if we are already in requested state */ reg = le64_to_cpu(entries[idx].state); - if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { - ret = 0; - goto exit_close_nsp; + if (configed != FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_CONFIGURED; + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); } - reg = le64_to_cpu(entries[idx].control); - reg &= ~NSP_ETH_CTRL_ENABLED; - reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); - entries[idx].control = cpu_to_le64(reg); + return nfp_eth_config_commit_end(nsp); +} - ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); -exit_close_nsp: - nfp_nsp_close(nsp); - kfree(entries); +/* Force inline, FIELD_* macros require masks to be compile-time known */ +static __always_inline int +nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, + const u64 mask, unsigned int val, const u64 ctrl_bit) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + unsigned int idx = nfp_nsp_config_idx(nsp); + u64 reg; + + /* Note: set features were added in ABI 0.14 but the error + * codes were initially not populated correctly. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { + nfp_err(nfp_nsp_cpp(nsp), + "set operations not supported, please update flash\n"); + return -EOPNOTSUPP; + } + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].raw[raw_idx]); + if (val == FIELD_GET(mask, reg)) + return 0; - return ret < 0 ? ret : 0; + reg &= ~mask; + reg |= FIELD_PREP(mask, val); + entries[idx].raw[raw_idx] = cpu_to_le64(reg); + + entries[idx].control |= cpu_to_le64(ctrl_bit); + + nfp_nsp_config_set_modified(nsp, true); + + return 0; +} + +/** + * __nfp_eth_set_aneg() - set PHY autonegotiation control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired autonegotiation mode + * + * Allow/disallow PHY module to advertise/perform autonegotiation. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. 
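nfp_eth_set_bit_config() builds on FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which require the mask to be a compile-time constant (hence the __always_inline note above). A portable approximation of the same read-modify-write, using the GCC-style __builtin_ctzll where the kernel derives the shift from the mask:

#include <stdint.h>
#include <stdio.h>

/* portable stand-ins for GENMASK_ULL/FIELD_GET/FIELD_PREP; the kernel
 * versions additionally demand compile-time-constant masks */
#define GENMASK_ULL(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(mask, reg)   (((reg) & (mask)) >> __builtin_ctzll(mask))
#define FIELD_PREP(mask, val)  (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define STATE_RATE GENMASK_ULL(11, 8)   /* mirrors NSP_ETH_STATE_RATE */

int main(void)
{
        uint64_t reg = 0xabcd000000000000ULL;

        /* read-modify-write of a single field, leaving other bits intact */
        reg &= ~STATE_RATE;
        reg |= FIELD_PREP(STATE_RATE, 5);
        printf("reg=%#llx rate=%llu\n", (unsigned long long)reg,
               (unsigned long long)FIELD_GET(STATE_RATE, reg));
        return 0;
}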
+ */ +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +{ + return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_ANEG, mode, + NSP_ETH_CTRL_SET_ANEG); +} + +/** + * __nfp_eth_set_speed() - set interface speed/rate + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @speed: Desired speed (per lane) + * + * Set lane speed. Provided @speed value should be subport speed divided + * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for + * 50G, etc.) + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +{ + enum nfp_eth_rate rate; + + rate = nfp_eth_speed2rate(speed); + if (rate == RATE_INVALID) { + nfp_warn(nfp_nsp_cpp(nsp), + "could not find matching lane rate for speed %u\n", + speed); + return -EINVAL; + } + + return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_RATE, rate, + NSP_ETH_CTRL_SET_RATE); +} + +/** + * __nfp_eth_set_split() - set interface lane split + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @lanes: Desired lanes per port + * + * Set number of lanes in the port. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +{ + return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, + lanes, NSP_ETH_CTRL_SET_LANES); } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index a2850344f8b44179dac1501e1f0a5369b6dddc6a..2d15a7c9d0de33d6b3773e3c7da7ed49c694095d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -45,6 +45,13 @@ #include "nfp_cpp.h" #include "nfp6000/nfp6000.h" +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + #define NFP_RESOURCE_ENTRY_NAME_SZ 8 /** @@ -100,9 +107,11 @@ static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) strncpy(name_pad, res->name, sizeof(name_pad)); /* Search for a matching entry */ - key = NFP_RESOURCE_TBL_KEY; - if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) - key = crc32_posix(name_pad, sizeof(name_pad)); + if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) { + nfp_err(cpp, "Grabbing device lock not supported\n"); + return -EOPNOTSUPP; + } + key = crc32_posix(name_pad, sizeof(name_pad)); for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { u64 addr = NFP_RESOURCE_TBL_BASE + diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 9709c8ca0774dcf70f226d29d63b4eaf70cc578c..159564d8dcdb5cd1ec5d6cd02049e380d7b73e44 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -152,7 +152,6 @@ struct w90p910_ether { struct tran_pdesc *tdesc; dma_addr_t rdesc_phys; dma_addr_t tdesc_phys; - struct net_device_stats stats; struct platform_device *pdev; struct resource *res; struct sk_buff *skb; @@ -584,15 +583,6 @@ static int w90p910_ether_close(struct net_device *dev) return 0; } -static struct net_device_stats *w90p910_ether_stats(struct net_device *dev) -{ - struct w90p910_ether 
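As the __nfp_eth_set_speed() comment says, the NSP wants the per-lane rate, so callers divide the subport speed by the number of lanes the subport spans. The arithmetic, using the example values from the comment:

#include <stdio.h>

static unsigned int per_lane_speed(unsigned int subport_speed, unsigned int lanes)
{
        return lanes ? subport_speed / lanes : 0;
}

int main(void)
{
        printf("%u %u\n",
               per_lane_speed(40000, 4),   /* 40G over 4 lanes -> 10000 */
               per_lane_speed(50000, 2));  /* 50G over 2 lanes -> 25000 */
        return 0;
}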
*ether; - - ether = netdev_priv(dev); - - return ðer->stats; -} - static int w90p910_send_frame(struct net_device *dev, unsigned char *data, int length) { @@ -671,10 +661,10 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) ether->finish_tx = 0; if (txbd->sl & TXDS_TXCP) { - ether->stats.tx_packets++; - ether->stats.tx_bytes += txbd->sl & 0xFFFF; + dev->stats.tx_packets++; + dev->stats.tx_bytes += txbd->sl & 0xFFFF; } else { - ether->stats.tx_errors++; + dev->stats.tx_errors++; } txbd->sl = 0x0; @@ -730,7 +720,7 @@ static void netdev_rx(struct net_device *dev) data = ether->rdesc->recv_buf[ether->cur_rx]; skb = netdev_alloc_skb(dev, length + 2); if (!skb) { - ether->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -738,24 +728,24 @@ static void netdev_rx(struct net_device *dev) skb_put(skb, length); skb_copy_to_linear_data(skb, data, length); skb->protocol = eth_type_trans(skb, dev); - ether->stats.rx_packets++; - ether->stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; netif_rx(skb); } else { - ether->stats.rx_errors++; + dev->stats.rx_errors++; if (status & RXDS_RP) { dev_err(&pdev->dev, "rx runt err\n"); - ether->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else if (status & RXDS_CRCE) { dev_err(&pdev->dev, "rx crc err\n"); - ether->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { dev_err(&pdev->dev, "rx alignment err\n"); - ether->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); - ether->stats.rx_over_errors++; + dev->stats.rx_over_errors++; } } @@ -912,7 +902,6 @@ static const struct net_device_ops w90p910_ether_netdev_ops = { .ndo_open = w90p910_ether_open, .ndo_stop = w90p910_ether_close, .ndo_start_xmit = w90p910_ether_start_xmit, - .ndo_get_stats = w90p910_ether_stats, .ndo_set_rx_mode = w90p910_ether_set_multicast_list, .ndo_set_mac_address = w90p910_set_mac_address, .ndo_do_ioctl = w90p910_ether_ioctl, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 7b43a3b4abdcbc7bc1cdfd4d13c611563e2760a2..3dd973475125c0f856d4eccacba4ce4c0c34cc50 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1375,13 +1375,8 @@ netxen_receive_peg_ready(struct netxen_adapter *adapter) } while (--retries); - if (!retries) { - printk(KERN_ERR "Receive Peg initialization not " - "complete, state: 0x%x.\n", val); - return -EIO; - } - - return 0; + pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val); + return -EIO; } int netxen_init_firmware(struct netxen_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 00c17fa6545bd5752a427e3660b062dc26ba57db..2ab1aab7c3feb010505dc8a33e8bf96823251680 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -51,7 +51,19 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.10.10.20" + +#define QED_MAJOR_VERSION 8 +#define QED_MINOR_VERSION 10 +#define QED_REVISION_VERSION 10 +#define QED_ENGINEERING_VERSION 21 + +#define QED_VERSION \ + ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \ + (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION) + +#define STORM_FW_VERSION \ + ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ + 
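qed.h now derives both QED_VERSION and STORM_FW_VERSION by packing four one-byte components into a u32, major number in the top byte. A self-contained sketch of the packing and unpacking (PACK_VER is an illustrative name, not a driver macro):

#include <stdint.h>
#include <stdio.h>

#define PACK_VER(a, b, c, d) \
        (((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
         ((uint32_t)(c) << 8) | (uint32_t)(d))

int main(void)
{
        uint32_t v = PACK_VER(8, 10, 10, 21);

        /* unpack byte by byte to recover the dotted version */
        printf("%#010x -> %u.%u.%u.%u\n", v, v >> 24, (v >> 16) & 0xff,
               (v >> 8) & 0xff, v & 0xff);
        return 0;
}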
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 @@ -59,9 +71,8 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 -#define ISCSI_BDQ_ID(_port_id) (_port_id) -#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2) #define QED_WID_SIZE (1024) +#define QED_MIN_WIDS (4) #define QED_PF_DEMS_SIZE (4) /* cau states */ @@ -76,6 +87,15 @@ union qed_mcp_protocol_stats; enum qed_mcp_protocol_type; /* helpers */ +#define QED_MFW_GET_FIELD(name, field) \ + (((name) & (field ## _MASK)) >> (field ## _SHIFT)) + +#define QED_MFW_SET_FIELD(name, field, value) \ + do { \ + (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \ + (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\ + } while (0) + static inline u32 qed_db_addr(u32 cid, u32 DEMS) { u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | @@ -130,9 +150,35 @@ enum qed_tunn_clss { QED_TUNN_CLSS_MAC_VNI, QED_TUNN_CLSS_INNER_MAC_VLAN, QED_TUNN_CLSS_INNER_MAC_VNI, + QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE, MAX_QED_TUNN_CLSS, }; +struct qed_tunn_update_type { + bool b_update_mode; + bool b_mode_enabled; + enum qed_tunn_clss tun_cls; +}; + +struct qed_tunn_update_udp_port { + bool b_update_port; + u16 port; +}; + +struct qed_tunnel_info { + struct qed_tunn_update_type vxlan; + struct qed_tunn_update_type l2_geneve; + struct qed_tunn_update_type ip_geneve; + struct qed_tunn_update_type l2_gre; + struct qed_tunn_update_type ip_gre; + + struct qed_tunn_update_udp_port vxlan_port; + struct qed_tunn_update_udp_port geneve_port; + + bool b_update_rx_cls; + bool b_update_tx_cls; +}; + struct qed_tunn_start_params { unsigned long tunn_mode; u16 vxlan_udp_port; @@ -198,6 +244,7 @@ enum qed_resources { QED_LL2_QUEUE, QED_CMDQS_CQS, QED_RDMA_STATS_QUEUE, + QED_BDQ, QED_MAX_RESC, }; @@ -205,8 +252,9 @@ enum QED_FEATURE { QED_PF_L2_QUE, QED_VF, QED_RDMA_CNQ, - QED_VF_L2_QUE, + QED_ISCSI_CQ, QED_FCOE_CQ, + QED_VF_L2_QUE, QED_MAX_FEATURES, }; @@ -219,7 +267,9 @@ enum QED_PORT_MODE { QED_PORT_MODE_DE_4X20G, QED_PORT_MODE_DE_1X40G, QED_PORT_MODE_DE_2X25G, - QED_PORT_MODE_DE_1X25G + QED_PORT_MODE_DE_1X25G, + QED_PORT_MODE_DE_4X25G, + QED_PORT_MODE_DE_2X10G, }; enum qed_dev_cap { @@ -249,9 +299,14 @@ struct qed_hw_info { RESC_NUM(_p_hwfn, resc)) #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) - u8 num_tc; + /* Amount of traffic classes HW supports */ + u8 num_hw_tc; + + /* Amount of TCs which should be active according to DCBx or upper + * layer driver configuration. + */ + u8 num_active_tc; u8 offload_tc; - u8 non_offload_tc; u32 concrete_fid; u16 opaque_fid; @@ -314,15 +369,19 @@ struct qed_qm_info { struct init_qm_port_params *qm_port_params; u16 start_pq; u8 start_vport; - u8 pure_lb_pq; - u8 offload_pq; - u8 pure_ack_pq; - u8 ooo_pq; - u8 vf_queues_offset; + u16 pure_lb_pq; + u16 offload_pq; + u16 low_latency_pq; + u16 pure_ack_pq; + u16 ooo_pq; + u16 first_vf_pq; + u16 first_mcos_pq; + u16 first_rl_pq; u16 num_pqs; u16 num_vf_pqs; u8 num_vports; u8 max_phys_tcs_per_port; + u8 ooo_tc; bool pf_rl_en; bool pf_wfq_en; bool vport_rl_en; @@ -353,6 +412,12 @@ struct qed_fw_data { u32 init_ops_size; }; +#define DRV_MODULE_VERSION \ + __stringify(QED_MAJOR_VERSION) "." \ + __stringify(QED_MINOR_VERSION) "." \ + __stringify(QED_REVISION_VERSION) "." 
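QED_MFW_GET_FIELD works by token pasting: a field FOO must come with FOO_MASK (already shifted into position) and FOO_SHIFT definitions from the firmware headers. A minimal sketch with made-up field names:

#include <stdint.h>
#include <stdio.h>

/* DEMO_TCS_* are invented for illustration, not driver constants */
#define DEMO_TCS_MASK   0x000000f0
#define DEMO_TCS_SHIFT  4

#define GET_FIELD(name, field) \
        (((name) & (field ## _MASK)) >> (field ## _SHIFT))

int main(void)
{
        uint32_t flags = 0x00000050;

        printf("tcs = %u\n", GET_FIELD(flags, DEMO_TCS));   /* 5 */
        return 0;
}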
\ + __stringify(QED_ENGINEERING_VERSION) + struct qed_simd_fp_handler { void *token; void (*func)(void *); @@ -364,7 +429,8 @@ struct qed_hwfn { #define IS_LEAD_HWFN(edev) (!((edev)->my_id)) u8 rel_pf_id; /* Relative to engine*/ u8 abs_pf_id; -#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1) +#define QED_PATH_ID(_p_hwfn) \ + (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1)) u8 port_id; bool b_active; @@ -409,6 +475,11 @@ struct qed_hwfn { struct qed_ptt *p_main_ptt; struct qed_ptt *p_dpc_ptt; + /* PTP will be used only by the leading function. + * Usage of all PTP-apis should be synchronized as result. + */ + struct qed_ptt *p_ptp_ptt; + struct qed_sb_sp_info *p_sp_sb; struct qed_sb_attn_info *p_sb_attn; @@ -455,6 +526,7 @@ struct qed_hwfn { struct dbg_tools_data dbg_info; /* PWM region specific data */ + u16 wid_count; u32 dpi_size; u32 dpi_count; @@ -465,8 +537,8 @@ struct qed_hwfn { u8 dcbx_no_edpm; u8 db_bar_no_edpm; - /* p_ptp_ptt is valid for leading HWFN only */ - struct qed_ptt *p_ptp_ptt; + struct qed_ptt *p_arfs_ptt; + struct qed_simd_fp_handler simd_proto_handler[64]; #ifdef CONFIG_QED_SRIOV @@ -523,9 +595,7 @@ struct qed_dev { u8 dp_level; char name[NAME_SIZE]; - u8 type; -#define QED_DEV_TYPE_BB (0 << 0) -#define QED_DEV_TYPE_AH BIT(0) + enum qed_dev_type type; /* Translate type/revision combo into the proper conditions */ #define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) #define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \ @@ -540,6 +610,9 @@ struct qed_dev { u16 vendor_id; u16 device_id; +#define QED_DEV_ID_MASK 0xff00 +#define QED_DEV_ID_MASK_BB 0x1600 +#define QED_DEV_ID_MASK_AH 0x8000 u16 chip_num; #define CHIP_NUM_MASK 0xffff @@ -606,9 +679,7 @@ struct qed_dev { /* SRIOV */ struct qed_hw_sriov_info *p_iov_info; #define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) - - unsigned long tunn_mode; - + struct qed_tunnel_info tunnel; bool b_is_vf; u32 drv_type; struct qed_eth_stats *reset_stats; @@ -652,12 +723,19 @@ struct qed_dev { u32 rdma_max_sge; u32 rdma_max_inline; u32 rdma_max_srq_sge; + u16 tunn_feature_mask; }; -#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB -#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB -#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB -#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB +#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \ + : MAX_NUM_VFS_K2) +#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \ + : MAX_NUM_L2_QUEUES_K2) +#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \ + : MAX_NUM_PORTS_K2) +#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \ + : MAX_SB_PER_PATH_K2) +#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? 
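DRV_MODULE_VERSION is glued together from the numeric components with __stringify, which needs the classic two-level macro so the argument is expanded before being stringized. Sketch:

#include <stdio.h>

/* two-level stringification as in the kernel's __stringify() */
#define __stringify_1(x)  #x
#define __stringify(x)    __stringify_1(x)

#define DEMO_MAJOR 8
#define DEMO_MINOR 10

#define DEMO_VERSION __stringify(DEMO_MAJOR) "." __stringify(DEMO_MINOR)

int main(void)
{
        printf("%s\n", DEMO_VERSION);   /* prints "8.10", not "DEMO_MAJOR.DEMO_MINOR" */
        return 0;
}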
MAX_NUM_PFS_BB \ + : MAX_NUM_PFS_K2) /** * @brief qed_concrete_to_sw_fid - get the sw function id from @@ -693,6 +771,26 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_device_num_engines(struct qed_dev *cdev); +int qed_device_get_port_id(struct qed_dev *cdev); + +#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) + +/* Flags for indication of required queues */ +#define PQ_FLAGS_RLS (BIT(0)) +#define PQ_FLAGS_MCOS (BIT(1)) +#define PQ_FLAGS_LB (BIT(2)) +#define PQ_FLAGS_OOO (BIT(3)) +#define PQ_FLAGS_ACK (BIT(4)) +#define PQ_FLAGS_OFLD (BIT(5)) +#define PQ_FLAGS_VFS (BIT(6)) +#define PQ_FLAGS_LLT (BIT(7)) + +/* physical queue index for cm context initialization */ +u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags); +u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); +u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); + #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) /* Other Linux specific common definitions */ @@ -721,5 +819,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev, enum qed_mcp_protocol_type type, union qed_mcp_protocol_stats *stats); int qed_slowpath_irq_req(struct qed_hwfn *hwfn); +void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7e3a6fed3da6d94fe47139aef697563b56726950..b3aaa985956e4a937af56ccf2410cf9eeb884985 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -71,8 +71,7 @@ #define TM_ALIGN BIT(TM_SHIFT) #define TM_ELEM_SIZE 4 -/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ -#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3) +#define ILT_DEFAULT_HW_P_SIZE 4 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET @@ -220,9 +219,6 @@ struct qed_cxt_mngr { */ u32 vf_count; - /* total number of SRQ's for this hwfn */ - u32 srq_count; - /* Acquired CIDs */ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; @@ -238,12 +234,17 @@ struct qed_cxt_mngr { u32 t2_num_pages; u64 first_free; u64 last_free; + + /* total number of SRQ's for this hwfn */ + u32 srq_count; + + /* Maximal number of L2 steering filters */ + u32 arfs_count; }; static bool src_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || - type == PROTOCOLID_FCOE || - type == PROTOCOLID_ROCE; + type == PROTOCOLID_FCOE; } static bool tm_cid_proto(enum protocol_type type) @@ -293,6 +294,9 @@ static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr, iids->pf_cids += p_mngr->conn_cfg[i].cid_count; iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; } + + /* Add L2 filtering filters in addition */ + iids->pf_cids += p_mngr->arfs_count; } /* counts the iids for the Timers block configuration */ @@ -304,16 +308,34 @@ struct qed_tm_iids { u32 per_vf_tids; }; -static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, +static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn, + struct qed_cxt_mngr *p_mngr, struct qed_tm_iids *iids) { - u32 i, j; - - for (i = 0; i < MAX_CONN_TYPES; i++) { + bool tm_vf_required = false; + bool tm_required = false; + int i, j; + + /* Timers is a special case -> we don't count how many cids require + * timers but what's the max cid that will be used by the timer block. 
+ * therefore we traverse in reverse order, and once we hit a protocol + * that requires the timers memory, we'll sum all the protocols up + * to that one. + */ + for (i = MAX_CONN_TYPES - 1; i >= 0; i--) { struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; - if (tm_cid_proto(i)) { + if (tm_cid_proto(i) || tm_required) { + if (p_cfg->cid_count) + tm_required = true; + iids->pf_cids += p_cfg->cid_count; + } + + if (tm_cid_proto(i) || tm_vf_required) { + if (p_cfg->cids_per_vf) + tm_vf_required = true; + iids->per_vf_cids += p_cfg->cids_per_vf; } @@ -527,7 +549,22 @@ static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn, return lines_to_skip; } -int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) +static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg + *p_cli) +{ + p_cli->active = false; + p_cli->first.val = 0; + p_cli->last.val = 0; + return p_cli; +} + +static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk) +{ + p_blk->total_size = 0; + return p_blk; +} + +int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 curr_line, total, i, task_size, line; @@ -551,7 +588,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); /* CDUC */ - p_cli = &p_mngr->clients[ILT_CLI_CDUC]; + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]); + curr_line = p_mngr->pf_start_line; /* CDUC PF */ @@ -560,7 +598,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) /* get the counters for the CDUC and QM clients */ qed_cxt_cdu_iids(p_mngr, &cdu_iids); - p_blk = &p_cli->pf_blks[CDUC_BLK]; + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]); total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); @@ -574,7 +612,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) ILT_CLI_CDUC); /* CDUC VF */ - p_blk = &p_cli->vf_blks[CDUC_BLK]; + p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]); total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, @@ -588,7 +626,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) ILT_CLI_CDUC); /* CDUT PF */ - p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]); p_cli->first.val = curr_line; /* first the 'working' task memory */ @@ -597,7 +635,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) if (!p_seg || p_seg->count == 0) continue; - p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)]; + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]); total = p_seg->count * p_mngr->task_type_size[p_seg->type]; qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, p_mngr->task_type_size[p_seg->type]); @@ -612,7 +650,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) if (!p_seg || p_seg->count == 0) continue; - p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]; + p_blk = + qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]); if (!p_seg->has_fl_mem) { /* The segment is active (total size pf 'working' @@ -657,7 +696,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) /* 'working' memory */ total = p_seg->count * p_mngr->task_type_size[p_seg->type]; - p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, p_mngr->task_type_size[p_seg->type]); @@ -666,7 +705,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) ILT_CLI_CDUT); /* 'init' memory */ - p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + p_blk = 
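The reverse traversal in qed_cxt_tm_iids() exists because the timers block must be sized up to the highest cid of any timer-using protocol; walking from the top and summing once such a protocol is seen captures every lower protocol's cids in between. A small model of that scan (protocol ids and counts invented):

#include <stdbool.h>
#include <stdio.h>

#define NUM_TYPES 5

/* invented: only protocols 1 and 3 use the timers block */
static bool needs_timers(int type)
{
        return type == 1 || type == 3;
}

int main(void)
{
        unsigned int cid_count[NUM_TYPES] = { 4, 8, 2, 16, 32 };
        unsigned int max_cid_span = 0;
        bool required = false;
        int i;

        /* walk from the top; once a timer protocol with cids is seen,
         * every lower protocol's cids count toward the span */
        for (i = NUM_TYPES - 1; i >= 0; i--) {
                if (!needs_timers(i) && !required)
                        continue;
                if (cid_count[i])
                        required = true;
                max_cid_span += cid_count[i];
        }
        printf("timer cids = %u\n", max_cid_span);  /* 16 + 2 + 8 + 4 = 30 */
        return 0;
}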
+ qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]); if (!p_seg->has_fl_mem) { /* see comment above */ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; @@ -694,8 +734,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) } /* QM */ - p_cli = &p_mngr->clients[ILT_CLI_QM]; - p_blk = &p_cli->pf_blks[0]; + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]); + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_cxt_qm_iids(p_hwfn, &qm_iids); total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, @@ -719,7 +759,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) p_cli->pf_total_lines = curr_line - p_blk->start_line; /* SRC */ - p_cli = &p_mngr->clients[ILT_CLI_SRC]; + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]); qed_cxt_src_iids(p_mngr, &src_iids); /* Both the PF and VFs searcher connections are stored in the per PF @@ -733,7 +773,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) total = roundup_pow_of_two(local_max); - p_blk = &p_cli->pf_blks[0]; + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * sizeof(struct src_ent), sizeof(struct src_ent)); @@ -744,11 +784,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) } /* TM PF */ - p_cli = &p_mngr->clients[ILT_CLI_TM]; - qed_cxt_tm_iids(p_mngr, &tm_iids); + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]); + qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); total = tm_iids.pf_cids + tm_iids.pf_tids_total; if (total) { - p_blk = &p_cli->pf_blks[0]; + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * TM_ELEM_SIZE, TM_ELEM_SIZE); @@ -760,14 +800,14 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) /* TM VF */ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; if (total) { - p_blk = &p_cli->vf_blks[0]; + p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * TM_ELEM_SIZE, TM_ELEM_SIZE); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_TM); - p_cli->pf_total_lines = curr_line - p_blk->start_line; + p_cli->vf_total_lines = curr_line - p_blk->start_line; for (i = 1; i < p_mngr->vf_count; i++) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_TM); @@ -777,8 +817,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) total = qed_cxt_get_srq_count(p_hwfn); if (total) { - p_cli = &p_mngr->clients[ILT_CLI_TSDM]; - p_blk = &p_cli->pf_blks[SRQ_BLK]; + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); @@ -787,13 +827,50 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) p_cli->pf_total_lines = curr_line - p_blk->start_line; } + *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line; + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > - RESC_NUM(p_hwfn, QED_ILT)) { - DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", - curr_line - p_hwfn->p_cxt_mngr->pf_start_line); + RESC_NUM(p_hwfn, QED_ILT)) return -EINVAL; + + return 0; +} + +u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines) +{ + struct qed_ilt_client_cfg *p_cli; + u32 excess_lines, available_lines; + struct qed_cxt_mngr *p_mngr; + u32 ilt_page_size, elem_size; + struct qed_tid_seg *p_seg; + int i; + + available_lines = RESC_NUM(p_hwfn, QED_ILT); + excess_lines = used_lines - available_lines; + + if (!excess_lines) + return 0; + + if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + return 0; + + p_mngr = 
p_hwfn->p_cxt_mngr; + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); + + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + elem_size = p_mngr->task_type_size[p_seg->type]; + if (!elem_size) + continue; + + return (ilt_page_size / elem_size) * excess_lines; } + DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n"); return 0; } @@ -1127,7 +1204,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); - /* default ILT page size for all clients is 32K */ + /* default ILT page size for all clients is 64K */ for (i = 0; i < ILT_CLI_MAX; i++) p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; @@ -1367,7 +1444,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) } } -void qed_qm_init_pf(struct qed_hwfn *p_hwfn) +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_qm_pf_rt_init_params params; struct qed_qm_info *qm_info = &p_hwfn->qm_info; @@ -1393,22 +1470,15 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) params.pq_params = qm_info->qm_pq_params; params.vport_params = qm_info->qm_vport_params; - qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, ¶ms); + qed_qm_pf_rt_init(p_hwfn, p_ptt, ¶ms); } /* CM PF */ -static int qed_cm_init_pf(struct qed_hwfn *p_hwfn) +void qed_cm_init_pf(struct qed_hwfn *p_hwfn) { - union qed_qm_pq_params pq_params; - u16 pq; - /* XCM pure-LB queue */ - memset(&pq_params, 0, sizeof(pq_params)); - pq_params.core.tc = LB_TC; - pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); - STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq); - - return 0; + STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, + qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB)); } /* DQ PF */ @@ -1640,7 +1710,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) u8 i; memset(&tm_iids, 0, sizeof(tm_iids)); - qed_cxt_tm_iids(p_mngr, &tm_iids); + qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); /* @@@TBD No pre-scan for now */ @@ -1758,9 +1828,9 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) qed_prs_init_common(p_hwfn); } -void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) +void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - qed_qm_init_pf(p_hwfn); + qed_qm_init_pf(p_hwfn, p_ptt); qed_cm_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn); qed_cdu_init_pf(p_hwfn); @@ -1884,13 +1954,12 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) } static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, - struct qed_rdma_pf_params *p_params) + struct qed_rdma_pf_params *p_params, + u32 num_tasks) { - u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; + u32 num_cons, num_qps, num_srqs; enum protocol_type proto; - num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs); - num_tasks = num_mrs; /* each mr uses a single task id */ num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); switch (p_hwfn->hw_info.personality) { @@ -1919,7 +1988,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, } } -int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) { /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ @@ -1931,9 +2000,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) switch (p_hwfn->hw_info.personality) { case 
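qed_cxt_cfg_ilt_compute_excess() converts an ILT line deficit back into task elements: with the default hw_p_size of 4, ILT_PAGE_IN_BYTES gives a 64K page (which is why the stale "32K" comment above was corrected to 64K), and each line holds page_size / elem_size elements. The arithmetic, with an illustrative element size:

#include <stdio.h>

#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))

int main(void)
{
        unsigned int page = ILT_PAGE_IN_BYTES(4);   /* default -> 65536 */
        unsigned int elem_size = 128;               /* illustrative task size */
        unsigned int excess_lines = 3;

        /* each reclaimed line frees page / elem_size task elements */
        printf("page=%u excess elems=%u\n", page,
               (page / elem_size) * excess_lines);
        return 0;
}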
QED_PCI_ETH_ROCE: { - qed_rdma_set_pf_params(p_hwfn, - &p_hwfn-> - pf_params.rdma_pf_params); + qed_rdma_set_pf_params(p_hwfn, + &p_hwfn-> + pf_params.rdma_pf_params, + rdma_tasks); /* no need for break since RoCE coexist with Ethernet */ } case QED_PCI_ETH: @@ -1943,6 +2013,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, p_params->num_cons, 1); + p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } case QED_PCI_FCOE: diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 8b010324268ad2b5b5313f6b8e62db226f7650e5..53ad532dc21223e4a6fa15039e5ab17acb5e6a01 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -105,19 +105,28 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, * @brief qed_cxt_set_pf_params - Set the PF params for cxt init * * @param p_hwfn - * + * @param rdma_tasks - requested maximum * @return int */ -int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn); +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); /** * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters * * @param p_hwfn + * @param last_line * * @return int */ -int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn); +int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); + +/** + * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased + * + * @param p_hwfn + * @param used_lines + */ +u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); /** * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct @@ -163,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); /** * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. 
* - * - * * @param p_hwfn + * @param p_ptt */ -void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn); +void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** * @brief qed_qm_init_pf - Initailze the QM PF phase, per path * * @param p_hwfn + * @param p_ptt */ - -void qed_qm_init_pf(struct qed_hwfn *p_hwfn); +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** * @brief Reconfigures QM pf on the fly diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index cfdadb658ade0fe73551f6f822ec8ae1805f83de..d883ad5bec6dc61b73e49abc507c26dce10f8fb3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -183,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n", qed_dcbx_app_update[i].name, p_data->arr[id].update, p_data->arr[id].enable, p_data->arr[id].priority, - p_data->arr[id].tc, p_hwfn->hw_info.num_tc); + p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc); } } @@ -204,12 +204,8 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, p_data->arr[type].tc = tc; /* QM reconf data */ - if (p_info->personality == personality) { - if (personality == QED_PCI_ETH) - p_info->non_offload_tc = tc; - else - p_info->offload_tc = tc; - } + if (p_info->personality == personality) + p_info->offload_tc = tc; } /* Update app protocol data and hw_info fields with the TLV info */ @@ -275,8 +271,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, u8 dcbx_version) { - u8 tc, priority_map; enum dcbx_protocol_type type; + u8 tc, priority_map; bool enable, ieee; u16 protocol_id; int priority; @@ -376,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) if (rc) return rc; - p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); + p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_MAX_TCS); + p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC); data.pf_id = p_hwfn->rel_pf_id; data.dcbx_enabled = !!dcbx_version; @@ -558,8 +556,9 @@ qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn, p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7); DP_VERBOSE(p_hwfn, QED_MSG_DCB, - "PFC params: willing %d, pfc_bitmap %d\n", - p_params->pfc.willing, pfc_map); + "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n", + p_params->pfc.willing, pfc_map, p_params->pfc.max_tc, + p_params->pfc.enabled); } static void @@ -578,10 +577,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); DP_VERBOSE(p_hwfn, QED_MSG_DCB, - "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", - p_params->ets_willing, - p_params->ets_cbs, - p_ets->pri_tc_tbl[0], p_params->max_ets_tc); + "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", + p_params->ets_willing, p_params->ets_enabled, + p_params->ets_cbs, p_ets->pri_tc_tbl[0], + p_params->max_ets_tc); if (p_params->ets_enabled && !p_params->max_ets_tc) { p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES; @@ -622,8 +621,7 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, } static void -qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct qed_dcbx_get *params) +qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct dcbx_features *p_feat; @@ -635,8 +633,7 
@@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, } static void -qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct qed_dcbx_get *params) +qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct dcbx_features *p_feat; @@ -649,7 +646,6 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, static void qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct qed_dcbx_get *params) { struct qed_dcbx_operational_params *p_operational; @@ -670,6 +666,7 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, if (!enabled) { p_operational->enabled = enabled; p_operational->valid = false; + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n"); return; } @@ -683,8 +680,14 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, DCBX_CONFIG_VERSION_CEE); p_operational->cee = val; - DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n", - p_operational->ieee, p_operational->cee); + val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_STATIC); + p_operational->local = val; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "Version support: ieee %d, cee %d, static %d\n", + p_operational->ieee, p_operational->cee, + p_operational->local); qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, @@ -699,7 +702,6 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, static void qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct qed_dcbx_get *params) { struct lldp_config_params_s *p_local; @@ -714,7 +716,6 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, static void qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct qed_dcbx_get *params) { struct lldp_status_params_s *p_remote; @@ -728,25 +729,24 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, } static int -qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_dcbx_get *p_params, +qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params, enum qed_mib_read_type type) { switch (type) { case QED_DCBX_REMOTE_MIB: - qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params); + qed_dcbx_get_remote_params(p_hwfn, p_params); break; case QED_DCBX_LOCAL_MIB: - qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params); + qed_dcbx_get_local_params(p_hwfn, p_params); break; case QED_DCBX_OPERATIONAL_MIB: - qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params); + qed_dcbx_get_operational_params(p_hwfn, p_params); break; case QED_DCBX_REMOTE_LLDP_MIB: - qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params); + qed_dcbx_get_remote_lldp_params(p_hwfn, p_params); break; case QED_DCBX_LOCAL_LLDP_MIB: - qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params); + qed_dcbx_get_local_lldp_params(p_hwfn, p_params); break; default: DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); @@ -904,7 +904,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, qed_sp_pf_update(p_hwfn); } } - qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type); + + qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type); qed_dcbx_aen(p_hwfn, type); return rc; @@ -912,17 +913,14 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn) { - int rc = 0; - p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL); if (!p_hwfn->p_dcbx_info) - rc = -ENOMEM; + return -ENOMEM; - return rc; + return 0; } -void 
qed_dcbx_info_free(struct qed_hwfn *p_hwfn, - struct qed_dcbx_info *p_dcbx_info) +void qed_dcbx_info_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->p_dcbx_info); } @@ -961,14 +959,9 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, p_dcb_data = &p_dest->fcoe_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE); p_dcb_data = &p_dest->roce_dcb_data; - - if (p_src->arr[DCBX_PROTOCOL_ROCE].update) - qed_dcbx_update_protocol_data(p_dcb_data, p_src, - DCBX_PROTOCOL_ROCE); - if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update) - qed_dcbx_update_protocol_data(p_dcb_data, p_src, - DCBX_PROTOCOL_ROCE_V2); - + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE); + p_dcb_data = &p_dest->rroce_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2); p_dcb_data = &p_dest->iscsi_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI); p_dcb_data = &p_dest->eth_dcb_data; @@ -994,7 +987,7 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, if (rc) goto out; - rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type); + rc = qed_dcbx_get_params(p_hwfn, p_get, type); out: qed_ptt_release(p_hwfn, p_ptt); @@ -1169,6 +1162,9 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, local_admin->config = DCBX_CONFIG_VERSION_DISABLED; } + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx version = %d\n", + local_admin->config); + if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, ¶ms->config.params); @@ -1245,6 +1241,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE; if (dcbx_info->operational.ieee) p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + if (dcbx_info->operational.local) + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC; p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled; memcpy(&p_hwfn->p_dcbx_info->set.config.params, @@ -1794,8 +1792,9 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode); - if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) { - DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n"); + if (!(mode & DCB_CAP_DCBX_VER_IEEE) && + !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) { + DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n"); return 1; } @@ -1815,6 +1814,11 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) dcbx_set.enabled = true; } + if (mode & DCB_CAP_DCBX_STATIC) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC; + dcbx_set.enabled = true; + } + ptt = qed_ptt_acquire(hwfn); if (!ptt) return 1; @@ -1823,7 +1827,7 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) qed_ptt_release(hwfn, ptt); - return 0; + return rc; } static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags) @@ -2202,15 +2206,46 @@ qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) return qed_dcbnl_get_ieee_pfc(cdev, pfc, true); } +static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee) +{ + switch (selector) { + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case IEEE_8021QAZ_APP_SEL_STREAM: + *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case IEEE_8021QAZ_APP_SEL_DGRAM: + *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case IEEE_8021QAZ_APP_SEL_ANY: + *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + default: + return -EINVAL; + } + 
+ return 0; +} + static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_app_entry *entry; - bool ethtype; u8 prio = 0; + u8 sf_ieee; int i; + DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n", + app->selector, app->protocol); + + if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) { + DP_INFO(cdev, "Invalid selector field value %d\n", + app->selector); + return -EINVAL; + } + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; @@ -2221,11 +2256,9 @@ static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app) return -EINVAL; } - /* ieee defines the selector field value for ethertype to be 1 */ - ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE); for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { entry = &dcbx_info->operational.params.app_entry[i]; - if ((entry->ethtype == ethtype) && + if ((entry->sf_ieee == sf_ieee) && (entry->proto_id == app->protocol)) { prio = entry->prio; break; @@ -2253,14 +2286,22 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) struct qed_dcbx_set dcbx_set; struct qed_app_entry *entry; struct qed_ptt *ptt; - bool ethtype; + u8 sf_ieee; int rc, i; + DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n", + app->selector, app->protocol, app->priority); if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid priority %d\n", app->priority); return -EINVAL; } + if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) { + DP_INFO(cdev, "Invalid selector field value %d\n", + app->selector); + return -EINVAL; + } + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; @@ -2278,11 +2319,9 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) if (rc) return -EINVAL; - /* ieee defines the selector field value for ethertype to be 1 */ - ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE); for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { entry = &dcbx_set.config.params.app_entry[i]; - if ((entry->ethtype == ethtype) && + if ((entry->sf_ieee == sf_ieee) && (entry->proto_id == app->protocol)) break; /* First empty slot */ @@ -2298,7 +2337,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) } dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; - dcbx_set.config.params.app_entry[i].ethtype = ethtype; + dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee; dcbx_set.config.params.app_entry[i].proto_id = app->protocol; dcbx_set.config.params.app_entry[i].prio = BIT(app->priority); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 0fabe97f998d2c8e29b343f47ba90396d61f4398..414e26268f3a7c9ecc4b5954d91ba58c9c9d9470 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata { enum qed_pci_personality personality; }; -#define QED_MFW_GET_FIELD(name, field) \ - (((name) & (field ## _MASK)) >> (field ## _SHIFT)) - struct qed_dcbx_info { struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; @@ -122,7 +119,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *, struct qed_ptt *, enum qed_mib_read_type); int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); -void qed_dcbx_info_free(struct qed_hwfn *, struct 
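qed_get_sf_ieee_value() replaces the old ethertype-only test with a full mapping from IEEE 802.1Qaz app selectors onto the MFW's sf_ieee encoding, so unknown selectors are rejected before the MIB is touched. A stand-alone sketch with illustrative constant names (the numeric selector values 1..4 follow dcbnl.h):

#include <stdio.h>

enum { SEL_ETHERTYPE = 1, SEL_STREAM, SEL_DGRAM, SEL_ANY };
enum { SF_ETHTYPE, SF_TCP_PORT, SF_UDP_PORT, SF_TCP_UDP_PORT };

static int selector_to_sf(int selector)
{
        switch (selector) {
        case SEL_ETHERTYPE:     return SF_ETHTYPE;
        case SEL_STREAM:        return SF_TCP_PORT;
        case SEL_DGRAM:         return SF_UDP_PORT;
        case SEL_ANY:           return SF_TCP_UDP_PORT;
        default:                return -1;    /* reject, like -EINVAL upstream */
        }
}

int main(void)
{
        printf("%d %d\n", selector_to_sf(SEL_DGRAM), selector_to_sf(9));  /* 2 -1 */
        return 0;
}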
qed_dcbx_info *); +void qed_dcbx_info_free(struct qed_hwfn *p_hwfn); void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, struct pf_update_ramrod_data *p_dest); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 68f19ca57f965b13d6fbf32c85e86d65e500b881..483241b4b05db2add64ff928ccc9419fe733355a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -17,7 +17,6 @@ /* Chip IDs enum */ enum chip_ids { - CHIP_RESERVED, CHIP_BB_B0, CHIP_K2, MAX_CHIP_IDS @@ -40,6 +39,7 @@ enum mem_groups { MEM_GROUP_BTB_RAM, MEM_GROUP_RDIF_CTX, MEM_GROUP_TDIF_CTX, + MEM_GROUP_CFC_MEM, MEM_GROUP_CONN_CFC_MEM, MEM_GROUP_TASK_CFC_MEM, MEM_GROUP_CAU_PI, @@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = { "BTB_RAM", "RDIF_CTX", "TDIF_CTX", + "CFC_MEM", "CONN_CFC_MEM", "TASK_CFC_MEM", "CAU_PI", @@ -185,13 +186,16 @@ struct dbg_array { u32 size_in_dwords; }; +struct chip_platform_defs { + u8 num_ports; + u8 num_pfs; + u8 num_vfs; +}; + /* Chip constant definitions */ struct chip_defs { const char *name; - struct { - u8 num_ports; - u8 num_pfs; - } per_platform[MAX_PLATFORM_IDS]; + struct chip_platform_defs per_platform[MAX_PLATFORM_IDS]; }; /* Platform constant definitions */ @@ -405,22 +409,23 @@ struct phy_defs { /***************************** Constant Arrays *******************************/ /* Debug arrays */ -static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; +static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } }, { "bb_b0", - { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } }, - { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } } + { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0}, + {0, 0, 0}, {0, 0, 0} } }, + { "k2", + { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0}, + {0, 0, 0}, {0, 0, 0} } } }; /* Storm constant definitions array */ static struct storm_defs s_storm_defs[] = { /* Tstorm */ {'T', BLOCK_TSEM, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, - DBG_BUS_CLIENT_RBCT}, true, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, @@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = { 4, TCM_REG_SM_TASK_CTX}, /* Mstorm */ {'M', BLOCK_MSEM, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, - DBG_BUS_CLIENT_RBCM}, false, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, MSEM_REG_FAST_MEMORY, MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE, MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG, @@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = { 7, MCM_REG_SM_TASK_CTX}, /* Ustorm */ {'U', BLOCK_USEM, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, - DBG_BUS_CLIENT_RBCU}, false, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE, USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG, @@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = { 3, UCM_REG_SM_TASK_CTX}, /* Xstorm */ {'X', BLOCK_XSEM, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, - DBG_BUS_CLIENT_RBCX}, false, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE, XSEM_REG_SLOW_DBG_MODE, 
XSEM_REG_DBG_MODE1_CFG, @@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = { 0, 0}, /* Ystorm */ {'Y', BLOCK_YSEM, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, - DBG_BUS_CLIENT_RBCY}, false, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE, YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG, @@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = { 12, YCM_REG_SM_TASK_CTX}, /* Pstorm */ {'P', BLOCK_PSEM, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, - DBG_BUS_CLIENT_RBCS}, true, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE, PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG, @@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = { /* Block definitions array */ static struct block_defs block_grc_defs = { - "grc", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, + "grc", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE, GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID, GRC_REG_DBG_FORCE_FRAME, @@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = { }; static struct block_defs block_miscs_defs = { - "miscs", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "miscs", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_misc_defs = { - "misc", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "misc", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_dbu_defs = { - "dbu", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "dbu", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_pglue_b_defs = { - "pglue_b", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, + "pglue_b", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE, PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID, PGLUE_B_REG_DBG_FORCE_FRAME, @@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = { }; static struct block_defs block_cnig_defs = { - "cnig", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, + "cnig", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2, CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2, CNIG_REG_DBG_FORCE_FRAME_K2, @@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = { }; static struct block_defs block_cpmu_defs = { - "cpmu", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "cpmu", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 8 }; static struct block_defs block_ncsi_defs = { - "ncsi", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + "ncsi", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, 
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE, NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID, NCSI_REG_DBG_FORCE_FRAME, @@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = { }; static struct block_defs block_opte_defs = { - "opte", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "opte", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 4 }; static struct block_defs block_bmb_defs = { - "bmb", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB}, + "bmb", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB}, BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE, BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID, BMB_REG_DBG_FORCE_FRAME, @@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = { }; static struct block_defs block_pcie_defs = { - "pcie", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, + "pcie", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE, PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID, PCIE_REG_DBG_COMMON_FORCE_FRAME, @@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = { }; static struct block_defs block_mcp_defs = { - "mcp", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "mcp", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_mcp2_defs = { - "mcp2", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + "mcp2", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE, MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID, MCP2_REG_DBG_FORCE_FRAME, @@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = { }; static struct block_defs block_pswhst_defs = { - "pswhst", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "pswhst", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE, PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID, PSWHST_REG_DBG_FORCE_FRAME, @@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = { }; static struct block_defs block_pswhst2_defs = { - "pswhst2", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "pswhst2", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE, PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID, PSWHST2_REG_DBG_FORCE_FRAME, @@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = { }; static struct block_defs block_pswrd_defs = { - "pswrd", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "pswrd", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE, PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID, PSWRD_REG_DBG_FORCE_FRAME, @@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = { }; static struct block_defs block_pswrd2_defs = { - "pswrd2", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + 
"pswrd2", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE, PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID, PSWRD2_REG_DBG_FORCE_FRAME, @@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = { }; static struct block_defs block_pswwr_defs = { - "pswwr", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "pswwr", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE, PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID, PSWWR_REG_DBG_FORCE_FRAME, @@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = { }; static struct block_defs block_pswwr2_defs = { - "pswwr2", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "pswwr2", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISC_PL_HV, 3 }; static struct block_defs block_pswrq_defs = { - "pswrq", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "pswrq", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE, PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID, PSWRQ_REG_DBG_FORCE_FRAME, @@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = { }; static struct block_defs block_pswrq2_defs = { - "pswrq2", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "pswrq2", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE, PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID, PSWRQ2_REG_DBG_FORCE_FRAME, @@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = { }; static struct block_defs block_pglcs_defs = { - "pglcs", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, + "pglcs", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE, PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID, PGLCS_REG_DBG_FORCE_FRAME, @@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = { }; static struct block_defs block_ptu_defs = { - "ptu", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "ptu", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE, PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID, PTU_REG_DBG_FORCE_FRAME, @@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = { }; static struct block_defs block_dmae_defs = { - "dmae", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "dmae", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE, DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID, DMAE_REG_DBG_FORCE_FRAME, @@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = { }; static struct block_defs block_tcm_defs = { - "tcm", {true, true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + "tcm", + {true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE, TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID, TCM_REG_DBG_FORCE_FRAME, @@ -710,8 +728,9 @@ static 
struct block_defs block_tcm_defs = { }; static struct block_defs block_mcm_defs = { - "mcm", {true, true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "mcm", + {true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE, MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID, MCM_REG_DBG_FORCE_FRAME, @@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = { }; static struct block_defs block_ucm_defs = { - "ucm", {true, true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + "ucm", + {true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE, UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID, UCM_REG_DBG_FORCE_FRAME, @@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = { }; static struct block_defs block_xcm_defs = { - "xcm", {true, true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + "xcm", + {true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE, XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID, XCM_REG_DBG_FORCE_FRAME, @@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = { }; static struct block_defs block_ycm_defs = { - "ycm", {true, true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + "ycm", + {true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE, YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID, YCM_REG_DBG_FORCE_FRAME, @@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = { }; static struct block_defs block_pcm_defs = { - "pcm", {true, true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + "pcm", + {true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE, PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID, PCM_REG_DBG_FORCE_FRAME, @@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = { }; static struct block_defs block_qm_defs = { - "qm", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ}, + "qm", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ}, QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE, QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID, QM_REG_DBG_FORCE_FRAME, @@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = { }; static struct block_defs block_tm_defs = { - "tm", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + "tm", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE, TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID, TM_REG_DBG_FORCE_FRAME, @@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = { }; static struct block_defs block_dorq_defs = { - "dorq", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + "dorq", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE, DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID, DORQ_REG_DBG_FORCE_FRAME, @@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = { }; static struct block_defs block_brb_defs = { - "brb", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, 
DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, + "brb", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE, BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID, BRB_REG_DBG_FORCE_FRAME, @@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = { }; static struct block_defs block_src_defs = { - "src", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + "src", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE, SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID, SRC_REG_DBG_FORCE_FRAME, @@ -800,8 +828,9 @@ static struct block_defs block_src_defs = { }; static struct block_defs block_prs_defs = { - "prs", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, + "prs", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE, PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID, PRS_REG_DBG_FORCE_FRAME, @@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = { }; static struct block_defs block_tsdm_defs = { - "tsdm", {true, true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + "tsdm", + {true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE, TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID, TSDM_REG_DBG_FORCE_FRAME, @@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = { }; static struct block_defs block_msdm_defs = { - "msdm", {true, true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "msdm", + {true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE, MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID, MSDM_REG_DBG_FORCE_FRAME, @@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = { }; static struct block_defs block_usdm_defs = { - "usdm", {true, true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + "usdm", + {true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE, USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID, USDM_REG_DBG_FORCE_FRAME, @@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = { }; static struct block_defs block_xsdm_defs = { - "xsdm", {true, true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + "xsdm", + {true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE, XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID, XSDM_REG_DBG_FORCE_FRAME, @@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = { }; static struct block_defs block_ysdm_defs = { - "ysdm", {true, true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + "ysdm", + {true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE, YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID, YSDM_REG_DBG_FORCE_FRAME, @@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = { }; static struct block_defs block_psdm_defs = { - "psdm", {true, true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + "psdm", + {true, true}, true, 
DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE, PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID, PSDM_REG_DBG_FORCE_FRAME, @@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = { }; static struct block_defs block_tsem_defs = { - "tsem", {true, true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + "tsem", + {true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE, TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID, TSEM_REG_DBG_FORCE_FRAME, @@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = { }; static struct block_defs block_msem_defs = { - "msem", {true, true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "msem", + {true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE, MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID, MSEM_REG_DBG_FORCE_FRAME, @@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = { }; static struct block_defs block_usem_defs = { - "usem", {true, true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + "usem", + {true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE, USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID, USEM_REG_DBG_FORCE_FRAME, @@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = { }; static struct block_defs block_xsem_defs = { - "xsem", {true, true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + "xsem", + {true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE, XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID, XSEM_REG_DBG_FORCE_FRAME, @@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = { }; static struct block_defs block_ysem_defs = { - "ysem", {true, true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + "ysem", + {true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE, YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID, YSEM_REG_DBG_FORCE_FRAME, @@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = { }; static struct block_defs block_psem_defs = { - "psem", {true, true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + "psem", + {true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE, PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID, PSEM_REG_DBG_FORCE_FRAME, @@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = { }; static struct block_defs block_rss_defs = { - "rss", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + "rss", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE, RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID, RSS_REG_DBG_FORCE_FRAME, @@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = { }; static struct block_defs block_tmld_defs = { - "tmld", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "tmld", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, 
DBG_BUS_CLIENT_RBCM}, TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE, TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID, TMLD_REG_DBG_FORCE_FRAME, @@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = { }; static struct block_defs block_muld_defs = { - "muld", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + "muld", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE, MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID, MULD_REG_DBG_FORCE_FRAME, @@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = { }; static struct block_defs block_yuld_defs = { - "yuld", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + "yuld", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE, YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID, YULD_REG_DBG_FORCE_FRAME, @@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = { }; static struct block_defs block_xyld_defs = { - "xyld", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + "xyld", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE, XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID, XYLD_REG_DBG_FORCE_FRAME, @@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = { }; static struct block_defs block_prm_defs = { - "prm", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "prm", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE, PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID, PRM_REG_DBG_FORCE_FRAME, @@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = { }; static struct block_defs block_pbf_pb1_defs = { - "pbf_pb1", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + "pbf_pb1", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE, PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID, PBF_PB1_REG_DBG_FORCE_FRAME, @@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = { }; static struct block_defs block_pbf_pb2_defs = { - "pbf_pb2", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + "pbf_pb2", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE, PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID, PBF_PB2_REG_DBG_FORCE_FRAME, @@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = { }; static struct block_defs block_rpb_defs = { - "rpb", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "rpb", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE, RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID, RPB_REG_DBG_FORCE_FRAME, @@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = { }; static struct block_defs block_btb_defs = { - "btb", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV}, + "btb", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV}, BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE, BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID, BTB_REG_DBG_FORCE_FRAME, @@ 
-1009,8 +1060,9 @@ static struct block_defs block_btb_defs = { }; static struct block_defs block_pbf_defs = { - "pbf", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + "pbf", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE, PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID, PBF_REG_DBG_FORCE_FRAME, @@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = { }; static struct block_defs block_rdif_defs = { - "rdif", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + "rdif", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE, RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID, RDIF_REG_DBG_FORCE_FRAME, @@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = { }; static struct block_defs block_tdif_defs = { - "tdif", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + "tdif", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE, TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID, TDIF_REG_DBG_FORCE_FRAME, @@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = { }; static struct block_defs block_cdu_defs = { - "cdu", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + "cdu", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE, CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID, CDU_REG_DBG_FORCE_FRAME, @@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = { }; static struct block_defs block_ccfc_defs = { - "ccfc", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + "ccfc", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE, CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID, CCFC_REG_DBG_FORCE_FRAME, @@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = { }; static struct block_defs block_tcfc_defs = { - "tcfc", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + "tcfc", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE, TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID, TCFC_REG_DBG_FORCE_FRAME, @@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = { }; static struct block_defs block_igu_defs = { - "igu", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "igu", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE, IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID, IGU_REG_DBG_FORCE_FRAME, @@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = { }; static struct block_defs block_cau_defs = { - "cau", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + "cau", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE, CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID, CAU_REG_DBG_FORCE_FRAME, @@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = { }; static struct block_defs block_umac_defs = { - "umac", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, 
DBG_BUS_CLIENT_RBCZ}, + "umac", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE, UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID, UMAC_REG_DBG_FORCE_FRAME, @@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = { }; static struct block_defs block_xmac_defs = { - "xmac", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "xmac", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_dbg_defs = { - "dbg", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "dbg", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 }; static struct block_defs block_nig_defs = { - "nig", {true, true, true}, false, 0, - {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, + "nig", + {true, true}, false, 0, + {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE, NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID, NIG_REG_DBG_FORCE_FRAME, @@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = { }; static struct block_defs block_wol_defs = { - "wol", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, + "wol", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE, WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID, WOL_REG_DBG_FORCE_FRAME, @@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = { }; static struct block_defs block_bmbn_defs = { - "bmbn", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB}, + "bmbn", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB}, BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE, BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID, BMBN_REG_DBG_FORCE_FRAME, @@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = { }; static struct block_defs block_ipc_defs = { - "ipc", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "ipc", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_UA, 8 }; static struct block_defs block_nwm_defs = { - "nwm", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, + "nwm", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE, NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID, NWM_REG_DBG_FORCE_FRAME, @@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = { }; static struct block_defs block_nws_defs = { - "nws", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, + "nws", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, + NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE, + NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID, + NWS_REG_DBG_FORCE_FRAME, true, false, DBG_RESET_REG_MISCS_PL_HV, 12 }; static struct block_defs block_ms_defs = { - "ms", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, + "ms", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, + MS_REG_DBG_SELECT, 
MS_REG_DBG_DWORD_ENABLE, + MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID, + MS_REG_DBG_FORCE_FRAME, true, false, DBG_RESET_REG_MISCS_PL_HV, 13 }; static struct block_defs block_phy_pcie_defs = { - "phy_pcie", {false, false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, + "phy_pcie", + {false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE, PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID, PCIE_REG_DBG_COMMON_FORCE_FRAME, @@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = { }; static struct block_defs block_led_defs = { - "led", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "led", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + true, false, DBG_RESET_REG_MISCS_PL_HV, 14 +}; + +static struct block_defs block_avs_wrap_defs = { + "avs_wrap", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + true, false, DBG_RESET_REG_MISCS_PL_UA, 11 +}; + +static struct block_defs block_rgfs_defs = { + "rgfs", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, - true, true, DBG_RESET_REG_MISCS_PL_HV, 14 + false, false, MAX_DBG_RESET_REGS, 0 +}; + +static struct block_defs block_tgfs_defs = { + "tgfs", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + false, false, MAX_DBG_RESET_REGS, 0 +}; + +static struct block_defs block_ptld_defs = { + "ptld", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + false, false, MAX_DBG_RESET_REGS, 0 +}; + +static struct block_defs block_ypld_defs = { + "ypld", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_misc_aeu_defs = { - "misc_aeu", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "misc_aeu", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_bar0_map_defs = { - "bar0_map", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "bar0_map", {false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; @@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { &block_ms_defs, &block_phy_pcie_defs, &block_led_defs, + &block_avs_wrap_defs, + &block_rgfs_defs, + &block_tgfs_defs, + &block_ptld_defs, + &block_ypld_defs, &block_misc_aeu_defs, &block_bar0_map_defs, }; @@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = { }; static struct grc_param_defs s_grc_param_defs[] = { - {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ - {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ - {{0, 0, 0}, 0, 1, 
false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ - {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ - {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */ - {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ - {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ - {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */ - {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, + {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ + {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ + {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ + {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ + {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ + {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */ + {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ + {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ + {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */ + {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */ - 
{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, + {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */ - {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ - {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */ - {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ - {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */ - {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */ + {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ + {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */ + {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */ + {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */ + {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */ + {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */ }; static struct rss_mem_defs s_rss_mem_defs[] = { { "rss_mem_cid", "rss_cid", 0, - {256, 256, 320}, - {32, 32, 32} }, + {256, 320}, + {32, 32} }, { "rss_mem_key_msb", "rss_key", 1024, - {128, 128, 208}, - {256, 256, 256} }, + {128, 208}, + {256, 256} }, { "rss_mem_key_lsb", "rss_key", 2048, - {128, 128, 208}, - {64, 64, 64} }, + {128, 208}, + {64, 64} }, { "rss_mem_info", "rss_info", 3072, - {128, 128, 208}, - {16, 16, 16} }, + {128, 208}, + {16, 16} }, { "rss_mem_ind", "rss_ind", 4096, - {(128 * 128), (128 * 128), (128 * 208)}, - {16, 16, 16} } + {(128 * 128), (128 * 208)}, + {16, 16} } }; static struct vfc_ram_defs s_vfc_ram_defs[] = { @@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = { static struct big_ram_defs s_big_ram_defs[] = { { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, - {4800, 4800, 5632} }, + {4800, 5632} }, { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, - {2880, 2880, 3680} }, + {2880, 3680} }, { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, - {1152, 1152, 1152} } + {1152, 1152} } }; static struct reset_reg_defs s_reset_regs_defs[] = { { MISCS_REG_RESET_PL_UA, 0x0, - {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */ + {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */ { MISCS_REG_RESET_PL_HV, 0x0, - {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */ + {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */ { MISCS_REG_RESET_PL_HV_2, 0x0, - {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */ + {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */ { MISC_REG_RESET_PL_UA, 0x0, - {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */ + {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */ { MISC_REG_RESET_PL_HV, 0x0, - {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */ + {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */ { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, - {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ + {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, - {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ + {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ { MISC_REG_RESET_PL_PDA_VAUX, 0x2, - {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ + {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ }; static struct phy_defs s_phy_defs[] = { @@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf) return dword; } +/* Returns the value of the specified GRC param */ +static 
u32 qed_grc_get_param(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + return dev_data->grc.param_val[grc_param]; +} + +/* Initializes the GRC parameters */ +static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + if (!dev_data->grc.params_initialized) { + qed_dbg_grc_set_params_default(p_hwfn); + dev_data->grc.params_initialized = 1; + } +} + /* Initializes debug data for the specified device */ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, dev_data->mode_enable[MODE_K2] = 1; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { dev_data->chip_id = CHIP_BB_B0; - dev_data->mode_enable[MODE_BB_B0] = 1; + dev_data->mode_enable[MODE_BB] = 1; } else { return DBG_STATUS_UNKNOWN_CHIP; } dev_data->platform_id = PLATFORM_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; + + /* Initializes the GRC parameters */ + qed_dbg_grc_init_params(p_hwfn); + dev_data->initialized = true; return DBG_STATUS_OK; } @@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, int printed_chars; u32 offset = 0; - if (dump) { + if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { /* Read FW image/version from PRAM in a non-reset SEMI */ bool found = false; u8 storm_id; @@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, { char mfw_ver_str[16] = EMPTY_FW_VERSION_STR; - if (dump) { + if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { u32 global_section_offsize, global_section_addr, mfw_ver; u32 public_data_addr, global_section_offsize_addr; int printed_chars; @@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, bool dump, u8 num_specific_global_params) { + u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 offset = 0; /* Find platform string and dump global params section header */ offset += qed_dump_section_hdr(dump_buf + offset, - dump, - "global_params", - NUM_COMMON_GLOBAL_PARAMS + - num_specific_global_params); + dump, "global_params", num_params); /* Store params */ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); @@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) } } -/* Returns the value of the specified GRC param */ -static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn, - enum dbg_grc_params grc_param) -{ - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - - return dev_data->grc.param_val[grc_param]; -} - -/* Clear all GRC params */ -static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn) -{ - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 i; - - for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) - dev_data->grc.param_set_by_user[i] = 0; -} - -/* Assign default GRC param values */ -static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) -{ - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 i; - - for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) - if (!dev_data->grc.param_set_by_user[i]) - dev_data->grc.param_val[i] = - s_grc_param_defs[i].default_val[dev_data->chip_id]; -} - /* Returns true if the specified entity (indicated by GRC param) should be * included in the dump, false otherwise. 
*/ @@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, } } -/* Returns the attention name offsets of the specified block */ +/* Returns the attention block data of the specified block */ static const struct dbg_attn_block_type_data * qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type) { @@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, * The following parameters are dumped: * - 'count' = num_dumped_entries * - 'split' = split_type - * - 'id'i = split_id (dumped only if split_id >= 0) + * - 'id' = split_id (dumped only if split_id >= 0) * - 'param_name' = param_val (user param, dumped only if param_name != NULL and * param_val != NULL) */ @@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, return offset; } -/* Dumps GRC register/memory. Returns the dumped size in dwords. */ +/* Dumps the GRC registers in the specified address range. + * Returns the dumped size in dwords. + */ +static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, + bool dump, u32 addr, u32 len) +{ + u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i; + + if (dump) + for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) + *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); + else + offset += len; + return offset; +} + +/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr, + u32 len) +{ + if (dump) + *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT); + return 1; +} + +/* Dumps GRC registers sequence. Returns the dumped size in dwords. */ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 addr, u32 len) { - u32 offset = 0, i; + u32 offset = 0; + + offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, dump, addr, len); + return offset; +} + +/* Dumps GRC registers sequence with skip cycle. + * Returns the dumped size in dwords. 
+ */ +static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, + bool dump, u32 addr, u32 total_len, + u32 read_len, u32 skip_len) +{ + u32 offset = 0, reg_offset = 0; + offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len); if (dump) { - *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT); - for (i = 0; i < len; i++, addr++, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, - p_ptt, - DWORDS_TO_BYTES(addr)); + while (reg_offset < total_len) { + u32 curr_len = min_t(u32, + read_len, + total_len - reg_offset); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, addr, curr_len); + reg_offset += curr_len; + addr += curr_len; + if (reg_offset < total_len) { + curr_len = min_t(u32, + skip_len, + total_len - skip_len); + memset(dump_buf + offset, 0, + DWORDS_TO_BYTES(curr_len)); + offset += curr_len; + reg_offset += curr_len; + addr += curr_len; + } + } } else { - offset += len + 1; + offset += total_len; } return offset; @@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *) &input_regs_arr.ptr[input_offset]; + u32 addr, len; + addr = GET_FIELD(reg->data, + DBG_DUMP_REG_ADDRESS); + len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); offset += - qed_grc_dump_reg_entry(p_hwfn, p_ptt, - dump_buf + offset, dump, - GET_FIELD(reg->data, - DBG_DUMP_REG_ADDRESS), - GET_FIELD(reg->data, - DBG_DUMP_REG_LENGTH)); + qed_grc_dump_reg_entry(p_hwfn, p_ptt, + dump_buf + offset, + dump, + addr, + len); (*num_dumped_reg_entries)++; } } else { @@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct chip_platform_defs *p_platform_defs; u32 offset = 0, input_offset = 0; - u8 port_id, pf_id; + struct chip_defs *p_chip_defs; + u8 port_id, pf_id, vf_id; + u16 fid; + + p_chip_defs = &s_chip_defs[dev_data->chip_id]; + p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id]; if (dump) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); @@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, switch (split_type_id) { case SPLIT_TYPE_NONE: - case SPLIT_TYPE_VF: offset += qed_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, @@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, param_val); break; case SPLIT_TYPE_PORT: - for (port_id = 0; - port_id < - s_chip_defs[dev_data->chip_id]. - per_platform[dev_data->platform_id].num_ports; + for (port_id = 0; port_id < p_platform_defs->num_ports; port_id++) { if (dump) qed_port_pretend(p_hwfn, p_ptt, @@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, break; case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; - pf_id < - s_chip_defs[dev_data->chip_id]. 
- per_platform[dev_data->platform_id].num_pfs; + for (pf_id = 0; pf_id < p_platform_defs->num_pfs; pf_id++) { - if (dump) - qed_fid_pretend(p_hwfn, p_ptt, pf_id); - offset += qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "pf", pf_id, param_name, - param_val); + u8 pfid_shift = + PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + + if (dump) { + fid = pf_id << pfid_shift; + qed_fid_pretend(p_hwfn, p_ptt, fid); + } + + offset += + qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + "pf", pf_id, + param_name, + param_val); + } + break; + case SPLIT_TYPE_VF: + for (vf_id = 0; vf_id < p_platform_defs->num_vfs; + vf_id++) { + u8 vfvalid_shift = + PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; + u8 vfid_shift = + PXP_PRETEND_CONCRETE_FID_VFID_SHIFT; + + if (dump) { + fid = BIT(vfvalid_shift) | + (vf_id << vfid_shift); + qed_fid_pretend(p_hwfn, p_ptt, fid); + } + + offset += + qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + "vf", vf_id, + param_name, + param_val); } break; default: @@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, } /* Pretend to original PF */ - if (dump) - qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + if (dump) { + fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_fid_pretend(p_hwfn, p_ptt, fid); + } + return offset; } @@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { + u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr); + offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, - BYTES_TO_DWORDS - (s_reset_regs_defs - [i].addr), 1); + addr, + 1); num_regs++; } } @@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, &attn_reg_arr[reg_idx]; u16 modes_buf_offset; bool eval_mode; + u32 addr; /* Check mode */ eval_mode = GET_FIELD(reg_data->mode.data, @@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) { /* Mode match - read and dump registers */ - offset += qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - reg_data->mask_address, - 1); - offset += qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - GET_FIELD(reg_data->data, - DBG_ATTN_REG_STS_ADDRESS), - 1); + addr = reg_data->mask_address; + offset += + qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1); + addr = GET_FIELD(reg_data->data, + DBG_ATTN_REG_STS_ADDRESS); + offset += + qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1); num_reg_entries += 2; } } @@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Write storm stall status registers */ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + u32 addr; + if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] && dump) continue; + addr = + BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr + + SEM_FAST_REG_STALLED); offset += qed_grc_dump_reg_entry(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - BYTES_TO_DWORDS(s_storm_defs[storm_id]. 
- sem_fast_mem_addr + - SEM_FAST_REG_STALLED), - 1); + p_ptt, + dump_buf + offset, + dump, + addr, + 1); num_reg_entries++; } @@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, return offset; } +/* Dumps registers that can't be represented in the debug arrays */ +static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + u32 offset = 0, addr; + + offset += qed_grc_dump_regs_hdr(dump_buf, + dump, 2, "eng", -1, NULL, NULL); + + /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be + * skipped). + */ + addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO); + offset += qed_grc_dump_reg_entry_skip(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + RDIF_REG_DEBUG_ERROR_INFO_SIZE, + 7, + 1); + addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO); + offset += + qed_grc_dump_reg_entry_skip(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + TDIF_REG_DEBUG_ERROR_INFO_SIZE, + 7, + 1); + + return offset; +} + /* Dumps a GRC memory header (section and params). * The following parameters are dumped: * name - name is dumped only if it's not NULL. - * addr - byte_addr is dumped only if name is NULL. - * len - dword_len is always dumped. + * addr - addr is dumped only if name is NULL. + * len - len is always dumped. * width - bit_width is dumped if it's not zero. * packed - packed=1 is dumped if it's not false. * mem_group - mem_group is always dumped. @@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, const char *name, - u32 byte_addr, - u32 dword_len, + u32 addr, + u32 len, u32 bit_width, bool packed, const char *mem_group, @@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, u32 offset = 0; char buf[64]; - if (!dword_len) + if (!len) DP_NOTICE(p_hwfn, "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); if (bit_width) @@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping %d registers from %s...\n", - dword_len, buf); + len, buf); } else { /* Dump address */ offset += qed_dump_num_param(dump_buf + offset, - dump, "addr", byte_addr); - if (dump && dword_len > 64) + dump, "addr", + DWORDS_TO_BYTES(addr)); + if (dump && len > 64) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", - dword_len, byte_addr); + len, (u32)DWORDS_TO_BYTES(addr)); } /* Dump len */ - offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len); + offset += qed_dump_num_param(dump_buf + offset, dump, "len", len); /* Dump bit width */ if (bit_width) @@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, const char *name, - u32 byte_addr, - u32 dword_len, + u32 addr, + u32 len, u32 bit_width, bool packed, const char *mem_group, @@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, name, - byte_addr, - dword_len, + addr, + len, bit_width, packed, mem_group, is_storm, storm_letter); - if (dump) { - u32 i; - - for (i = 0; i < dword_len; - i++, byte_addr += BYTES_IN_DWORD, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); - } else { - offset += dword_len; - } - + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, dump, addr, len); return offset; } @@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, if (qed_grc_is_mem_included(p_hwfn, (enum 
block_id)cond_hdr->block_id, mem_group_id)) { - u32 mem_byte_addr = - DWORDS_TO_BYTES(GET_FIELD(mem->dword0, - DBG_DUMP_MEM_ADDRESS)); + u32 mem_addr = GET_FIELD(mem->dword0, + DBG_DUMP_MEM_ADDRESS); u32 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH); + enum dbg_grc_params grc_param; char storm_letter = 'a'; bool is_storm = false; /* Update memory length for CCFC/TCFC memories * according to number of LCIDs/LTIDs. */ - if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) + if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) { + if (mem_len % MAX_LCIDS != 0) { + DP_NOTICE(p_hwfn, + "Invalid CCFC connection memory size\n"); + return 0; + } + + grc_param = DBG_GRC_PARAM_NUM_LCIDS; mem_len = qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LCIDS) - * (mem_len / MAX_LCIDS); - else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) + grc_param) * + (mem_len / MAX_LCIDS); + } else if (mem_group_id == + MEM_GROUP_TASK_CFC_MEM) { + if (mem_len % MAX_LTIDS != 0) { + DP_NOTICE(p_hwfn, + "Invalid TCFC task memory size\n"); + return 0; + } + + grc_param = DBG_GRC_PARAM_NUM_LTIDS; mem_len = qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LTIDS) - * (mem_len / MAX_LTIDS); + grc_param) * + (mem_len / MAX_LTIDS); + } /* If memory is associated with Storm, update * Storm details. @@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, /* Dump memory */ offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, NULL, - mem_byte_addr, mem_len, 0, + mem_addr, mem_len, 0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter); @@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - if (qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)storm_id)) { - for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) { - u32 addr = - s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_STORM_REG_FILE + - DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id)); + struct storm_defs *storm = &s_storm_defs[storm_id]; - buf[strlen(buf) - 1] = '0' + set_id; - offset += qed_grc_dump_mem(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - buf, - addr, - IORS_PER_SET, - 32, - false, - "ior", - true, - s_storm_defs - [storm_id].letter); - } + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id)) + continue; + + for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) { + u32 dwords, addr; + + dwords = storm->sem_fast_mem_addr + + SEM_FAST_REG_STORM_REG_FILE; + addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id); + buf[strlen(buf) - 1] = '0' + set_id; + offset += qed_grc_dump_mem(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + buf, + addr, + IORS_PER_SET, + 32, + false, + "ior", + true, + storm->letter); } } @@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id]; u32 num_entries = rss_defs->num_entries[dev_data->chip_id]; u32 entry_width = rss_defs->entry_width[dev_data->chip_id]; - u32 total_size = (num_entries * entry_width) / 32; + u32 total_dwords = (num_entries * entry_width) / 32; + u32 size = RSS_REG_RSS_RAM_DATA_SIZE; bool packed = (entry_width == 16); - u32 addr = rss_defs->addr; - u32 i, j; + u32 rss_addr = rss_defs->addr; + u32 i, addr; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, - addr, - total_size, + 0, + total_dwords, entry_width, packed, rss_defs->type_name, false, 0); if (!dump) { - offset += total_size; + offset += total_dwords; continue; } /* Dump RSS data */ - for (i = 0; i < 
BYTES_TO_DWORDS(total_size); i++, addr++) { - qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr); - for (j = 0; j < BYTES_IN_DWORD; j++, offset++) - *(dump_buf + offset) = - qed_rd(p_hwfn, p_ptt, - RSS_REG_RSS_RAM_DATA + - DWORDS_TO_BYTES(j)); + for (i = 0; i < total_dwords; + i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) { + addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); + qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + + offset, + dump, + addr, + size); } } @@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u8 big_ram_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 total_blocks, ram_size, offset = 0, i; char mem_name[12] = "???_BIG_RAM"; char type_name[8] = "???_RAM"; - u32 ram_size, total_blocks; - u32 offset = 0, i, j; + struct big_ram_defs *big_ram; - total_blocks = - s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id]; + big_ram = &s_big_ram_defs[big_ram_id]; + total_blocks = big_ram->num_of_blocks[dev_data->chip_id]; ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS; - strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name, - strlen(s_big_ram_defs[big_ram_id].instance_name)); - strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name, - strlen(s_big_ram_defs[big_ram_id].instance_name)); + strncpy(type_name, big_ram->instance_name, + strlen(big_ram->instance_name)); + strncpy(mem_name, big_ram->instance_name, + strlen(big_ram->instance_name)); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, @@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, /* Read and dump Big RAM data */ for (i = 0; i < total_blocks / 2; i++) { - qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr, - i); - for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, - s_big_ram_defs[big_ram_id]. 
- data_reg_addr + - DWORDS_TO_BYTES(j)); + u32 addr, len; + + qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i); + addr = BYTES_TO_DWORDS(big_ram->data_reg_addr); + len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS; + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len); } return offset; @@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { bool block_enable[MAX_BLOCK_ID] = { 0 }; + u32 offset = 0, addr; bool halted = false; - u32 offset = 0; /* Halt MCP */ - if (dump) { + if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { halted = !qed_mcp_halt(p_hwfn, p_ptt); if (!halted) DP_NOTICE(p_hwfn, "MCP halt failed!\n"); @@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, NULL, - MCP_REG_SCRATCH, + BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, 0, false, "MCP", false, 0); @@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, NULL, - MCP_REG_CPU_REG_FILE, + BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, 0, false, "MCP", false, 0); @@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP"); + addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, - BYTES_TO_DWORDS - (MISC_REG_SHARED_MEM_ADDR), 1); + addr, + 1); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, { u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 offset = 0, block_id, line_id, addr, i; + u32 offset = 0, block_id, line_id; struct block_defs *p_block_defs; if (dump) { @@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, if (dump && !dev_data->block_in_reset[block_id]) { u8 dbg_client_id = p_block_defs->dbg_client_id[dev_data->chip_id]; + u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); + u32 len = STATIC_DEBUG_LINE_DWORDS; /* Enable block's client */ qed_bus_enable_clients(p_hwfn, p_ptt, @@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, 0xf, 0, 0, 0); /* Read debug line info */ - for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA; - i < STATIC_DEBUG_LINE_DWORDS; - i++, offset++, addr += BYTES_IN_DWORD) - dump_buf[offset] = qed_rd(p_hwfn, p_ptt, - addr); + offset += + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len); } /* Disable block's client and debug output */ @@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, u8 i, port_mode = 0; u32 offset = 0; - /* Check if emulation platform */ *num_dumped_dwords = 0; - /* Fill GRC parameters that were not set by the user with their default - * value. 
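A note on units, since nearly every hunk above swaps byte-based GRC addresses for dword-based ones: the conversion helpers themselves are outside this patch, but they are presumably the usual pair

	#define BYTES_IN_DWORD		sizeof(u32)
	#define DWORDS_TO_BYTES(dwords)	((dwords) * BYTES_IN_DWORD)
	#define BYTES_TO_DWORDS(bytes)	((bytes) / BYTES_IN_DWORD)

so that, for example, BYTES_TO_DWORDS(MCP_REG_SCRATCH) above is simply the scratchpad base register expressed in dword units.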
- */ - qed_dbg_grc_set_params_default(p_hwfn); - /* Find port mode */ if (dump) { switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { @@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, } /* Disable all parities using MFW command */ - if (dump) { + if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); if (!parities_masked) { + DP_NOTICE(p_hwfn, + "Failed to mask parities using MFW\n"); if (qed_grc_get_param (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE)) return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY; - else - DP_NOTICE(p_hwfn, - "Failed to mask parities using MFW\n"); } } @@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, offset, dump, block_enable, NULL, NULL); + + /* Dump special registers */ + offset += qed_grc_dump_special_regs(p_hwfn, + p_ptt, + dump_buf + offset, dump); } /* Dump memories */ @@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, } if (mode_match) { - u32 grc_addr = - DWORDS_TO_BYTES(GET_FIELD(reg->data, - DBG_IDLE_CHK_INFO_REG_ADDRESS)); + u32 addr = + GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_ADDRESS); /* Write register header */ struct dbg_idle_chk_result_reg_hdr *reg_hdr = @@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, memset(reg_hdr, 0, sizeof(*reg_hdr)); reg_hdr->size = reg->size; SET_FIELD(reg_hdr->data, - DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, - rule->num_cond_regs + reg_id); + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, + rule->num_cond_regs + reg_id); /* Write register values */ - for (i = 0; i < reg->size; - i++, offset++, grc_addr += 4) - dump_buf[offset] = - qed_rd(p_hwfn, p_ptt, grc_addr); - } + offset += + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + reg->size); } + } } return offset; @@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE]; - u32 i, j, offset = 0; + u32 i, offset = 0; u16 entry_id; u8 reg_id; @@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (!check_rule && dump) continue; + if (!dump) { + u32 entry_dump_size = + qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + false, + rule->rule_id, + rule, + 0, + NULL); + + offset += num_reg_entries * entry_dump_size; + (*num_failing_rules) += num_reg_entries; + continue; + } + /* Go over all register entries (number of entries is the same * for all condition registers). */ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { /* Read current entry of all condition registers */ - if (dump) { - u32 next_reg_offset = 0; - - for (reg_id = 0; - reg_id < rule->num_cond_regs; - reg_id++) { - const struct dbg_idle_chk_cond_reg - *reg = &cond_regs[reg_id]; - - /* Find GRC address (if it's a memory, - * the address of the specific entry is - * calculated). - */ - u32 grc_addr = - DWORDS_TO_BYTES( - GET_FIELD(reg->data, - DBG_IDLE_CHK_COND_REG_ADDRESS)); - - if (reg->num_entries > 1 || - reg->start_entry > 0) { - u32 padded_entry_size = - reg->entry_size > 1 ? 
- roundup_pow_of_two - (reg->entry_size) : 1; - - grc_addr += - DWORDS_TO_BYTES( - (reg->start_entry + - entry_id) - * padded_entry_size); - } + u32 next_reg_offset = 0; - /* Read registers */ - if (next_reg_offset + reg->entry_size >= - IDLE_CHK_MAX_ENTRIES_SIZE) { - DP_NOTICE(p_hwfn, - "idle check registers entry is too large\n"); - return 0; - } + for (reg_id = 0; reg_id < rule->num_cond_regs; + reg_id++) { + const struct dbg_idle_chk_cond_reg *reg = + &cond_regs[reg_id]; - for (j = 0; j < reg->entry_size; - j++, next_reg_offset++, - grc_addr += 4) - cond_reg_values[next_reg_offset] = - qed_rd(p_hwfn, p_ptt, grc_addr); + /* Find GRC address (if it's a memory, the + * address of the specific entry is calculated). + */ + u32 addr = + GET_FIELD(reg->data, + DBG_IDLE_CHK_COND_REG_ADDRESS); + + if (reg->num_entries > 1 || + reg->start_entry > 0) { + u32 padded_entry_size = + reg->entry_size > 1 ? + roundup_pow_of_two(reg->entry_size) : + 1; + + addr += (reg->start_entry + entry_id) * + padded_entry_size; } + + /* Read registers */ + if (next_reg_offset + reg->entry_size >= + IDLE_CHK_MAX_ENTRIES_SIZE) { + DP_NOTICE(p_hwfn, + "idle check registers entry is too large\n"); + return 0; + } + + next_reg_offset += + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + cond_reg_values + + next_reg_offset, + dump, addr, + reg->entry_size); } /* Call rule's condition function - a return value of * true indicates failure. */ if ((*cond_arr[rule->cond_id])(cond_reg_values, - imm_values) || !dump) { + imm_values)) { offset += - qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - rule->rule_id, - rule, - entry_id, - cond_reg_values); + qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + rule->rule_id, + rule, + entry_id, + cond_reg_values); (*num_failing_rules)++; break; } @@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, struct mcp_file_att file_att; /* Call NVRAM get file command */ - if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, - image_type, &ret_mcp_resp, &ret_mcp_param, - &ret_txn_size, (u32 *)&file_att) != 0) - return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; + int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, + (u32 *)&file_att); /* Check response */ - if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) + if (nvm_result || + (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; /* Update return values */ @@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, u32 running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; - enum dbg_status status; u32 nvram_image_type; *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); @@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; - status = qed_find_nvram_image(p_hwfn, - p_ptt, - nvram_image_type, - trace_meta_offset_bytes, - trace_meta_size_bytes); - - return status; -} - -/* Reads the MCP Trace data from the specified GRC address into the specified * buffer.
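The open-coded qed_rd() loops removed above are all replaced by calls to qed_grc_dump_addr_range(), whose body is not part of this section. From its call sites (dword-based addr and len, and a return value used to advance the offset in both the real dump and the size-calculation pass) it is presumably a small wrapper along these lines:

	/* Sketch reconstructed from the call sites; not a verbatim excerpt. */
	static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 *dump_buf,
					   bool dump, u32 addr, u32 len)
	{
		u32 i;

		if (!dump)
			return len;	/* size pass: only reserve space */

		for (i = 0; i < len; i++)
			dump_buf[i] = qed_rd(p_hwfn, p_ptt,
					     DWORDS_TO_BYTES(addr + i));

		return len;
	}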
- */ -static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 grc_addr, u32 size_in_dwords, u32 *buf) -{ - u32 i; - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n", - size_in_dwords, grc_addr); - for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD) - buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr); + return qed_find_nvram_image(p_hwfn, + p_ptt, + nvram_image_type, + trace_meta_offset_bytes, + trace_meta_size_bytes); } /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified @@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; - u32 trace_meta_size_dwords, running_bundle_id, offset = 0; - u32 trace_meta_offset_bytes, trace_meta_size_bytes; + u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0; + u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0; enum dbg_status status; + bool mcp_access; int halted = 0; + mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); + *num_dumped_dwords = 0; /* Get trace data info */ @@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, * consistent if halt fails, MCP trace is taken anyway, with a small * risk that it may be corrupt. */ - if (dump) { + if (dump && mcp_access) { halted = !qed_mcp_halt(p_hwfn, p_ptt); if (!halted) DP_NOTICE(p_hwfn, "MCP halt failed!\n"); @@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump, "size", trace_data_size_dwords); /* Read trace data from scratchpad into dump buffer */ - if (dump) - qed_mcp_trace_read_data(p_hwfn, - p_ptt, - trace_data_grc_addr, - trace_data_size_dwords, - dump_buf + offset); - offset += trace_data_size_dwords; + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + BYTES_TO_DWORDS(trace_data_grc_addr), + trace_data_size_dwords); /* Resume MCP (only if halt succeeded) */ if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0) @@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump, "mcp_trace_meta", 1); /* Read trace meta info */ - status = qed_mcp_trace_get_meta_info(p_hwfn, - p_ptt, - trace_data_size_bytes, - &running_bundle_id, - &trace_meta_offset_bytes, - &trace_meta_size_bytes); - if (status != DBG_STATUS_OK) - return status; + if (mcp_access) { + status = qed_mcp_trace_get_meta_info(p_hwfn, + p_ptt, + trace_data_size_bytes, + &running_bundle_id, + &trace_meta_offset_bytes, + &trace_meta_size_bytes); + if (status == DBG_STATUS_OK) + trace_meta_size_dwords = + BYTES_TO_DWORDS(trace_meta_size_bytes); + } - /* Dump trace meta size param (trace_meta_size_bytes is always - * dword-aligned). 
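Note the caller contract this hunk creates: with MCP access blocked (DBG_GRC_PARAM_NO_MCP set), qed_mcp_trace_dump() still emits the trace-data section but returns DBG_STATUS_NVRAM_GET_IMAGE_FAILED to flag the missing meta image. Callers that accept a partial dump must treat that status as soft, as the qed_dbg_dump() hunk near the end of this patch does. A sketch of the expected caller pattern, with local names assumed:

	status = qed_mcp_trace_dump(p_hwfn, p_ptt, dump_buf,
				    true, &num_dumped_dwords);
	if (status != DBG_STATUS_OK &&
	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return status;
	/* dump_buf is still valid; only the meta section may be empty */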
- */ - trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes); - offset += qed_dump_num_param(dump_buf + offset, dump, "size", - trace_meta_size_dwords); + /* Dump trace meta size param */ + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", trace_meta_size_dwords); /* Read trace meta image into dump buffer */ - if (dump) { + if (dump && trace_meta_size_dwords) status = qed_mcp_trace_read_meta(p_hwfn, - p_ptt, - trace_meta_offset_bytes, - trace_meta_size_bytes, - dump_buf + offset); - if (status != DBG_STATUS_OK) - return status; - } - - offset += trace_meta_size_dwords; + p_ptt, + trace_meta_offset_bytes, + trace_meta_size_bytes, + dump_buf + offset); + if (status == DBG_STATUS_OK) + offset += trace_meta_size_dwords; *num_dumped_dwords = offset; - return DBG_STATUS_OK; + /* If no mcp access, indicate that the dump doesn't contain the meta + * data from NVRAM. + */ + return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED; } /* Dump GRC FIFO */ @@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct fw_asserts_ram_section *asserts; char storm_letter_str[2] = "?"; struct fw_info fw_info; - u32 offset = 0, i; + u32 offset = 0; u8 storm_id; /* Dump global params */ @@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts"); for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, - last_list_idx, element_addr; + u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx; + u32 last_list_idx, addr; if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id]) continue; @@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, /* Read FW info for the current Storm */ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + asserts = &fw_info.fw_asserts_section; + /* Dump FW Asserts section header and params */ storm_letter_str[0] = s_storm_defs[storm_id].letter; offset += qed_dump_section_hdr(dump_buf + offset, dump, @@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str); offset += qed_dump_num_param(dump_buf + offset, dump, "size", - fw_info.fw_asserts_section. - list_element_dword_size); + asserts->list_element_dword_size); if (!dump) { - offset += fw_info.fw_asserts_section. - list_element_dword_size; + offset += asserts->list_element_dword_size; continue; } @@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, fw_asserts_section_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + - RAM_LINES_TO_BYTES(fw_info.fw_asserts_section. - section_ram_line_offset); + RAM_LINES_TO_BYTES(asserts->section_ram_line_offset); next_list_idx_addr = fw_asserts_section_addr + - DWORDS_TO_BYTES(fw_info.fw_asserts_section. - list_next_index_dword_offset); + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); last_list_idx = (next_list_idx > 0 ? next_list_idx - : fw_info.fw_asserts_section.list_num_elements) - - 1; - element_addr = - fw_asserts_section_addr + - DWORDS_TO_BYTES(fw_info.fw_asserts_section. - list_dword_offset) + - last_list_idx * - DWORDS_TO_BYTES(fw_info.fw_asserts_section. 
- list_element_dword_size); - for (i = 0; - i < fw_info.fw_asserts_section.list_element_dword_size; - i++, offset++, element_addr += BYTES_IN_DWORD) - dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr); + : asserts->list_num_elements) - 1; + addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + + asserts->list_dword_offset + + last_list_idx * asserts->list_element_dword_size; + offset += + qed_grc_dump_addr_range(p_hwfn, p_ptt, + dump_buf + offset, + dump, addr, + asserts->list_element_dword_size); } /* Dump last section */ @@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) { /* Convert binary data to debug arrays */ - u32 num_of_buffers = *(u32 *)bin_ptr; - struct bin_buffer_hdr *buf_array; + struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; - buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1); - - for (buf_id = 0; buf_id < num_of_buffers; buf_id++) { + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset); s_dbg_arrays[buf_id].size_in_dwords = @@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +/* Assign default GRC param values */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 i; + + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + dev_data->grc.param_val[i] = + s_grc_param_defs[i].default_val[dev_data->chip_id]; +} + enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) @@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, /* GRC Dump */ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); - /* Clear all GRC params */ - qed_dbg_grc_clear_params(p_hwfn); + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + return status; } @@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, /* Idle Check Dump */ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + return DBG_STATUS_OK; } @@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 needed_buf_size_in_dwords; enum dbg_status status; - status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, + /* validate buffer size */ + status = + qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords); - if (status != DBG_STATUS_OK) + if (status != DBG_STATUS_OK && + status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return status; + if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; @@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, qed_update_blocks_reset_state(p_hwfn, p_ptt); /* Perform dump */ - return qed_mcp_trace_dump(p_hwfn, - p_ptt, dump_buf, true, num_dumped_dwords); + status = qed_mcp_trace_dump(p_hwfn, + p_ptt, dump_buf, true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; } enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, @@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); - return qed_reg_fifo_dump(p_hwfn, 
- p_ptt, dump_buf, true, num_dumped_dwords); + + status = qed_reg_fifo_dump(p_hwfn, + p_ptt, dump_buf, true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; } enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, @@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); - return qed_igu_fifo_dump(p_hwfn, - p_ptt, dump_buf, true, num_dumped_dwords); + + status = qed_igu_fifo_dump(p_hwfn, + p_ptt, dump_buf, true, num_dumped_dwords); + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; } enum dbg_status @@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); - return qed_protection_override_dump(p_hwfn, - p_ptt, - dump_buf, true, num_dumped_dwords); + + status = qed_protection_override_dump(p_hwfn, + p_ptt, + dump_buf, + true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; } enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, @@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN]; enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) { /* Convert binary data to debug arrays */ - u32 num_of_buffers = *(u32 *)bin_ptr; - struct bin_buffer_hdr *buf_array; + struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; - buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1); - - for (buf_id = 0; buf_id < num_of_buffers; buf_id++) { + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset); s_dbg_arrays[buf_id].size_in_dwords = @@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", + "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", elements[i].data, - GET_FIELD(elements[i].data, + (u32)GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ADDRESS) * REG_FIFO_ELEMENT_ADDR_FACTOR, s_access_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ACCESS)], - GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_PF), vf_str, - GET_FIELD(elements[i].data, + (u32)GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PF), vf_str, + (u32)GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PORT), s_privilege_strs[GET_FIELD(elements[i]. 
data, @@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n", + "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n", i, address, - GET_FIELD(elements[i].data, + (u32)GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE), - GET_FIELD(elements[i].data, + (u32)GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_READ), - GET_FIELD(elements[i].data, + (u32)GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_WRITE), s_protection_strs[GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)], @@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, */ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt, &buf_size_dwords); - if (rc != DBG_STATUS_OK) + if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return rc; feature->buf_size = buf_size_dwords * sizeof(u32); feature->dump_buf = vmalloc(feature->buf_size); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e518f914eab13f52d8f82a8e1a29a5a80a2f2b24..5f31140d0b77d3ce0ffb058526e413e2833bead5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -75,7 +75,8 @@ enum BAR_ID { BAR_ID_1 /* Used for doorbells */ }; -static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum BAR_ID bar_id) { u32 bar_reg = (bar_id == BAR_ID_0 ? PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); @@ -84,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) if (IS_VF(p_hwfn->cdev)) return 1 << 17; - val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + val = qed_rd(p_hwfn, p_ptt, bar_reg); if (val) return 1 << (val + 15); @@ -182,199 +183,573 @@ void qed_resc_free(struct qed_dev *cdev) } qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); - qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); + qed_dcbx_info_free(p_hwfn); } } -static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) +/******************** QM initialization *******************/ +#define ACTIVE_TCS_BMAP 0x9f +#define ACTIVE_TCS_BMAP_4PORT_K2 0xf + +/* determines the physical queue flags for a given PF. 
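The PQ_FLAGS_* masks composed by qed_get_pq_flags() below are defined in a header outside this patch; given how qed_init_qm_get_num_pqs() sums them with !!(flag & pq_flags), they are presumably single-bit masks along these lines:

	/* assumed definitions; the real values live in the qed headers */
	#define PQ_FLAGS_RLS	BIT(0)	/* rate-limited PQs */
	#define PQ_FLAGS_MCOS	BIT(1)	/* one PQ per traffic class */
	#define PQ_FLAGS_LB	BIT(2)	/* pure loopback PQ */
	#define PQ_FLAGS_OOO	BIT(3)	/* out-of-order PQ */
	#define PQ_FLAGS_ACK	BIT(4)	/* pure-ACK PQ */
	#define PQ_FLAGS_OFLD	BIT(5)	/* offloaded-protocol PQ */
	#define PQ_FLAGS_VFS	BIT(6)	/* one PQ per VF */
	#define PQ_FLAGS_LLT	BIT(7)	/* low-latency PQ */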
*/ +static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) { - u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; - struct qed_qm_info *qm_info = &p_hwfn->qm_info; - struct init_qm_port_params *p_qm_port; - bool init_rdma_offload_pq = false; - bool init_pure_ack_pq = false; - bool init_ooo_pq = false; - u16 num_pqs, multi_cos_tcs = 1; - u8 pf_wfq = qm_info->pf_wfq; - u32 pf_rl = qm_info->pf_rl; - u16 num_pf_rls = 0; - u16 num_vfs = 0; - -#ifdef CONFIG_QED_SRIOV - if (p_hwfn->cdev->p_iov_info) - num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; -#endif - memset(qm_info, 0, sizeof(*qm_info)); + u32 flags; - num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ - num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); + /* common flags */ + flags = PQ_FLAGS_LB; - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { - num_pqs++; /* for RoCE queue */ - init_rdma_offload_pq = true; - /* we subtract num_vfs because each require a rate limiter, - * and one default rate limiter - */ - if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) - num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1; + /* feature flags */ + if (IS_QED_SRIOV(p_hwfn->cdev)) + flags |= PQ_FLAGS_VFS; - num_pqs += num_pf_rls; - qm_info->num_pf_rls = (u8) num_pf_rls; + /* protocol flags */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + flags |= PQ_FLAGS_MCOS; + break; + case QED_PCI_FCOE: + flags |= PQ_FLAGS_OFLD; + break; + case QED_PCI_ISCSI: + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + break; + case QED_PCI_ETH_ROCE: + flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + break; + default: + DP_ERR(p_hwfn, + "unknown personality %d\n", p_hwfn->hw_info.personality); + return 0; } - if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { - num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */ - init_pure_ack_pq = true; - init_ooo_pq = true; - } + return flags; +} - /* Sanity checking that setup requires legal number of resources */ - if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) { - DP_ERR(p_hwfn, - "Need too many Physical queues - 0x%04x when only %04x are available\n", - num_pqs, RESC_NUM(p_hwfn, QED_PQ)); - return -EINVAL; - } +/* Getters for resource amounts necessary for qm initialization */ +u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) +{ + return p_hwfn->hw_info.num_hw_tc; +} - /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. - */ - qm_info->qm_pq_params = kcalloc(num_pqs, - sizeof(struct init_qm_pq_params), - b_sleepable ? GFP_KERNEL : GFP_ATOMIC); - if (!qm_info->qm_pq_params) - goto alloc_err; +u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) +{ + return IS_QED_SRIOV(p_hwfn->cdev) ? + p_hwfn->cdev->p_iov_info->total_vfs : 0; +} - qm_info->qm_vport_params = kcalloc(num_vports, - sizeof(struct init_qm_vport_params), - b_sleepable ? GFP_KERNEL - : GFP_ATOMIC); - if (!qm_info->qm_vport_params) - goto alloc_err; +#define NUM_DEFAULT_RLS 1 - qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS, - sizeof(struct init_qm_port_params), - b_sleepable ? GFP_KERNEL - : GFP_ATOMIC); - if (!qm_info->qm_port_params) - goto alloc_err; +u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) +{ + u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); - qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data), - b_sleepable ? 
GFP_KERNEL : GFP_ATOMIC); - if (!qm_info->wfq_data) - goto alloc_err; + /* num RLs can't exceed resource amount of rls or vports */ + num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); - vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); + /* Make sure after we reserve there's something left */ + if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) + return 0; - /* First init rate limited queues */ - for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) { - qm_info->qm_pq_params[curr_queue].vport_id = vport_id++; - qm_info->qm_pq_params[curr_queue].tc_id = - p_hwfn->hw_info.non_offload_tc; - qm_info->qm_pq_params[curr_queue].wrr_group = 1; - qm_info->qm_pq_params[curr_queue].rl_valid = 1; - } + /* subtract rls necessary for VFs and one default one for the PF */ + num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; - /* First init per-TC PQs */ - for (i = 0; i < multi_cos_tcs; i++) { - struct init_qm_pq_params *params = - &qm_info->qm_pq_params[curr_queue++]; + return num_pf_rls; +} - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || - p_hwfn->hw_info.personality == QED_PCI_ETH) { - params->vport_id = vport_id; - params->tc_id = p_hwfn->hw_info.non_offload_tc; - params->wrr_group = 1; - } else { - params->vport_id = vport_id; - params->tc_id = p_hwfn->hw_info.offload_tc; - params->wrr_group = 1; - } - } +u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) +{ + u32 pq_flags = qed_get_pq_flags(p_hwfn); + + /* all pqs share the same vport, except for vfs and pf_rl pqs */ + return (!!(PQ_FLAGS_RLS & pq_flags)) * + qed_init_qm_get_num_pf_rls(p_hwfn) + + (!!(PQ_FLAGS_VFS & pq_flags)) * + qed_init_qm_get_num_vfs(p_hwfn) + 1; +} + +/* calc amount of PQs according to the requested flags */ +u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) +{ + u32 pq_flags = qed_get_pq_flags(p_hwfn); + + return (!!(PQ_FLAGS_RLS & pq_flags)) * + qed_init_qm_get_num_pf_rls(p_hwfn) + + (!!(PQ_FLAGS_MCOS & pq_flags)) * + qed_init_qm_get_num_tcs(p_hwfn) + + (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + + (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) + + (!!(PQ_FLAGS_LLT & pq_flags)) + + (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); +} + +/* initialize the top level QM params */ +static void qed_init_qm_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + bool four_port; + + /* pq and vport bases for this PF */ + qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + + /* rate limiting and weighted fair queueing are always enabled */ + qm_info->vport_rl_en = 1; + qm_info->vport_wfq_en = 1; + + /* TC config is different for AH 4 port */ + four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2; + + /* in AH 4 port we have fewer TCs per port */ + qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : + NUM_OF_PHYS_TCS; + + /* unless MFW indicated otherwise, ooo_tc == 3 for + * AH 4-port and 4 otherwise. + */ + if (!qm_info->ooo_tc) + qm_info->ooo_tc = four_port ? 
DCBX_TCP_OOO_K2_4PORT_TC : + DCBX_TCP_OOO_TC; +} - /* Then init pure-LB PQ */ - qm_info->pure_lb_pq = curr_queue; - qm_info->qm_pq_params[curr_queue].vport_id = - (u8) RESC_START(p_hwfn, QED_VPORT); - qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC; - qm_info->qm_pq_params[curr_queue].wrr_group = 1; - curr_queue++; - - qm_info->offload_pq = 0; - if (init_rdma_offload_pq) { - qm_info->offload_pq = curr_queue; - qm_info->qm_pq_params[curr_queue].vport_id = vport_id; - qm_info->qm_pq_params[curr_queue].tc_id = - p_hwfn->hw_info.offload_tc; - qm_info->qm_pq_params[curr_queue].wrr_group = 1; - curr_queue++; - } - - if (init_pure_ack_pq) { - qm_info->pure_ack_pq = curr_queue; - qm_info->qm_pq_params[curr_queue].vport_id = vport_id; - qm_info->qm_pq_params[curr_queue].tc_id = - p_hwfn->hw_info.offload_tc; - qm_info->qm_pq_params[curr_queue].wrr_group = 1; - curr_queue++; - } - - if (init_ooo_pq) { - qm_info->ooo_pq = curr_queue; - qm_info->qm_pq_params[curr_queue].vport_id = vport_id; - qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC; - qm_info->qm_pq_params[curr_queue].wrr_group = 1; - curr_queue++; - } - - /* Then init per-VF PQs */ - vf_offset = curr_queue; - for (i = 0; i < num_vfs; i++) { - /* First vport is used by the PF */ - qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1; - qm_info->qm_pq_params[curr_queue].tc_id = - p_hwfn->hw_info.non_offload_tc; - qm_info->qm_pq_params[curr_queue].wrr_group = 1; - qm_info->qm_pq_params[curr_queue].rl_valid = 1; - curr_queue++; - } - - qm_info->vf_queues_offset = vf_offset; - qm_info->num_pqs = num_pqs; - qm_info->num_vports = num_vports; +/* initialize qm vport params */ +static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u8 i; + + /* all vports participate in weighted fair queueing */ + for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) + qm_info->qm_vport_params[i].vport_wfq = 1; +} +/* initialize qm port params */ +static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) +{ /* Initialize qm port parameters */ - num_ports = p_hwfn->cdev->num_ports_in_engines; + u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines; + + /* indicate how ooo and high pri traffic is dealt with */ + active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? + ACTIVE_TCS_BMAP_4PORT_K2 : + ACTIVE_TCS_BMAP; + for (i = 0; i < num_ports; i++) { - p_qm_port = &qm_info->qm_port_params[i]; + struct init_qm_port_params *p_qm_port = + &p_hwfn->qm_info.qm_port_params[i]; + p_qm_port->active = 1; - if (num_ports == 4) - p_qm_port->active_phys_tcs = 0x7; - else - p_qm_port->active_phys_tcs = 0x9f; + p_qm_port->active_phys_tcs = active_phys_tcs; p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; } +} - qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS; +/* Reset the params which must be reset for qm init. QM init may be called as + * a result of flows other than driver load (e.g. dcbx renegotiation). Other + * params may be affected by the init but would simply recalculate to the same + * values. The allocations made for QM init, ports, vports, pqs and vfqs are not + * affected as these amounts stay the same. 
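A worked example of the getters above, assuming the bit definitions sketched earlier: a QED_PCI_ETH_ROCE function with SR-IOV enabled (pq_flags = LB | MCOS | OFLD | LLT | VFS), 4 TCs and 16 VFs comes out to

	num_pqs    = 4 (MCOS) + 1 (LB) + 1 (OFLD) + 1 (LLT) + 16 (VFS) = 23
	num_vports = 16 (VFS) + 1 (shared by all non-VF PQs)           = 17

with no PF rate limiters, since this version of qed_get_pq_flags() never sets PQ_FLAGS_RLS.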
+ */ +static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; - qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->num_pqs = 0; + qm_info->num_vports = 0; + qm_info->num_pf_rls = 0; + qm_info->num_vf_pqs = 0; + qm_info->first_vf_pq = 0; + qm_info->first_mcos_pq = 0; + qm_info->first_rl_pq = 0; +} + +static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + qm_info->num_vports++; + + if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) + DP_ERR(p_hwfn, + "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", + qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); +} + +/* initialize a single pq and manage qm_info resources accounting. + * The pq_init_flags param determines whether the PQ is rate limited + * (for VF or PF) and whether a new vport is allocated to the pq or not + * (i.e. vport will be shared). + */ + +/* flags for pq init */ +#define PQ_INIT_SHARE_VPORT (1 << 0) +#define PQ_INIT_PF_RL (1 << 1) +#define PQ_INIT_VF_RL (1 << 2) + +/* defines for pq init */ +#define PQ_INIT_DEFAULT_WRR_GROUP 1 +#define PQ_INIT_DEFAULT_TC 0 +#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc) + +static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, + struct qed_qm_info *qm_info, + u8 tc, u32 pq_init_flags) +{ + u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn); + + if (pq_idx > max_pq) + DP_ERR(p_hwfn, + "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); + + /* init pq params */ + qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + + qm_info->num_vports; + qm_info->qm_pq_params[pq_idx].tc_id = tc; + qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; + qm_info->qm_pq_params[pq_idx].rl_valid = + (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); + + /* qm params accounting */ + qm_info->num_pqs++; + if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) + qm_info->num_vports++; + + if (pq_init_flags & PQ_INIT_PF_RL) + qm_info->num_pf_rls++; + + if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) + DP_ERR(p_hwfn, + "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", + qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); + + if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn)) + DP_ERR(p_hwfn, + "rl overflow! 
qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", + qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn)); +} + +/* get pq index according to PQ_FLAGS */ +static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, + u32 pq_flags) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + /* Can't have multiple flags set here */ + if (bitmap_weight((unsigned long *)&pq_flags, + sizeof(pq_flags) * BITS_PER_BYTE) > 1) + goto err; + + switch (pq_flags) { + case PQ_FLAGS_RLS: + return &qm_info->first_rl_pq; + case PQ_FLAGS_MCOS: + return &qm_info->first_mcos_pq; + case PQ_FLAGS_LB: + return &qm_info->pure_lb_pq; + case PQ_FLAGS_OOO: + return &qm_info->ooo_pq; + case PQ_FLAGS_ACK: + return &qm_info->pure_ack_pq; + case PQ_FLAGS_OFLD: + return &qm_info->offload_pq; + case PQ_FLAGS_LLT: + return &qm_info->low_latency_pq; + case PQ_FLAGS_VFS: + return &qm_info->first_vf_pq; + default: + goto err; + } + +err: + DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); + return NULL; +} + +/* save pq index in qm info */ +static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn, + u32 pq_flags, u16 pq_val) +{ + u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); + + *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; +} + +/* get tx pq index, with the PQ TX base already set (ready for context init) */ +u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags) +{ + u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); + + return *base_pq_idx + CM_TX_PQ_BASE; +} + +u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) +{ + u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); + + if (tc > max_tc) + DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); + + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; +} + +u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) +{ + u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); + + if (vf > max_vf) + DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); + + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; +} + +u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl) +{ + u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn); + + if (rl > max_rl) + DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); + + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; +} + +/* Functions for creating specific types of pqs */ +static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info,
PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u8 tc_idx; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); + for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++) + qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); qm_info->num_vf_pqs = num_vfs; - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) + qed_init_qm_pq(p_hwfn, + qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); +} - for (i = 0; i < qm_info->num_vports; i++) - qm_info->qm_vport_params[i].vport_wfq = 1; +static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn) +{ + u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn); + struct qed_qm_info *qm_info = &p_hwfn->qm_info; - qm_info->vport_rl_en = 1; - qm_info->vport_wfq_en = 1; - qm_info->pf_rl = pf_rl; - qm_info->pf_wfq = pf_wfq; + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); + for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) + qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL); +} + +static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) +{ + /* rate limited pqs, must come first (FW assumption) */ + qed_init_qm_rl_pqs(p_hwfn); + + /* pqs for multi cos */ + qed_init_qm_mcos_pqs(p_hwfn); + + /* pure loopback pq */ + qed_init_qm_lb_pq(p_hwfn); + + /* out of order pq */ + qed_init_qm_ooo_pq(p_hwfn); + + /* pure ack pq */ + qed_init_qm_pure_ack_pq(p_hwfn); + + /* pq for offloaded protocol */ + qed_init_qm_offload_pq(p_hwfn); + + /* low latency pq */ + qed_init_qm_low_latency_pq(p_hwfn); + + /* done sharing vports */ + qed_init_qm_advance_vport(p_hwfn); + + /* pqs for vfs */ + qed_init_qm_vf_pqs(p_hwfn); +} + +/* compare values of getters against resources amounts */ +static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) +{ + if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) { + DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); + return -EINVAL; + } + + if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) { + DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); + return -EINVAL; + } return 0; +} -alloc_err: - qed_qm_info_free(p_hwfn); - return -ENOMEM; +static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct init_qm_vport_params *vport; + struct init_qm_port_params *port; + struct init_qm_pq_params *pq; + int i, tc; + + /* top level params */ + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n", + qm_info->start_pq, + qm_info->start_vport, + qm_info->pure_lb_pq, + qm_info->offload_pq, 
qm_info->pure_ack_pq); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", + qm_info->ooo_pq, + qm_info->first_vf_pq, + qm_info->num_pqs, + qm_info->num_vf_pqs, + qm_info->num_vports, qm_info->max_phys_tcs_per_port); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", + qm_info->pf_rl_en, + qm_info->pf_wfq_en, + qm_info->vport_rl_en, + qm_info->vport_wfq_en, + qm_info->pf_wfq, + qm_info->pf_rl, + qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); + + /* port table */ + for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) { + port = &(qm_info->qm_port_params[i]); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", + i, + port->active, + port->active_phys_tcs, + port->num_pbf_cmd_lines, + port->num_btb_blocks, port->reserved); + } + + /* vport table */ + for (i = 0; i < qm_info->num_vports; i++) { + vport = &(qm_info->qm_vport_params[i]); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", + qm_info->start_vport + i, + vport->vport_rl, vport->vport_wfq); + for (tc = 0; tc < NUM_OF_TCS; tc++) + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "%d ", vport->first_tx_pq_id[tc]); + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); + } + + /* pq table */ + for (i = 0; i < qm_info->num_pqs; i++) { + pq = &(qm_info->qm_pq_params[i]); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", + qm_info->start_pq + i, + pq->vport_id, + pq->tc_id, pq->wrr_group, pq->rl_valid); + } +} + +static void qed_init_qm_info(struct qed_hwfn *p_hwfn) +{ + /* reset params required for init run */ + qed_init_qm_reset_params(p_hwfn); + + /* init QM top level params */ + qed_init_qm_params(p_hwfn); + + /* init QM port params */ + qed_init_qm_port_params(p_hwfn); + + /* init QM vport params */ + qed_init_qm_vport_params(p_hwfn); + + /* init QM physical queue params */ + qed_init_qm_pq_params(p_hwfn); + + /* display all that init */ + qed_dp_init_qm_params(p_hwfn); } /* This function reconfigures the QM pf on the fly. @@ -391,17 +766,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) bool b_rc; int rc; - /* qm_info is allocated in qed_init_qm_info() which is already called - * from qed_resc_alloc() or previous call of qed_qm_reconf(). - * The allocated size may change each init, so we free it before next - * allocation. 
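The comment being removed here described the old scheme (free and reallocate qm_info on every init); the replacement splits allocation from initialization, so a re-init never touches the heap. In outline, a sketch of the resulting call flow rather than a verbatim excerpt:

	/* driver load */
	qed_resc_alloc(cdev);
		/* -> qed_alloc_qm_data(): kzalloc at maximum sizes */
		/* -> qed_init_qm_info():  compute params in place  */

	/* later, e.g. DCBx renegotiation */
	qed_qm_reconf(p_hwfn, p_ptt);
		/* -> qed_init_qm_info():  recompute, no realloc    */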
- */ - qed_qm_info_free(p_hwfn); - /* initialize qed's qm data structure */ - rc = qed_init_qm_info(p_hwfn, false); - if (rc) - return rc; + qed_init_qm_info(p_hwfn); /* stop PF's qm queues */ spin_lock_bh(&qm_lock); @@ -415,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_init_clear_rt_data(p_hwfn); /* prepare QM portion of runtime array */ - qed_qm_init_pf(p_hwfn); + qed_qm_init_pf(p_hwfn, p_ptt); /* activate init tool on runtime array */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, @@ -434,6 +800,47 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } +static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + int rc; + + rc = qed_init_qm_sanity(p_hwfn); + if (rc) + goto alloc_err; + + qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * + qed_init_qm_get_num_pqs(p_hwfn), + GFP_KERNEL); + if (!qm_info->qm_pq_params) + goto alloc_err; + + qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * + qed_init_qm_get_num_vports(p_hwfn), + GFP_KERNEL); + if (!qm_info->qm_vport_params) + goto alloc_err; + + qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * + p_hwfn->cdev->num_ports_in_engines, + GFP_KERNEL); + if (!qm_info->qm_port_params) + goto alloc_err; + + qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) * + qed_init_qm_get_num_vports(p_hwfn), + GFP_KERNEL); + if (!qm_info->wfq_data) + goto alloc_err; + + return 0; + +alloc_err: + DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); + qed_qm_info_free(p_hwfn); + return -ENOMEM; +} + int qed_resc_alloc(struct qed_dev *cdev) { struct qed_iscsi_info *p_iscsi_info; @@ -442,8 +849,10 @@ int qed_resc_alloc(struct qed_dev *cdev) #ifdef CONFIG_QED_LL2 struct qed_ll2_info *p_ll2_info; #endif + u32 rdma_tasks, excess_tasks; struct qed_consq *p_consq; struct qed_eq *p_eq; + u32 line_count; int i, rc = 0; if (IS_VF(cdev)) @@ -465,19 +874,44 @@ int qed_resc_alloc(struct qed_dev *cdev) /* Set the HW cid/tid numbers (in the contest manager) * Must be done prior to any further computations. */ - rc = qed_cxt_set_pf_params(p_hwfn); + rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); if (rc) goto alloc_err; - /* Prepare and process QM requirements */ - rc = qed_init_qm_info(p_hwfn, true); + rc = qed_alloc_qm_data(p_hwfn); if (rc) goto alloc_err; + /* init qm info */ + qed_init_qm_info(p_hwfn); + /* Compute the ILT client partition */ - rc = qed_cxt_cfg_ilt_compute(p_hwfn); - if (rc) - goto alloc_err; + rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); + if (rc) { + DP_NOTICE(p_hwfn, + "too many ILT lines; re-computing with fewer lines\n"); + /* In case there are not enough ILT lines we reduce the + * number of RDMA tasks and re-compute. + */ + excess_tasks = + qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count); + if (!excess_tasks) + goto alloc_err; + + rdma_tasks = RDMA_MAX_TIDS - excess_tasks; + rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks); + if (rc) + goto alloc_err; + + rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); + if (rc) { + DP_ERR(p_hwfn, + "failed ILT compute.
Requested too many lines: %u\n", + line_count); + + goto alloc_err; + } + } /* CID map / ILT shadow table / T2 * The talbes sizes are determined by the computations above @@ -674,11 +1108,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, return rc; } -static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) +static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) { int hw_mode = 0; - hw_mode = (1 << MODE_BB_B0); + if (QED_IS_BB_B0(p_hwfn->cdev)) { + hw_mode |= 1 << MODE_BB; + } else if (QED_IS_AH(p_hwfn->cdev)) { + hw_mode |= 1 << MODE_K2; + } else { + DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", + p_hwfn->cdev->type); + return -EINVAL; + } switch (p_hwfn->cdev->num_ports_in_engines) { case 1: @@ -693,7 +1135,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) default: DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", p_hwfn->cdev->num_ports_in_engines); - return; + return -EINVAL; } switch (p_hwfn->cdev->mf_mode) { @@ -719,6 +1161,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), "Configuring function for hw_mode: 0x%08x\n", p_hwfn->hw_info.hw_mode); + + return 0; } /* Init run time data for all PFs on an engine. */ @@ -748,16 +1192,67 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev) } } +static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 val, wr_mbs, cache_line_size; + + val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); + switch (val) { + case 0: + wr_mbs = 128; + break; + case 1: + wr_mbs = 256; + break; + case 2: + wr_mbs = 512; + break; + default: + DP_INFO(p_hwfn, + "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", + val); + return; + } + + cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); + switch (cache_line_size) { + case 32: + val = 0; + break; + case 64: + val = 1; + break; + case 128: + val = 2; + break; + case 256: + val = 3; + break; + default: + DP_INFO(p_hwfn, + "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", + cache_line_size); + } + + if (L1_CACHE_BYTES > wr_mbs) + DP_INFO(p_hwfn, + "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", + L1_CACHE_BYTES, wr_mbs); + + STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); +} + static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; struct qed_dev *cdev = p_hwfn->cdev; + u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; - u8 vf_id; qed_init_cau_rt_data(cdev); @@ -784,17 +1279,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_cxt_hw_init_common(p_hwfn); - /* Close gate from NIG to BRB/Storm; By default they are open, but - * we close them to prevent NIG from passing data to reset blocks. - * Should have been done in the ENGINE phase, but init-tool lacks - * proper port-pretend capabilities. 
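To make qed_init_cache_line_size() above concrete: on a typical x86 build with L1_CACHE_BYTES = 64, a PSWRQ2_REG_WR_MBS0 reading of 1 gives wr_mbs = 256; cache_line_size = min(64, 256) = 64, which encodes to val = 1 in the PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET runtime register. The "suboptimal for performance" message only fires when the OS cache line exceeds the chip's write MBS, for example a 256-byte cache line against wr_mbs = 128.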
- */ - qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); - qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); - qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1); - qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); - qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); - qed_port_unpretend(p_hwfn, p_ptt); + qed_init_cache_line_size(p_hwfn, p_ptt); rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) @@ -814,7 +1299,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); } - for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { + max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; + for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); @@ -832,17 +1318,15 @@ static int qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) { - u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; - u32 dpi_bit_shift, dpi_count; + u32 dpi_bit_shift, dpi_count, dpi_page_size; u32 min_dpis; + u32 n_wids; /* Calculate DPI size */ - dpi_page_size_1 = QED_WID_SIZE * n_cpus; - dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE); - dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2); - dpi_page_size = roundup_pow_of_two(dpi_page_size); + n_wids = max_t(u32, QED_MIN_WIDS, n_cpus); + dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids); + dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); dpi_bit_shift = ilog2(dpi_page_size / 4096); - dpi_count = pwm_region_size / dpi_page_size; min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; @@ -870,13 +1354,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 pwm_regsize, norm_regsize; u32 non_pwm_conn, min_addr_reg1; - u32 db_bar_size, n_cpus; + u32 db_bar_size, n_cpus = 1; u32 roce_edpm_mode; u32 pf_dems_shift; int rc = 0; u8 cond; - db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1); + db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); if (p_hwfn->cdev->num_hwfns > 1) db_bar_size /= 2; @@ -931,6 +1415,8 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_bar(p_hwfn, p_ptt); } + p_hwfn->wid_count = (u16) n_cpus; + DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", norm_regsize, @@ -967,7 +1453,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn, static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_tunn_start_params *p_tunn, + struct qed_tunnel_info *p_tunn, int hw_mode, bool b_hw_start, enum qed_int_mode int_mode, @@ -987,7 +1473,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, p_hwfn->qm_info.pf_rl = 100000; } - qed_cxt_hw_init_pf(p_hwfn); + qed_cxt_hw_init_pf(p_hwfn, p_ptt); qed_int_igu_init_rt(p_hwfn); @@ -1095,25 +1581,47 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); } -int qed_hw_init(struct qed_dev *cdev, - struct qed_tunn_start_params *p_tunn, - bool b_hw_start, - enum qed_int_mode int_mode, - bool allow_npar_tx_switch, - const u8 *bin_fw_data) +static void +qed_fill_load_req_params(struct qed_load_req_params *p_load_req, + struct qed_drv_load_params *p_drv_load) { + memset(p_load_req, 0, sizeof(*p_load_req)); + + p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; + p_load_req->timeout_val = p_drv_load->mfw_timeout_val; + p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; + p_load_req->override_force_load = p_drv_load->override_force_load; +} + +static int qed_vf_start(struct qed_hwfn *p_hwfn, + struct qed_hw_init_params *p_params) +{ + if (p_params->p_tunn) { + qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); + qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); + } + + p_hwfn->b_int_enabled = 1; + + return 0; +} + +int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) +{ + struct qed_load_req_params load_req_params; u32 load_code, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; - if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); return -EINVAL; } if (IS_PF(cdev)) { - rc = qed_init_fw_data(cdev, bin_fw_data); + rc = qed_init_fw_data(cdev, p_params->bin_fw_data); if (rc) return rc; } @@ -1128,26 +1636,32 @@ int qed_hw_init(struct qed_dev *cdev, } if (IS_VF(cdev)) { - p_hwfn->b_int_enabled = 1; + qed_vf_start(p_hwfn, p_params); continue; } /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); - qed_calc_hw_mode(p_hwfn); + rc = qed_calc_hw_mode(p_hwfn); + if (rc) + return rc; - rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); + qed_fill_load_req_params(&load_req_params, + p_params->p_drv_load_params); + rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, + &load_req_params); if (rc) { - DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); + DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n"); return rc; } - qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); - + load_code = load_req_params.load_code; DP_VERBOSE(p_hwfn, QED_MSG_SP, - "Load request was sent. Resp:0x%x, Load code: 0x%x\n", - rc, load_code); + "Load request was sent. 
Load code: 0x%x\n", + load_code); + + qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); @@ -1168,11 +1682,15 @@ int qed_hw_init(struct qed_dev *cdev, /* Fall into */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, - p_tunn, p_hwfn->hw_info.hw_mode, - b_hw_start, int_mode, - allow_npar_tx_switch); + p_params->p_tunn, + p_hwfn->hw_info.hw_mode, + p_params->b_hw_start, + p_params->int_mode, + p_params->allow_npar_tx_switch); break; default: + DP_NOTICE(p_hwfn, + "Unexpected load code [0x%08x]", load_code); rc = -EINVAL; break; } @@ -1212,10 +1730,7 @@ int qed_hw_init(struct qed_dev *cdev, if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); - drv_mb_param = (FW_MAJOR_VERSION << 24) | - (FW_MINOR_VERSION << 16) | - (FW_REVISION_VERSION << 8) | - (FW_ENGINEERING_VERSION); + drv_mb_param = STORM_FW_VERSION; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, drv_mb_param, &load_code, &param); @@ -1290,27 +1805,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev) int qed_hw_stop(struct qed_dev *cdev) { - int rc = 0, t_rc; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc, rc2 = 0; int j; for_each_hwfn(cdev, j) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; - struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + p_hwfn = &cdev->hwfns[j]; + p_ptt = p_hwfn->p_main_ptt; DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); if (IS_VF(cdev)) { qed_vf_pf_int_cleanup(p_hwfn); + rc = qed_vf_pf_reset(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "qed_vf_pf_reset failed. rc = %d.\n", + rc); + rc2 = -EINVAL; + } continue; } /* mark the hw as uninitialized... */ p_hwfn->hw_init_done = false; + /* Send unload command to MCP */ + rc = qed_mcp_unload_req(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_REQ command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } + + qed_slowpath_irq_sync(p_hwfn); + + /* After this point no MFW attentions are expected, e.g. prevent + * race between pf stop and dcbx pf update. + */ rc = qed_sp_pf_stop(p_hwfn); - if (rc) + if (rc) { DP_NOTICE(p_hwfn, - "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n"); + "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", + rc); + rc2 = -EINVAL; + } qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); @@ -1333,34 +1874,54 @@ int qed_hw_stop(struct qed_dev *cdev) /* Need to wait 1ms to guarantee SBs are cleared */ usleep_range(1000, 2000); + + /* Disable PF in HW blocks */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); + qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); + + rc = qed_mcp_unload_done(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_DONE command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } } if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; + /* Disable DMAE in PXP - in CMT, this should only be done for * first hw-function, and only after all transactions have * stopped for all active hw-functions. */ - t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], - cdev->hwfns[0].p_main_ptt, false); - if (t_rc != 0) - rc = t_rc; + rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); + if (rc) { + DP_NOTICE(p_hwfn, + "qed_change_pci_hwfn failed. 
rc = %d.\n", rc); + rc2 = -EINVAL; + } } - return rc; + return rc2; } -void qed_hw_stop_fastpath(struct qed_dev *cdev) +int qed_hw_stop_fastpath(struct qed_dev *cdev) { int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; - struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_ptt *p_ptt; if (IS_VF(cdev)) { qed_vf_pf_int_cleanup(p_hwfn); continue; } + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); @@ -1378,100 +1939,28 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) /* Need to wait 1ms to guarantee SBs are cleared */ usleep_range(1000, 2000); - } -} - -void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) -{ - if (IS_VF(p_hwfn->cdev)) - return; - - /* Re-open incoming traffic */ - qed_wr(p_hwfn, p_hwfn->p_main_ptt, - NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); -} - -static int qed_reg_assert(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 reg, bool expected) -{ - u32 assert_val = qed_rd(p_hwfn, p_ptt, reg); - - if (assert_val != expected) { - DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n", - reg, expected); - return -EINVAL; + qed_ptt_release(p_hwfn, p_ptt); } return 0; } -int qed_hw_reset(struct qed_dev *cdev) +int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) { - int rc = 0; - u32 unload_resp, unload_param; - u32 wol_param; - int i; - - switch (cdev->wol_config) { - case QED_OV_WOL_DISABLED: - wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; - break; - case QED_OV_WOL_ENABLED: - wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; - break; - default: - DP_NOTICE(cdev, - "Unknown WoL configuration %02x\n", cdev->wol_config); - /* Fallthrough */ - case QED_OV_WOL_DEFAULT: - wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; - } - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - - if (IS_VF(cdev)) { - rc = qed_vf_pf_reset(p_hwfn); - if (rc) - return rc; - continue; - } - - DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); - - /* Check for incorrect states */ - qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt, - QM_REG_USG_CNT_PF_TX, 0); - qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt, - QM_REG_USG_CNT_PF_OTHER, 0); - - /* Disable PF in HW blocks */ - qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0); - qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0); - qed_wr(p_hwfn, p_hwfn->p_main_ptt, - TCFC_REG_STRONG_ENABLE_PF, 0); - qed_wr(p_hwfn, p_hwfn->p_main_ptt, - CCFC_REG_STRONG_ENABLE_PF, 0); + struct qed_ptt *p_ptt; - /* Send unload command to MCP */ - rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_UNLOAD_REQ, wol_param, - &unload_resp, &unload_param); - if (rc) { - DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n"); - unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE; - } + if (IS_VF(p_hwfn->cdev)) + return 0; - rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_UNLOAD_DONE, - 0, &unload_resp, &unload_param); - if (rc) { - DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n"); - return rc; - } - } + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; - return rc; + /* Re-open incoming traffic */ + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); + qed_ptt_release(p_hwfn, p_ptt); + + return 0; } /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ @@ -1485,10 +1974,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) { /* clear indirect access */ - qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); - 
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); - qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0); - qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0); + if (QED_IS_AH(p_hwfn->cdev)) { + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0); + } else { + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); + } /* Clean Previous errors if such exist */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, @@ -1522,7 +2026,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; struct qed_sb_cnt_info sb_cnt_info; - int num_features = 1; + u32 non_l2_sbs = 0; if (IS_ENABLED(CONFIG_QED_RDMA) && p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { @@ -1530,204 +2034,260 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) * the status blocks equally between L2 / RoCE but with * consideration as to how many l2 queues / cnqs we have. */ - num_features++; - feat_num[QED_RDMA_CNQ] = - min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, + min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2, RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); - } - - feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / - num_features, - RESC_NUM(p_hwfn, QED_L2_QUEUE)); - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); - feat_num[QED_VF_L2_QUE] = - min_t(u32, - RESC_NUM(p_hwfn, QED_L2_QUEUE) - - FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt); + non_l2_sbs = feat_num[QED_RDMA_CNQ]; + } + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || + p_hwfn->hw_info.personality == QED_PCI_ETH) { + /* Start by allocating VF queues, then PF's */ + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + feat_num[QED_VF_L2_QUE] = min_t(u32, + RESC_NUM(p_hwfn, QED_L2_QUEUE), + sb_cnt_info.sb_iov_cnt); + feat_num[QED_PF_L2_QUE] = min_t(u32, + RESC_NUM(p_hwfn, QED_SB) - + non_l2_sbs, + RESC_NUM(p_hwfn, + QED_L2_QUEUE) - + FEAT_NUM(p_hwfn, + QED_VF_L2_QUE)); + } + + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) + feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB), + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n", + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n", (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), - RESC_NUM(p_hwfn, QED_SB), num_features); + (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), + RESC_NUM(p_hwfn, QED_SB)); } -static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id) +const char *qed_hw_get_resc_name(enum qed_resources res_id) { - enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; - switch (res_id) { - case QED_SB: - mfw_res_id = RESOURCE_NUM_SB_E; - break; case QED_L2_QUEUE: - mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; - break; + return "L2_QUEUE"; case QED_VPORT: - mfw_res_id = RESOURCE_NUM_VPORT_E; - break; + return "VPORT"; case QED_RSS_ENG: - mfw_res_id = 
RESOURCE_NUM_RSS_ENGINES_E; - break; + return "RSS_ENG"; case QED_PQ: - mfw_res_id = RESOURCE_NUM_PQ_E; - break; + return "PQ"; case QED_RL: - mfw_res_id = RESOURCE_NUM_RL_E; - break; + return "RL"; case QED_MAC: + return "MAC"; case QED_VLAN: - /* Each VFC resource can accommodate both a MAC and a VLAN */ - mfw_res_id = RESOURCE_VFC_FILTER_E; - break; + return "VLAN"; + case QED_RDMA_CNQ_RAM: + return "RDMA_CNQ_RAM"; case QED_ILT: - mfw_res_id = RESOURCE_ILT_E; - break; + return "ILT"; case QED_LL2_QUEUE: - mfw_res_id = RESOURCE_LL2_QUEUE_E; - break; - case QED_RDMA_CNQ_RAM: + return "LL2_QUEUE"; case QED_CMDQS_CQS: - /* CNQ/CMDQS are the same resource */ - mfw_res_id = RESOURCE_CQS_E; - break; + return "CMDQS_CQS"; case QED_RDMA_STATS_QUEUE: - mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; - break; + return "RDMA_STATS_QUEUE"; + case QED_BDQ: + return "BDQ"; + case QED_SB: + return "SB"; default: - break; + return "UNKNOWN_RESOURCE"; + } +} + +static int +__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 resc_max_val, u32 *p_mcp_resp) +{ + int rc; + + rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, + resc_max_val, p_mcp_resp); + if (rc) { + DP_NOTICE(p_hwfn, + "MFW response failure for a max value setting of resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return rc; + } + + if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) + DP_INFO(p_hwfn, + "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", + res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); + + return 0; +} + +static int +qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + bool b_ah = QED_IS_AH(p_hwfn->cdev); + u32 resc_max_val, mcp_resp; + u8 res_id; + int rc; + + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { + switch (res_id) { + case QED_LL2_QUEUE: + resc_max_val = MAX_NUM_LL2_RX_QUEUES; + break; + case QED_RDMA_CNQ_RAM: + /* No need for a case for QED_CMDQS_CQS since + * CNQ/CMDQS are the same resource. + */ + resc_max_val = NUM_OF_CMDQS_CQS; + break; + case QED_RDMA_STATS_QUEUE: + resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 + : RDMA_NUM_STATISTIC_COUNTERS_BB; + break; + case QED_BDQ: + resc_max_val = BDQ_NUM_RESOURCES; + break; + default: + continue; + } + + rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, + resc_max_val, &mcp_resp); + if (rc) + return rc; + + /* There's no point to continue to the next resource if the + * command is not supported by the MFW. + * We do continue if the command is supported but the resource + * is unknown to the MFW. Such a resource will be later + * configured with the default allocation values. + */ + if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) + return -EINVAL; } - return mfw_res_id; + return 0; } -static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, - enum qed_resources res_id) +static +int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, + enum qed_resources res_id, + u32 *p_resc_num, u32 *p_resc_start) { u8 num_funcs = p_hwfn->num_funcs_on_engine; + bool b_ah = QED_IS_AH(p_hwfn->cdev); struct qed_sb_cnt_info sb_cnt_info; - u32 dflt_resc_num = 0; switch (res_id) { - case QED_SB: - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); - dflt_resc_num = sb_cnt_info.sb_cnt; - break; case QED_L2_QUEUE: - dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs; + *p_resc_num = (b_ah ? 
MAX_NUM_L2_QUEUES_K2 : + MAX_NUM_L2_QUEUES_BB) / num_funcs; break; case QED_VPORT: - dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs; + *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : + MAX_NUM_VPORTS_BB) / num_funcs; break; case QED_RSS_ENG: - dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs; + *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : + ETH_RSS_ENGINE_NUM_BB) / num_funcs; break; case QED_PQ: - /* The granularity of the PQs is 8 */ - dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs; - dflt_resc_num &= ~0x7; + *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 : + MAX_QM_TX_QUEUES_BB) / num_funcs; + *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ break; case QED_RL: - dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; + *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; break; case QED_MAC: case QED_VLAN: /* Each VFC resource can accommodate both a MAC and a VLAN */ - dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; + *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; break; case QED_ILT: - dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs; + *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : + PXP_NUM_ILT_RECORDS_BB) / num_funcs; break; case QED_LL2_QUEUE: - dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; + *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; break; case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ - dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs; + *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; break; case QED_RDMA_STATS_QUEUE: - dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs; + *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : + RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs; break; - default: + case QED_BDQ: + if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && + p_hwfn->hw_info.personality != QED_PCI_FCOE) + *p_resc_num = 0; + else + *p_resc_num = 1; break; + case QED_SB: + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + *p_resc_num = sb_cnt_info.sb_cnt; + break; + default: + return -EINVAL; } - return dflt_resc_num; -} - -static const char *qed_hw_get_resc_name(enum qed_resources res_id) -{ switch (res_id) { - case QED_SB: - return "SB"; - case QED_L2_QUEUE: - return "L2_QUEUE"; - case QED_VPORT: - return "VPORT"; - case QED_RSS_ENG: - return "RSS_ENG"; - case QED_PQ: - return "PQ"; - case QED_RL: - return "RL"; - case QED_MAC: - return "MAC"; - case QED_VLAN: - return "VLAN"; - case QED_RDMA_CNQ_RAM: - return "RDMA_CNQ_RAM"; - case QED_ILT: - return "ILT"; - case QED_LL2_QUEUE: - return "LL2_QUEUE"; - case QED_CMDQS_CQS: - return "CMDQS_CQS"; - case QED_RDMA_STATS_QUEUE: - return "RDMA_STATS_QUEUE"; + case QED_BDQ: + if (!*p_resc_num) + *p_resc_start = 0; + else if (p_hwfn->cdev->num_ports_in_engines == 4) + *p_resc_start = p_hwfn->port_id; + else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) + *p_resc_start = p_hwfn->port_id; + else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + *p_resc_start = p_hwfn->port_id + 2; + break; default: - return "UNKNOWN_RESOURCE"; + *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; + break; } + + return 0; } -static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, - enum qed_resources res_id) +static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, + enum qed_resources res_id) { - u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param; - u32 *p_resc_num, *p_resc_start; - struct resource_info resc_info; + u32 dflt_resc_num = 0, dflt_resc_start = 0; + u32 mcp_resp, *p_resc_num, *p_resc_start; int rc; p_resc_num = &RESC_NUM(p_hwfn, res_id); 
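The default-resource path above splits each chip-wide maximum evenly between the enabled functions and then masks the PQ count down to the hardware granularity of 8 before deriving the per-function starting index. A small worked example follows; the maximum and function counts are made-up sample values, not taken from any specific adapter.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_qm_tx_queues = 448;	/* assumed chip-wide maximum */
	uint32_t num_funcs = 6;
	uint32_t enabled_func_idx = 2;
	uint32_t num, start;

	num = max_qm_tx_queues / num_funcs;	/* 74 */
	num &= ~0x7;				/* PQ granularity of 8 -> 72 */
	start = num * enabled_func_idx;		/* 144 */

	printf("PQs: num=%u start=%u\n", num, start);
	return 0;
}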
p_resc_start = &RESC_START(p_hwfn, res_id); - /* Default values assumes that each function received equal share */ - dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id); - if (!dflt_resc_num) { + rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, + &dflt_resc_start); + if (rc) { DP_ERR(p_hwfn, "Failed to get default amount for resource %d [%s]\n", res_id, qed_hw_get_resc_name(res_id)); - return -EINVAL; - } - dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx; - - memset(&resc_info, 0, sizeof(resc_info)); - resc_info.res_id = qed_hw_get_mfw_res_id(res_id); - if (resc_info.res_id == RESOURCE_NUM_INVALID) { - DP_ERR(p_hwfn, - "Failed to match resource %d [%s] with the MFW resources\n", - res_id, qed_hw_get_resc_name(res_id)); - return -EINVAL; + return rc; } - rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info, - &mcp_resp, &mcp_param); + rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, + &mcp_resp, p_resc_num, p_resc_start); if (rc) { DP_NOTICE(p_hwfn, "MFW response failure for an allocation request for resource %d [%s]\n", @@ -1740,13 +2300,12 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, * - There is an internal error in the MFW while processing the request * - The resource ID is unknown to the MFW */ - if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK && - mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) { - DP_NOTICE(p_hwfn, - "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n", - res_id, - qed_hw_get_resc_name(res_id), - mcp_resp, dflt_resc_num, dflt_resc_start); + if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { + DP_INFO(p_hwfn, + "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", + res_id, + qed_hw_get_resc_name(res_id), + mcp_resp, dflt_resc_num, dflt_resc_start); *p_resc_num = dflt_resc_num; *p_resc_start = dflt_resc_start; goto out; @@ -1754,13 +2313,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, /* Special handling for status blocks; Would be revised in future */ if (res_id == QED_SB) { - resc_info.size -= 1; - resc_info.offset -= p_hwfn->enabled_func_idx; + *p_resc_num -= 1; + *p_resc_start -= p_hwfn->enabled_func_idx; } - - *p_resc_num = resc_info.size; - *p_resc_start = resc_info.offset; - out: /* PQs have to divide by 8 [that's the HW granularity]. * Reduce number so it would fit. */ @@ -1778,19 +2333,80 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, return 0; } -static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) +static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) { - u8 res_id; int rc; + u8 res_id; for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { - rc = qed_hw_set_resc_info(p_hwfn, res_id); + rc = __qed_hw_set_resc_info(p_hwfn, res_id); if (rc) return rc; } + return 0; +} + +static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_unlock_params resc_unlock_params; + struct qed_resc_lock_params resc_lock_params; + bool b_ah = QED_IS_AH(p_hwfn->cdev); + u8 res_id; + int rc; + + /* Setting the max values of the soft resources and the following + * resources allocation queries should be atomic. Since several PFs can + * run in parallel - a resource lock is needed. + * If either the resource lock or resource set value commands are not + * supported - skip the max values setting, release the lock if + * needed, and proceed to the queries. 
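Backing up to __qed_hw_set_resc_info() above: the flow is compute defaults, ask the management FW, and fall back to the defaults when no allocation info comes back. A compact sketch of that decision follows; mfw_query() is a stand-in stub, not the driver's mailbox API, and the response code is illustrative.

#include <stdio.h>
#include <stdint.h>

#define RESP_ALLOC_OK 1

/* Stand-in for the MFW mailbox; pretends the resource is unknown. */
static int mfw_query(uint32_t *num, uint32_t *start, uint32_t *resp)
{
	*resp = 0;
	return 0;
}

int main(void)
{
	uint32_t dflt_num = 72, dflt_start = 144;	/* computed defaults */
	uint32_t num = 0, start = 0, resp = 0;

	if (mfw_query(&num, &start, &resp))
		return 1;		/* the mailbox itself failed */

	if (resp != RESP_ALLOC_OK) {
		num = dflt_num;		/* no info: apply the defaults */
		start = dflt_start;
	}

	printf("num=%u start=%u\n", num, start);
	return 0;
}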
Other failures, including a + * failure to acquire the lock, will cause this function to fail. + */ + qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, + QED_RESC_LOCK_RESC_ALLOC, false); + + rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); + if (rc && rc != -EINVAL) { + return rc; + } else if (rc == -EINVAL) { + DP_INFO(p_hwfn, + "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); + } else if (!rc && !resc_lock_params.b_granted) { + DP_NOTICE(p_hwfn, + "Failed to acquire the resource lock for the resource allocation commands\n"); + return -EBUSY; + } else { + rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt); + if (rc && rc != -EINVAL) { + DP_NOTICE(p_hwfn, + "Failed to set the max values of the soft resources\n"); + goto unlock_and_exit; + } else if (rc == -EINVAL) { + DP_INFO(p_hwfn, + "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, + &resc_unlock_params); + if (rc) + DP_INFO(p_hwfn, + "Failed to release the resource lock for the resource allocation commands\n"); + } + } + + rc = qed_hw_set_resc_info(p_hwfn); + if (rc) + goto unlock_and_exit; + + if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); + if (rc) + DP_INFO(p_hwfn, + "Failed to release the resource lock for the resource allocation commands\n"); + } + /* Sanity for ILT */ - if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) { + if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || + (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", RESC_START(p_hwfn, QED_ILT), RESC_END(p_hwfn, QED_ILT) - 1); @@ -1799,8 +2415,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) qed_hw_set_feat(p_hwfn); - DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "The numbers for each resource are:\n"); for (res_id = 0; res_id < QED_MAX_RESC; res_id++) DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", qed_hw_get_resc_name(res_id), @@ -1808,6 +2422,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) RESC_START(p_hwfn, res_id)); return 0; + +unlock_and_exit: + if (resc_lock_params.b_granted && !resc_unlock_params.b_released) + qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); + return rc; } static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -1860,9 +2479,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G; + break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: + p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G; + break; default: DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); break; @@ -1976,8 +2601,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; + struct qed_dev *cdev = p_hwfn->cdev; - num_funcs = MAX_NUM_PFS_BB; + num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values * in the other bits are selected. 
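For the locking scheme that qed_hw_get_resc() builds around these queries, the essential ordering is: try to take the resource lock, set the max values if the MFW supports it, run the queries, and release the lock exactly once. Here is a hedged outline with stub functions; none of these names are driver API, and the return codes are simplified.

#include <stdio.h>
#include <stdbool.h>

enum { OK, NOTSUP, BUSY };

static int resc_lock(void)	{ return OK; }	/* may be NOTSUP or BUSY */
static int set_max_vals(void)	{ return OK; }	/* may be NOTSUP */
static void query_resc(void)	{ printf("querying allocation\n"); }
static void resc_unlock(void)	{ printf("lock released\n"); }

int main(void)
{
	int rc = resc_lock();
	bool granted = (rc == OK);
	bool released = false;

	if (rc == BUSY)
		return 1;		/* contention is a hard failure */

	if (granted && set_max_vals() == NOTSUP) {
		resc_unlock();		/* old MFW: release before querying */
		released = true;
	}

	query_resc();			/* always runs */

	if (granted && !released)
		resc_unlock();		/* single release on the normal path */
	return 0;
}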
@@ -1990,12 +2616,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); if (reg_function_hide & 0x1) { - if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) { - num_funcs = 0; - eng_mask = 0xaaaa; + if (QED_IS_BB(cdev)) { + if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { + num_funcs = 0; + eng_mask = 0xaaaa; + } else { + num_funcs = 1; + eng_mask = 0x5554; + } } else { num_funcs = 1; - eng_mask = 0x5554; + eng_mask = 0xfffe; } /* Get the number of the enabled functions on the engine */ @@ -2027,24 +2658,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } -static int -qed_get_hw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_pci_personality personality) +static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { u32 port_mode; - int rc; - - /* Since all information is common, only first hwfns should do this */ - if (IS_LEAD_HWFN(p_hwfn)) { - rc = qed_iov_hw_info(p_hwfn); - if (rc) - return rc; - } - /* Read the port mode */ - port_mode = qed_rd(p_hwfn, p_ptt, - CNIG_REG_NW_PORT_MODE_BB_B0); + port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); if (port_mode < 3) { p_hwfn->cdev->num_ports_in_engines = 1; @@ -2057,6 +2676,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, /* Default num_ports_in_engines to something */ p_hwfn->cdev->num_ports_in_engines = 1; } +} + +static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 port; + int i; + + p_hwfn->cdev->num_ports_in_engines = 0; + + for (i = 0; i < MAX_NUM_PORTS_K2; i++) { + port = qed_rd(p_hwfn, p_ptt, + CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); + if (port & 1) + p_hwfn->cdev->num_ports_in_engines++; + } + + if (!p_hwfn->cdev->num_ports_in_engines) { + DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); + + /* Default num_ports_in_engine to something */ + p_hwfn->cdev->num_ports_in_engines = 1; + } +} + +static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + if (QED_IS_BB(p_hwfn->cdev)) + qed_hw_info_port_num_bb(p_hwfn, p_ptt); + else + qed_hw_info_port_num_ah(p_hwfn, p_ptt); +} + +static int +qed_get_hw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_pci_personality personality) +{ + int rc; + + /* Since all information is common, only first hwfns should do this */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_iov_hw_info(p_hwfn); + if (rc) + return rc; + } + + qed_hw_info_port_num(p_hwfn, p_ptt); qed_hw_get_nvm_info(p_hwfn, p_ptt); @@ -2085,33 +2752,48 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = protocol; } + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; + p_hwfn->hw_info.num_active_tc = 1; + qed_get_num_funcs(p_hwfn, p_ptt); if (qed_mcp_is_init(p_hwfn)) p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; - return qed_hw_get_resc(p_hwfn); + return qed_hw_get_resc(p_hwfn, p_ptt); } -static int qed_get_dev_info(struct qed_dev *cdev) +static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_dev *cdev = p_hwfn->cdev; + u16 device_id_mask; u32 tmp; /* Read Vendor Id / Device Id */ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); - cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_NUM); - 
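A worked example of the enabled-function count that qed_get_num_funcs() derives above: the engine mask selects the sibling PF bits, the FUNCTION_HIDE bitmap is inverted, and the surviving bits are counted on top of the current PF. The hide value below is invented; the 0x5554 mask is the BB single-engine value from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t eng_mask = 0x5554;	/* sibling PFs on a BB engine */
	uint32_t hide = 0x00f0;		/* assume PFs 4..7 are hidden */
	uint32_t tmp = ~hide & eng_mask;
	int num_funcs = 1;		/* count this PF itself */

	while (tmp) {
		num_funcs += tmp & 1;
		tmp >>= 1;
	}

	printf("enabled funcs on engine: %d\n", num_funcs);	/* 6 */
	return 0;
}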
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_REV); + /* Determine type */ + device_id_mask = cdev->device_id & QED_DEV_ID_MASK; + switch (device_id_mask) { + case QED_DEV_ID_MASK_BB: + cdev->type = QED_DEV_TYPE_BB; + break; + case QED_DEV_ID_MASK_AH: + cdev->type = QED_DEV_TYPE_AH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); + return -EBUSY; + } + + cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); + cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); + MASK_FIELD(CHIP_REV, cdev->chip_rev); - cdev->type = QED_DEV_TYPE_BB; /* Learn number of HW-functions */ - tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CMT_ENABLED_FOR_PAIR); + tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); if (tmp & (1 << p_hwfn->rel_pf_id)) { DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); @@ -2120,15 +2802,17 @@ static int qed_get_dev_info(struct qed_dev *cdev) cdev->num_hwfns = 1; } - cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt, + cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG) >> 4; MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); - cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_METAL); + cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); MASK_FIELD(CHIP_METAL, cdev->chip_metal); DP_INFO(cdev->hwfns, - "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", + "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", + QED_IS_BB(cdev) ? "BB" : "AH", + 'A' + cdev->chip_rev, + (int)cdev->chip_metal, cdev->chip_num, cdev->chip_rev, cdev->chip_bond_id, cdev->chip_metal); @@ -2174,7 +2858,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* First hwfn learns basic information, e.g., number of hwfns */ if (!p_hwfn->my_id) { - rc = qed_get_dev_info(p_hwfn->cdev); + rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); if (rc) goto err1; } @@ -2195,6 +2879,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, goto err2; } + /* Sending a mailbox to the MFW should be done after qed_get_hw_info() + * is called as it sets the ports number in an engine. + */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); + } + /* Allocate the init RT array and initialize the init-ops engine */ rc = qed_init_alloc(p_hwfn); if (rc) @@ -2236,11 +2929,14 @@ int qed_hw_prepare(struct qed_dev *cdev, u8 __iomem *addr; /* adjust bar offset for second engine */ - addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2; + addr = cdev->regview + + qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, + BAR_ID_0) / 2; p_regview = addr; - /* adjust doorbell bar offset for second engine */ - addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2; + addr = cdev->doorbells + + qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, + BAR_ID_1) / 2; p_doorbell = addr; /* prepare second hw function */ @@ -3363,3 +4059,22 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) memset(p_hwfn->qm_info.wfq_data, 0, sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); } + +int qed_device_num_engines(struct qed_dev *cdev) +{ + return QED_IS_BB(cdev) ? 
2 : 1; +} + +static int qed_device_num_ports(struct qed_dev *cdev) +{ + /* in CMT always only one port */ + if (cdev->num_hwfns > 1) + return 1; + + return cdev->num_ports_in_engines * qed_device_num_engines(cdev); +} + +int qed_device_get_port_id(struct qed_dev *cdev) +{ + return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 6812003411cdc9870a6508de198a962e4bd1e68e..cefe3ee9064a2670c4579f7ec1af3c17861200fe 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev); */ void qed_resc_setup(struct qed_dev *cdev); +enum qed_override_force_load { + QED_OVERRIDE_FORCE_LOAD_NONE, + QED_OVERRIDE_FORCE_LOAD_ALWAYS, + QED_OVERRIDE_FORCE_LOAD_NEVER, +}; + +struct qed_drv_load_params { + /* Indicates whether the driver is running over a crash kernel. + * As part of the load request, this will be used for providing the + * driver role to the MFW. + * In case of a crash kernel over PDA - this should be set to false. + */ + bool is_crash_kernel; + + /* The timeout value that the MFW should use when locking the engine for + * the driver load process. + * A value of '0' means the default value, and '255' means no timeout. + */ + u8 mfw_timeout_val; +#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0 +#define QED_LOAD_REQ_LOCK_TO_NONE 255 + + /* Avoid engine reset when first PF loads on it */ + bool avoid_eng_reset; + + /* Allow overriding the default force load behavior */ + enum qed_override_force_load override_force_load; +}; + +struct qed_hw_init_params { + /* Tunneling parameters */ + struct qed_tunnel_info *p_tunn; + + bool b_hw_start; + + /* Interrupt mode [msix, inta, etc.] to use */ + enum qed_int_mode int_mode; + + /* NPAR tx switching to be used for vports for tx-switching */ + bool allow_npar_tx_switch; + + /* Binary fw data pointer in binary fw file */ + const u8 *bin_fw_data; + + /* Driver load parameters */ + struct qed_drv_load_params *p_drv_load_params; +}; + /** * @brief qed_hw_init - * * @param cdev - * @param p_tunn - * @param b_hw_start - * @param int_mode - interrupt mode [msix, inta, etc.] to use. - * @param allow_npar_tx_switch - npar tx switching to be used - * for vports configured for tx-switching. - * @param bin_fw_data - binary fw data pointer in binary fw file. - * Pass NULL if not using binary fw file. 
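To make the port arithmetic introduced above concrete: BB parts expose two engines and AH one, a CMT device always reports a single port, and the port ID is the leading PF's absolute ID modulo the total port count. A worked example with invented sample values:

#include <stdio.h>

int main(void)
{
	int is_bb = 1, num_hwfns = 1;	/* assume a non-CMT BB adapter */
	int ports_in_engine = 2;
	int abs_pf_id = 5;

	int engines = is_bb ? 2 : 1;
	int ports = (num_hwfns > 1) ? 1 : ports_in_engine * engines;
	int port_id = abs_pf_id % ports;

	printf("engines=%d ports=%d port_id=%d\n", engines, ports, port_id);
	return 0;	/* prints engines=2 ports=4 port_id=1 */
}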
+ * @param p_params * * @return int */ -int qed_hw_init(struct qed_dev *cdev, - struct qed_tunn_start_params *p_tunn, - bool b_hw_start, - enum qed_int_mode int_mode, - bool allow_npar_tx_switch, - const u8 *bin_fw_data); +int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); /** * @brief qed_hw_timers_stop_all - stop the timers HW block * @@ -128,26 +165,20 @@ int qed_hw_stop(struct qed_dev *cdev); * * @param cdev * + * @return int */ -void qed_hw_stop_fastpath(struct qed_dev *cdev); +int qed_hw_stop_fastpath(struct qed_dev *cdev); /** * @brief qed_hw_start_fastpath - restart fastpath traffic, * only if hw_stop_fastpath was called * - * @param cdev - * - */ -void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); - -/** - * @brief qed_hw_reset - - * - * @param cdev + * @param p_hwfn * * @return int */ -int qed_hw_reset(struct qed_dev *cdev); +int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); + /** * @brief qed_hw_prepare - @@ -441,4 +472,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, */ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 coalesce, u8 qid, u16 sb_id); + +const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index cbc81412174f9e1ea07a763a90072348e883699f..21a58fffd02bed96ba07580446c72e2280da47eb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; - p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id); + p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); @@ -241,7 +241,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, struct fcoe_conn_offload_ramrod_data *p_data; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - u16 pq_id = 0, tmp; + u16 physical_q0, tmp; int rc; /* Get SPQ entry */ @@ -261,9 +261,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, p_data = &p_ramrod->offload_ramrod_data; /* Transmission PQ is the first of the PF */ - pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL); - p_conn->physical_q0 = cpu_to_le16(pq_id); - p_data->physical_q0 = cpu_to_le16(pq_id); + physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_conn->physical_q0 = cpu_to_le16(physical_q0); + p_data->physical_q0 = cpu_to_le16(physical_q0); p_data->conn_id = cpu_to_le16(p_conn->conn_id); DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr); @@ -340,10 +340,10 @@ qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn, static int qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { - struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u32 active_segs = 0; @@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, u8 bdq_id) { - u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); - - return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); + if (RESC_NUM(p_hwfn, QED_BDQ)) { + return (u8 
__iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, + QED_BDQ), + bdq_id); + } else { + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } } static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, u8 bdq_id) { - u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); - - return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); + if (RESC_NUM(p_hwfn, QED_BDQ)) { + return (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, + QED_BDQ), + bdq_id); + } else { + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } } struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) @@ -753,6 +765,7 @@ static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev, static int qed_fcoe_stop(struct qed_dev *cdev) { + struct qed_ptt *p_ptt; int rc; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { @@ -766,10 +779,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev) return -EINVAL; } + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (!p_ptt) + return -EAGAIN; + /* Stop the fcoe */ - rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), + rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt, QED_SPQ_MODE_EBLOCK, NULL); cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 37c2bfb663bb481c3d7241e8ccfc614d389d7535..858a57a735894d9f7788f30cf8fab386ca965808 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -574,6 +574,7 @@ enum core_event_opcode { CORE_EVENT_TX_QUEUE_STOP, CORE_EVENT_RX_QUEUE_START, CORE_EVENT_RX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_FLUSH, MAX_CORE_EVENT_OPCODE }; @@ -625,6 +626,7 @@ enum core_ramrod_cmd_id { CORE_RAMROD_TX_QUEUE_START, CORE_RAMROD_RX_QUEUE_STOP, CORE_RAMROD_TX_QUEUE_STOP, + CORE_RAMROD_RX_QUEUE_FLUSH, MAX_CORE_RAMROD_CMD_ID }; @@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe { u8 type; u8 ramrod_cmd_id; __le16 echo; - __le32 reserved1[7]; + struct core_rx_cqe_opaque_data opaque_data; + __le32 reserved1[5]; }; union core_rx_cqe_union { @@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data { __le16 reserved2[2]; }; -struct core_tx_bd_flags { - u8 as_bitfield; -#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 -#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1 -#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1 -#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2 -#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1 -#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3 -#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1 -#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4 -#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1 -#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5 -#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1 -#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 -#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 +struct core_tx_bd_data { + __le16 as_bitfield; +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_DATA_START_BD_MASK 0x1 +#define CORE_TX_BD_DATA_START_BD_SHIFT 2 +#define 
CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 +#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 +#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 +#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 +#define CORE_TX_BD_DATA_NBDS_MASK 0xF +#define CORE_TX_BD_DATA_NBDS_SHIFT 8 +#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 +#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 +#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 +#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3 +#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14 }; struct core_tx_bd { struct regpair addr; __le16 nbytes; __le16 nw_vlan_or_lb_echo; - u8 bitfield0; -#define CORE_TX_BD_NBDS_MASK 0xF -#define CORE_TX_BD_NBDS_SHIFT 0 -#define CORE_TX_BD_ROCE_FLAV_MASK 0x1 -#define CORE_TX_BD_ROCE_FLAV_SHIFT 4 -#define CORE_TX_BD_RESERVED0_MASK 0x7 -#define CORE_TX_BD_RESERVED0_SHIFT 5 - struct core_tx_bd_flags bd_flags; + struct core_tx_bd_data bd_data; __le16 bitfield1; #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 #define CORE_TX_BD_TX_DST_MASK 0x1 #define CORE_TX_BD_TX_DST_SHIFT 14 -#define CORE_TX_BD_RESERVED1_MASK 0x1 -#define CORE_TX_BD_RESERVED1_SHIFT 15 +#define CORE_TX_BD_RESERVED_MASK 0x1 +#define CORE_TX_BD_RESERVED_SHIFT 15 }; enum core_tx_dest { @@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data { __le32 reserved0[2]; }; +enum dcb_dhcp_update_flag { + DONT_UPDATE_DCB_DHCP, + UPDATE_DCB, + UPDATE_DSCP, + UPDATE_DCB_DSCP, + MAX_DCB_DHCP_UPDATE_FLAG +}; + struct eth_mstorm_per_pf_stat { struct regpair gre_discard_pkts; struct regpair vxlan_discard_pkts; @@ -893,6 +905,12 @@ union event_ring_element { struct event_ring_next_addr next_addr; }; +enum fw_flow_ctrl_mode { + flow_ctrl_pause, + flow_ctrl_pfc, + MAX_FW_FLOW_CTRL_MODE +}; + /* Major and Minor hsi Versions */ struct hsi_fp_ver_struct { u8 minor_ver_arr[2]; @@ -921,6 +939,7 @@ enum malicious_vf_error_id { ETH_EDPM_OUT_OF_SYNC, ETH_TUNN_IPV6_EXT_NBD_ERR, ETH_CONTROL_PACKET_VIOLATION, + ETH_ANTI_SPOOFING_ERR, MAX_MALICIOUS_VF_ERROR_ID }; @@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat { struct regpair ll2_mac_filter_discard; struct regpair ll2_conn_disabled_discard; struct regpair iscsi_irregular_pkt; - struct regpair reserved; + struct regpair fcoe_irregular_pkt; struct regpair roce_irregular_pkt; + struct regpair reserved; struct regpair eth_irregular_pkt; struct regpair reserved1; struct regpair preroce_irregular_pkt; @@ -1648,6 +1668,11 @@ enum block_addr { GRCBASE_MS = 0x6a0000, GRCBASE_PHY_PCIE = 0x620000, GRCBASE_LED = 0x6b8000, + GRCBASE_AVS_WRAP = 0x6b0000, + GRCBASE_RGFS = 0x19d0000, + GRCBASE_TGFS = 0x19e0000, + GRCBASE_PTLD = 0x19f0000, + GRCBASE_YPLD = 0x1a10000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -1732,6 +1757,11 @@ enum block_id { BLOCK_MS, BLOCK_PHY_PCIE, BLOCK_LED, + BLOCK_AVS_WRAP, + BLOCK_RGFS, + BLOCK_TGFS, + BLOCK_PTLD, + BLOCK_YPLD, BLOCK_MISC_AEU, BLOCK_BAR0_MAP, MAX_BLOCK_ID @@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result { __le32 data; #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF -#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24 - __le16 attn_idx_offset; +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF 
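The CORE_TX_BD_DATA_* pairs above follow the usual MASK/SHIFT convention, where a field is programmed by masking the value and shifting it into place. A self-contained illustration follows; SET_FIELD is spelled out here rather than taken from the kernel headers, and it assumes the field starts out zeroed.

#include <stdio.h>
#include <stdint.h>

#define CORE_TX_BD_DATA_START_BD_MASK	0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT	2
#define CORE_TX_BD_DATA_NBDS_MASK	0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT	8

/* Simplified set-only helper; the kernel's variant also clears first. */
#define SET_FIELD(value, name, val) \
	((value) |= ((val) & name##_MASK) << name##_SHIFT)

int main(void)
{
	uint16_t bd_data = 0;

	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 3);

	printf("bd_data = 0x%04x\n", bd_data);	/* 0x0304 */
	return 0;
}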
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 + __le16 block_attn_offset; __le16 reserved; __le32 sts_val; __le32 mask_val; @@ -1815,12 +1845,12 @@ struct dbg_mode_hdr { /* Attention register */ struct dbg_attn_reg { struct dbg_mode_hdr mode; - __le16 attn_idx_offset; + __le16 block_attn_offset; __le32 data; #define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF -#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24 +#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 __le32 sts_clr_address; __le32 mask_address; }; @@ -2001,6 +2031,20 @@ enum dbg_bus_clients { MAX_DBG_BUS_CLIENTS }; +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ, + DBG_BUS_CONSTRAINT_OP_NE, + DBG_BUS_CONSTRAINT_OP_LT, + DBG_BUS_CONSTRAINT_OP_LTC, + DBG_BUS_CONSTRAINT_OP_LE, + DBG_BUS_CONSTRAINT_OP_LEC, + DBG_BUS_CONSTRAINT_OP_GT, + DBG_BUS_CONSTRAINT_OP_GTC, + DBG_BUS_CONSTRAINT_OP_GE, + DBG_BUS_CONSTRAINT_OP_GEC, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + /* Debug Bus memory address */ struct dbg_bus_mem_addr { __le32 lo; @@ -2092,10 +2136,18 @@ struct dbg_bus_data { * DBG_BUS_TARGET_ID_PCI. */ __le16 reserved; - struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */ + struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */ struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */ }; +enum dbg_bus_filter_types { + DBG_BUS_FILTER_TYPE_OFF, + DBG_BUS_FILTER_TYPE_PRE, + DBG_BUS_FILTER_TYPE_POST, + DBG_BUS_FILTER_TYPE_ON, + MAX_DBG_BUS_FILTER_TYPES +}; + /* Debug bus frame modes */ enum dbg_bus_frame_modes { DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */ @@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes { MAX_DBG_BUS_FRAME_MODES }; +enum dbg_bus_input_types { + DBG_BUS_INPUT_TYPE_STORM, + DBG_BUS_INPUT_TYPE_BLOCK, + MAX_DBG_BUS_INPUT_TYPES +}; + +enum dbg_bus_other_engine_modes { + DBG_BUS_OTHER_ENGINE_MODE_NONE, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX, + MAX_DBG_BUS_OTHER_ENGINE_MODES +}; + +enum dbg_bus_post_trigger_types { + DBG_BUS_POST_TRIGGER_RECORD, + DBG_BUS_POST_TRIGGER_DROP, + MAX_DBG_BUS_POST_TRIGGER_TYPES +}; + +enum dbg_bus_pre_trigger_types { + DBG_BUS_PRE_TRIGGER_START_FROM_ZERO, + DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, + DBG_BUS_PRE_TRIGGER_DROP, + MAX_DBG_BUS_PRE_TRIGGER_TYPES +}; + +enum dbg_bus_semi_frame_modes { + DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0, + DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3, + MAX_DBG_BUS_SEMI_FRAME_MODES +}; + /* Debug bus states */ enum dbg_bus_states { DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */ @@ -2115,6 +2201,19 @@ enum dbg_bus_states { MAX_DBG_BUS_STATES }; +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF, + DBG_BUS_STORM_MODE_PRAM_ADDR, + DBG_BUS_STORM_MODE_DRA_RW, + DBG_BUS_STORM_MODE_DRA_W, + DBG_BUS_STORM_MODE_LD_ST_ADDR, + DBG_BUS_STORM_MODE_DRA_FSM, + DBG_BUS_STORM_MODE_RH, + DBG_BUS_STORM_MODE_FOC, + DBG_BUS_STORM_MODE_EXT_STORE, + MAX_DBG_BUS_STORM_MODES +}; + /* Debug bus target IDs */ enum dbg_bus_targets { /* records debug bus to DBG block internal buffer */ @@ -2128,13 +2227,10 @@ enum dbg_bus_targets { /* GRC Dump data */ struct dbg_grc_data { - __le32 param_val[40]; /* Value of each GRC parameter. Array size must - * match the enum dbg_grc_params. 
- */ - u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was - * set by the user (0/1). Array size must - * match the enum dbg_grc_params. - */ + u8 params_initialized; + u8 reserved1; + __le16 reserved2; + __le32 param_val[48]; }; /* Debug GRC params */ @@ -2181,6 +2277,8 @@ enum dbg_grc_params { DBG_GRC_PARAM_PARITY_SAFE, DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */ DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */ + DBG_GRC_PARAM_NO_MCP, + DBG_GRC_PARAM_NO_FW_VER, MAX_DBG_GRC_PARAMS }; @@ -2280,7 +2378,7 @@ struct dbg_tools_data { struct dbg_bus_data bus; /* Debug Bus data */ struct idle_chk_data idle_chk; /* Idle Check data */ u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */ - u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1). + u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1). */ u8 chip_id; /* Chip ID (from enum chip_ids) */ u8 platform_id; /* Platform ID (from enum platform_ids) */ @@ -2404,7 +2502,7 @@ struct fw_info_location { enum init_modes { MODE_RESERVED, - MODE_BB_B0, + MODE_BB, MODE_K2, MODE_ASIC, MODE_RESERVED2, @@ -2418,7 +2516,6 @@ enum init_modes { MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, MODE_100G, - MODE_40G, MODE_RESERVED6, MAX_INIT_MODES }; @@ -2685,6 +2782,13 @@ struct iro { * @param bin_ptr - a pointer to the binary data with debug arrays. */ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); +/** + * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their + * default value. + * + * @param p_hwfn - HW device data + */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); /** * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for * GRC Dump. @@ -3369,6 +3473,11 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_geneve_enable, bool ip_geneve_enable); +void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 pf_id); +void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 pf_id, bool tcp, bool udp, + bool ipv4, bool ipv6); #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) @@ -3418,7 +3527,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) #define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ (IRO[22].base + ((pf_id) * IRO[22].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size) #define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) #define USTORM_QUEUE_STAT_SIZE (IRO[23].size) @@ -3482,7 +3591,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, static const struct iro iro_arr[47] = { {0x0, 0x0, 0x0, 0x0, 0x8}, - {0x4cb0, 0x78, 0x0, 0x0, 0x78}, + {0x4cb0, 0x80, 0x0, 0x0, 0x80}, {0x6318, 0x20, 0x0, 0x0, 0x20}, {0xb00, 0x8, 0x0, 0x0, 0x4}, {0xa80, 0x8, 0x0, 0x0, 0x4}, @@ -3521,13 +3630,13 @@ static const struct iro iro_arr[47] = { {0xd888, 0x38, 0x0, 0x0, 0x24}, {0x12c38, 0x10, 0x0, 0x0, 0x8}, {0x11aa0, 0x38, 0x0, 0x0, 0x18}, - {0xa8c0, 0x30, 0x0, 0x0, 0x10}, - {0x86f8, 0x28, 0x0, 0x0, 0x18}, + {0xa8c0, 0x38, 0x0, 0x0, 0x10}, + {0x86f8, 0x30, 0x0, 0x0, 0x18}, {0x101f8, 0x10, 0x0, 0x0, 0x10}, {0xdd08, 0x48, 0x0, 0x0, 0x38}, {0x10660, 0x20, 0x0, 0x0, 0x20}, {0x2b80, 0x80, 0x0, 0x0, 0x10}, - {0x5000, 0x10, 0x0, 0x0, 0x10}, + {0x5020, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets 
*/ @@ -4595,6 +4704,12 @@ enum eth_ipv4_frag_type { MAX_ETH_IPV4_FRAG_TYPE }; +enum eth_ip_type { + ETH_IPV4, + ETH_IPV6, + MAX_ETH_IP_TYPE +}; + enum eth_ramrod_cmd_id { ETH_RAMROD_UNUSED, ETH_RAMROD_VPORT_START, @@ -4752,6 +4867,18 @@ struct eth_vport_tx_mode { __le16 reserved2[3]; }; +enum gft_filter_update_action { + GFT_ADD_FILTER, + GFT_DELETE_FILTER, + MAX_GFT_FILTER_UPDATE_ACTION +}; + +enum gft_logic_filter_type { + GFT_FILTER_TYPE, + RFS_FILTER_TYPE, + MAX_GFT_LOGIC_FILTER_TYPE +}; + /* Ramrod data for rx queue start ramrod */ struct rx_queue_start_ramrod_data { __le16 rx_queue_id; @@ -4822,6 +4949,16 @@ struct rx_udp_filter_data { __le32 tenant_id; }; +struct rx_update_gft_filter_data { + struct regpair pkt_hdr_addr; + __le16 pkt_hdr_length; + __le16 rx_qid_or_action_icid; + u8 vport_id; + u8 filter_type; + u8 filter_action; + u8 reserved; +}; + /* Ramrod data for rx queue start ramrod */ struct tx_queue_start_ramrod_data { __le16 sb_id; @@ -4944,7 +5081,10 @@ struct vport_update_ramrod_data_cmn { u8 update_mtu_flg; __le16 mtu; - u8 reserved[2]; + u8 update_ctl_frame_checks_en_flg; + u8 ctl_frame_mac_check_en; + u8 ctl_frame_ethtype_check_en; + u8 reserved[15]; }; struct vport_update_ramrod_mcast { @@ -4962,6 +5102,652 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; +struct gft_cam_line { + __le32 camline; +#define GFT_CAM_LINE_VALID_MASK 0x1 +#define GFT_CAM_LINE_VALID_SHIFT 0 +#define GFT_CAM_LINE_DATA_MASK 0x3FFF +#define GFT_CAM_LINE_DATA_SHIFT 1 +#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF +#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 +#define GFT_CAM_LINE_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_RESERVED1_SHIFT 29 +}; + +struct gft_cam_line_mapped { + __le32 camline; +#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 +#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 +}; + +union gft_cam_line_union { + struct gft_cam_line cam_line; + struct gft_cam_line_mapped cam_line_mapped; +}; + +enum gft_profile_ip_version { + GFT_PROFILE_IPV4 = 0, + GFT_PROFILE_IPV6 = 1, + MAX_GFT_PROFILE_IP_VERSION +}; + +enum gft_profile_upper_protocol_type { + GFT_PROFILE_ROCE_PROTOCOL = 0, + GFT_PROFILE_RROCE_PROTOCOL = 1, + GFT_PROFILE_FCOE_PROTOCOL = 2, + GFT_PROFILE_ICMP_PROTOCOL = 3, + GFT_PROFILE_ARP_PROTOCOL = 4, + GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, + GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, + GFT_PROFILE_TCP_PROTOCOL = 7, 
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, + GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, + GFT_PROFILE_UDP_PROTOCOL = 10, + GFT_PROFILE_USER_IP_1_INNER = 11, + GFT_PROFILE_USER_IP_2_OUTER = 12, + GFT_PROFILE_USER_ETH_1_INNER = 13, + GFT_PROFILE_USER_ETH_2_OUTER = 14, + GFT_PROFILE_RAW = 15, + MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE +}; + +struct gft_ram_line { + __le32 low32bits; +#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 +#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 +#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 +#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 +#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 +#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 +#define GFT_RAM_LINE_TTL_MASK 0x1 +#define GFT_RAM_LINE_TTL_SHIFT 18 +#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 +#define GFT_RAM_LINE_RESERVED0_MASK 0x1 +#define GFT_RAM_LINE_RESERVED0_SHIFT 20 +#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 +#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 +#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 +#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 +#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 +#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 +#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 +#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 +#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 +#define GFT_RAM_LINE_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_DST_PORT_SHIFT 30 +#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 + __le32 high32bits; +#define GFT_RAM_LINE_DSCP_MASK 0x1 +#define GFT_RAM_LINE_DSCP_SHIFT 0 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 +#define GFT_RAM_LINE_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_DST_IP_SHIFT 2 +#define GFT_RAM_LINE_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_SRC_IP_SHIFT 3 +#define GFT_RAM_LINE_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_PRIORITY_SHIFT 
4 +#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 +#define GFT_RAM_LINE_VLAN_MASK 0x1 +#define GFT_RAM_LINE_VLAN_SHIFT 6 +#define GFT_RAM_LINE_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_DST_MAC_SHIFT 7 +#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 +#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 +#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 +#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF +#define GFT_RAM_LINE_RESERVED1_SHIFT 10 +}; + +struct mstorm_eth_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct xstorm_eth_conn_agctxdq_ext_ldpart { + u8 reserved0; + u8 eth_state; + u8 flags0; +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 +#define 
XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 + u8 flags4; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define 
XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define 
XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id; + __le16 physical_q0; + __le16 quota; + __le16 edpm_num_bds; + __le16 tx_bd_cons; + __le16 tx_bd_prod; + __le16 tx_class; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; +}; + +struct xstorm_eth_hw_conn_ag_ctx { + u8 reserved0; + u8 eth_state; + u8 flags0; +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id; + __le16 physical_q0; + __le16 quota; + __le16 edpm_num_bds; + __le16 tx_bd_cons; + __le16 tx_bd_prod; + __le16 tx_class; + __le16 conn_dpi; +}; + struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; @@ -6165,7 +6951,7 @@ struct ystorm_roce_conn_st_ctx { }; struct xstorm_roce_conn_st_ctx { - struct regpair temp[22]; + struct regpair temp[24]; }; struct tstorm_roce_conn_st_ctx { @@ -6220,7 +7006,7 @@ struct roce_create_qp_req_ramrod_data { __le16 mtu; __le16 pd; __le16 sq_num_pages; - __le16 reseved2; + __le16 low_latency_phy_queue; struct regpair sq_pbl_addr; struct regpair orq_pbl_addr; __le16 local_mac_addr[3]; @@ -6234,7 +7020,7 @@ struct roce_create_qp_req_ramrod_data { u8 stats_counter_id; u8 reserved3[7]; __le32 cq_cid; - __le16 physical_queue0; + __le16 
regular_latency_phy_queue; __le16 dpi; }; @@ -6282,15 +7068,16 @@ struct roce_create_qp_resp_ramrod_data { __le32 dst_gid[4]; struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; - __le32 reserved2[2]; + __le16 low_latency_phy_queue; + u8 reserved2[6]; __le32 cq_cid; - __le16 physical_queue0; + __le16 regular_latency_phy_queue; __le16 dpi; }; struct roce_destroy_qp_req_output_params { __le32 num_bound_mw; - __le32 reserved; + __le32 cq_prod; }; struct roce_destroy_qp_req_ramrod_data { @@ -6299,7 +7086,7 @@ struct roce_destroy_qp_req_ramrod_data { struct roce_destroy_qp_resp_output_params { __le32 num_invalidated_mw; - __le32 reserved; + __le32 cq_prod; }; struct roce_destroy_qp_resp_ramrod_data { @@ -7426,6 +8213,7 @@ struct ystorm_fcoe_conn_st_ctx { u8 fcp_rsp_size; __le16 mss; struct regpair reserved; + __le16 min_frame_size; u8 protection_info_flags; #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 @@ -7444,7 +8232,6 @@ struct ystorm_fcoe_conn_st_ctx { #define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F #define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 u8 fcp_xfer_size; - u8 reserved3[2]; }; struct fcoe_vlan_fields { @@ -8273,10 +9060,10 @@ struct xstorm_iscsi_conn_ag_ctx { #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 #define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 @@ -8322,10 +9109,10 @@ struct xstorm_iscsi_conn_ag_ctx { #define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 #define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 @@ -8335,8 +9122,8 @@ struct xstorm_iscsi_conn_ag_ctx { #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 u8 flags11; -#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 #define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 @@ -8440,7 +9227,7 @@ struct xstorm_iscsi_conn_ag_ctx { __le32 reg10; __le32 reg11; __le32 exp_stat_sn; - __le32 reg13; + __le32 
ongoing_fast_rxmit_seq; __le32 reg14; __le32 reg15; __le32 reg16; @@ -8466,10 +9253,10 @@ struct tstorm_iscsi_conn_ag_ctx { #define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 #define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 @@ -8490,10 +9277,10 @@ struct tstorm_iscsi_conn_ag_ctx { #define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; @@ -8539,7 +9326,7 @@ struct tstorm_iscsi_conn_ag_ctx { __le32 reg6; __le32 reg7; __le32 reg8; - u8 byte2; + u8 cid_offload_cnt; u8 byte3; __le16 word0; }; @@ -8831,11 +9618,24 @@ struct eth_stats { u64 r511; u64 r1023; u64 r1518; - u64 r1522; - u64 r2047; - u64 r4095; - u64 r9216; - u64 r16383; + + union { + struct { + u64 r1522; + u64 r2047; + u64 r4095; + u64 r9216; + u64 r16383; + } bb0; + struct { + u64 unused1; + u64 r1519_to_max; + u64 unused2; + u64 unused3; + u64 unused4; + } ah0; + } u0; + u64 rfcs; u64 rxcf; u64 rxpf; @@ -8852,14 +9652,36 @@ struct eth_stats { u64 t511; u64 t1023; u64 t1518; - u64 t2047; - u64 t4095; - u64 t9216; - u64 t16383; + + union { + struct { + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + } bb1; + struct { + u64 t1519_to_max; + u64 unused6; + u64 unused7; + u64 unused8; + } ah1; + } u1; + u64 txpf; u64 txpp; - u64 tlpiec; - u64 tncl; + + union { + struct { + u64 tlpiec; + u64 tncl; + } bb2; + struct { + u64 unused9; + u64 unused10; + } ah2; + } u2; + u64 rbyte; u64 rxuca; u64 rxmca; @@ -8943,12 +9765,12 @@ struct dcbx_ets_feature { #define DCBX_ETS_CBS_SHIFT 3 #define DCBX_ETS_MAX_TCS_MASK 0x000000f0 #define DCBX_ETS_MAX_TCS_SHIFT 4 -#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00 -#define DCBX_ISCSI_OOO_TC_SHIFT 8 +#define DCBX_OOO_TC_MASK 0x00000f00 +#define DCBX_OOO_TC_SHIFT 8 u32 pri_tc_tbl[1]; -#define DCBX_ISCSI_OOO_TC (4) +#define DCBX_TCP_OOO_TC (4) -#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1) +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) #define DCBX_CEE_STRICT_PRIORITY 0xf u32 tc_bw_tbl[2]; u32 tc_tsa_tbl[2]; @@ -8957,6 +9779,9 @@ struct dcbx_ets_feature { #define DCBX_ETS_TSA_ETS 2 }; +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + struct dcbx_app_priority_entry { u32 entry; #define DCBX_APP_PRI_MAP_MASK 0x000000ff @@ -9067,6 +9892,10 @@ struct dcb_dscp_map { struct public_global { u32 max_path; u32 max_ports; +#define MODE_1P 1 +#define MODE_2P 2 +#define MODE_3P 3 
+#define MODE_4P 4 u32 debug_mb_offset; u32 phymod_dbg_mb_offset; struct couple_mode_teaming cmt; @@ -9248,9 +10077,11 @@ struct public_func { #define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff #define DRV_ID_PDA_COMP_VER_SHIFT 0 +#define LOAD_REQ_HSI_VERSION 2 #define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 #define DRV_ID_MCP_HSI_VER_SHIFT 16 -#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT) +#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ + DRV_ID_MCP_HSI_VER_SHIFT) #define DRV_ID_DRV_TYPE_MASK 0x7f000000 #define DRV_ID_DRV_TYPE_SHIFT 24 @@ -9345,6 +10176,7 @@ enum resource_id_enum { RESOURCE_NUM_RSS_ENGINES_E = 14, RESOURCE_LL2_QUEUE_E = 15, RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_BDQ_E = 17, RESOURCE_MAX_NUM, RESOURCE_NUM_INVALID = 0xFFFFFFFF }; @@ -9362,6 +10194,46 @@ struct resource_info { #define RESOURCE_ELEMENT_STRICT (1 << 0) }; +#define DRV_ROLE_NONE 0 +#define DRV_ROLE_PREBOOT 1 +#define DRV_ROLE_OS 2 +#define DRV_ROLE_KDUMP 3 + +struct load_req_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_REQ_ROLE_MASK 0x000000FF +#define LOAD_REQ_ROLE_SHIFT 0 +#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 +#define LOAD_REQ_LOCK_TO_SHIFT 8 +#define LOAD_REQ_LOCK_TO_DEFAULT 0 +#define LOAD_REQ_LOCK_TO_NONE 255 +#define LOAD_REQ_FORCE_MASK 0x000F0000 +#define LOAD_REQ_FORCE_SHIFT 16 +#define LOAD_REQ_FORCE_NONE 0 +#define LOAD_REQ_FORCE_PF 1 +#define LOAD_REQ_FORCE_ALL 2 +#define LOAD_REQ_FLAGS0_MASK 0x00F00000 +#define LOAD_REQ_FLAGS0_SHIFT 20 +#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) +}; + +struct load_rsp_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_RSP_ROLE_MASK 0x000000FF +#define LOAD_RSP_ROLE_SHIFT 0 +#define LOAD_RSP_HSI_MASK 0x0000FF00 +#define LOAD_RSP_HSI_SHIFT 8 +#define LOAD_RSP_FLAGS0_MASK 0x000F0000 +#define LOAD_RSP_FLAGS0_SHIFT 16 +#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) +}; + union drv_union_data { u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; struct mcp_mac wol_mac; @@ -9393,6 +10265,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_LOAD_REQ 0x10000000 #define DRV_MSG_CODE_LOAD_DONE 0x11000000 #define DRV_MSG_CODE_INIT_HW 0x12000000 +#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 #define DRV_MSG_CODE_INIT_PHY 0x22000000 @@ -9405,12 +10278,14 @@ struct public_drv_mb { #define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 +#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 +#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 -#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 +#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 #define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 @@ -9436,6 +10311,33 @@ struct public_drv_mb { #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 +#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 + +#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F +#define RESOURCE_CMD_REQ_RESC_SHIFT 0 +#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 +#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 +#define RESOURCE_OPCODE_REQ 1 +#define RESOURCE_OPCODE_REQ_WO_AGING 2 +#define RESOURCE_OPCODE_REQ_W_AGING 3 
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
 #define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
 #define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -9524,12 +10426,16 @@ struct public_drv_mb {
 	u32 fw_mb_header;
 #define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
 #define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
 #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
 #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
 #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
 #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -9549,6 +10455,10 @@ struct public_drv_mb {
 #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
 
 	u32 fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
 
 	/* get pf rdma protocol command response */
 #define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
@@ -9659,6 +10569,8 @@ struct nvm_cfg1_glob {
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+
 	u32 e_lane_cfg1;
 	u32 e_lane_cfg2;
 	u32 f_lane_cfg1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 899cad7f97ea41a5b8dc5e1aaccd68b068593cae..a05feb38c6eebc778f1471a950e4c59d1ddba092 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -58,6 +58,7 @@ struct qed_ptt {
 	struct list_head list_entry;
 	unsigned int idx;
 	struct pxp_ptt_entry pxp;
+	u8 hwfn_id;
 };
 
 struct qed_ptt_pool {
@@ -79,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
 		p_pool->ptts[i].idx = i;
 		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
 		p_pool->ptts[i].pxp.pretend.control = 0;
+		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
 		if (i >= RESERVED_PTT_MAX)
 			list_add(&p_pool->ptts[i].list_entry,
 				 &p_pool->free_list);
@@ -193,6 +195,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
 	offset = hw_addr - win_hw_addr;
 
+	if (p_ptt->hwfn_id != p_hwfn->my_id)
+		DP_NOTICE(p_hwfn,
+			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
 	/* Verify the address is within the window */
 	if (hw_addr < win_hw_addr ||
 	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -800,55 +807,3 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
 	return rc;
 }
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
-		  enum protocol_type proto, union qed_qm_pq_params
*p_params) -{ - u16 pq_id = 0; - - if ((proto == PROTOCOLID_CORE || - proto == PROTOCOLID_ETH || - proto == PROTOCOLID_ISCSI || - proto == PROTOCOLID_ROCE) && !p_params) { - DP_NOTICE(p_hwfn, - "Protocol %d received NULL PQ params\n", proto); - return 0; - } - - switch (proto) { - case PROTOCOLID_CORE: - if (p_params->core.tc == LB_TC) - pq_id = p_hwfn->qm_info.pure_lb_pq; - else if (p_params->core.tc == OOO_LB_TC) - pq_id = p_hwfn->qm_info.ooo_pq; - else - pq_id = p_hwfn->qm_info.offload_pq; - break; - case PROTOCOLID_ETH: - pq_id = p_params->eth.tc; - if (p_params->eth.is_vf) - pq_id += p_hwfn->qm_info.vf_queues_offset + - p_params->eth.vf_id; - break; - case PROTOCOLID_ISCSI: - if (p_params->iscsi.q_idx == 1) - pq_id = p_hwfn->qm_info.pure_ack_pq; - break; - case PROTOCOLID_ROCE: - if (p_params->roce.dcqcn) - pq_id = p_params->roce.qpid; - else - pq_id = p_hwfn->qm_info.offload_pq; - if (pq_id > p_hwfn->qm_info.num_pf_rls) - pq_id = p_hwfn->qm_info.offload_pq; - break; - case PROTOCOLID_FCOE: - pq_id = p_hwfn->qm_info.offload_pq; - break; - default: - pq_id = 0; - } - - pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ); - - return pq_id; -} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 9277264d2e6552a92a9ca88853501b80763a5dbb..f2505c691c264198e73ce946cfa5933b63292e86 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -297,9 +297,6 @@ union qed_qm_pq_params { } roce; }; -u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, - enum protocol_type proto, union qed_qm_pq_params *params); - int qed_init_fw_data(struct qed_dev *cdev, const u8 *fw_data); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index d891a68526950609f9efbe75ecacfb40ce49b97a..67200c5498ab01d3911735f25e6ccea05aed200e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, { u32 qm_line_crd; - /* In A0 - Limit the size of pbf queue so that only 511 commands with - * the minimum size of 4 (FCoE minimum size) - */ - bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev); - - if (is_bb_a0) - cmdq_lines = min_t(u32, cmdq_lines, 1022); qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), (u32)cmdq_lines); @@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init( u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; u16 last_pq_group = (p_params->start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE; - bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev); u16 i, pq_id, pq_group; /* a bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; - u32 tx_pq_vf_mask_width = is_bb_a0 ? 
32 : QM_PF_QUEUE_GROUP_SIZE; - u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width; + u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); u32 mem_addr_4kb = base_mem_addr_4kb; @@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init( bool is_vf_pq = (i >= p_params->num_pf_pqs); struct qm_rf_pq_map tx_pq_map; + bool rl_valid = p_params->pq_params[i].rl_valid && + (p_params->pq_params[i].vport_id < + MAX_QM_GLOBAL_RLS); + /* update first Tx PQ of VPORT/TC */ u8 vport_id_in_pf = p_params->pq_params[i].vport_id - p_params->start_vport; @@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init( (p_params->pf_id << QM_WFQ_VP_PQ_PF_SHIFT)); } + + if (p_params->pq_params[i].rl_valid && !rl_valid) + DP_NOTICE(p_hwfn, + "Invalid VPORT ID for rate limiter configuration"); /* fill PQ map entry */ memset(&tx_pq_map, 0, sizeof(tx_pq_map)); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, - p_params->pq_params[i].rl_valid ? 1 : 0); + SET_FIELD(tx_pq_map.reg, + QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, - p_params->pq_params[i].rl_valid ? + rl_valid ? p_params->pq_params[i].vport_id : 0); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, @@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init( /* if PQ is associated with a VF, add indication * to PQ VF mask */ - tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |= - (1 << (pq_id % tx_pq_vf_mask_width)); + tx_pq_vf_mask[pq_id / + QM_PF_QUEUE_GROUP_SIZE] |= + BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE)); mem_addr_4kb += vport_pq_mem_4kb; } else { mem_addr_4kb += pq_mem_4kb; @@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, if (p_params->pf_id < MAX_NUM_PFS_BB) crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET; else - crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET + - (p_params->pf_id % MAX_NUM_PFS_BB); + crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET; + crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB; inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { @@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, QM_WFQ_CRD_REG_SIGN_BIT); } - STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, - inc_val); STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, + inc_val); return 0; } @@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, { u8 i, vport_id; + if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) { + DP_NOTICE(p_hwfn, + "Invalid VPORT ID for rate limiter configuration"); + return -1; + } + /* go over all PF VPORTs */ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); @@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, { u32 inc_val = QM_RL_INC_VAL(vport_rl); + if (vport_id >= MAX_QM_GLOBAL_RLS) { + DP_NOTICE(p_hwfn, + "Invalid VPORT ID for rate limiter configuration"); + return -1; + } + if (inc_val > QM_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration"); return -1; @@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, 
		      eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* comp ver */
-	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
 	/* EDPM with geneve tunnel not supported in BB_B0 */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
@@ -955,3 +961,132 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
 	       ip_geneve_enable ? 1 : 0);
 }
+
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt, u16 pf_id)
+{
+	union gft_cam_line_union camline;
+	struct gft_ram_line ramline;
+	u32 *p_ramline, i;
+
+	p_ramline = (u32 *)&ramline;
+
+	/* stop using GFT logic */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+	memset(&camline, 0, sizeof(union gft_cam_line_union));
+	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+	       camline.cam_line_mapped.camline);
+	memset(&ramline, 0, sizeof(ramline));
+
+	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
+		u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
+
+		hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
+
+		qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
+	}
+}
+
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			     u16 pf_id, bool tcp, bool udp,
+			     bool ipv4, bool ipv6)
+{
+	u32 rfs_cm_hdr_event_id, *p_ramline;
+	union gft_cam_line_union camline;
+	struct gft_ram_line ramline;
+	int i;
+
+	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+	p_ramline = (u32 *)&ramline;
+
+	if (!ipv6 && !ipv4)
+		DP_NOTICE(p_hwfn,
+			  "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6");
+	if (!tcp && !udp)
+		DP_NOTICE(p_hwfn,
+			  "set_rfs_mode_enable: must accept at least one of - udp or tcp");
+
+	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
+	/* Configure registers for RFS mode */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+	camline.cam_line_mapped.camline = 0;
+
+	/* mark the CAM line as valid */
+	SET_FIELD(camline.cam_line_mapped.camline,
+		  GFT_CAM_LINE_MAPPED_VALID, 1);
+
+	/* filters are per PF
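+	 * (each PF owns one CAM line, written at PRS_REG_GFT_CAM +
+	 * CAM_LINE_SIZE * pf_id; the PF_ID field below is matched exactly,
+	 * so the profile only applies to this PF's traffic)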
*/ + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1); + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + if (!(tcp && udp)) { + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1); + if (tcp) + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, + GFT_PROFILE_TCP_PROTOCOL); + else + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, + GFT_PROFILE_UDP_PROTOCOL); + } + + if (!(ipv4 && ipv6)) { + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); + if (ipv4) + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_IP_VERSION, + GFT_PROFILE_IPV4); + else + SET_FIELD(camline.cam_line_mapped.camline, + GFT_CAM_LINE_MAPPED_IP_VERSION, + GFT_PROFILE_IPV6); + } + + /* write characteristics to cam */ + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, + camline.cam_line_mapped.camline); + camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt, + PRS_REG_GFT_CAM + + CAM_LINE_SIZE * pf_id); + + /* write line to RAM - compare to filter 4 tuple */ + ramline.low32bits = 0; + ramline.high32bits = 0; + SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1); + + /* each iteration write to reg */ + for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + + i * REG_SIZE, *(p_ramline + i)); + + /* set default profile so that no filter match will happen */ + ramline.low32bits = 0xffff; + ramline.high32bits = 0xffff; + + for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) + qed_wr(p_hwfn, p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * + PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE, + *(p_ramline + i)); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 243b64e0d4dc3ed36f92e570022aa68c7af58901..4a2e7be5bf7210acc93f3ded8d20e1240e3aa6ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) } /* First Dword contains metadata and should be skipped */ - buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); + buf_hdr = (struct bin_buffer_hdr *)data; offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; fw->fw_ver_info = (struct fw_ver_info *)(data + offset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 84310b60849b4881557cfd62761549a1a182f2d9..0ed24d6e6c6520450ed05b50e8486e54b712a354 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -2500,8 +2500,9 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, /* Configure pi coalescing if set */ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + u8 num_tc = p_hwfn->hw_info.num_hw_tc; u8 timeset, timer_res; - u8 num_tc = 1, i; + u8 i; /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 098766f7fe88a6e0a131712330cfa3b144c32738..339c91dfa658e700a570ba55408c9c1a4a445fb6 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -181,6 +181,15 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_params = &p_hwfn->pf_params.iscsi_pf_params;
 	p_queue = &p_init->q_params;
 
+	/* Sanity */
+	if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+		DP_ERR(p_hwfn,
+		       "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+		       p_params->num_queues,
+		       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+		return -EINVAL;
+	}
+
 	SET_FIELD(p_init->hdr.flags,
 		  ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
 	p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
@@ -216,7 +225,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 		p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
 	}
 
-	p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+	p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
 
 	DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
 		       p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -270,11 +279,10 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 	struct tcp_offload_params *p_tcp = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
-	union qed_qm_pq_params pq_params;
-	u16 pq0_id = 0, pq1_id = 0;
 	dma_addr_t r2tq_pbl_addr;
 	dma_addr_t xhq_pbl_addr;
 	dma_addr_t uhq_pbl_addr;
+	u16 physical_q;
 	int rc = 0;
 	u32 dval;
 	u16 wval;
@@ -297,16 +305,14 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 	p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
 
 	/* Transmission PQ is the first of the PF */
-	memset(&pq_params, 0, sizeof(pq_params));
-	pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
-	p_conn->physical_q0 = cpu_to_le16(pq0_id);
-	p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
+	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	p_conn->physical_q0 = cpu_to_le16(physical_q);
+	p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
 
 	/* iSCSI Pure-ACK PQ */
-	pq_params.iscsi.q_idx = 1;
-	pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
-	p_conn->physical_q1 = cpu_to_le16(pq1_id);
-	p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
+	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+	p_conn->physical_q1 = cpu_to_le16(physical_q);
+	p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
 
 	p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
 	SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
@@ -593,21 +599,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
 static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
 						    u8 bdq_id)
 {
-	u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
-	return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
-			     MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
-							     bdq_id);
+	if (RESC_NUM(p_hwfn, QED_BDQ)) {
+		return (u8 __iomem *)p_hwfn->regview +
+		       GTT_BAR0_MAP_REG_MSDM_RAM +
+		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+								  QED_BDQ),
+						       bdq_id);
+	} else {
+		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+		return NULL;
+	}
 }
 
 static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
 						      u8 bdq_id)
 {
-	u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
-	return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
-			     TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
-							     bdq_id);
+	if (RESC_NUM(p_hwfn, QED_BDQ)) {
+		return (u8 __iomem *)p_hwfn->regview +
+		       GTT_BAR0_MAP_REG_TSDM_RAM +
+		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+								  QED_BDQ),
+						       bdq_id);
+	} else
{ + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } } static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, @@ -857,6 +873,8 @@ static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt); p_stats->iscsi_rx_packet_cnt = HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt); + p_stats->iscsi_rx_new_ooo_isle_events_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt); p_stats->iscsi_cmdq_threshold_cnt = le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt); p_stats->iscsi_rq_threshold_cnt = @@ -1003,6 +1021,8 @@ static int qed_fill_iscsi_dev_info(struct qed_dev *cdev, info->secondary_bdq_rq_addr = qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ); + return rc; } @@ -1304,6 +1324,26 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats); } +void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, + struct qed_mcp_iscsi_stats *stats) +{ + struct qed_iscsi_stats proto_stats; + + /* Retrieve FW statistics */ + memset(&proto_stats, 0, sizeof(proto_stats)); + if (qed_iscsi_stats(cdev, &proto_stats)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, + "Failed to collect ISCSI statistics\n"); + return; + } + + /* Translate FW statistics into struct */ + stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt; + stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt; + stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt; + stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt; +} + static const struct qed_iscsi_ops qed_iscsi_ops_pass = { .common = &qed_common_ops_pass, .ll2 = &qed_ll2_ops_pass, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 20c187f4ed0b8aa8ff2125633f81aa19a1ca546b..ae98f772cbc0e0b3542d693252aaa0c066cde864 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -64,13 +64,25 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn, void qed_iscsi_free(struct qed_hwfn *p_hwfn, struct qed_iscsi_info *p_iscsi_info); + +/** + * @brief - Fills provided statistics struct with statistics. + * + * @param cdev + * @param stats - points to struct that will be filled with statistics. 
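+ *
+ * A minimal usage sketch (hypothetical caller; on a collection failure the
+ * struct is left untouched, so zero-initialize it first):
+ *
+ *	struct qed_mcp_iscsi_stats stats = {};
+ *
+ *	qed_get_protocol_stats_iscsi(cdev, &stats);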
+ */ +void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, + struct qed_mcp_iscsi_stats *stats); #else /* IS_ENABLED(CONFIG_QED_ISCSI) */ static inline struct qed_iscsi_info *qed_iscsi_alloc( struct qed_hwfn *p_hwfn) { return NULL; } static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn, struct qed_iscsi_info *p_iscsi_info) {} static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn, - struct qed_iscsi_info *p_iscsi_info) {} + struct qed_iscsi_info *p_iscsi_info) {} +static inline void +qed_get_protocol_stats_iscsi(struct qed_dev *cdev, + struct qed_mcp_iscsi_stats *stats) {} #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index df932be5a4e5aa1e47b16530d51a5dcf78f15706..746fed4099c8881ec839772be8a61bde12349d00 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -938,15 +938,12 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell) { - union qed_qm_pq_params pq_params; int rc; - memset(&pq_params, 0, sizeof(pq_params)); rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, pbl_size, - qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, - &pq_params)); + qed_get_cm_pq_idx_mcos(p_hwfn, tc)); if (rc) return rc; @@ -1470,13 +1467,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, memset(&pstats, 0, sizeof(pstats)); qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); - p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); - p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); - p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); - p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); - p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); - p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); - p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); + p_stats->common.tx_ucast_bytes += + HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->common.tx_mcast_bytes += + HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->common.tx_bcast_bytes += + HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->common.tx_ucast_pkts += + HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->common.tx_mcast_pkts += + HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->common.tx_bcast_pkts += + HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->common.tx_err_drop_pkts += + HILO_64_REGPAIR(pstats.error_drop_pkts); } static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, @@ -1502,10 +1506,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, memset(&tstats, 0, sizeof(tstats)); qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); - p_stats->mftag_filter_discards += - HILO_64_REGPAIR(tstats.mftag_filter_discard); - p_stats->mac_filter_discards += - HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + p_stats->common.mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + p_stats->common.mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); } static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, @@ -1539,12 +1543,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, memset(&ustats, 0, sizeof(ustats)); qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); - p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); - p_stats->rx_mcast_bytes += 
HILO_64_REGPAIR(ustats.rcv_mcast_bytes); - p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); - p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); - p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); - p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); + p_stats->common.rx_ucast_bytes += + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->common.rx_mcast_bytes += + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->common.rx_bcast_bytes += + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); } static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, @@ -1578,23 +1585,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, memset(&mstats, 0, sizeof(mstats)); qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); - p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); - p_stats->packet_too_big_discard += - HILO_64_REGPAIR(mstats.packet_too_big_discard); - p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); - p_stats->tpa_coalesced_pkts += - HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); - p_stats->tpa_coalesced_events += - HILO_64_REGPAIR(mstats.tpa_coalesced_events); - p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); - p_stats->tpa_coalesced_bytes += - HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); + p_stats->common.no_buff_discards += + HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->common.packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->common.tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + p_stats->common.tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + p_stats->common.tpa_aborts_num += + HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->common.tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *p_stats) { + struct qed_eth_stats_common *p_common = &p_stats->common; struct port_stats port_stats; int j; @@ -1605,54 +1615,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, offsetof(struct public_port, stats), sizeof(port_stats)); - p_stats->rx_64_byte_packets += port_stats.eth.r64; - p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; - p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; - p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; - p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; - p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; - p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; - p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; - p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; - p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; - p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; - p_stats->rx_crc_errors += port_stats.eth.rfcs; - p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; - p_stats->rx_pause_frames += port_stats.eth.rxpf; - p_stats->rx_pfc_frames += port_stats.eth.rxpp; - p_stats->rx_align_errors += port_stats.eth.raln; - p_stats->rx_carrier_errors += port_stats.eth.rfcr; - p_stats->rx_oversize_packets 
+= port_stats.eth.rovr; - p_stats->rx_jabbers += port_stats.eth.rjbr; - p_stats->rx_undersize_packets += port_stats.eth.rund; - p_stats->rx_fragments += port_stats.eth.rfrg; - p_stats->tx_64_byte_packets += port_stats.eth.t64; - p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; - p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; - p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; - p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; - p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; - p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; - p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; - p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; - p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; - p_stats->tx_pause_frames += port_stats.eth.txpf; - p_stats->tx_pfc_frames += port_stats.eth.txpp; - p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; - p_stats->tx_total_collisions += port_stats.eth.tncl; - p_stats->rx_mac_bytes += port_stats.eth.rbyte; - p_stats->rx_mac_uc_packets += port_stats.eth.rxuca; - p_stats->rx_mac_mc_packets += port_stats.eth.rxmca; - p_stats->rx_mac_bc_packets += port_stats.eth.rxbca; - p_stats->rx_mac_frames_ok += port_stats.eth.rxpok; - p_stats->tx_mac_bytes += port_stats.eth.tbyte; - p_stats->tx_mac_uc_packets += port_stats.eth.txuca; - p_stats->tx_mac_mc_packets += port_stats.eth.txmca; - p_stats->tx_mac_bc_packets += port_stats.eth.txbca; - p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf; + p_common->rx_64_byte_packets += port_stats.eth.r64; + p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_common->rx_crc_errors += port_stats.eth.rfcs; + p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_common->rx_pause_frames += port_stats.eth.rxpf; + p_common->rx_pfc_frames += port_stats.eth.rxpp; + p_common->rx_align_errors += port_stats.eth.raln; + p_common->rx_carrier_errors += port_stats.eth.rfcr; + p_common->rx_oversize_packets += port_stats.eth.rovr; + p_common->rx_jabbers += port_stats.eth.rjbr; + p_common->rx_undersize_packets += port_stats.eth.rund; + p_common->rx_fragments += port_stats.eth.rfrg; + p_common->tx_64_byte_packets += port_stats.eth.t64; + p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_common->tx_pause_frames += port_stats.eth.txpf; + p_common->tx_pfc_frames += port_stats.eth.txpp; + p_common->rx_mac_bytes += port_stats.eth.rbyte; + p_common->rx_mac_uc_packets += port_stats.eth.rxuca; + p_common->rx_mac_mc_packets += port_stats.eth.rxmca; + p_common->rx_mac_bc_packets += port_stats.eth.rxbca; + p_common->rx_mac_frames_ok += port_stats.eth.rxpok; + p_common->tx_mac_bytes += port_stats.eth.tbyte; + p_common->tx_mac_uc_packets += port_stats.eth.txuca; + p_common->tx_mac_mc_packets += port_stats.eth.txmca; + p_common->tx_mac_bc_packets += port_stats.eth.txbca; + p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; for (j = 0; j < 8; j++) { - p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; - 
p_stats->brb_discards += port_stats.brb.brb_discard[j];
+		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+		p_common->brb_discards += port_stats.brb.brb_discard[j];
+	}
+
+	if (QED_IS_BB(p_hwfn->cdev)) {
+		struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+		p_bb->rx_1519_to_1522_byte_packets +=
+		    port_stats.eth.u0.bb0.r1522;
+		p_bb->rx_1519_to_2047_byte_packets +=
+		    port_stats.eth.u0.bb0.r2047;
+		p_bb->rx_2048_to_4095_byte_packets +=
+		    port_stats.eth.u0.bb0.r4095;
+		p_bb->rx_4096_to_9216_byte_packets +=
+		    port_stats.eth.u0.bb0.r9216;
+		p_bb->rx_9217_to_16383_byte_packets +=
+		    port_stats.eth.u0.bb0.r16383;
+		p_bb->tx_1519_to_2047_byte_packets +=
+		    port_stats.eth.u1.bb1.t2047;
+		p_bb->tx_2048_to_4095_byte_packets +=
+		    port_stats.eth.u1.bb1.t4095;
+		p_bb->tx_4096_to_9216_byte_packets +=
+		    port_stats.eth.u1.bb1.t9216;
+		p_bb->tx_9217_to_16383_byte_packets +=
+		    port_stats.eth.u1.bb1.t16383;
+		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+	} else {
+		struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+		p_ah->rx_1519_to_max_byte_packets +=
+		    port_stats.eth.u0.ah0.r1519_to_max;
+		p_ah->tx_1519_to_max_byte_packets +=
+		    port_stats.eth.u1.ah1.t1519_to_max;
 	}
 }
@@ -1768,6 +1799,84 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
 	_qed_get_vport_stats(cdev, cdev->reset_stats);
 }
+
+static void
+qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			struct qed_arfs_config_params *p_cfg_params)
+{
+	if (p_cfg_params->arfs_enable) {
+		qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+					p_cfg_params->tcp, p_cfg_params->udp,
+					p_cfg_params->ipv4, p_cfg_params->ipv6);
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
+			   p_cfg_params->tcp ? "Enable" : "Disable",
+			   p_cfg_params->udp ? "Enable" : "Disable",
+			   p_cfg_params->ipv4 ? "Enable" : "Disable",
+			   p_cfg_params->ipv6 ? "Enable" : "Disable");
+	} else {
+		qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode: %s\n",
+		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+static int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+				struct qed_spq_comp_cb *p_cb,
+				dma_addr_t p_addr, u16 length, u16 qid,
+				u8 vport_id, bool b_is_add)
+{
+	struct rx_update_gft_filter_data *p_ramrod = NULL;
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	u16 abs_rx_q_id = 0;
+	u8 abs_vport_id = 0;
+	int rc = -EINVAL;
+
+	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	if (rc)
+		return rc;
+
+	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+	if (rc)
+		return rc;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	if (p_cb) {
+		init_data.comp_mode = QED_SPQ_MODE_CB;
+		init_data.p_comp_data = p_cb;
+	} else {
+		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+	}
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 ETH_RAMROD_GFT_UPDATE_FILTER,
+				 PROTOCOLID_ETH, &init_data);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rx_update_gft;
+	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
+	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
+	p_ramrod->vport_id = abs_vport_id;
+	p_ramrod->filter_type = RFS_FILTER_TYPE;
+	p_ramrod->filter_action = b_is_add ?
GFT_ADD_FILTER : GFT_DELETE_FILTER; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", + abs_vport_id, abs_rx_q_id, + b_is_add ? "Adding" : "Removing", (u64)p_addr, length); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { @@ -1898,7 +2007,11 @@ static int qed_start_vport(struct qed_dev *cdev, return rc; } - qed_hw_start_fastpath(p_hwfn); + rc = qed_hw_start_fastpath(p_hwfn); + if (rc) { + DP_ERR(cdev, "Failed to start VPORT fastpath\n"); + return rc; + } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Started V-PORT %d with MTU %d\n", @@ -2141,7 +2254,13 @@ static int qed_start_txq(struct qed_dev *cdev, #define QED_HW_STOP_RETRY_LIMIT (10) static int qed_fastpath_stop(struct qed_dev *cdev) { - qed_hw_stop_fastpath(cdev); + int rc; + + rc = qed_hw_stop_fastpath(cdev); + if (rc) { + DP_ERR(cdev, "Failed to stop Fastpath\n"); + return rc; + } return 0; } @@ -2166,31 +2285,46 @@ static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) static int qed_tunn_configure(struct qed_dev *cdev, struct qed_tunn_params *tunn_params) { - struct qed_tunn_update_params tunn_info; + struct qed_tunnel_info tunn_info; int i, rc; - if (IS_VF(cdev)) - return 0; - memset(&tunn_info, 0, sizeof(tunn_info)); - if (tunn_params->update_vxlan_port == 1) { - tunn_info.update_vxlan_udp_port = 1; - tunn_info.vxlan_udp_port = tunn_params->vxlan_port; + if (tunn_params->update_vxlan_port) { + tunn_info.vxlan_port.b_update_port = true; + tunn_info.vxlan_port.port = tunn_params->vxlan_port; } - if (tunn_params->update_geneve_port == 1) { - tunn_info.update_geneve_udp_port = 1; - tunn_info.geneve_udp_port = tunn_params->geneve_port; + if (tunn_params->update_geneve_port) { + tunn_info.geneve_port.b_update_port = true; + tunn_info.geneve_port.port = tunn_params->geneve_port; } for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_tunnel_info *tun; + + tun = &hwfn->cdev->tunnel; rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info, QED_SPQ_MODE_EBLOCK, NULL); - if (rc) return rc; + + if (IS_PF_SRIOV(hwfn)) { + u16 vxlan_port, geneve_port; + int j; + + vxlan_port = tun->vxlan_port.port; + geneve_port = tun->geneve_port.port; + + qed_for_each_vf(hwfn, j) { + qed_iov_bulletin_set_udp_ports(hwfn, j, + vxlan_port, + geneve_port); + } + + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } } return 0; @@ -2315,6 +2449,59 @@ static int qed_configure_filter(struct qed_dev *cdev, } } +static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_arfs_config_params arfs_config_params; + + memset(&arfs_config_params, 0, sizeof(arfs_config_params)); + arfs_config_params.tcp = true; + arfs_config_params.udp = true; + arfs_config_params.ipv4 = true; + arfs_config_params.ipv6 = true; + arfs_config_params.arfs_enable = en_searcher; + + qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, + &arfs_config_params); + return 0; +} + +static void +qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, + void *cookie, union event_ring_data *data, + u8 fw_return_code) +{ + struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; + void *dev = p_hwfn->cdev->ops_cookie; + + op->arfs_filter_op(dev, cookie, fw_return_code); +} + +static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, + dma_addr_t mapping, u16 length, + u16 vport_id, u16 rx_queue_id, + bool 
add_filter) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_spq_comp_cb cb; + int rc = -EINVAL; + + cb.function = qed_arfs_sp_response_handler; + cb.cookie = cookie; + + rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, + &cb, mapping, length, rx_queue_id, + vport_id, add_filter); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to issue a-RFS filter configuration\n"); + else + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, + "Successfully issued a-RFS filter configuration\n"); + + return rc; +} + static int qed_fp_cqe_completion(struct qed_dev *dev, u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { @@ -2356,6 +2543,8 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, .tunn_config = &qed_tunn_configure, + .ntuple_filter_config = &qed_ntuple_arfs_filter_config, + .configure_arfs_searcher = &qed_configure_arfs_searcher, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index e763abd334f64e08d3df948d6b78e7993f6ca251..6f44229899ebc06acdcfa44fb8dab7b6e625006f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -185,6 +185,14 @@ struct qed_filter_accept_flags { #define QED_ACCEPT_BCAST 0x20 }; +struct qed_arfs_config_params { + bool tcp; + bool udp; + bool ipv4; + bool ipv6; + bool arfs_enable; +}; + struct qed_sp_vport_update_params { u16 opaque_fid; u8 vport_id; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 0d3cef409c96d0849c7860e8f03a920b3b8966f1..09c86411918c1ea9dae48d64d5376a7157605426 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -597,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) u8 bd_flags = 0; if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) - SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1); + SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1); return bd_flags; } @@ -758,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, p_buffer->placement_offset; parse_flags = p_buffer->parse_flags; bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags); - SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1); - SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1); + SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); + SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, p_buffer->vlan, bd_flags, @@ -1090,7 +1090,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, struct core_tx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - union qed_qm_pq_params pq_params; u16 pq_id = 0, pbl_size; int rc = -EINVAL; @@ -1127,9 +1126,18 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); p_ramrod->pbl_size = cpu_to_le16(pbl_size); - memset(&pq_params, 0, sizeof(pq_params)); - pq_params.core.tc = p_ll2_conn->conn.tx_tc; - pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); + switch (p_ll2_conn->conn.tx_tc) { + case LB_TC: + pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); + break; + case OOO_LB_TC: + pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); + break; + default: + pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + break; + } + p_ramrod->qm_pq_id = 
cpu_to_le16(pq_id); switch (conn_type) { @@ -1400,13 +1408,21 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_info *p_ll2_conn; struct qed_ll2_rx_queue *p_rx; struct qed_ll2_tx_queue *p_tx; + struct qed_ptt *p_ptt; int rc = -EINVAL; u32 i, capacity; u8 qid; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); - if (!p_ll2_conn) - return -EINVAL; + if (!p_ll2_conn) { + rc = -EINVAL; + goto out; + } + p_rx = &p_ll2_conn->rx_queue; p_tx = &p_ll2_conn->tx_queue; @@ -1439,7 +1455,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) p_tx->cur_completing_frag_num = 0; *p_tx->p_fw_cons = 0; - qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); + if (rc) + goto out; qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle; p_ll2_conn->queue_id = qid; @@ -1453,26 +1471,28 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); if (rc) - return rc; + goto out; rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); if (rc) - return rc; + goto out; if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) - qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1); + qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { - qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + qed_llh_add_protocol_filter(p_hwfn, p_ptt, 0x8906, 0, QED_LLH_FILTER_ETHERTYPE); - qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + qed_llh_add_protocol_filter(p_hwfn, p_ptt, 0x8914, 0, QED_LLH_FILTER_ETHERTYPE); } +out: + qed_ptt_release(p_hwfn, p_ptt); return rc; } @@ -1591,33 +1611,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, p_tx->cur_send_frag_num++; } -static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2, - struct qed_ll2_tx_packet *p_curp, - u8 num_of_bds, - enum core_tx_dest tx_dest, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum core_roce_flavor_type type, - dma_addr_t first_frag, - u16 first_frag_len) +static void +qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2, + struct qed_ll2_tx_packet *p_curp, + u8 num_of_bds, + enum core_tx_dest tx_dest, + u16 vlan, + u8 bd_flags, + u16 l4_hdr_offset_w, + enum core_roce_flavor_type roce_flavor, + dma_addr_t first_frag, + u16 first_frag_len) { struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); struct core_tx_bd *start_bd = NULL; - u16 frag_idx; + u16 bd_data = 0, frag_idx; start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, cpu_to_le16(l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); - start_bd->bd_flags.as_bitfield = bd_flags; - start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << - CORE_TX_BD_FLAGS_START_BD_SHIFT; - SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); - SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); + bd_data |= bd_flags; + SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); + SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds); + SET_FIELD(bd_data, 
CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); + start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); DMA_REGPAIR_LE(start_bd->addr, first_frag); start_bd->nbytes = cpu_to_le16(first_frag_len); @@ -1642,9 +1663,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - (*p_bd)->bd_flags.as_bitfield = 0; + (*p_bd)->bd_data.as_bitfield = 0; (*p_bd)->bitfield1 = 0; - (*p_bd)->bitfield0 = 0; p_curp->bds_set[frag_idx].tx_frag = 0; p_curp->bds_set[frag_idx].frag_len = 0; } @@ -1823,23 +1843,30 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) { struct qed_ll2_info *p_ll2_conn = NULL; int rc = -EINVAL; + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); - if (!p_ll2_conn) - return -EINVAL; + if (!p_ll2_conn) { + rc = -EINVAL; + goto out; + } /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) - return rc; + goto out; qed_ll2_txq_flush(p_hwfn, connection_handle); } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) - return rc; + goto out; qed_ll2_rxq_flush(p_hwfn, connection_handle); } @@ -1847,14 +1874,16 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { - qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 0x8906, 0, QED_LLH_FILTER_ETHERTYPE); - qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 0x8914, 0, QED_LLH_FILTER_ETHERTYPE); } +out: + qed_ptt_release(p_hwfn, p_ptt); return rc; } @@ -2241,11 +2270,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) /* Request HW to calculate IP csum */ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) - flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT); + flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); if (skb_vlan_tag_present(skb)) { vlan = skb_vlan_tag_get(skb); - flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT); + flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); } rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index eef30a598b408e5ded4109860a0d4a37362fbaee..59992cf20d42eb1f0245ae5f8446b40ff9a4903c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,8 @@ #include "qed_dev_api.h" #include "qed_ll2.h" #include "qed_fcoe.h" +#include "qed_iscsi.h" + #include "qed_mcp.h" #include "qed_hw.h" #include "qed_selftest.h" @@ -227,10 +230,25 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info) { + struct qed_tunnel_info *tun = &cdev->tunnel; struct qed_ptt *ptt; memset(dev_info, 0, sizeof(struct qed_dev_info)); + if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && + tun->vxlan.b_mode_enabled) + dev_info->vxlan_enable = true; + + if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && + 
tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && + tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) + dev_info->gre_enable = true; + + if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && + tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && + tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) + dev_info->geneve_enable = true; + dev_info->num_hwfns = cdev->num_hwfns; dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; @@ -238,6 +256,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); + dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); if (IS_PF(cdev)) { @@ -588,6 +607,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn) return rc; } +void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u8 id = p_hwfn->my_id; + u32 int_mode; + + int_mode = cdev->int_params.out.int_mode; + if (int_mode == QED_INT_MODE_MSIX) + synchronize_irq(cdev->int_params.msix_table[id].vector); + else + synchronize_irq(cdev->pdev->irq); +} + static void qed_slowpath_irq_free(struct qed_dev *cdev) { int i; @@ -630,19 +662,6 @@ static int qed_nic_stop(struct qed_dev *cdev) return rc; } -static int qed_nic_reset(struct qed_dev *cdev) -{ - int rc; - - rc = qed_hw_reset(cdev); - if (rc) - return rc; - - qed_resc_free(cdev); - - return 0; -} - static int qed_nic_setup(struct qed_dev *cdev) { int rc, i; @@ -743,7 +762,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - cdev->num_hwfns; - if (!IS_ENABLED(CONFIG_QED_RDMA)) + if (!IS_ENABLED(CONFIG_QED_RDMA) || + QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE) return 0; for_each_hwfn(cdev, i) @@ -875,10 +895,12 @@ static void qed_update_pf_params(struct qed_dev *cdev, params->rdma_pf_params.num_qps = QED_ROCE_QPS; params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; /* divide by 3 the MRs to avoid MF ILT overflow */ - params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } + if (cdev->num_hwfns > 1 || IS_VF(cdev)) + params->eth_pf_params.num_arfs_filters = 0; + /* In case we might support RDMA, don't allow qede to be greedy * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. 
*/ @@ -900,11 +922,15 @@ static void qed_update_pf_params(struct qed_dev *cdev, static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { - struct qed_tunn_start_params tunn_info; + struct qed_drv_load_params drv_load_params; + struct qed_hw_init_params hw_init_params; struct qed_mcp_drv_version drv_version; + struct qed_tunnel_info tunn_info; const u8 *data = NULL; struct qed_hwfn *hwfn; +#ifdef CONFIG_RFS_ACCEL struct qed_ptt *p_ptt; +#endif int rc = -EINVAL; if (qed_iov_wq_start(cdev)) @@ -920,13 +946,18 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } - p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); - if (p_ptt) { - QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt; - } else { - DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n"); - goto err; +#ifdef CONFIG_RFS_ACCEL + if (cdev->num_hwfns == 1) { + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (p_ptt) { + QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; + } else { + DP_NOTICE(cdev, + "Failed to acquire PTT for aRFS\n"); + goto err; + } } +#endif } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -953,27 +984,47 @@ static int qed_slowpath_start(struct qed_dev *cdev, qed_dbg_pf_init(cdev); } - memset(&tunn_info, 0, sizeof(tunn_info)); - tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | - 1 << QED_MODE_L2GRE_TUNN | - 1 << QED_MODE_IPGRE_TUNN | - 1 << QED_MODE_L2GENEVE_TUNN | - 1 << QED_MODE_IPGENEVE_TUNN; - - tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; - tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; - tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; - /* Start the slowpath */ - rc = qed_hw_init(cdev, &tunn_info, true, - cdev->int_params.out.int_mode, - true, data); + memset(&hw_init_params, 0, sizeof(hw_init_params)); + memset(&tunn_info, 0, sizeof(tunn_info)); + tunn_info.vxlan.b_mode_enabled = true; + tunn_info.l2_gre.b_mode_enabled = true; + tunn_info.ip_gre.b_mode_enabled = true; + tunn_info.l2_geneve.b_mode_enabled = true; + tunn_info.ip_geneve.b_mode_enabled = true; + tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + hw_init_params.p_tunn = &tunn_info; + hw_init_params.b_hw_start = true; + hw_init_params.int_mode = cdev->int_params.out.int_mode; + hw_init_params.allow_npar_tx_switch = true; + hw_init_params.bin_fw_data = data; + + memset(&drv_load_params, 0, sizeof(drv_load_params)); + drv_load_params.is_crash_kernel = is_kdump_kernel(); + drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; + drv_load_params.avoid_eng_reset = false; + drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; + hw_init_params.p_drv_load_params = &drv_load_params; + + rc = qed_hw_init(cdev, &hw_init_params); if (rc) goto err2; DP_INFO(cdev, "HW initialization and function start completed successfully\n"); + if (IS_PF(cdev)) { + cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | + BIT(QED_MODE_L2GENEVE_TUNN) | + BIT(QED_MODE_IPGENEVE_TUNN) | + BIT(QED_MODE_L2GRE_TUNN) | + BIT(QED_MODE_IPGRE_TUNN)); + } + /* Allocate LL2 interface if needed */ if (QED_LEADING_HWFN(cdev)->using_ll2) { rc = qed_ll2_alloc_if(cdev); @@ -1014,9 +1065,12 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (IS_PF(cdev)) release_firmware(cdev->firmware); - if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt) +#ifdef CONFIG_RFS_ACCEL + if (IS_PF(cdev) && 
(cdev->num_hwfns == 1) && + QED_LEADING_HWFN(cdev)->p_arfs_ptt) qed_ptt_release(QED_LEADING_HWFN(cdev), - QED_LEADING_HWFN(cdev)->p_ptp_ptt); + QED_LEADING_HWFN(cdev)->p_arfs_ptt); +#endif qed_iov_wq_stop(cdev, false); @@ -1031,8 +1085,11 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { - qed_ptt_release(QED_LEADING_HWFN(cdev), - QED_LEADING_HWFN(cdev)->p_ptp_ptt); +#ifdef CONFIG_RFS_ACCEL + if (cdev->num_hwfns == 1) + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_arfs_ptt); +#endif qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); @@ -1042,7 +1099,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) } qed_disable_msix(cdev); - qed_nic_reset(cdev); + + qed_resc_free(cdev); qed_iov_wq_stop(cdev, true); @@ -1653,13 +1711,18 @@ void qed_get_protocol_stats(struct qed_dev *cdev, switch (type) { case QED_MCP_LAN_STATS: qed_get_vport_stats(cdev, ð_stats); - stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts; - stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; + stats->lan_stats.ucast_rx_pkts = + eth_stats.common.rx_ucast_pkts; + stats->lan_stats.ucast_tx_pkts = + eth_stats.common.tx_ucast_pkts; stats->lan_stats.fcs_err = -1; break; case QED_MCP_FCOE_STATS: qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); break; + case QED_MCP_ISCSI_STATS: + qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); + break; default: DP_ERR(cdev, "Invalid protocol type = %d\n", type); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 87fde205149fdbf3181befd79ca62508b2daa388..7266b36a26555286d034451404628fc86dd962f4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } } +struct qed_mcp_cmd_elem { + struct list_head list; + struct qed_mcp_mb_params *p_mb_params; + u16 expected_seq_num; + bool b_is_completed; +}; + +/* Must be called while cmd_lock is acquired */ +static struct qed_mcp_cmd_elem * +qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn, + struct qed_mcp_mb_params *p_mb_params, + u16 expected_seq_num) +{ + struct qed_mcp_cmd_elem *p_cmd_elem = NULL; + + p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC); + if (!p_cmd_elem) + goto out; + + p_cmd_elem->p_mb_params = p_mb_params; + p_cmd_elem->expected_seq_num = expected_seq_num; + list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); +out: + return p_cmd_elem; +} + +/* Must be called while cmd_lock is acquired */ +static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn, + struct qed_mcp_cmd_elem *p_cmd_elem) +{ + list_del(&p_cmd_elem->list); + kfree(p_cmd_elem); +} + +/* Must be called while cmd_lock is acquired */ +static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn, + u16 seq_num) +{ + struct qed_mcp_cmd_elem *p_cmd_elem = NULL; + + list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) { + if (p_cmd_elem->expected_seq_num == seq_num) + return p_cmd_elem; + } + + return NULL; +} + int qed_mcp_free(struct qed_hwfn *p_hwfn) { if (p_hwfn->mcp_info) { + struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp; + kfree(p_hwfn->mcp_info->mfw_mb_cur); kfree(p_hwfn->mcp_info->mfw_mb_shadow); + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + list_for_each_entry_safe(p_cmd_elem, + p_tmp, + &p_hwfn->mcp_info->cmd_list, list) { + qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + } + 
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); } + kfree(p_hwfn->mcp_info); return 0; @@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & DRV_PULSE_SEQ_MASK; - p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); return 0; } @@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) goto err; p_info = p_hwfn->mcp_info; + /* Initialize the MFW spinlock */ + spin_lock_init(&p_info->cmd_lock); + spin_lock_init(&p_info->link_lock); + + INIT_LIST_HEAD(&p_info->cmd_list); + if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { DP_NOTICE(p_hwfn, "MCP is not initialized\n"); /* Do not free mcp_info here, since public_base indicate that @@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; - /* Initialize the MFW spinlock */ - spin_lock_init(&p_info->lock); - spin_lock_init(&p_info->link_lock); - return 0; err: @@ -201,68 +262,39 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return -ENOMEM; } -/* Locks the MFW mailbox of a PF to ensure a single access. - * The lock is achieved in most cases by holding a spinlock, causing other - * threads to wait till a previous access is done. - * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single - * access is achieved by setting a blocking flag, which will fail other - * competing contexts to send their mailboxes. - */ -static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd) +static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { - spin_lock_bh(&p_hwfn->mcp_info->lock); + u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); - /* The spinlock shouldn't be acquired when the mailbox command is - * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel - * pending [UN]LOAD_REQ command of another PF together with a spinlock - * (i.e. interrupts are disabled) - can lead to a deadlock. - * It is assumed that for a single PF, no other mailbox commands can be - * sent from another context while sending LOAD_REQ, and that any - * parallel commands to UNLOAD_REQ can be cancelled. + /* Use MCP history register to check if MCP reset occurred between init + * time and now. */ - if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE) - p_hwfn->mcp_info->block_mb_sending = false; - - if (p_hwfn->mcp_info->block_mb_sending) { - DP_NOTICE(p_hwfn, - "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. 
Aborting.\n", - cmd); - spin_unlock_bh(&p_hwfn->mcp_info->lock); - return -EBUSY; - } + if (p_hwfn->mcp_info->mcp_hist != generic_por_0) { + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n", + p_hwfn->mcp_info->mcp_hist, generic_por_0); - if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) { - p_hwfn->mcp_info->block_mb_sending = true; - spin_unlock_bh(&p_hwfn->mcp_info->lock); + qed_load_mcp_offsets(p_hwfn, p_ptt); + qed_mcp_cmd_port_init(p_hwfn, p_ptt); } - - return 0; -} - -static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd) -{ - if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) - spin_unlock_bh(&p_hwfn->mcp_info->lock); } int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; - u8 delay = CHIP_MCP_RESP_ITER_US; - u32 org_mcp_reset_seq, cnt = 0; + u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; int rc = 0; - /* Ensure that only a single thread is accessing the mailbox at a - * certain time. - */ - rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET); - if (rc != 0) - return rc; + /* Ensure that only a single thread is accessing the mailbox */ + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); - /* Set drv command along with the updated sequence */ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); - DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, - (DRV_MSG_CODE_MCP_RESET | seq)); + + /* Set drv command along with the updated sequence */ + qed_mcp_reread_offsets(p_hwfn, p_ptt); + seq = ++p_hwfn->mcp_info->drv_mb_seq; + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq)); do { /* Wait for MFW response */ @@ -281,72 +313,207 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) rc = -EAGAIN; } - qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); return rc; } -static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 cmd, - u32 param, - u32 *o_mcp_resp, - u32 *o_mcp_param) +/* Must be called while cmd_lock is acquired */ +static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn) { - u8 delay = CHIP_MCP_RESP_ITER_US; - u32 seq, cnt = 1, actual_mb_seq; - int rc = 0; - - /* Get actual driver mailbox sequence */ - actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & - DRV_MSG_SEQ_NUMBER_MASK; + struct qed_mcp_cmd_elem *p_cmd_elem; - /* Use MCP history register to check if MCP reset occurred between - * init time and now. + /* There is at most one pending command at a certain time, and if it + * exists - it is placed at the HEAD of the list. 
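+	 * (qed_mcp_cmd_add_elem() uses list_add(), which inserts at the
+	 * list head, so the newest command is always the first entry)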
*/ - if (p_hwfn->mcp_info->mcp_hist != - qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { - DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n"); - qed_load_mcp_offsets(p_hwfn, p_ptt); - qed_mcp_cmd_port_init(p_hwfn, p_ptt); + if (!list_empty(&p_hwfn->mcp_info->cmd_list)) { + p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list, + struct qed_mcp_cmd_elem, list); + return !p_cmd_elem->b_is_completed; } - seq = ++p_hwfn->mcp_info->drv_mb_seq; - /* Set drv param */ - DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); + return false; +} - /* Set drv command along with the updated sequence */ - DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); +/* Must be called while cmd_lock is acquired */ +static int +qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params *p_mb_params; + struct qed_mcp_cmd_elem *p_cmd_elem; + u32 mcp_resp; + u16 seq_num; + + mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); + seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK); + + /* Return if no new non-handled response has been received */ + if (seq_num != p_hwfn->mcp_info->drv_mb_seq) + return -EAGAIN; + + p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num); + if (!p_cmd_elem) { + DP_ERR(p_hwfn, + "Failed to find a pending mailbox cmd that expects sequence number %d\n", + seq_num); + return -EINVAL; + } + + p_mb_params = p_cmd_elem->p_mb_params; + + /* Get the MFW response along with the sequence number */ + p_mb_params->mcp_resp = mcp_resp; + + /* Get the MFW param */ + p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); + + /* Get the union data */ + if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) { + u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, + union_data); + qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, + union_data_addr, p_mb_params->data_dst_size); + } + + p_cmd_elem->b_is_completed = true; + + return 0; +} + +/* Must be called while cmd_lock is acquired */ +static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params, + u16 seq_num) +{ + union drv_union_data union_data; + u32 union_data_addr; + + /* Set the union data */ + union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, union_data); + memset(&union_data, 0, sizeof(union_data)); + if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size) + memcpy(&union_data, p_mb_params->p_data_src, + p_mb_params->data_src_size); + qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, + sizeof(union_data)); + + /* Set the drv param */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param); + + /* Set the drv command along with the sequence number */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num)); DP_VERBOSE(p_hwfn, QED_MSG_SP, - "wrote command (%x) to MFW MB param 0x%08x\n", - (cmd | seq), param); + "MFW mailbox: command 0x%08x param 0x%08x\n", + (p_mb_params->cmd | seq_num), p_mb_params->param); +} +static int +_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params, + u32 max_retries, u32 delay) +{ + struct qed_mcp_cmd_elem *p_cmd_elem; + u32 cnt = 0; + u16 seq_num; + int rc = 0; + + /* Wait until the mailbox is non-occupied */ do { - /* Wait for MFW response */ + /* Exit the loop if there is no pending command, or if the + * pending command is completed during this iteration. + * The spinlock stays locked until the command is sent. 
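+	 * Both breaks below therefore leave cmd_lock held; only the
+	 * error path releases it before returning.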
+ */ + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + + if (!qed_mcp_has_pending_cmd(p_hwfn)) + break; + + rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); + if (!rc) + break; + else if (rc != -EAGAIN) + goto err; + + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); udelay(delay); - *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); + } while (++cnt < max_retries); - /* Give the FW up to 5 second (500*10ms) */ - } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) && - (cnt++ < QED_DRV_MB_MAX_RETRIES)); + if (cnt >= max_retries) { + DP_NOTICE(p_hwfn, + "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return -EAGAIN; + } - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "[after %d ms] read (%x) seq is (%x) from FW MB\n", - cnt * delay, *o_mcp_resp, seq); - - /* Is this a reply to our command? */ - if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) { - *o_mcp_resp &= FW_MSG_CODE_MASK; - /* Get the MCP param */ - *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); - } else { - /* FW BUG! */ - DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n", - cmd, param); - *o_mcp_resp = 0; - rc = -EAGAIN; + /* Send the mailbox command */ + qed_mcp_reread_offsets(p_hwfn, p_ptt); + seq_num = ++p_hwfn->mcp_info->drv_mb_seq; + p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num); + if (!p_cmd_elem) { + rc = -ENOMEM; + goto err; + } + + __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + /* Wait for the MFW response */ + do { + /* Exit the loop if the command is already completed, or if the + * command is completed during this iteration. + * The spinlock stays locked until the list element is removed. + */ + + udelay(delay); + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + + if (p_cmd_elem->b_is_completed) + break; + + rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); + if (!rc) + break; + else if (rc != -EAGAIN) + goto err; + + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + } while (++cnt < max_retries); + + if (cnt >= max_retries) { + DP_NOTICE(p_hwfn, + "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + return -EAGAIN; } + + qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", + p_mb_params->mcp_resp, + p_mb_params->mcp_param, + (cnt * delay) / 1000, (cnt * delay) % 1000); + + /* Clear the sequence number from the MFW response */ + p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; + + return 0; + +err: + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); return rc; } @@ -354,9 +521,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params) { - u32 union_data_addr; - - int rc; + size_t union_data_size = sizeof(union drv_union_data); + u32 max_retries = QED_DRV_MB_MAX_RETRIES; + u32 delay = CHIP_MCP_RESP_ITER_US; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -364,33 +531,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EBUSY; } - union_data_addr = p_hwfn->mcp_info->drv_mb_addr + - offsetof(struct public_drv_mb, union_data); - - /* Ensure that only a single thread is accessing the mailbox at a - * certain time. 
- */ - rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd); - if (rc) - return rc; - - if (p_mb_params->p_data_src != NULL) - qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, - p_mb_params->p_data_src, - sizeof(*p_mb_params->p_data_src)); - - rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd, - p_mb_params->param, &p_mb_params->mcp_resp, - &p_mb_params->mcp_param); - - if (p_mb_params->p_data_dst != NULL) - qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, - union_data_addr, - sizeof(*p_mb_params->p_data_dst)); - - qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd); + if (p_mb_params->data_src_size > union_data_size || + p_mb_params->data_dst_size > union_data_size) { + DP_ERR(p_hwfn, + "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n", + p_mb_params->data_src_size, + p_mb_params->data_dst_size, union_data_size); + return -EINVAL; + } - return rc; + return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, + delay); } int qed_mcp_cmd(struct qed_hwfn *p_hwfn, @@ -401,32 +552,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param) { struct qed_mcp_mb_params mb_params; - union drv_union_data data_src; int rc; memset(&mb_params, 0, sizeof(mb_params)); - memset(&data_src, 0, sizeof(data_src)); mb_params.cmd = cmd; mb_params.param = param; - /* In case of UNLOAD_DONE, set the primary MAC */ - if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) && - (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) { - u8 *p_mac = p_hwfn->cdev->wol_mac; - - data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; - data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | - p_mac[4] << 8 | p_mac[5]; - - DP_VERBOSE(p_hwfn, - (QED_MSG_SP | NETIF_MSG_IFDOWN), - "Setting WoL MAC: %pM --> [%08x,%08x]\n", - p_mac, data_src.wol_mac.mac_upper, - data_src.wol_mac.mac_lower); - - mb_params.p_data_src = &data_src; - } - rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; @@ -445,13 +576,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) { struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; int rc; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; - mb_params.p_data_dst = &union_data; + mb_params.p_data_dst = raw_data; + + /* Use the maximal value since the actual one is part of the response */ + mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; @@ -460,55 +595,413 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, *o_mcp_param = mb_params.mcp_param; *o_txn_size = *o_mcp_param; - memcpy(o_buf, &union_data.raw_data, *o_txn_size); + memcpy(o_buf, raw_data, *o_txn_size); return 0; } -int qed_mcp_load_req(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *p_load_code) +static bool +qed_mcp_can_force_load(u8 drv_role, + u8 exist_drv_role, + enum qed_override_force_load override_force_load) +{ + bool can_force_load = false; + + switch (override_force_load) { + case QED_OVERRIDE_FORCE_LOAD_ALWAYS: + can_force_load = true; + break; + case QED_OVERRIDE_FORCE_LOAD_NEVER: + can_force_load = false; + break; + default: + can_force_load = (drv_role == DRV_ROLE_OS && + exist_drv_role == DRV_ROLE_PREBOOT) || + (drv_role == DRV_ROLE_KDUMP && + exist_drv_role == DRV_ROLE_OS); + break; + } + + return can_force_load; +} + +static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + 
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0, + &resp, ¶m); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send cancel load request, rc = %d\n", rc); + + return rc; +} + +#define CONFIG_QEDE_BITMAP_IDX BIT(0) +#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1) +#define CONFIG_QEDR_BITMAP_IDX BIT(2) +#define CONFIG_QEDF_BITMAP_IDX BIT(4) +#define CONFIG_QEDI_BITMAP_IDX BIT(5) +#define CONFIG_QED_LL2_BITMAP_IDX BIT(6) + +static u32 qed_get_config_bitmap(void) +{ + u32 config_bitmap = 0x0; + + if (IS_ENABLED(CONFIG_QEDE)) + config_bitmap |= CONFIG_QEDE_BITMAP_IDX; + + if (IS_ENABLED(CONFIG_QED_SRIOV)) + config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX; + + if (IS_ENABLED(CONFIG_QED_RDMA)) + config_bitmap |= CONFIG_QEDR_BITMAP_IDX; + + if (IS_ENABLED(CONFIG_QED_FCOE)) + config_bitmap |= CONFIG_QEDF_BITMAP_IDX; + + if (IS_ENABLED(CONFIG_QED_ISCSI)) + config_bitmap |= CONFIG_QEDI_BITMAP_IDX; + + if (IS_ENABLED(CONFIG_QED_LL2)) + config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX; + + return config_bitmap; +} + +struct qed_load_req_in_params { + u8 hsi_ver; +#define QED_LOAD_REQ_HSI_VER_DEFAULT 0 +#define QED_LOAD_REQ_HSI_VER_1 1 + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u8 drv_role; + u8 timeout_val; + u8 force_cmd; + bool avoid_eng_reset; +}; + +struct qed_load_req_out_params { + u32 load_code; + u32 exist_drv_ver_0; + u32 exist_drv_ver_1; + u32 exist_fw_ver; + u8 exist_drv_role; + u8 mfw_hsi_ver; + bool drv_exists; +}; + +static int +__qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_load_req_in_params *p_in_params, + struct qed_load_req_out_params *p_out_params) { - struct qed_dev *cdev = p_hwfn->cdev; struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + u32 hsi_ver; int rc; + memset(&load_req, 0, sizeof(load_req)); + load_req.drv_ver_0 = p_in_params->drv_ver_0; + load_req.drv_ver_1 = p_in_params->drv_ver_1; + load_req.fw_ver = p_in_params->fw_ver; + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, + p_in_params->timeout_val); + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE, + p_in_params->force_cmd); + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, + p_in_params->avoid_eng_reset); + + hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ? 
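[Editor's sketch] __qed_mcp_load_req() below packs the role, lock timeout, force command and flags into load_req.misc0 via QED_MFW_SET_FIELD(), which token-pastes _MASK/_SHIFT macro pairs. A minimal user-space sketch of that pattern; the field layout here is invented for illustration and does not match the real LOAD_REQ_* masks:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: role in bits 0-7, lock timeout in bits 8-15. */
#define LOAD_REQ_ROLE_MASK      0x000000ff
#define LOAD_REQ_ROLE_SHIFT     0
#define LOAD_REQ_LOCK_TO_MASK   0x0000ff00
#define LOAD_REQ_LOCK_TO_SHIFT  8

#define MFW_SET_FIELD(var, field, val)                                  \
        do {                                                            \
                (var) &= ~(field ## _MASK);                             \
                (var) |= ((uint32_t)(val) << (field ## _SHIFT)) &       \
                         (field ## _MASK);                              \
        } while (0)

#define MFW_GET_FIELD(var, field)                                       \
        (((var) & (field ## _MASK)) >> (field ## _SHIFT))

int main(void)
{
        uint32_t misc0 = 0;

        MFW_SET_FIELD(misc0, LOAD_REQ_ROLE, 2);         /* e.g. a kdump role */
        MFW_SET_FIELD(misc0, LOAD_REQ_LOCK_TO, 254);
        printf("misc0=0x%08x role=%u timeout=%u\n", misc0,
               (unsigned)MFW_GET_FIELD(misc0, LOAD_REQ_ROLE),
               (unsigned)MFW_GET_FIELD(misc0, LOAD_REQ_LOCK_TO));
        return 0;
}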
+ DRV_ID_MCP_HSI_VER_CURRENT : + (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT); + memset(&mb_params, 0, sizeof(mb_params)); - /* Load Request */ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; - mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | - cdev->drv_type; - memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE); - mb_params.p_data_src = &union_data; - rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type; + mb_params.p_data_src = &load_req; + mb_params.data_src_size = sizeof(load_req); + mb_params.p_data_dst = &load_rsp; + mb_params.data_dst_size = sizeof(load_rsp); - /* if mcp fails to respond we must abort */ + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", + mb_params.param, + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE), + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); + + if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", + load_req.drv_ver_0, + load_req.drv_ver_1, + load_req.fw_ver, + load_req.misc0, + QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE), + QED_MFW_GET_FIELD(load_req.misc0, + LOAD_REQ_LOCK_TO), + QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE), + QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); + } + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) { - DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc); return rc; } - *p_load_code = mb_params.mcp_resp; + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load Response: resp 0x%08x\n", mb_params.mcp_resp); + p_out_params->load_code = mb_params.mcp_resp; + + if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 && + p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n", + load_rsp.drv_ver_0, + load_rsp.drv_ver_1, + load_rsp.fw_ver, + load_rsp.misc0, + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI), + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); + + p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; + p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; + p_out_params->exist_fw_ver = load_rsp.fw_ver; + p_out_params->exist_drv_role = + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); + p_out_params->mfw_hsi_ver = + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI); + p_out_params->drv_exists = + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & + LOAD_RSP_FLAGS0_DRV_EXISTS; + } + + return 0; +} + +static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn, + enum qed_drv_role drv_role, + u8 *p_mfw_drv_role) +{ + switch (drv_role) { + case QED_DRV_ROLE_OS: + *p_mfw_drv_role = DRV_ROLE_OS; + break; + case QED_DRV_ROLE_KDUMP: + *p_mfw_drv_role = DRV_ROLE_KDUMP; + break; + default: + DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role); + return -EINVAL; + } + + return 0; +} + +enum qed_load_req_force { + QED_LOAD_REQ_FORCE_NONE, + QED_LOAD_REQ_FORCE_PF, + QED_LOAD_REQ_FORCE_ALL, +}; + +static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, + + enum qed_load_req_force force_cmd, + u8 
*p_mfw_force_cmd) +{ + switch (force_cmd) { + case QED_LOAD_REQ_FORCE_NONE: + *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE; + break; + case QED_LOAD_REQ_FORCE_PF: + *p_mfw_force_cmd = LOAD_REQ_FORCE_PF; + break; + case QED_LOAD_REQ_FORCE_ALL: + *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL; + break; + } +} + +int qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_load_req_params *p_params) +{ + struct qed_load_req_out_params out_params; + struct qed_load_req_in_params in_params; + u8 mfw_drv_role, mfw_force_cmd; + int rc; + + memset(&in_params, 0, sizeof(in_params)); + in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT; + in_params.drv_ver_0 = QED_VERSION; + in_params.drv_ver_1 = qed_get_config_bitmap(); + in_params.fw_ver = STORM_FW_VERSION; + rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); + if (rc) + return rc; + + in_params.drv_role = mfw_drv_role; + in_params.timeout_val = p_params->timeout_val; + qed_get_mfw_force_cmd(p_hwfn, + QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd); + + in_params.force_cmd = mfw_force_cmd; + in_params.avoid_eng_reset = p_params->avoid_eng_reset; - /* If MFW refused (e.g. other port is in diagnostic mode) we - * must abort. This can happen in the following cases: - * - Other port is in diagnostic mode - * - Previously loaded function on the engine is not compliant with - * the requester. - * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION. - * - + memset(&out_params, 0, sizeof(out_params)); + rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); + if (rc) + return rc; + + /* First handle cases where another load request should/might be sent: + * - MFW expects the old interface [HSI version = 1] + * - MFW responds that a force load request is required */ - if (!(*p_load_code) || - ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) || - ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) || - ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) { - DP_ERR(p_hwfn, "MCP refused load request, aborting\n"); + if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { + DP_INFO(p_hwfn, + "MFW refused a load request due to HSI > 1. 
Resending with HSI = 1\n"); + + in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1; + memset(&out_params, 0, sizeof(out_params)); + rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); + if (rc) + return rc; + } else if (out_params.load_code == + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) { + if (qed_mcp_can_force_load(in_params.drv_role, + out_params.exist_drv_role, + p_params->override_force_load)) { + DP_INFO(p_hwfn, + "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n", + in_params.drv_role, in_params.fw_ver, + in_params.drv_ver_0, in_params.drv_ver_1, + out_params.exist_drv_role, + out_params.exist_fw_ver, + out_params.exist_drv_ver_0, + out_params.exist_drv_ver_1); + + qed_get_mfw_force_cmd(p_hwfn, + QED_LOAD_REQ_FORCE_ALL, + &mfw_force_cmd); + + in_params.force_cmd = mfw_force_cmd; + memset(&out_params, 0, sizeof(out_params)); + rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc) + return rc; + } else { + DP_NOTICE(p_hwfn, + "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n", + in_params.drv_role, in_params.fw_ver, + in_params.drv_ver_0, in_params.drv_ver_1, + out_params.exist_drv_role, + out_params.exist_fw_ver, + out_params.exist_drv_ver_0, + out_params.exist_drv_ver_1); + DP_NOTICE(p_hwfn, + "Avoid sending a force load request to prevent disruption of active PFs\n"); + + qed_mcp_cancel_load_req(p_hwfn, p_ptt); + return -EBUSY; + } + } + + /* Now handle the other types of responses. + * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not + * expected here after the additional revised load requests were sent. + */ + switch (out_params.load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + case FW_MSG_CODE_DRV_LOAD_PORT: + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 && + out_params.drv_exists) { + /* The role and fw/driver version match, but the PF is + * already loaded and has not been unloaded gracefully. + */ + DP_NOTICE(p_hwfn, + "PF is already loaded\n"); + return -EINVAL; + } + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected refusal to load request [resp 0x%08x]. 
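[Editor's sketch] Taken together, the load-request handling above is a short negotiation ladder: send with the current HSI, fall back to HSI version 1 if the MFW refuses it, retry with a force command if one is required and permitted, and otherwise cancel the request and return -EBUSY. A compressed user-space model of that control flow; the response codes and the send callback are stand-ins, not the real FW_MSG_CODE_* values:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative response codes; the real values come from the MFW HSI. */
enum load_rsp { RSP_LOADED, RSP_REFUSED_HSI_1, RSP_REQUIRES_FORCE, RSP_REFUSED };

/* Model of the retry ladder in qed_mcp_load_req(); send() stands in for
 * __qed_mcp_load_req() and force_allowed for qed_mcp_can_force_load().
 */
static int load_req_flow(enum load_rsp (*send)(bool hsi1, bool force),
                         bool force_allowed)
{
        enum load_rsp rsp = send(false, false); /* default HSI, no force */

        if (rsp == RSP_REFUSED_HSI_1) {
                rsp = send(true, false);        /* fall back to HSI version 1 */
        } else if (rsp == RSP_REQUIRES_FORCE) {
                if (!force_allowed)
                        return -1;              /* cancel the request, -EBUSY */
                rsp = send(false, true);        /* retry with a force command */
        }

        return rsp == RSP_LOADED ? 0 : -1;
}

static enum load_rsp stub_send(bool hsi1, bool force)
{
        (void)hsi1;
        return force ? RSP_LOADED : RSP_REQUIRES_FORCE;
}

int main(void)
{
        printf("force allowed -> rc %d\n", load_req_flow(stub_send, true));
        printf("force denied  -> rc %d\n", load_req_flow(stub_send, false));
        return 0;
}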
Aborting.\n", + out_params.load_code); return -EBUSY; } + p_params->load_code = out_params.load_code; + return 0; } +int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 wol_param, mcp_resp, mcp_param; + + switch (p_hwfn->cdev->wol_config) { + case QED_OV_WOL_DISABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; + break; + case QED_OV_WOL_ENABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; + break; + default: + DP_NOTICE(p_hwfn, + "Unknown WoL configuration %02x\n", + p_hwfn->cdev->wol_config); + /* Fallthrough */ + case QED_OV_WOL_DEFAULT: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; + } + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, + &mcp_resp, &mcp_param); +} + +int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params; + struct mcp_mac wol_mac; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE; + + /* Set the primary MAC if WoL is enabled */ + if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) { + u8 *p_mac = p_hwfn->cdev->wol_mac; + + memset(&wol_mac, 0, sizeof(wol_mac)); + wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; + wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | + p_mac[4] << 8 | p_mac[5]; + + DP_VERBOSE(p_hwfn, + (QED_MSG_SP | NETIF_MSG_IFDOWN), + "Setting WoL MAC: %pM --> [%08x,%08x]\n", + p_mac, wol_mac.mac_upper, wol_mac.mac_lower); + + mb_params.p_data_src = &wol_mac; + mb_params.data_src_size = sizeof(wol_mac); + } + + return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); +} + static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -549,7 +1042,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, u32 func_addr = SECTION_ADDR(mfw_func_offsize, MCP_PF_ID(p_hwfn)); struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; int rc; int i; @@ -560,8 +1052,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; - memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8); - mb_params.p_data_src = &union_data; + mb_params.p_data_src = vfs_to_ack; + mb_params.data_src_size = VF_MAX_STATIC / 8; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) { DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); @@ -744,33 +1236,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) { struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; - struct eth_phy_cfg *phy_cfg; + struct eth_phy_cfg phy_cfg; int rc = 0; u32 cmd; /* Set the shmem configuration according to params */ - phy_cfg = &union_data.drv_phy_cfg; - memset(phy_cfg, 0, sizeof(*phy_cfg)); + memset(&phy_cfg, 0, sizeof(phy_cfg)); cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) - phy_cfg->speed = params->speed.forced_speed; - phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; - phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; - phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; - phy_cfg->adv_speed = params->speed.advertised_speeds; - phy_cfg->loopback_mode = params->loopback_mode; + phy_cfg.speed = params->speed.forced_speed; + phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; + phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; + phy_cfg.pause |= (params->pause.forced_tx) ? 
ETH_PAUSE_TX : 0; + phy_cfg.adv_speed = params->speed.advertised_speeds; + phy_cfg.loopback_mode = params->loopback_mode; p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", - phy_cfg->speed, - phy_cfg->pause, - phy_cfg->adv_speed, - phy_cfg->loopback_mode, - phy_cfg->feature_config_flags); + phy_cfg.speed, + phy_cfg.pause, + phy_cfg.adv_speed, + phy_cfg.loopback_mode, + phy_cfg.feature_config_flags); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); @@ -778,7 +1268,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; - mb_params.p_data_src = &union_data; + mb_params.p_data_src = &phy_cfg; + mb_params.data_src_size = sizeof(phy_cfg); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ @@ -805,7 +1296,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, enum qed_mcp_protocol_type stats_type; union qed_mcp_protocol_stats stats; struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; u32 hsi_param; switch (type) { @@ -835,8 +1325,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_STATS; mb_params.param = hsi_param; - memcpy(&union_data, &stats, sizeof(stats)); - mb_params.p_data_src = &union_data; + mb_params.p_data_src = &stats; + mb_params.data_src_size = sizeof(stats); qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } @@ -963,7 +1453,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, qed_mcp_update_bw(p_hwfn, p_ptt); break; default: - DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); + DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; } } @@ -1316,24 +1806,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver) { - struct drv_version_stc *p_drv_version; struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; + struct drv_version_stc drv_version; __be32 val; u32 i; int rc; - p_drv_version = &union_data.drv_version; - p_drv_version->version = p_ver->version; - + memset(&drv_version, 0, sizeof(drv_version)); + drv_version.version = p_ver->version; for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); - *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; + *(__be32 *)&drv_version.name[i * sizeof(u32)] = val; } memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_SET_VERSION; - mb_params.p_data_src = &union_data; + mb_params.p_data_src = &drv_version; + mb_params.data_src_size = sizeof(drv_version); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); @@ -1450,7 +1939,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *mac) { struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; + u32 mfw_mac[2]; int rc; memset(&mb_params, 0, sizeof(mb_params)); @@ -1458,8 +1947,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << DRV_MSG_CODE_VMAC_TYPE_SHIFT; mb_params.param |= MCP_PF_ID(p_hwfn); - ether_addr_copy(&union_data.raw_data[0], mac); - mb_params.p_data_src = &union_data; + + /* MCP is BE, and on LE platforms PCI would swap access to SHMEM + * 
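[Editor's sketch] qed_mcp_send_drv_version() above stores the ASCII version name as big-endian 32-bit words so the big-endian MFW reads the characters in order regardless of host endianness. A runnable user-space equivalent, using htonl() as a stand-in for cpu_to_be32() and memcpy() in place of the driver's pointer cast:

#include <arpa/inet.h>  /* htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[16] = "qed example";   /* illustrative version string */
        uint32_t out[4];
        size_t i;

        for (i = 0; i < sizeof(out) / sizeof(out[0]); i++) {
                uint32_t w;

                memcpy(&w, &name[i * 4], 4);    /* avoids aliasing issues */
                out[i] = htonl(w);              /* swap on little-endian hosts */
        }

        for (i = 0; i < 4; i++)
                printf("word %zu = 0x%08x\n", i, out[i]);
        return 0;
}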
in 32-bit granularity. + * So the MAC has to be set in native order [and not byte order], + * otherwise it would be read incorrectly by MFW after swap. + */ + mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3]; + mfw_mac[1] = mac[4] << 24 | mac[5] << 16; + + mb_params.p_data_src = (u8 *)mfw_mac; + mb_params.data_src_size = 8; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); @@ -1724,52 +2222,426 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, return rc; } -#define QED_RESC_ALLOC_VERSION_MAJOR 1 +static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) +{ + enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; + + switch (res_id) { + case QED_SB: + mfw_res_id = RESOURCE_NUM_SB_E; + break; + case QED_L2_QUEUE: + mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; + break; + case QED_VPORT: + mfw_res_id = RESOURCE_NUM_VPORT_E; + break; + case QED_RSS_ENG: + mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; + break; + case QED_PQ: + mfw_res_id = RESOURCE_NUM_PQ_E; + break; + case QED_RL: + mfw_res_id = RESOURCE_NUM_RL_E; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + mfw_res_id = RESOURCE_VFC_FILTER_E; + break; + case QED_ILT: + mfw_res_id = RESOURCE_ILT_E; + break; + case QED_LL2_QUEUE: + mfw_res_id = RESOURCE_LL2_QUEUE_E; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + mfw_res_id = RESOURCE_CQS_E; + break; + case QED_RDMA_STATS_QUEUE: + mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; + break; + case QED_BDQ: + mfw_res_id = RESOURCE_BDQ_E; + break; + default: + break; + } + + return mfw_res_id; +} + +#define QED_RESC_ALLOC_VERSION_MAJOR 2 #define QED_RESC_ALLOC_VERSION_MINOR 0 #define QED_RESC_ALLOC_VERSION \ ((QED_RESC_ALLOC_VERSION_MAJOR << \ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ (QED_RESC_ALLOC_VERSION_MINOR << \ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) -int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct resource_info *p_resc_info, - u32 *p_mcp_resp, u32 *p_mcp_param) + +struct qed_resc_alloc_in_params { + u32 cmd; + enum qed_resources res_id; + u32 resc_max_val; +}; + +struct qed_resc_alloc_out_params { + u32 mcp_resp; + u32 mcp_param; + u32 resc_num; + u32 resc_start; + u32 vf_resc_num; + u32 vf_resc_start; + u32 flags; +}; + +static int +qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_alloc_in_params *p_in_params, + struct qed_resc_alloc_out_params *p_out_params) { struct qed_mcp_mb_params mb_params; - union drv_union_data union_data; + struct resource_info mfw_resc_info; int rc; + memset(&mfw_resc_info, 0, sizeof(mfw_resc_info)); + + mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id); + if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { + DP_ERR(p_hwfn, + "Failed to match resource %d [%s] with the MFW resources\n", + p_in_params->res_id, + qed_hw_get_resc_name(p_in_params->res_id)); + return -EINVAL; + } + + switch (p_in_params->cmd) { + case DRV_MSG_SET_RESOURCE_VALUE_MSG: + mfw_resc_info.size = p_in_params->resc_max_val; + /* Fallthrough */ + case DRV_MSG_GET_RESOURCE_ALLOC_MSG: + break; + default: + DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", + p_in_params->cmd); + return -EINVAL; + } + memset(&mb_params, 0, sizeof(mb_params)); - memset(&union_data, 0, sizeof(union_data)); - mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; + 
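[Editor's sketch] The comment above is the key detail: SHMEM accesses are byte-swapped in 32-bit units on little-endian hosts, so MAC bytes are packed into native-order words (here, and in the earlier wol_mac case) rather than copied as a byte stream. A quick runnable demonstration of both layouts, with an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
        uint32_t wol_upper, wol_lower, mfw_mac[2];

        /* wol_mac layout: two high bytes, then the four low bytes */
        wol_upper = (uint32_t)mac[0] << 8 | mac[1];
        wol_lower = (uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
                    (uint32_t)mac[4] << 8 | mac[5];

        /* VMAC layout: bytes 0-3 in word 0, bytes 4-5 in the top of word 1 */
        mfw_mac[0] = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
                     (uint32_t)mac[2] << 8 | mac[3];
        mfw_mac[1] = (uint32_t)mac[4] << 24 | (uint32_t)mac[5] << 16;

        printf("wol:  [%08x,%08x]\n", wol_upper, wol_lower);
        printf("vmac: [%08x,%08x]\n", mfw_mac[0], mfw_mac[1]);
        return 0;
}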
mb_params.cmd = p_in_params->cmd; mb_params.param = QED_RESC_ALLOC_VERSION; + mb_params.p_data_src = &mfw_resc_info; + mb_params.data_src_size = sizeof(mfw_resc_info); + mb_params.p_data_dst = mb_params.p_data_src; + mb_params.data_dst_size = mb_params.data_src_size; - /* Need to have a sufficient large struct, as the cmd_and_union - * is going to do memcpy from and to it. - */ - memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info)); + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", + p_in_params->cmd, + p_in_params->res_id, + qed_hw_get_resc_name(p_in_params->res_id), + QED_MFW_GET_FIELD(mb_params.param, + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), + QED_MFW_GET_FIELD(mb_params.param, + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), + p_in_params->resc_max_val); - mb_params.p_data_src = &union_data; - mb_params.p_data_dst = &union_data; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; - /* Copy the data back */ - memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info)); - *p_mcp_resp = mb_params.mcp_resp; - *p_mcp_param = mb_params.mcp_param; + p_out_params->mcp_resp = mb_params.mcp_resp; + p_out_params->mcp_param = mb_params.mcp_param; + p_out_params->resc_num = mfw_resc_info.size; + p_out_params->resc_start = mfw_resc_info.offset; + p_out_params->vf_resc_num = mfw_resc_info.vf_size; + p_out_params->vf_resc_start = mfw_resc_info.vf_offset; + p_out_params->flags = mfw_resc_info.flags; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", + QED_MFW_GET_FIELD(p_out_params->mcp_param, + FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), + QED_MFW_GET_FIELD(p_out_params->mcp_param, + FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), + p_out_params->resc_num, + p_out_params->resc_start, + p_out_params->vf_resc_num, + p_out_params->vf_resc_start, p_out_params->flags); + + return 0; +} + +int +qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 resc_max_val, u32 *p_mcp_resp) +{ + struct qed_resc_alloc_out_params out_params; + struct qed_resc_alloc_in_params in_params; + int rc; + + memset(&in_params, 0, sizeof(in_params)); + in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; + in_params.res_id = res_id; + in_params.resc_max_val = resc_max_val; + memset(&out_params, 0, sizeof(out_params)); + rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc) + return rc; + + *p_mcp_resp = out_params.mcp_resp; + + return 0; +} + +int +qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start) +{ + struct qed_resc_alloc_out_params out_params; + struct qed_resc_alloc_in_params in_params; + int rc; + + memset(&in_params, 0, sizeof(in_params)); + in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; + in_params.res_id = res_id; + memset(&out_params, 0, sizeof(out_params)); + rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc) + return rc; + + *p_mcp_resp = out_params.mcp_resp; + + if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { + *p_resc_num = out_params.resc_num; + *p_resc_start = out_params.resc_start; + } + + return 0; +} + +int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param; + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, + 
&mcp_resp, &mcp_param); +} + +static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 param, u32 *p_mcp_resp, u32 *p_mcp_param) +{ + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, + p_mcp_resp, p_mcp_param); + if (rc) + return rc; + + if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The resource command is unsupported by the MFW\n"); + return -EINVAL; + } + + if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { + u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE); + + DP_NOTICE(p_hwfn, + "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", + param, opcode); + return -EINVAL; + } + + return rc; +} + +int +__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_lock_params *p_params) +{ + u32 param = 0, mcp_resp, mcp_param; + u8 opcode; + int rc; + + switch (p_params->timeout) { + case QED_MCP_RESC_LOCK_TO_DEFAULT: + opcode = RESOURCE_OPCODE_REQ; + p_params->timeout = 0; + break; + case QED_MCP_RESC_LOCK_TO_NONE: + opcode = RESOURCE_OPCODE_REQ_WO_AGING; + p_params->timeout = 0; + break; + default: + opcode = RESOURCE_OPCODE_REQ_W_AGING; + break; + } + + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", + param, p_params->timeout, opcode, p_params->resource); + + /* Attempt to acquire the resource */ + rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); + if (rc) + return rc; + + /* Analyze the response */ + p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); + opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); DP_VERBOSE(p_hwfn, QED_MSG_SP, - "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n", - *p_mcp_param, - p_resc_info->res_id, - p_resc_info->size, - p_resc_info->offset, - p_resc_info->vf_size, - p_resc_info->vf_offset, p_resc_info->flags); + "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", + mcp_param, opcode, p_params->owner); + + switch (opcode) { + case RESOURCE_OPCODE_GNT: + p_params->b_granted = true; + break; + case RESOURCE_OPCODE_BUSY: + p_params->b_granted = false; + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", + mcp_param, opcode); + return -EINVAL; + } return 0; } + +int +qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) +{ + u32 retry_cnt = 0; + int rc; + + do { + /* No need for an interval before the first iteration */ + if (retry_cnt) { + if (p_params->sleep_b4_retry) { + u16 retry_interval_in_ms = + DIV_ROUND_UP(p_params->retry_interval, + 1000); + + msleep(retry_interval_in_ms); + } else { + udelay(p_params->retry_interval); + } + } + + rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params); + if (rc) + return rc; + + if (p_params->b_granted) + break; + } while (retry_cnt++ < p_params->retry_num); + + return 0; +} + +int +qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_unlock_params *p_params) +{ + u32 param = 0, mcp_resp, mcp_param; + u8 opcode; + int rc; + + opcode = p_params->b_force ? 
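[Editor's sketch] qed_mcp_resc_lock() above retries __qed_mcp_resc_lock() with no delay before the first attempt, then either a millisecond sleep (rounded up from the microsecond interval) or a busy delay between subsequent attempts. A user-space model of that loop; try_lock() is a stub, and usleep() stands in for both msleep() and udelay():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Stub standing in for __qed_mcp_resc_lock(). */
static bool try_lock(void)
{
        static int calls;

        return ++calls == 3;    /* pretend the lock frees up on the 3rd try */
}

static bool lock_with_retries(unsigned int retry_num,
                              unsigned int retry_interval_us, bool sleep_ok)
{
        unsigned int retry_cnt = 0;

        do {
                /* No interval before the first attempt */
                if (retry_cnt) {
                        if (sleep_ok)   /* msleep(DIV_ROUND_UP(us, 1000)) */
                                usleep(DIV_ROUND_UP(retry_interval_us,
                                                    1000) * 1000);
                        else            /* udelay(us) */
                                usleep(retry_interval_us);
                }
                if (try_lock())
                        return true;
        } while (retry_cnt++ < retry_num);

        return false;
}

int main(void)
{
        printf("granted: %d\n", lock_with_retries(10, 10000, true));
        return 0;
}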
RESOURCE_OPCODE_FORCE_RELEASE + : RESOURCE_OPCODE_RELEASE; + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", + param, opcode, p_params->resource); + + /* Attempt to release the resource */ + rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); + if (rc) + return rc; + + /* Analyze the response */ + opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", + mcp_param, opcode); + + switch (opcode) { + case RESOURCE_OPCODE_RELEASED_PREVIOUS: + DP_INFO(p_hwfn, + "Resource unlock request for an already released resource [%d]\n", + p_params->resource); + /* Fallthrough */ + case RESOURCE_OPCODE_RELEASED: + p_params->b_released = true; + break; + case RESOURCE_OPCODE_WRONG_OWNER: + p_params->b_released = false; + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", + mcp_param, opcode); + return -EINVAL; + } + + return 0; +} + +void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, + struct qed_resc_unlock_params *p_unlock, + enum qed_resc_lock + resource, bool b_is_permanent) +{ + if (p_lock) { + memset(p_lock, 0, sizeof(*p_lock)); + + /* Permanent resources don't require aging, and there's no + * point in trying to acquire them more than once since it's + * unexpected another entity would release them. + */ + if (b_is_permanent) { + p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE; + } else { + p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT; + p_lock->retry_interval = + QED_MCP_RESC_LOCK_RETRY_VAL_DFLT; + p_lock->sleep_b4_retry = true; + } + + p_lock->resource = resource; + } + + if (p_unlock) { + memset(p_unlock, 0, sizeof(*p_unlock)); + p_unlock->resource = resource; + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 368e88de146cdbc7a7a66210c6ff9b1302867a8f..5ae35d6cc7d1fbd9f3822a03311c52cb2c362d65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -39,6 +39,7 @@ #include #include #include "qed_hsi.h" +#include "qed_dev_api.h" struct qed_mcp_link_speed_params { bool autoneg; @@ -479,14 +480,18 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, rel_pfid) #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) -/* TODO - this is only correct as long as only BB is supported, and - * no port-swapping is implemented; Afterwards we'll need to fix it. - */ -#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ - ((_p_hwfn)->cdev->num_ports_in_engines * 2)) +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + ((_p_hwfn)->cdev->num_ports_in_engines * \ + qed_device_num_engines((_p_hwfn)->cdev))) + struct qed_mcp_info { - /* Spinlock used for protecting the access to the MFW mailbox */ - spinlock_t lock; + /* List for mailbox commands which were sent and wait for a response */ + struct list_head cmd_list; + + /* Spinlock used for protecting the access to the mailbox commands list + * and the sending of the commands. + */ + spinlock_t cmd_lock; /* Spinlock used for syncing SW link-changes and link-changes * originating from attention context. 
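[Editor's sketch] qed_mcp_resc_lock_default_init() above gives "permanent" locks no aging and no retries, while transient locks get ten retries at a 10000-usec interval with sleeping allowed. A user-space replica of that initializer, with local names mirroring the qed ones:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* User-space model of qed_resc_lock_params and its default initializer. */
struct resc_lock_params {
        unsigned char resource;
        unsigned char timeout;          /* 0 = default aging, 255 = no aging */
        unsigned char retry_num;
        unsigned short retry_interval;  /* usec */
        bool sleep_b4_retry;
};

#define LOCK_TO_NONE            255
#define LOCK_RETRY_CNT_DFLT     10
#define LOCK_RETRY_VAL_DFLT     10000

static void lock_default_init(struct resc_lock_params *p, unsigned char resc,
                              bool permanent)
{
        memset(p, 0, sizeof(*p));
        if (permanent) {
                /* No aging, no retries: no one else should release it. */
                p->timeout = LOCK_TO_NONE;
        } else {
                p->retry_num = LOCK_RETRY_CNT_DFLT;
                p->retry_interval = LOCK_RETRY_VAL_DFLT;
                p->sleep_b4_retry = true;
        }
        p->resource = resc;
}

int main(void)
{
        struct resc_lock_params p;

        lock_default_init(&p, 28, false);
        printf("resc=%u timeout=%u retries=%u interval=%uus\n",
               p.resource, p.timeout, p.retry_num, p.retry_interval);
        return 0;
}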
@@ -506,14 +511,16 @@ struct qed_mcp_info { u8 *mfw_mb_cur; u8 *mfw_mb_shadow; u16 mfw_mb_length; - u16 mcp_hist; + u32 mcp_hist; }; struct qed_mcp_mb_params { u32 cmd; u32 param; - union drv_union_data *p_data_src; - union drv_union_data *p_data_dst; + void *p_data_src; + u8 data_src_size; + void *p_data_dst; + u8 data_dst_size; u32 mcp_resp; u32 mcp_param; }; @@ -564,27 +571,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn); int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +enum qed_drv_role { + QED_DRV_ROLE_OS, + QED_DRV_ROLE_KDUMP, +}; + +struct qed_load_req_params { + /* Input params */ + enum qed_drv_role drv_role; + u8 timeout_val; + bool avoid_eng_reset; + enum qed_override_force_load override_force_load; + + /* Output params */ + u32 load_code; +}; + /** - * @brief Sends a LOAD_REQ to the MFW, and in case operation - * succeed, returns whether this PF is the first on the - * chip/engine/port or function. This function should be - * called when driver is ready to accept MFW events after - * Storms initializations are done. + * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, + * returns whether this PF is the first on the engine/port or function. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param p_load_code - The MCP response param containing one - * of the following: - * FW_MSG_CODE_DRV_LOAD_ENGINE - * FW_MSG_CODE_DRV_LOAD_PORT - * FW_MSG_CODE_DRV_LOAD_FUNCTION - * @return int - - * 0 - Operation was successul. - * -EBUSY - Operation failed + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return int - 0 - Operation was successful. */ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 *p_load_code); + struct qed_load_req_params *p_params); + +/** + * @brief Sends a UNLOAD_REQ message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return int - 0 - Operation was successful. + */ +int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief Sends a UNLOAD_DONE message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return int - 0 - Operation was successful. + */ +int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** * @brief Read the MFW mailbox into Current buffer. @@ -707,6 +742,41 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); +/** + * @brief - Sets the MFW's max value for the given resource + * + * @param p_hwfn + * @param p_ptt + * @param res_id + * @param resc_max_val + * @param p_mcp_resp + * + * @return int - 0 - operation was successful. + */ +int +qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 resc_max_val, u32 *p_mcp_resp); + +/** + * @brief - Gets the MFW allocation info for the given resource + * + * @param p_hwfn + * @param p_ptt + * @param res_id + * @param p_mcp_resp + * @param p_resc_num + * @param p_resc_start + * + * @return int - 0 - operation was successful. 
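[Editor's sketch] The reworked qed_mcp_mb_params above replaces the fixed union drv_union_data pointers with opaque buffers plus explicit sizes, which is what enables the size check at the top of this section. A compact model of a caller and the validate/copy step; UNION_DATA_SIZE is illustrative, not the real sizeof(union drv_union_data):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of the reworked mailbox parameters. */
struct mb_params {
        uint32_t cmd, param;
        void *p_data_src;
        uint8_t data_src_size;
        void *p_data_dst;
        uint8_t data_dst_size;
};

#define UNION_DATA_SIZE 32      /* illustrative shared-memory buffer size */

static int send_cmd(const struct mb_params *p)
{
        uint8_t shmem[UNION_DATA_SIZE] = { 0 };

        /* Reject buffers larger than the shared union, as above. */
        if (p->data_src_size > UNION_DATA_SIZE ||
            p->data_dst_size > UNION_DATA_SIZE)
                return -1;

        if (p->p_data_src)
                memcpy(shmem, p->p_data_src, p->data_src_size); /* write side */
        if (p->p_data_dst)
                memcpy(p->p_data_dst, shmem, p->data_dst_size); /* read side */
        return 0;
}

int main(void)
{
        uint8_t payload[8] = { 1, 2, 3 }, reply[8];
        struct mb_params mb = { .cmd = 1, .p_data_src = payload,
                                .data_src_size = sizeof(payload),
                                .p_data_dst = reply,
                                .data_dst_size = sizeof(reply) };

        printf("rc=%d first=%u\n", send_cmd(&mb), reply[0]);
        return 0;
}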
+ */ +int +qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); + /** * @brief Send eswitch mode to MFW * @@ -720,19 +790,106 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_eswitch eswitch); +#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP +#define QED_MCP_RESC_LOCK_MAX_VAL 31 + +enum qed_resc_lock { + QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL, + QED_RESC_LOCK_PTP_PORT0, + QED_RESC_LOCK_PTP_PORT1, + QED_RESC_LOCK_PTP_PORT2, + QED_RESC_LOCK_PTP_PORT3, + QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL, + QED_RESC_LOCK_RESC_INVALID +}; + /** - * @brief - Gets the MFW allocation info for the given resource + * @brief - Initiates PF FLR * * @param p_hwfn * @param p_ptt - * @param p_resc_info - descriptor of requested resource - * @param p_mcp_resp - * @param p_mcp_param * * @return int - 0 - operation was successful. */ -int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct resource_info *p_resc_info, - u32 *p_mcp_resp, u32 *p_mcp_param); +int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +struct qed_resc_lock_params { + /* Resource number [valid values are 0..31] */ + u8 resource; + + /* Lock timeout value in seconds [default, none or 1..254] */ + u8 timeout; +#define QED_MCP_RESC_LOCK_TO_DEFAULT 0 +#define QED_MCP_RESC_LOCK_TO_NONE 255 + + /* Number of times to retry locking */ + u8 retry_num; +#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10 + + /* The interval in usec between retries */ + u16 retry_interval; +#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000 + + /* Use sleep or delay between retries */ + bool sleep_b4_retry; + + /* Will be set as true if the resource is free and granted */ + bool b_granted; + + /* Will be filled with the resource owner. + * [0..15 = PF0-15, 16 = MFW] + */ + u8 owner; +}; + +/** + * @brief Acquires MFW generic resource lock + * + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return int - 0 - operation was successful. + */ +int +qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params); + +struct qed_resc_unlock_params { + /* Resource number [valid values are 0..31] */ + u8 resource; + + /* Allow to release a resource even if belongs to another PF */ + bool b_force; + + /* Will be set as true if the resource is released */ + bool b_released; +}; + +/** + * @brief Releases MFW generic resource lock + * + * @param p_hwfn + * @param p_ptt + * @param p_params + * + * @return int - 0 - operation was successful. 
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    struct qed_resc_unlock_params *p_params);
+
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; Can be NULL
+ * @param p_unlock - unlock params struct to be initialized; Can be NULL
+ * @param resource - the requested resource
+ * @param b_is_permanent - disable retries & aging when set
+ */
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+				    struct qed_resc_unlock_params *p_unlock,
+				    enum qed_resc_lock
+				    resource, bool b_is_permanent);
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 378afce58b3f0abd4c2a3bd43f403a50e1a9a1da..db96670192c7563e0d0384370e92845ff17d8e48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -41,6 +41,7 @@
 #include "qed_iscsi.h"
 #include "qed_ll2.h"
 #include "qed_ooo.h"
+#include "qed_cxt.h"
 
 static struct qed_ooo_archipelago
 *qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
@@ -48,15 +49,18 @@ static struct qed_ooo_archipelago
 			  *p_ooo_info, u32 cid)
 {
-	struct qed_ooo_archipelago *p_archipelago = NULL;
+	u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+	struct qed_ooo_archipelago *p_archipelago;
 
-	list_for_each_entry(p_archipelago,
-			    &p_ooo_info->archipelagos_list, list_entry) {
-		if (p_archipelago->cid == cid)
-			return p_archipelago;
-	}
+	if (idx >= p_ooo_info->max_num_archipelagos)
+		return NULL;
 
-	return NULL;
+	p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+	if (list_empty(&p_archipelago->isles_list))
+		return NULL;
+
+	return p_archipelago;
 }
 
 static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
@@ -97,8 +101,8 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
 
 struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 {
+	u16 max_num_archipelagos = 0, cid_base;
 	struct qed_ooo_info *p_ooo_info;
-	u16 max_num_archipelagos = 0;
 	u16 max_num_isles = 0;
 	u32 i;
 
@@ -110,6 +114,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
 	max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+	cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
 
 	if (!max_num_archipelagos) {
 		DP_NOTICE(p_hwfn,
@@ -121,11 +126,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (!p_ooo_info)
 		return NULL;
 
+	p_ooo_info->cid_base = cid_base;
+	p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
 	INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
 	INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
 	INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
-	INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
-	INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
 
 	p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
 					  sizeof(struct qed_ooo_isle),
@@ -146,11 +152,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (!p_ooo_info->p_archipelagos_mem)
 		goto no_archipelagos_mem;
 
-	for (i = 0; i < max_num_archipelagos; i++) {
+	for (i = 0; i < max_num_archipelagos; i++)
 		INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
-		list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
-			      &p_ooo_info->free_archipelagos_list);
-	}
 
 	p_ooo_info->ooo_history.p_cqes =
 	    kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
@@ -178,21 +181,9 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
 	struct qed_ooo_archipelago *p_archipelago;
 	struct qed_ooo_buffer
*p_buffer; struct qed_ooo_isle *p_isle; - bool b_found = false; - - if (list_empty(&p_ooo_info->archipelagos_list)) - return; - list_for_each_entry(p_archipelago, - &p_ooo_info->archipelagos_list, list_entry) { - if (p_archipelago->cid == cid) { - list_del(&p_archipelago->list_entry); - b_found = true; - break; - } - } - - if (!b_found) + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (!p_archipelago) return; while (!list_empty(&p_archipelago->isles_list)) { @@ -216,27 +207,21 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); } - - list_add_tail(&p_archipelago->list_entry, - &p_ooo_info->free_archipelagos_list); } void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) { - struct qed_ooo_archipelago *p_arch; + struct qed_ooo_archipelago *p_archipelago; struct qed_ooo_buffer *p_buffer; struct qed_ooo_isle *p_isle; + u32 i; - while (!list_empty(&p_ooo_info->archipelagos_list)) { - p_arch = list_first_entry(&p_ooo_info->archipelagos_list, - struct qed_ooo_archipelago, - list_entry); - - list_del(&p_arch->list_entry); + for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) { + p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]); - while (!list_empty(&p_arch->isles_list)) { - p_isle = list_first_entry(&p_arch->isles_list, + while (!list_empty(&p_archipelago->isles_list)) { + p_isle = list_first_entry(&p_archipelago->isles_list, struct qed_ooo_isle, list_entry); @@ -258,8 +243,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); } - list_add_tail(&p_arch->list_entry, - &p_ooo_info->free_archipelagos_list); } if (!list_empty(&p_ooo_info->ready_buffers_list)) list_splice_tail_init(&p_ooo_info->ready_buffers_list, @@ -378,12 +361,6 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, p_ooo_info->cur_isles_number--; list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list); } - - if (list_empty(&p_archipelago->isles_list)) { - list_del(&p_archipelago->list_entry); - list_add(&p_archipelago->list_entry, - &p_ooo_info->free_archipelagos_list); - } } void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, @@ -426,28 +403,10 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, return; } - if (!p_archipelago && - !list_empty(&p_ooo_info->free_archipelagos_list)) { - p_archipelago = - list_first_entry(&p_ooo_info->free_archipelagos_list, - struct qed_ooo_archipelago, list_entry); + if (!p_archipelago) { + u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; - list_del(&p_archipelago->list_entry); - if (!list_empty(&p_archipelago->isles_list)) { - DP_NOTICE(p_hwfn, - "Free OOO connection is not empty\n"); - INIT_LIST_HEAD(&p_archipelago->isles_list); - } - p_archipelago->cid = cid; - list_add(&p_archipelago->list_entry, - &p_ooo_info->archipelagos_list); - } else if (!p_archipelago) { - DP_NOTICE(p_hwfn, "No more free OOO connections\n"); - list_add(&p_isle->list_entry, - &p_ooo_info->free_isles_list); - list_add(&p_buffer->list_entry, - &p_ooo_info->free_buffers_list); - return; + p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; } list_add(&p_buffer->list_entry, &p_isle->buffers_list); @@ -517,11 +476,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, } else { list_splice_tail_init(&p_right_isle->buffers_list, &p_ooo_info->ready_buffers_list); - if (list_empty(&p_archipelago->isles_list)) { - list_del(&p_archipelago->list_entry); - list_add(&p_archipelago->list_entry, - 
&p_ooo_info->free_archipelagos_list); - } } list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 4f138fb5f533e0ac68cbdf9acab9a41aa4d33ff7..791ad0f8b7595d00bcad4b61afba26ac882fbcc9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -60,9 +60,7 @@ struct qed_ooo_isle { }; struct qed_ooo_archipelago { - struct list_head list_entry; struct list_head isles_list; - u32 cid; }; struct qed_ooo_history { @@ -75,14 +73,14 @@ struct qed_ooo_info { struct list_head free_buffers_list; struct list_head ready_buffers_list; struct list_head free_isles_list; - struct list_head free_archipelagos_list; - struct list_head archipelagos_list; struct qed_ooo_archipelago *p_archipelagos_mem; struct qed_ooo_isle *p_isles_mem; struct qed_ooo_history ooo_history; u32 cur_isles_number; u32 max_isles_number; u32 gen_isles_number; + u16 max_num_archipelagos; + u16 cid_base; }; #if IS_ENABLED(CONFIG_QED_ISCSI) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index d27aa85da23cb526ca9319a152107a64c6df0e4a..434a164a76ed99b15425c13353d3c9f0e74f59fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -34,7 +34,7 @@ #include "qed_dev_api.h" #include "qed_hw.h" #include "qed_l2.h" -#include "qed_ptp.h" +#include "qed_mcp.h" #include "qed_reg_addr.h" /* 16 nano second time quantas to wait before making a Drift adjustment */ @@ -45,6 +45,82 @@ #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 #define QED_TIMESTAMP_MASK BIT(16) +static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) +{ + switch (qed_device_get_port_id(p_hwfn->cdev)) { + case 0: + return QED_RESC_LOCK_PTP_PORT0; + case 1: + return QED_RESC_LOCK_PTP_PORT1; + case 2: + return QED_RESC_LOCK_PTP_PORT2; + case 3: + return QED_RESC_LOCK_PTP_PORT3; + default: + return QED_RESC_LOCK_RESC_INVALID; + } +} + +static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_lock_params params; + enum qed_resc_lock resource; + int rc; + + resource = qed_ptcdev_to_resc(p_hwfn); + if (resource == QED_RESC_LOCK_RESC_INVALID) + return -EINVAL; + + qed_mcp_resc_lock_default_init(¶ms, NULL, resource, true); + + rc = qed_mcp_resc_lock(p_hwfn, p_ptt, ¶ms); + if (rc && rc != -EINVAL) { + return rc; + } else if (rc == -EINVAL) { + /* MFW doesn't support resource locking, first PF on the port + * has lock ownership. 
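[Editor's sketch] The qed_ooo changes above convert the archipelago lookup from a linear list search keyed by CID into direct array indexing by (cid & 0xffff) - cid_base, with an empty isles list now meaning "no archipelago for this connection". A small standalone model of that O(1) lookup, including the bounds check that also catches CIDs below the base via unsigned wrap-around:

#include <stdio.h>

#define MAX_ARCH        4       /* illustrative; really num_cons */

struct archipelago {
        int num_isles;          /* stands in for the isles_list */
};

static struct archipelago arch_mem[MAX_ARCH];
static unsigned int cid_base = 0x100;

static struct archipelago *seek_archipelago(unsigned int cid)
{
        unsigned int idx = (cid & 0xffff) - cid_base;

        if (idx >= MAX_ARCH)            /* out of range (or cid < base) */
                return NULL;
        if (!arch_mem[idx].num_isles)   /* allocated but idle: absent */
                return NULL;
        return &arch_mem[idx];
}

int main(void)
{
        arch_mem[2].num_isles = 1;
        printf("cid 0x102: %s\n", seek_archipelago(0x102) ? "found" : "absent");
        printf("cid 0x101: %s\n", seek_archipelago(0x101) ? "found" : "absent");
        return 0;
}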
+ */ + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) + return 0; + + DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); + return -EBUSY; + } else if (!rc && !params.b_granted) { + DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n"); + return -EBUSY; + } + + return rc; +} + +static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_unlock_params params; + enum qed_resc_lock resource; + int rc; + + resource = qed_ptcdev_to_resc(p_hwfn); + if (resource == QED_RESC_LOCK_RESC_INVALID) + return -EINVAL; + + qed_mcp_resc_lock_default_init(NULL, ¶ms, resource, true); + + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms); + if (rc == -EINVAL) { + /* MFW doesn't support locking, first PF has lock ownership */ + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) { + rc = 0; + } else { + DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); + return -EINVAL; + } + } else if (rc) { + DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n"); + } + + return rc; +} + /* Read Rx timestamp */ static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp) { @@ -112,39 +188,73 @@ static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles) } /* Filter PTP protocol packets that need to be timestamped */ -static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev, - enum qed_ptp_filter_type type) +static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev, + enum qed_ptp_filter_type rx_type, + enum qed_ptp_hwtstamp_tx_type tx_type) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; - u32 rule_mask, parm_mask; + u32 rule_mask, enable_cfg = 0x0; - switch (type) { - case QED_PTP_FILTER_L2_IPV4_IPV6: - parm_mask = 0x6AA; - rule_mask = 0x3EEE; + switch (rx_type) { + case QED_PTP_FILTER_NONE: + enable_cfg = 0x0; + rule_mask = 0x3FFF; break; - case QED_PTP_FILTER_L2: - parm_mask = 0x6BF; - rule_mask = 0x3EFF; + case QED_PTP_FILTER_ALL: + enable_cfg = 0x7; + rule_mask = 0x3CAA; break; - case QED_PTP_FILTER_IPV4_IPV6: - parm_mask = 0x7EA; - rule_mask = 0x3FFE; + case QED_PTP_FILTER_V1_L4_EVENT: + enable_cfg = 0x3; + rule_mask = 0x3FFA; break; - case QED_PTP_FILTER_IPV4: - parm_mask = 0x7EE; + case QED_PTP_FILTER_V1_L4_GEN: + enable_cfg = 0x3; rule_mask = 0x3FFE; break; + case QED_PTP_FILTER_V2_L4_EVENT: + enable_cfg = 0x5; + rule_mask = 0x3FAA; + break; + case QED_PTP_FILTER_V2_L4_GEN: + enable_cfg = 0x5; + rule_mask = 0x3FEE; + break; + case QED_PTP_FILTER_V2_L2_EVENT: + enable_cfg = 0x5; + rule_mask = 0x3CFF; + break; + case QED_PTP_FILTER_V2_L2_GEN: + enable_cfg = 0x5; + rule_mask = 0x3EFF; + break; + case QED_PTP_FILTER_V2_EVENT: + enable_cfg = 0x5; + rule_mask = 0x3CAA; + break; + case QED_PTP_FILTER_V2_GEN: + enable_cfg = 0x5; + rule_mask = 0x3EEE; + break; default: - DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type); + DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type); return -EINVAL; } - qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask); + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg); - qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1); + if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + } else { + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg); + qed_wr(p_hwfn, p_ptt, 
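[Editor's sketch] The rx_type switch above is effectively a constant lookup from filter type to an (enable_cfg, rule_mask) register pair. The same data expressed as a designated-initializer table, with the pair values copied from the switch and the enum names abbreviated here:

#include <stdint.h>
#include <stdio.h>

enum ptp_filter {
        F_NONE, F_ALL, F_V1_L4_EVENT, F_V1_L4_GEN, F_V2_L4_EVENT,
        F_V2_L4_GEN, F_V2_L2_EVENT, F_V2_L2_GEN, F_V2_EVENT, F_V2_GEN
};

/* (enable_cfg, rule_mask) pairs taken from the switch statement above. */
static const struct { uint32_t enable_cfg, rule_mask; } ptp_cfg[] = {
        [F_NONE]        = { 0x0, 0x3FFF },
        [F_ALL]         = { 0x7, 0x3CAA },
        [F_V1_L4_EVENT] = { 0x3, 0x3FFA },
        [F_V1_L4_GEN]   = { 0x3, 0x3FFE },
        [F_V2_L4_EVENT] = { 0x5, 0x3FAA },
        [F_V2_L4_GEN]   = { 0x5, 0x3FEE },
        [F_V2_L2_EVENT] = { 0x5, 0x3CFF },
        [F_V2_L2_GEN]   = { 0x5, 0x3EFF },
        [F_V2_EVENT]    = { 0x5, 0x3CAA },
        [F_V2_GEN]      = { 0x5, 0x3EEE },
};

int main(void)
{
        enum ptp_filter f = F_V2_L4_EVENT;

        printf("enable_cfg=0x%x rule_mask=0x%04x\n",
               (unsigned)ptp_cfg[f].enable_cfg,
               (unsigned)ptp_cfg[f].rule_mask);
        return 0;
}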
NIG_REG_TX_LLH_PTP_PARAM_MASK, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask); + } /* Reset possibly old timestamps */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, @@ -248,7 +358,25 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb) static int qed_ptp_hw_enable(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + struct qed_ptt *p_ptt; + int rc; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n"); + return -EBUSY; + } + + p_hwfn->p_ptp_ptt = p_ptt; + + rc = qed_ptp_res_lock(p_hwfn, p_ptt); + if (rc) { + DP_INFO(p_hwfn, + "Couldn't acquire the resource lock, skip ptp enable for this PF\n"); + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_ptp_ptt = NULL; + return rc; + } /* Reset PTP event detection rules - will be configured in the IOCTL */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); @@ -262,12 +390,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1); /* Pause free running counter */ - qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); + if (QED_IS_BB_B0(p_hwfn->cdev)) + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); + if (QED_IS_AH(p_hwfn->cdev)) + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2); qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0); qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0); /* Resume free running counter */ - qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); + if (QED_IS_BB_B0(p_hwfn->cdev)) + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); + if (QED_IS_AH(p_hwfn->cdev)) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4); + qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1); + } /* Disable drift register */ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0); @@ -281,22 +417,13 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev) return 0; } -static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev) -{ - struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; - - qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA); - qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE); - - return 0; -} - static int qed_ptp_hw_disable(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + qed_ptp_res_unlock(p_hwfn, p_ptt); + /* Reset PTP event detection rules */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); @@ -308,12 +435,14 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_ptp_ptt = NULL; + return 0; } const struct qed_eth_ptp_ops qed_ptp_ops_pass = { - .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on, - .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters, + .cfg_filters = qed_ptp_hw_cfg_filters, .read_rx_ts = qed_ptp_hw_read_rx_ts, .read_tx_ts = qed_ptp_hw_read_tx_ts, .read_cc = qed_ptp_hw_read_cc, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h deleted file mode 100644 index 63c666d0b7394d1cb69d29d89d22dbc7a1787ee9..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.h +++ /dev/null @@ -1,47 +0,0 @@ -/* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic 
Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef _QED_PTP_H -#define _QED_PTP_H -#include - -int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_ptp_filter_type type); -int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); -int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts); -int qed_ptp_read_cc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u64 *cycles); -int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb); -int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); - -#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d59d9df60cd24c20f031e66e7f034975d5244cba..1ae73b2d6d1e1e7d96abcedc6fbdfa683ed8a566 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -160,13 +160,13 @@ 0x2e0704UL #define CCFC_REG_STRONG_ENABLE_PF \ 0x2e0708UL -#define PGLUE_B_REG_PGL_ADDR_88_F0 \ +#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \ 0x2aa404UL -#define PGLUE_B_REG_PGL_ADDR_8C_F0 \ +#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \ 0x2aa408UL -#define PGLUE_B_REG_PGL_ADDR_90_F0 \ +#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \ 0x2aa40cUL -#define PGLUE_B_REG_PGL_ADDR_94_F0 \ +#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \ 0x2aa410UL #define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \ 0x2aa138UL @@ -356,6 +356,10 @@ 0x238804UL #define RDIF_REG_STOP_ON_ERROR \ 0x300040UL +#define RDIF_REG_DEBUG_ERROR_INFO \ + 0x300400UL +#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \ + 64 #define SRC_REG_SOFT_RST \ 0x23874cUL #define TCFC_REG_ACTIVITY_COUNTER \ @@ -370,6 +374,10 @@ 0x1700004UL #define TDIF_REG_STOP_ON_ERROR \ 0x310040UL +#define TDIF_REG_DEBUG_ERROR_INFO \ + 0x310400UL +#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \ + 64 #define UCM_REG_INIT \ 0x1280000UL #define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \ @@ -1236,6 +1244,26 @@ 0x1901534UL #define USEM_REG_DBG_FORCE_FRAME \ 0x1901538UL +#define NWS_REG_DBG_SELECT \ + 0x700128UL +#define NWS_REG_DBG_DWORD_ENABLE \ + 0x70012cUL +#define 
NWS_REG_DBG_SHIFT \ + 0x700130UL +#define NWS_REG_DBG_FORCE_VALID \ + 0x700134UL +#define NWS_REG_DBG_FORCE_FRAME \ + 0x700138UL +#define MS_REG_DBG_SELECT \ + 0x6a0228UL +#define MS_REG_DBG_DWORD_ENABLE \ + 0x6a022cUL +#define MS_REG_DBG_SHIFT \ + 0x6a0230UL +#define MS_REG_DBG_FORCE_VALID \ + 0x6a0234UL +#define MS_REG_DBG_FORCE_FRAME \ + 0x6a0238UL #define PCIE_REG_DBG_COMMON_SELECT \ 0x054398UL #define PCIE_REG_DBG_COMMON_DWORD_ENABLE \ @@ -1448,6 +1476,8 @@ 0x000b48UL #define RSS_REG_RSS_RAM_DATA \ 0x238c20UL +#define RSS_REG_RSS_RAM_DATA_SIZE \ + 4 #define MISC_REG_BLOCK_256B_EN \ 0x008c14UL #define NWS_REG_NWS_CMU \ @@ -1520,4 +1550,22 @@ #define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL #define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL #define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL +#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL +#define PSWRQ2_REG_WR_MBS0 0x240400UL + +#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL +#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL +#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL +#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL +#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL +#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL + +#define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_CM_HDR_GFT 0x1f11c8UL +#define PRS_REG_GFT_CAM 0x1f1100UL +#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL +#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0 +#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8 +#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index d9ff6b28591c19faf288a130e28c19445fd3ba9b..56289d7cd306a0255a5d8854f1abb4298d7cd4dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -66,17 +66,31 @@ #include "qed_roce.h" #include "qed_ll2.h" -void qed_async_roce_event(struct qed_hwfn *p_hwfn, - struct event_ring_entry *p_eqe) +static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); + +void qed_roce_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, union rdma_eqe_data *rdma_data) { - struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { + u16 icid = + (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid); + + /* icid release in this async event can occur only if the icid + * was offloaded to the FW. In case it wasn't offloaded this is + * handled in qed_roce_sp_destroy_qp. 
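+ * As a worked example of the pairing (illustrative values, not from
+ * a real trace): a QP whose responder holds relative cid 4 has its
+ * requester at cid 5 (4 ^ 1). Each DESTROY_QP_DONE event clears one
+ * role's bit in real_cid_map; only once both bits of the pair are
+ * clear is the pair released from cid_map, as qed_roce_free_real_icid()
+ * below implements.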
+ */ + qed_roce_free_real_icid(p_hwfn, icid); + } else { + struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events; - p_rdma_info->events.affiliated_event(p_rdma_info->events.context, - p_eqe->opcode, &p_eqe->data); + events->affiliated_event(p_hwfn->p_rdma_info->events.context, + fw_event_code, + &rdma_data->async_handle); + } } static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, - struct qed_bmap *bmap, u32 max_count) + struct qed_bmap *bmap, u32 max_count, char *name) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); @@ -90,43 +104,62 @@ static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, return -ENOMEM; } - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n", - bmap->bitmap); + snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); return 0; } static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 *id_num) { - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap); - *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); - - if (*id_num >= bmap->max_count) { - DP_NOTICE(p_hwfn, "no id available max_count=%d\n", - bmap->max_count); + if (*id_num >= bmap->max_count) return -EINVAL; - } __set_bit(*id_num, bmap->bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", + bmap->name, *id_num); + return 0; } +static void qed_bmap_set_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + if (id_num >= bmap->max_count) + return; + + __set_bit(id_num, bmap->bitmap); +} + static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num) { bool b_acquired; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num); if (id_num >= bmap->max_count) return; b_acquired = test_and_clear_bit(id_num, bmap->bitmap); if (!b_acquired) { - DP_NOTICE(p_hwfn, "ID %d already released\n", id_num); + DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", + bmap->name, id_num); return; } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", + bmap->name, id_num); +} + +static int qed_bmap_test_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + if (id_num >= bmap->max_count) + return -1; + + return test_bit(id_num, bmap->bitmap); } static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) @@ -170,7 +203,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Queue zone lines are shared between RoCE and L2 in such a way that * they can be used by each without obstructing the other. 
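 * For example, with a hypothetical split of RESC_START(QED_L2_QUEUE) = 64
 * and RESC_NUM(QED_L2_QUEUE) = 32, this PF's RDMA CNQs may use queue
 * zones 64..95 only; qed_rdma_cnq_prod_update() later rejects any
 * qz_offset beyond max_queue_zones before writing the CNQ consumer value.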
*/ - p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE); + p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); /* Allocate a struct with device params and fill it */ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); @@ -191,7 +225,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, } /* Allocate bit map for pd's */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, + "PD"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate pd_map, rc = %d\n", @@ -201,7 +236,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Allocate DPI bitmap */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, - p_hwfn->dpi_count); + p_hwfn->dpi_count, "DPI"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate DPI bitmap, rc = %d\n", rc); @@ -212,7 +247,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, * twice the number of QPs. */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, - p_rdma_info->num_qps * 2); + p_rdma_info->num_qps * 2, "CQ"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate cq bitmap, rc = %d\n", rc); @@ -224,7 +259,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, * The maximum number of CQs is bounded to twice the number of QPs. */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, - p_rdma_info->num_qps * 2); + p_rdma_info->num_qps * 2, "Toggle"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate toogle bits, rc = %d\n", rc); @@ -233,7 +268,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Allocate bitmap for itids */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, - p_rdma_info->num_mrs); + p_rdma_info->num_mrs, "MR"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate itids bitmaps, rc = %d\n", rc); @@ -241,16 +276,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, } /* Allocate bitmap for cids used for qps. */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, + "CID"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate cid bitmap, rc = %d\n", rc); goto free_tid_map; } + /* Allocate bitmap for cids used for responders/requesters. 
*/ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, + "REAL_CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate real cid bitmap, rc = %d\n", rc); + goto free_cid_map; + } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; +free_cid_map: + kfree(p_rdma_info->cid_map.bitmap); free_tid_map: kfree(p_rdma_info->tid_map.bitmap); free_toggle_map: @@ -271,16 +317,79 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, return rc; } +static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, bool check) +{ + int weight = bitmap_weight(bmap->bitmap, bmap->max_count); + int last_line = bmap->max_count / (64 * 8); + int last_item = last_line * 8 + + DIV_ROUND_UP(bmap->max_count % (64 * 8), 64); + u64 *pmap = (u64 *)bmap->bitmap; + int line, item, offset; + u8 str_last_line[200] = { 0 }; + + if (!weight || !check) + goto end; + + DP_NOTICE(p_hwfn, + "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", + bmap->name, bmap->max_count, weight); + + /* print aligned non-zero lines, if any */ + for (item = 0, line = 0; line < last_line; line++, item += 8) + if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8)) + DP_NOTICE(p_hwfn, + "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + line, + pmap[item], + pmap[item + 1], + pmap[item + 2], + pmap[item + 3], + pmap[item + 4], + pmap[item + 5], + pmap[item + 6], pmap[item + 7]); + + /* print last unaligned non-zero line, if any */ + if ((bmap->max_count % (64 * 8)) && + (bitmap_weight((unsigned long *)&pmap[item], + bmap->max_count - item * 64))) { + offset = sprintf(str_last_line, "line 0x%04x: ", line); + for (; item < last_item; item++) + offset += sprintf(str_last_line + offset, + "0x%016llx ", pmap[item]); + DP_NOTICE(p_hwfn, "%s\n", str_last_line); + } + +end: + kfree(bmap->bitmap); + bmap->bitmap = NULL; +} + static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) { + struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + int wait_count = 0; - kfree(p_rdma_info->cid_map.bitmap); - kfree(p_rdma_info->tid_map.bitmap); - kfree(p_rdma_info->toggle_bits.bitmap); - kfree(p_rdma_info->cq_map.bitmap); - kfree(p_rdma_info->dpi_map.bitmap); - kfree(p_rdma_info->pd_map.bitmap); + /* when destroying a RoCE QP, the control is returned to the user after + * the synchronous part. The asynchronous part may take a little longer. + * We delay for a short while if an async destroy QP is still expected. + * Beyond the added delay we clear the bitmap anyway.
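+ * (With the constants below - a 100 ms sleep and a bail-out once the
+ * wait counter passes 20 - the added delay is bounded at roughly
+ * 20 * 100 ms ~= 2 s before the bitmaps are freed unconditionally.)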
+ */ + while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) { + msleep(100); + if (wait_count++ > 20) { + DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n"); + break; + } + } + + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); @@ -675,6 +784,7 @@ static int qed_rdma_add_user(void *rdma_cxt, ((out_params->dpi) * p_hwfn->dpi_size); out_params->dpi_size = p_hwfn->dpi_size; + out_params->wid_count = p_hwfn->wid_count; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); return rc; @@ -693,6 +803,8 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) p_port->link_speed = p_hwfn->mcp_info->link_output.speed; + p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; + return p_port; } @@ -724,6 +836,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) u32 addr; p_hwfn = (struct qed_hwfn *)rdma_cxt; + + if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { + DP_NOTICE(p_hwfn, + "queue zone offset %d is too large (max is %d)\n", + qz_offset, p_hwfn->p_rdma_info->max_queue_zones); + return; + } + qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); @@ -737,9 +857,12 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) static int qed_fill_rdma_dev_info(struct qed_dev *cdev, struct qed_dev_rdma_info *info) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + memset(info, 0, sizeof(*info)); info->rdma_type = QED_RDMA_TYPE_ROCE; + info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); qed_fill_dev_info(cdev, &info->common); @@ -887,8 +1010,7 @@ static int qed_rdma_create_cq(void *rdma_cxt, /* Allocate icid */ spin_lock_bh(&p_info->lock); - rc = qed_rdma_bmap_alloc_id(p_hwfn, - &p_info->cq_map, &returned_id); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); spin_unlock_bh(&p_info->lock); if (rc) { @@ -1080,6 +1202,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) return flavor; } +void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) +{ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; @@ -1139,15 +1269,22 @@ static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) return rc; } +static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid) +{ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct roce_create_qp_resp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; - union qed_qm_pq_params qm_params; enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; - u16 physical_queue0 = 0; + u16 regular_latency_queue; + enum protocol_type proto; int rc; DP_VERBOSE(p_hwfn, 
QED_MSG_RDMA, "icid = %08x\n", qp->icid); @@ -1229,15 +1366,16 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); - p_ramrod->stats_counter_id = p_hwfn->rel_pf_id; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); - memset(&qm_params, 0, sizeof(qm_params)); - qm_params.roce.qpid = qp->icid >> 1; - physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params); + regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + + p_ramrod->regular_latency_phy_queue = + cpu_to_le16(regular_latency_queue); + p_ramrod->low_latency_phy_queue = + cpu_to_le16(regular_latency_queue); - p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0); p_ramrod->dpi = cpu_to_le16(qp->dpi); qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); @@ -1253,13 +1391,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, rc = qed_spq_post(p_hwfn, p_ent, NULL); - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n", - rc, physical_queue0); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "rc = %d regular physical queue = 0x%x\n", rc, + regular_latency_queue); if (rc) goto err; qp->resp_offloaded = true; + qp->cq_prod = 0; + + proto = p_hwfn->p_rdma_info->proto; + qed_roce_set_real_cid(p_hwfn, qp->icid - + qed_cxt_get_proto_cid_start(p_hwfn, proto)); return rc; @@ -1277,10 +1421,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, { struct roce_create_qp_req_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; - union qed_qm_pq_params qm_params; enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; - u16 physical_queue0 = 0; + u16 regular_latency_queue; + enum protocol_type proto; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); @@ -1351,15 +1495,16 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); - p_ramrod->stats_counter_id = p_hwfn->rel_pf_id; - p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | - qp->sq_cq_id); + p_ramrod->cq_cid = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); - memset(&qm_params, 0, sizeof(qm_params)); - qm_params.roce.qpid = qp->icid >> 1; - physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params); + regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + + p_ramrod->regular_latency_phy_queue = + cpu_to_le16(regular_latency_queue); + p_ramrod->low_latency_phy_queue = + cpu_to_le16(regular_latency_queue); - p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0); p_ramrod->dpi = cpu_to_le16(qp->dpi); qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); @@ -1378,6 +1523,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, goto err; qp->req_offloaded = true; + proto = p_hwfn->p_rdma_info->proto; + qed_roce_set_real_cid(p_hwfn, + qp->icid + 1 - + qed_cxt_get_proto_cid_start(p_hwfn, proto)); return rc; @@ -1577,7 +1726,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, - u32 *num_invalidated_mw) + u32 *num_invalidated_mw, + u32 *cq_prod) { struct 
roce_destroy_qp_resp_output_params *p_ramrod_res; struct roce_destroy_qp_resp_ramrod_data *p_ramrod; @@ -1588,8 +1738,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - if (!qp->resp_offloaded) + *num_invalidated_mw = 0; + *cq_prod = qp->cq_prod; + + if (!qp->resp_offloaded) { + /* If a responder was never offloaded, we need to free the cids + * allocated in create_qp as a FW async event will never arrive + */ + u32 cid; + + cid = qp->icid - + qed_cxt_get_proto_cid_start(p_hwfn, + p_hwfn->p_rdma_info->proto); + qed_roce_free_cid_pair(p_hwfn, (u16)cid); + return 0; + } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -1624,6 +1788,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, goto err; *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); + *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); + qp->cq_prod = *cq_prod; /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -1827,10 +1993,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn, out_params->draining = false; - if (rq_err_state) + if (rq_err_state || sq_err_state) qp->cur_state = QED_ROCE_QP_STATE_ERR; - else if (sq_err_state) - qp->cur_state = QED_ROCE_QP_STATE_SQE; else if (sq_draining) out_params->draining = true; out_params->state = qp->cur_state; @@ -1849,10 +2013,9 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { - struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 num_invalidated_mw = 0; u32 num_bound_mw = 0; - u32 start_cid; + u32 cq_prod; int rc; /* Destroys the specified QP */ @@ -1866,7 +2029,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw); + &num_invalidated_mw, + &cq_prod); if (rc) return rc; @@ -1881,21 +2045,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) "number of invalidate memory windows is different from bounded ones\n"); return -EINVAL; } - - spin_lock_bh(&p_rdma_info->lock); - - start_cid = qed_cxt_get_proto_cid_start(p_hwfn, - p_rdma_info->proto); - - /* Release responder's icid */ - qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, - qp->icid - start_cid); - - /* Release requester's icid */ - qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, - qp->icid + 1 - start_cid); - - spin_unlock_bh(&p_rdma_info->lock); } return 0; @@ -2097,8 +2246,7 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, params->modify_flags); return rc; - } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR || - qp->cur_state == QED_ROCE_QP_STATE_SQE) { + } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) { /* ->ERR */ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true, params->modify_flags); @@ -2110,12 +2258,19 @@ return rc; } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { /* Any state -> RESET */ + u32 cq_prod; + + /* Send destroy responder ramrod */ + rc = qed_roce_sp_destroy_qp_responder(p_hwfn, + qp, + &num_invalidated_mw, + &cq_prod); - rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw); if (rc) return rc; + qp->cq_prod = cq_prod; + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw); @@ -2357,6 +2512,8 @@ qed_rdma_register_tid(void 
*rdma_cxt, } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) + return rc; if (fw_return_code != RDMA_RETURN_OK) { DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); @@ -2454,6 +2611,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) return rc; } +static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 start_cid, cid, xcid; + + /* an even icid belongs to a responder while an odd icid belongs to a + * requester. The 'cid' received as an input can be either. We calculate + * the "partner" icid and call it xcid. Only when both are free can + * the "cid" map be cleared. + */ + start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); + cid = icid - start_cid; + xcid = cid ^ 1; + + spin_lock_bh(&p_rdma_info->lock); + + qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid); + if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) { + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid); + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid); + } + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) { return QED_LEADING_HWFN(cdev); @@ -2773,7 +2955,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, : QED_LL2_RROCE; if (pkt->roce_mode == ROCE_V2_IPV4) - flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT); + flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); /* Tx header */ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 36cf4b2ab7faf0afcd17ec3347bf0d9d05567901..9742af51618301c633997a939bb6242f2e83a61c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -67,9 +67,11 @@ enum qed_rdma_toggle_bit { QED_RDMA_TOGGLE_BIT_SET = 1 }; +#define QED_RDMA_MAX_BMAP_NAME (10) struct qed_bmap { unsigned long *bitmap; u32 max_count; + char name[QED_RDMA_MAX_BMAP_NAME]; }; struct qed_rdma_info { @@ -82,6 +84,7 @@ struct qed_rdma_info { struct qed_bmap qp_map; struct qed_bmap srq_map; struct qed_bmap cid_map; + struct qed_bmap real_cid_map; struct qed_bmap dpi_map; struct qed_bmap toggle_bits; struct qed_rdma_events events; @@ -92,6 +95,7 @@ struct qed_rdma_info { u32 num_qps; u32 num_mrs; u16 queue_zone_base; + u16 max_queue_zones; enum protocol_type proto; }; @@ -153,6 +157,7 @@ struct qed_rdma_qp { dma_addr_t irq_phys_addr; u8 irq_num_pages; bool resp_offloaded; + u32 cq_prod; u8 remote_mac_addr[6]; u8 local_mac_addr[6]; @@ -163,8 +168,8 @@ struct qed_rdma_qp { #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -void qed_async_roce_event(struct qed_hwfn *p_hwfn, - struct event_ring_entry *p_eqe); +void qed_roce_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, union rdma_eqe_data *rdma_data); void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, u8 connection_handle, void *cookie, @@ -187,7 +192,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, u16 src_mac_addr_lo, bool b_last_packet); #else static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} -static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {} +static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, + union rdma_eqe_data *rdma_data) {} static inline void 
qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, u8 connection_handle, void *cookie, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 30393ffaa8e540ab7c1b37ba4d2e5ea17259dabd..3357bbefa445c4247ed003a4285254887da0e24c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -84,6 +84,7 @@ union ramrod_data { struct tx_queue_stop_ramrod_data tx_queue_stop; struct vport_start_ramrod_data vport_start; struct vport_stop_ramrod_data vport_stop; + struct rx_update_gft_filter_data rx_update_gft; struct vport_update_ramrod_data vport_update; struct core_rx_start_ramrod_data core_rx_queue_start; struct core_rx_stop_ramrod_data core_rx_queue_stop; @@ -408,7 +409,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - struct qed_tunn_start_params *p_tunn, + struct qed_tunnel_info *p_tunn, enum qed_mf_mode mode, bool allow_npar_tx_switch); /** @@ -441,7 +442,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, - struct qed_tunn_update_params *p_tunn, + struct qed_tunnel_info *p_tunn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 6fb80f9ef44662a17001783169c0e42f5aa6a977..bc3694e91b858af2478d0145bdbb2294840ef5f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -111,7 +111,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, return 0; } -static enum tunnel_clss qed_tunn_get_clss_type(u8 type) +static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) { switch (type) { case QED_TUNN_CLSS_MAC_VLAN: @@ -122,206 +122,201 @@ static enum tunnel_clss qed_tunn_get_clss_type(u8 type) return TUNNEL_CLSS_INNER_MAC_VLAN; case QED_TUNN_CLSS_INNER_MAC_VNI: return TUNNEL_CLSS_INNER_MAC_VNI; + case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE: + return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE; default: return TUNNEL_CLSS_MAC_VLAN; } } static void -qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn, - struct qed_tunn_update_params *p_src, - struct pf_update_tunnel_config *p_tunn_cfg) +qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun, + struct qed_tunnel_info *p_src, bool b_pf_start) { - unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode; - unsigned long update_mask = p_src->tunn_mode_update_mask; - unsigned long tunn_mode = p_src->tunn_mode; - unsigned long new_tunn_mode = 0; - - if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) { - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) - __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); - } else { - if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode)) - __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); - } - - if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) { - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) - __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); - } else { - if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode)) - __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); - } + if (p_src->vxlan.b_update_mode || b_pf_start) + p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled; - if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) { - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) - __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); - } else { - if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode)) - 
__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); - } - - if (p_src->update_geneve_udp_port) { - p_tunn_cfg->set_geneve_udp_port_flg = 1; - p_tunn_cfg->geneve_udp_port = - cpu_to_le16(p_src->geneve_udp_port); - } + if (p_src->l2_gre.b_update_mode || b_pf_start) + p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled; - if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) { - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) - __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); - } else { - if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode)) - __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); - } + if (p_src->ip_gre.b_update_mode || b_pf_start) + p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled; - if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) { - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) - __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); - } else { - if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode)) - __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); - } + if (p_src->l2_geneve.b_update_mode || b_pf_start) + p_tun->l2_geneve.b_mode_enabled = + p_src->l2_geneve.b_mode_enabled; - p_src->tunn_mode = new_tunn_mode; + if (p_src->ip_geneve.b_update_mode || b_pf_start) + p_tun->ip_geneve.b_mode_enabled = + p_src->ip_geneve.b_mode_enabled; } -static void -qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, - struct qed_tunn_update_params *p_src, - struct pf_update_tunnel_config *p_tunn_cfg) +static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, + struct qed_tunnel_info *p_src) { - unsigned long tunn_mode = p_src->tunn_mode; enum tunnel_clss type; - qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg); - p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss; - p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss; - - type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); - p_tunn_cfg->tunnel_clss_vxlan = type; - - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); - p_tunn_cfg->tunnel_clss_l2gre = type; + p_tun->b_update_rx_cls = p_src->b_update_rx_cls; + p_tun->b_update_tx_cls = p_src->b_update_tx_cls; + + type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls); + p_tun->vxlan.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls); + p_tun->l2_gre.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls); + p_tun->ip_gre.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls); + p_tun->l2_geneve.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls); + p_tun->ip_geneve.tun_cls = type; +} - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); - p_tunn_cfg->tunnel_clss_ipgre = type; +static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun, + struct qed_tunnel_info *p_src) +{ + p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port; + p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port; - if (p_src->update_vxlan_udp_port) { - p_tunn_cfg->set_vxlan_udp_port_flg = 1; - p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); - } + if (p_src->geneve_port.b_update_port) + p_tun->geneve_port.port = p_src->geneve_port.port; - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_l2gre = 1; + if (p_src->vxlan_port.b_update_port) + p_tun->vxlan_port.port = p_src->vxlan_port.port; +} - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_ipgre = 1; +static void +__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, + struct qed_tunn_update_type *tun_type) +{ + *p_tunn_cls = 
tun_type->tun_cls; - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_vxlan = 1; + if (tun_type->b_mode_enabled) + *p_enable_tx_clas = 1; +} - if (p_src->update_geneve_udp_port) { - p_tunn_cfg->set_geneve_udp_port_flg = 1; - p_tunn_cfg->geneve_udp_port = - cpu_to_le16(p_src->geneve_udp_port); +static void +qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, + struct qed_tunn_update_type *tun_type, + u8 *p_update_port, __le16 *p_port, + struct qed_tunn_update_udp_port *p_udp_port) +{ + __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type); + if (p_udp_port->b_update_port) { + *p_update_port = 1; + *p_port = cpu_to_le16(p_udp_port->port); } +} - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_l2geneve = 1; - - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_ipgeneve = 1; - - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); - p_tunn_cfg->tunnel_clss_l2geneve = type; - - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); - p_tunn_cfg->tunnel_clss_ipgeneve = type; +static void +qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + + qed_set_pf_update_tunn_mode(p_tun, p_src, false); + qed_set_tunn_cls_info(p_tun, p_src); + qed_set_tunn_ports(p_tun, p_src); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, + &p_tunn_cfg->tx_enable_vxlan, + &p_tun->vxlan, + &p_tunn_cfg->set_vxlan_udp_port_flg, + &p_tunn_cfg->vxlan_udp_port, + &p_tun->vxlan_port); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, + &p_tunn_cfg->tx_enable_l2geneve, + &p_tun->l2_geneve, + &p_tunn_cfg->set_geneve_udp_port_flg, + &p_tunn_cfg->geneve_udp_port, + &p_tun->geneve_port); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, + &p_tunn_cfg->tx_enable_ipgeneve, + &p_tun->ip_geneve); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, + &p_tunn_cfg->tx_enable_l2gre, + &p_tun->l2_gre); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, + &p_tunn_cfg->tx_enable_ipgre, + &p_tun->ip_gre); + + p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; + p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls; } static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - unsigned long tunn_mode) + struct qed_tunnel_info *p_tun) { - u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0; - u8 l2geneve_enable = 0, ipgeneve_enable = 0; - - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) - l2gre_enable = 1; + qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled, + p_tun->ip_gre.b_mode_enabled); + qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled); - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) - ipgre_enable = 1; - - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) - vxlan_enable = 1; - - qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable); - qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable); + qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled, + p_tun->ip_geneve.b_mode_enabled); +} - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) - l2geneve_enable = 1; +static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tunn) +{ + if (p_tunn->vxlan_port.b_update_port) + qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->vxlan_port.port); - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) - 
ipgeneve_enable = 1; + if (p_tunn->geneve_port.b_update_port) + qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->geneve_port.port); - qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable, - ipgeneve_enable); + qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn); } static void qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, - struct qed_tunn_start_params *p_src, + struct qed_tunnel_info *p_src, struct pf_start_tunnel_config *p_tunn_cfg) { - unsigned long tunn_mode; - enum tunnel_clss type; + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; if (!p_src) return; - tunn_mode = p_src->tunn_mode; - type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); - p_tunn_cfg->tunnel_clss_vxlan = type; - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); - p_tunn_cfg->tunnel_clss_l2gre = type; - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); - p_tunn_cfg->tunnel_clss_ipgre = type; - - if (p_src->update_vxlan_udp_port) { - p_tunn_cfg->set_vxlan_udp_port_flg = 1; - p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); - } - - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_l2gre = 1; - - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_ipgre = 1; - - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_vxlan = 1; - - if (p_src->update_geneve_udp_port) { - p_tunn_cfg->set_geneve_udp_port_flg = 1; - p_tunn_cfg->geneve_udp_port = - cpu_to_le16(p_src->geneve_udp_port); - } - - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_l2geneve = 1; - - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) - p_tunn_cfg->tx_enable_ipgeneve = 1; - - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); - p_tunn_cfg->tunnel_clss_l2geneve = type; - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); - p_tunn_cfg->tunnel_clss_ipgeneve = type; + qed_set_pf_update_tunn_mode(p_tun, p_src, true); + qed_set_tunn_cls_info(p_tun, p_src); + qed_set_tunn_ports(p_tun, p_src); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, + &p_tunn_cfg->tx_enable_vxlan, + &p_tun->vxlan, + &p_tunn_cfg->set_vxlan_udp_port_flg, + &p_tunn_cfg->vxlan_udp_port, + &p_tun->vxlan_port); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, + &p_tunn_cfg->tx_enable_l2geneve, + &p_tun->l2_geneve, + &p_tunn_cfg->set_geneve_udp_port_flg, + &p_tunn_cfg->geneve_udp_port, + &p_tun->geneve_port); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, + &p_tunn_cfg->tx_enable_ipgeneve, + &p_tun->ip_geneve); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, + &p_tunn_cfg->tx_enable_l2gre, + &p_tun->l2_gre); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, + &p_tunn_cfg->tx_enable_ipgre, + &p_tun->ip_gre); } int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - struct qed_tunn_start_params *p_tunn, + struct qed_tunnel_info *p_tunn, enum qed_mf_mode mode, bool allow_npar_tx_switch) { struct pf_start_ramrod_data *p_ramrod = NULL; @@ -416,11 +411,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (p_tunn) { - qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, - p_tunn->tunn_mode); - p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; - } + if (p_tunn) + qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel); return rc; } @@ -451,7 +443,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) /* Set pf update ramrod command params */ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, - struct qed_tunn_update_params *p_tunn, + 
struct qed_tunnel_info *p_tunn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { @@ -459,6 +451,12 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_sp_init_data init_data; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn); + + if (!p_tunn) + return -EINVAL; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); @@ -479,15 +477,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, if (rc) return rc; - if (p_tunn->update_vxlan_udp_port) - qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, - p_tunn->vxlan_udp_port); - if (p_tunn->update_geneve_udp_port) - qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, - p_tunn->geneve_udp_port); - - qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode); - p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; + qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 645328a9f0cfb6b4040c8d6402ad5684d79adab9..f6423a139ca074a60909db7d369c42eb34394c27 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -119,6 +119,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, u8 *p_fw_ret, bool skip_quick_poll) { struct qed_spq_comp_done *comp_done; + struct qed_ptt *p_ptt; int rc; /* A relatively short polling period w/o sleeping, to allow the FW to @@ -135,8 +136,14 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, if (!rc) return 0; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, "ptt, failed to acquire\n"); + return -EAGAIN; + } + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); - rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); + rc = qed_mcp_drain(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); goto err; @@ -145,15 +152,18 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, /* Retry after drain */ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) - return 0; + goto out; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; - if (comp_done->done == 1) { + if (comp_done->done == 1) if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; - return 0; - } +out: + qed_ptt_release(p_hwfn, p_ptt); + return 0; + err: + qed_ptt_release(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", le32_to_cpu(p_ent->elem.hdr.cid), @@ -205,11 +215,10 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { - u16 pq; - struct qed_cxt_info cxt_info; - struct core_conn_context *p_cxt; - union qed_qm_pq_params pq_params; - int rc; + struct core_conn_context *p_cxt; + struct qed_cxt_info cxt_info; + u16 physical_q; + int rc; cxt_info.iid = p_spq->cid; @@ -231,10 +240,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ - memset(&pq_params, 0, sizeof(pq_params)); - pq_params.core.tc = LB_TC; - pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); - p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq); + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); + p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); p_cxt->xstorm_st_context.spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr); @@ -296,9 +303,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, struct 
event_ring_entry *p_eqe) { switch (p_eqe->protocol_id) { +#if IS_ENABLED(CONFIG_QED_RDMA) case PROTOCOLID_ROCE: - qed_async_roce_event(p_hwfn, p_eqe); + qed_roce_async_event(p_hwfn, p_eqe->opcode, + &p_eqe->data.rdma_data); return 0; +#endif case PROTOCOLID_COMMON: return qed_sriov_eqe_event(p_hwfn, p_eqe->opcode, @@ -306,14 +316,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, case PROTOCOLID_ISCSI: if (!IS_ENABLED(CONFIG_QED_ISCSI)) return -EINVAL; - if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) { - u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid); - - qed_ooo_release_connection_isles(p_hwfn, - p_hwfn->p_ooo_info, - cid); - return 0; - } if (p_hwfn->p_iscsi_info->event_cb) { struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 253c2bbe1e4e1a705e52054b4d3faa199fd2ca93..d5df29f787c569a16228baa3679fd5d942027756 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -178,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return vf; } +enum qed_iov_validate_q_mode { + QED_IOV_VALIDATE_Q_NA, + QED_IOV_VALIDATE_Q_ENABLE, + QED_IOV_VALIDATE_Q_DISABLE, +}; + +static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + u16 qid, + enum qed_iov_validate_q_mode mode, + bool b_is_tx) +{ + if (mode == QED_IOV_VALIDATE_Q_NA) + return true; + + if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) || + (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid)) + return mode == QED_IOV_VALIDATE_Q_ENABLE; + + /* In case we haven't found any valid cid, then it's disabled */ + return mode == QED_IOV_VALIDATE_Q_DISABLE; +} + static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, - struct qed_vf_info *p_vf, u16 rx_qid) + struct qed_vf_info *p_vf, + u16 rx_qid, + enum qed_iov_validate_q_mode mode) { - if (rx_qid >= p_vf->num_rxqs) + if (rx_qid >= p_vf->num_rxqs) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); - return rx_qid < p_vf->num_rxqs; + return false; + } + + return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); } static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, - struct qed_vf_info *p_vf, u16 tx_qid) + struct qed_vf_info *p_vf, + u16 tx_qid, + enum qed_iov_validate_q_mode mode) { - if (tx_qid >= p_vf->num_txqs) + if (tx_qid >= p_vf->num_txqs) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); - return tx_qid < p_vf->num_txqs; + return false; + } + + return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); } static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, @@ -217,6 +250,34 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, return false; } +static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u8 i; + + for (i = 0; i < p_vf->num_rxqs; i++) + if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, + QED_IOV_VALIDATE_Q_ENABLE, + false)) + return true; + + return false; +} + +static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u8 i; + + for (i = 0; i < p_vf->num_txqs; i++) + if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, + QED_IOV_VALIDATE_Q_ENABLE, + true)) + return true; + + return false; +} + static int qed_iov_post_vf_bulletin(struct 
qed_hwfn *p_hwfn, int vfid, struct qed_ptt *p_ptt) { @@ -557,14 +618,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) return 0; } - /* Calculate the first VF index - this is a bit tricky; Basically, - * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin - * after the first engine's VFs. + /* First VF index based on offset is tricky: + * - If ARI is supported [likely], offset - (16 - pf_id) would + * provide the number for eng0. 2nd engine VFs would begin + * after the first engine's VFs. + * - If !ARI, VFs would start on next device. + * so offset - (256 - pf_id) would provide the number. + * Utilize the fact that (256 - pf_id) is achieved only by the latter + * to differentiate between the two. */ - cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset + - p_hwfn->abs_pf_id - 16; - if (QED_PATH_ID(p_hwfn)) - cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; + + if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { + u32 first = p_hwfn->cdev->p_iov_info->offset + + p_hwfn->abs_pf_id - 16; + + cdev->p_iov_info->first_vf_in_pf = first; + + if (QED_PATH_ID(p_hwfn)) + cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; + } else { + u32 first = p_hwfn->cdev->p_iov_info->offset + + p_hwfn->abs_pf_id - 256; + + cdev->p_iov_info->first_vf_in_pf = first; + } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "First VF in hwfn 0x%08x\n", @@ -677,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; int rc; + /* It's possible VF was previously considered malicious - + * clear the indication even if we're only going to disable VF. + */ + vf->b_malicious = false; + if (vf->to_disable) return 0; @@ -689,9 +771,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); - /* It's possible VF was previously considered malicious */ - vf->b_malicious = false; - rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); if (rc) return rc; @@ -1118,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn, (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, &params); - qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, - mbx->req_virt->first_tlv.reply_address, - sizeof(u64) / 4, &params); - + /* Once PF copies the rc to the VF, the latter can continue + * and send an additional message. So we have to make sure the + * channel is re-set to ready prior to that.
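+ * If the order were reversed, a fast VF could observe the copied rc
+ * and immediately post its next request while the channel still reads
+ * not-ready, so that message could be lost or misjudged.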
+ */ REG_WR(p_hwfn, GTT_BAR0_MAP_REG_USDM_RAM + USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, + mbx->req_virt->first_tlv.reply_address, + sizeof(u64) / 4, &params); } static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, @@ -1733,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, vf->state = VF_ENABLED; start = &mbx->req_virt->start_vport; + qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); + /* Initialize Status block in CAU */ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { if (!start->sb_addr[sb_id]) { @@ -1746,7 +1831,6 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, start->sb_addr[sb_id], vf->igu_sbs[sb_id], vf->abs_vf_id, 1); } - qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); vf->mtu = start->mtu; vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; @@ -1803,6 +1887,16 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, vf->vport_instance--; vf->spoof_chk = false; + if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || + (qed_iov_validate_active_txq(p_hwfn, vf))) { + vf->b_malicious = true; + DP_NOTICE(p_hwfn, + "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", + vf->abs_vf_id); + status = PFVF_STATUS_MALICIOUS; + goto out; + } + rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); if (rc) { DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", @@ -1814,6 +1908,7 @@ vf->configured_features = 0; memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); +out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, sizeof(struct pfvf_def_resp_tlv), status); } @@ -1870,7 +1965,8 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, req = &mbx->req_virt->start_rxq; - if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) || + if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, + QED_IOV_VALIDATE_Q_DISABLE) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; @@ -1923,6 +2019,220 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); } +static void +qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, + struct qed_tunnel_info *p_tun, + u16 tunn_feature_mask) +{ + p_resp->tunn_feature_mask = tunn_feature_mask; + p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; + p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; + p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; + p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; + p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled; + p_resp->vxlan_clss = p_tun->vxlan.tun_cls; + p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; + p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; + p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; + p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; + p_resp->geneve_udp_port = p_tun->geneve_port.port; + p_resp->vxlan_udp_port = p_tun->vxlan_port.port; +} + +static void +__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_tun, + enum qed_tunn_mode mask, u8 tun_cls) +{ + if (p_req->tun_mode_update_mask & BIT(mask)) { + p_tun->b_update_mode = true; + + if (p_req->tunn_mode & BIT(mask)) + p_tun->b_mode_enabled = true; + } + + p_tun->tun_cls = tun_cls; +} + +static void +qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_tun, + struct qed_tunn_update_udp_port *p_port, + enum 
qed_tunn_mode mask, + u8 tun_cls, u8 update_port, u16 port) +{ + if (update_port) { + p_port->b_update_port = true; + p_port->port = port; + } + + __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); +} + +static bool +qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) +{ + bool b_update_requested = false; + + if (p_req->tun_mode_update_mask || p_req->update_tun_cls || + p_req->update_geneve_port || p_req->update_vxlan_port) + b_update_requested = true; + + return b_update_requested; +} + +static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) +{ + if (tun->b_update_mode && !tun->b_mode_enabled) { + tun->b_update_mode = false; + *rc = -EINVAL; + } +} + +static int +qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, + u16 *tun_features, bool *update, + struct qed_tunnel_info *tun_src) +{ + struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; + struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; + u16 bultn_vxlan_port, bultn_geneve_port; + void *cookie = p_hwfn->cdev->ops_cookie; + int i, rc = 0; + + *tun_features = p_hwfn->cdev->tunn_feature_mask; + bultn_vxlan_port = tun->vxlan_port.port; + bultn_geneve_port = tun->geneve_port.port; + qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); + qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); + qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); + qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); + qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); + + if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && + (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { + tun_src->b_update_rx_cls = false; + tun_src->b_update_tx_cls = false; + rc = -EINVAL; + } + + if (tun_src->vxlan_port.b_update_port) { + if (tun_src->vxlan_port.port == tun->vxlan_port.port) { + tun_src->vxlan_port.b_update_port = false; + } else { + *update = true; + bultn_vxlan_port = tun_src->vxlan_port.port; + } + } + + if (tun_src->geneve_port.b_update_port) { + if (tun_src->geneve_port.port == tun->geneve_port.port) { + tun_src->geneve_port.b_update_port = false; + } else { + *update = true; + bultn_geneve_port = tun_src->geneve_port.port; + } + } + + qed_for_each_vf(p_hwfn, i) { + qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, + bultn_geneve_port); + } + + qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); + + return rc; +} + +static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_update_tunn_param_tlv *p_resp; + struct vfpf_update_tunn_param_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + bool b_update_required = false; + struct qed_tunnel_info tunn; + u16 tunn_feature_mask = 0; + int i, rc = 0; + + mbx->offset = (u8 *)mbx->reply_virt; + + memset(&tunn, 0, sizeof(tunn)); + p_req = &mbx->req_virt->tunn_param_update; + + if (!qed_iov_pf_validate_tunn_param(p_req)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No tunnel update requested by VF\n"); + status = PFVF_STATUS_FAILURE; + goto send_resp; + } + + tunn.b_update_rx_cls = p_req->update_tun_cls; + tunn.b_update_tx_cls = p_req->update_tun_cls; + + 
qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, + QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, + p_req->update_vxlan_port, + p_req->vxlan_port); + qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, + QED_MODE_L2GENEVE_TUNN, + p_req->l2geneve_clss, + p_req->update_geneve_port, + p_req->geneve_port); + __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, + QED_MODE_IPGENEVE_TUNN, + p_req->ipgeneve_clss); + __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, + QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); + __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, + QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); + + /* If PF modifies VF's req then it should + * still return an error in case of partial configuration + * or modified configuration as opposed to requested one. + */ + rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, + &b_update_required, &tunn); + + if (rc) + status = PFVF_STATUS_FAILURE; + + /* Is the QED client willing to update anything? */ + if (b_update_required) { + u16 geneve_port; + + rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) + status = PFVF_STATUS_FAILURE; + + geneve_port = p_tun->geneve_port.port; + qed_for_each_vf(p_hwfn, i) { + qed_iov_bulletin_set_udp_ports(p_hwfn, i, + p_tun->vxlan_port.port, + geneve_port); + } + } + +send_resp: + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, + CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); + + qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf, u8 status) @@ -1970,21 +2280,16 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; - union qed_qm_pq_params pq_params; struct vfpf_start_txq_tlv *req; struct qed_vf_q_info *p_queue; int rc; u16 pq; - /* Prepare the parameters which would choose the right PQ */ - memset(&pq_params, 0, sizeof(pq_params)); - pq_params.eth.is_vf = 1; - pq_params.eth.vf_id = vf->relative_vf_id; - memset(&params, 0, sizeof(params)); req = &mbx->req_virt->start_txq; - if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) || + if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, + QED_IOV_VALIDATE_Q_DISABLE) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; @@ -2004,7 +2309,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, if (!p_queue->p_tx_cid) goto out; - pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params); + pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid, req->pbl_addr, req->pbl_size, pq); if (rc) { @@ -2021,57 +2326,53 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, - u16 rxq_id, u8 num_rxqs, bool cqe_completion) + u16 rxq_id, bool cqe_completion) { struct qed_vf_q_info *p_queue; int rc = 0; - int qid; - if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues)) + if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] Tried Closing Rx 0x%04x which is inactive\n", + vf->relative_vf_id, rxq_id); return -EINVAL; + } - for (qid = rxq_id; qid < rxq_id + num_rxqs; 
qid++) { - p_queue = &vf->vf_queues[qid]; + p_queue = &vf->vf_queues[rxq_id]; - if (!p_queue->p_rx_cid) - continue; - - rc = qed_eth_rx_queue_stop(p_hwfn, - p_queue->p_rx_cid, - false, cqe_completion); - if (rc) - return rc; + rc = qed_eth_rx_queue_stop(p_hwfn, + p_queue->p_rx_cid, + false, cqe_completion); + if (rc) + return rc; - vf->vf_queues[qid].p_rx_cid = NULL; - vf->num_active_rxqs--; - } + p_queue->p_rx_cid = NULL; + vf->num_active_rxqs--; - return rc; + return 0; } static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, - struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) + struct qed_vf_info *vf, u16 txq_id) { - int rc = 0; struct qed_vf_q_info *p_queue; - int qid; + int rc = 0; - if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) + if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, + QED_IOV_VALIDATE_Q_ENABLE)) return -EINVAL; - for (qid = txq_id; qid < txq_id + num_txqs; qid++) { - p_queue = &vf->vf_queues[qid]; - if (!p_queue->p_tx_cid) - continue; + p_queue = &vf->vf_queues[txq_id]; - rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); - if (rc) - return rc; + rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); + if (rc) + return rc; - p_queue->p_tx_cid = NULL; - } + p_queue->p_tx_cid = NULL; - return rc; + return 0; } static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, @@ -2080,20 +2381,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, { u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; - u8 status = PFVF_STATUS_SUCCESS; + u8 status = PFVF_STATUS_FAILURE; struct vfpf_stop_rxqs_tlv *req; int rc; - /* We give the option of starting from qid != 0, in this case we - * need to make sure that qid + num_qs doesn't exceed the actual - * amount of queues that exist. + /* There has never been an official driver that used this interface + * for stopping multiple queues, and it is now considered deprecated. + * Validate this isn't used here. */ req = &mbx->req_virt->stop_rxqs; - rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, - req->num_rxqs, req->cqe_completion); - if (rc) - status = PFVF_STATUS_FAILURE; + if (req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Odd; VF[%d] tried stopping multiple Rx queues\n", + vf->relative_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, + req->cqe_completion); + if (!rc) + status = PFVF_STATUS_SUCCESS; +out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, length, status); } @@ -2104,19 +2413,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, { u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; - u8 status = PFVF_STATUS_SUCCESS; + u8 status = PFVF_STATUS_FAILURE; struct vfpf_stop_txqs_tlv *req; int rc; - /* We give the option of starting from qid != 0, in this case we - * need to make sure that qid + num_qs doesn't exceed the actual - * amount of queues that exist. + /* There has never been an official driver that used this interface + * for stopping multiple queues, and it is now considered deprecated. + * Validate this isn't used here. 
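
The same deprecation rule applies to both queue types: the num_rxqs/num_txqs fields stay in the wire format for compatibility, but only the value 1 is accepted. A minimal sketch of that guard, assuming a simplified status mapping:

#include <errno.h>

static int check_stop_count(unsigned int num_queues)
{
	/* Multi-queue stop was never used by an official driver and is
	 * deprecated; anything but a single queue is rejected up front.
	 */
	return (num_queues == 1) ? 0 : -EOPNOTSUPP;
}
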
*/ req = &mbx->req_virt->stop_txqs; - rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); - if (rc) - status = PFVF_STATUS_FAILURE; + if (req->num_txqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Odd; VF[%d] tried stopping multiple Tx queues\n", + vf->relative_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid); + if (!rc) + status = PFVF_STATUS_SUCCESS; +out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, length, status); } @@ -2141,22 +2458,17 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); /* Validate inputs */ - if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF || - !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) { - DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", - vf->relative_vf_id, req->rx_qid, req->num_rxqs); - goto out; - } - - for (i = 0; i < req->num_rxqs; i++) { - qid = req->rx_qid + i; - if (!vf->vf_queues[qid].p_rx_cid) { - DP_INFO(p_hwfn, - "VF[%d] rx_qid = %d isn`t active!\n", - vf->relative_vf_id, qid); + for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) + if (!qed_iov_validate_rxq(p_hwfn, vf, i, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, req->num_rxqs); goto out; } + /* Prepare the handlers */ + for (i = 0; i < req->num_rxqs; i++) { + qid = req->rx_qid + i; handlers[i] = vf->vf_queues[qid].p_rx_cid; } @@ -2372,7 +2684,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, for (i = 0; i < table_size; i++) { q_idx = p_rss_tlv->rss_ind_table[i]; - if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) { + if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, + QED_IOV_VALIDATE_Q_ENABLE)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Omitting RSS due to wrong queue %04x\n", @@ -2381,15 +2694,6 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, goto out; } - if (!vf->vf_queues[q_idx].p_rx_cid) { - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, - "VF[%d]: Omitting RSS due to inactive queue %08x\n", - vf->relative_vf_id, q_idx); - b_reject = true; - goto out; - } - p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid; } @@ -3042,9 +3346,10 @@ qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return rc; } -int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) +bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) { - u16 i, found = 0; + bool found = false; + u16 i; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); for (i = 0; i < (VF_MAX_STATIC / 32); i++) @@ -3054,7 +3359,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) if (!p_hwfn->cdev->p_iov_info) { DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); - return 0; + return false; } /* Mark VFs */ @@ -3083,7 +3388,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) * VF flr until ACKs, we're safe. 
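
These paths all lean on a single queue validator that is told explicitly which state the caller expects. A hedged sketch of that idea, using a NULL/non-NULL cid pointer as the activity marker, as the code above does (names illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum q_expect { Q_EXPECT_DISABLED, Q_EXPECT_ENABLED };

static bool validate_q(void *const cids[], size_t num_q,
		       uint16_t qid, enum q_expect expect)
{
	if (qid >= num_q)			/* range check first */
		return false;
	if (expect == Q_EXPECT_ENABLED)
		return cids[qid] != NULL;	/* must be active */
	return cids[qid] == NULL;		/* must be free */
}
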
*/
 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
-			found = 1;
+			found = true;
 		}
 	}
 
@@ -3184,6 +3489,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 		case CHANNEL_TLV_RELEASE:
 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
 			break;
+		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+			qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+			break;
 		}
 	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -3289,11 +3597,17 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
 	if (!p_vf)
 		return;
 
-	DP_INFO(p_hwfn,
-		"VF [%d] - Malicious behavior [%02x]\n",
-		p_vf->abs_vf_id, p_data->err_id);
+	if (!p_vf->b_malicious) {
+		DP_NOTICE(p_hwfn,
+			  "VF [%d] - Malicious behavior [%02x]\n",
+			  p_vf->abs_vf_id, p_data->err_id);
 
-	p_vf->b_malicious = true;
+		p_vf->b_malicious = true;
+	} else {
+		DP_INFO(p_hwfn,
+			"VF [%d] - Malicious behavior [%02x]\n",
+			p_vf->abs_vf_id, p_data->err_id);
+	}
 }
 
 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
@@ -3414,6 +3728,29 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 }
 
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+				    int vfid, u16 vxlan_port, u16 geneve_port)
+{
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
+		return;
+	}
+
+	if (vf_info->b_malicious) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Can not set udp ports to malicious VF [%d]\n",
+			   vfid);
+		return;
+	}
+
+	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
 {
 	struct qed_vf_info *p_vf_info;
@@ -3842,6 +4179,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
 {
+	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
 	struct qed_mcp_link_capabilities caps;
 	struct qed_mcp_link_params params;
 	struct qed_mcp_link_state link;
@@ -3858,9 +4196,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
 		if (!vf_info)
 			continue;
 
-		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+		/* Only hwfn0 is actually interested in the link speed.
+		 * But since only it would receive an MFW indication of link,
+		 * need to take configuration from it - otherwise things like
+		 * rate limiting for hwfn1 VF would not work.
+		 */
+		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+		       sizeof(params));
+		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
 		       sizeof(caps));
 
 		/* Modify link according to the VF's configured link state */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index a89605821522d528411f711bbb0755c0ae003e5a..81a497ce65851898636b1e46d9c8ad3ff4081f48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -270,6 +270,9 @@ enum qed_iov_wq_flag {
  */
 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
 
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+				    int vfid, u16 vxlan_port, u16 geneve_port);
+
 /**
  * @brief Read sriov related information and allocated resources
  *  reads from configuraiton space, shmem, etc.
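
Returning to the FLR marking at the top of this chunk: the bookkeeping packs one bit per VF into an array of 64-bit words. A standalone sketch of the same arithmetic:

#include <stdbool.h>
#include <stdint.h>

static bool mark_flr(uint64_t *bitmap, unsigned int rel_vf_id)
{
	bitmap[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
	return true;	/* the caller's "found" flag: at least one hit */
}
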
@@ -348,9 +351,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param disabled_vfs - bitmask of all VFs on path that were FLRed * - * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise. + * @return true iff one of the PF's vfs got FLRed. false otherwise. */ -int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); +bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); /** * @brief Search extended TLVs in request/reply buffer. @@ -378,6 +381,12 @@ static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, return MAX_NUM_VFS; } +static inline void +qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, + u16 vxlan_port, u16 geneve_port) +{ +} + static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn) { return 0; @@ -407,10 +416,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, return -EINVAL; } -static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, - u32 *disabled_vfs) +static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, + u32 *disabled_vfs) { - return 0; + return false; } static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 15d2855ec56352f861b0915823992c505b9c1b9d..11d71e5eea14375662e8e4da66a78e8b43d3f499 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -134,14 +134,20 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) } if (!*done) { - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF <-- PF Timeout [Type %d]\n", - p_req->first_tlv.tl.type); + DP_NOTICE(p_hwfn, + "VF <-- PF Timeout [Type %d]\n", + p_req->first_tlv.tl.type); rc = -EBUSY; } else { - DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "PF response: %d [Type %d]\n", - *done, p_req->first_tlv.tl.type); + if ((*done != PFVF_STATUS_SUCCESS) && + (*done != PFVF_STATUS_NO_RESOURCE)) + DP_NOTICE(p_hwfn, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + else + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); } return rc; @@ -228,7 +234,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; /* copy acquire response from buffer to p_hwfn */ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp)); @@ -412,6 +418,155 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) +static void +__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_src, + enum qed_tunn_clss mask, u8 *p_cls) +{ + if (p_src->b_update_mode) { + p_req->tun_mode_update_mask |= BIT(mask); + + if (p_src->b_mode_enabled) + p_req->tunn_mode |= BIT(mask); + } + + *p_cls = p_src->tun_cls; +} + +static void +qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_src, + enum qed_tunn_clss mask, + u8 *p_cls, struct qed_tunn_update_udp_port *p_port, + u8 *p_update_port, u16 *p_udp_port) +{ + if (p_port->b_update_port) { + *p_update_port = 1; + *p_udp_port = p_port->port; + } + + __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls); +} + +void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) +{ + if (p_tun->vxlan.b_mode_enabled) + p_tun->vxlan.b_update_mode = true; + if 
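
The request TLV prepared just above encodes each tunnel mode with two parallel bitmasks: one selecting which modes the VF wants changed, the other carrying the requested on/off value for those modes. A minimal sketch with illustrative names:

#include <stdbool.h>
#include <stdint.h>

#define MODE_BIT(m)	(1U << (m))

static void encode_mode(uint8_t *update_mask, uint8_t *mode_bits,
			unsigned int mode, bool update, bool enable)
{
	if (!update)
		return;				/* mode left untouched */
	*update_mask |= MODE_BIT(mode);		/* "change this one" */
	if (enable)
		*mode_bits |= MODE_BIT(mode);	/* "...and enable it" */
}
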
(p_tun->l2_geneve.b_mode_enabled) + p_tun->l2_geneve.b_update_mode = true; + if (p_tun->ip_geneve.b_mode_enabled) + p_tun->ip_geneve.b_update_mode = true; + if (p_tun->l2_gre.b_mode_enabled) + p_tun->l2_gre.b_update_mode = true; + if (p_tun->ip_gre.b_mode_enabled) + p_tun->ip_gre.b_update_mode = true; + + p_tun->b_update_rx_cls = true; + p_tun->b_update_tx_cls = true; +} + +static void +__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun, + u16 feature_mask, u8 tunn_mode, + u8 tunn_cls, enum qed_tunn_mode val) +{ + if (feature_mask & BIT(val)) { + p_tun->b_mode_enabled = tunn_mode; + p_tun->tun_cls = tunn_cls; + } else { + p_tun->b_mode_enabled = false; + } +} + +static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tun, + struct pfvf_update_tunn_param_tlv *p_resp) +{ + /* Update mode and classes provided by PF */ + u16 feat_mask = p_resp->tunn_feature_mask; + + __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask, + p_resp->vxlan_mode, p_resp->vxlan_clss, + QED_MODE_VXLAN_TUNN); + __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask, + p_resp->l2geneve_mode, + p_resp->l2geneve_clss, + QED_MODE_L2GENEVE_TUNN); + __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask, + p_resp->ipgeneve_mode, + p_resp->ipgeneve_clss, + QED_MODE_IPGENEVE_TUNN); + __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask, + p_resp->l2gre_mode, p_resp->l2gre_clss, + QED_MODE_L2GRE_TUNN); + __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask, + p_resp->ipgre_mode, p_resp->ipgre_clss, + QED_MODE_IPGRE_TUNN); + p_tun->geneve_port.port = p_resp->geneve_udp_port; + p_tun->vxlan_port.port = p_resp->vxlan_udp_port; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x", + p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled, + p_tun->ip_geneve.b_mode_enabled, + p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled); +} + +int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_src) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_update_tunn_param_tlv *p_resp; + struct vfpf_update_tunn_param_tlv *p_req; + int rc; + + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM, + sizeof(*p_req)); + + if (p_src->b_update_rx_cls && p_src->b_update_tx_cls) + p_req->update_tun_cls = 1; + + qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN, + &p_req->vxlan_clss, &p_src->vxlan_port, + &p_req->update_vxlan_port, + &p_req->vxlan_port); + qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve, + QED_MODE_L2GENEVE_TUNN, + &p_req->l2geneve_clss, &p_src->geneve_port, + &p_req->update_geneve_port, + &p_req->geneve_port); + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve, + QED_MODE_IPGENEVE_TUNN, + &p_req->ipgeneve_clss); + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre, + QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss); + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre, + QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->tunn_param_resp; + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + + if (rc) + goto exit; + + if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Failed to update tunnel parameters\n"); + rc = -EINVAL; + } + + qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp); +exit: 
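
On the reply path the VF trusts the PF's feature mask over its own request: a tunnel class is only taken from the response if the matching feature bit is set, otherwise the mode is forced off. A hedged sketch of that gating:

#include <stdbool.h>
#include <stdint.h>

static void decode_mode(bool *enabled, uint8_t *cls,
			uint16_t feature_mask, unsigned int mode,
			bool pf_mode, uint8_t pf_cls)
{
	if (feature_mask & (1U << mode)) {
		*enabled = pf_mode;	/* the PF's view wins */
		*cls = pf_cls;
	} else {
		*enabled = false;	/* unsupported: keep it off */
	}
}
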
+ qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, @@ -1245,6 +1400,18 @@ static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn, return true; } +static void +qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn, + u16 *p_vxlan_port, u16 *p_geneve_port) +{ + struct qed_bulletin_content *p_bulletin; + + p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + + *p_vxlan_port = p_bulletin->vxlan_udp_port; + *p_geneve_port = p_bulletin->geneve_udp_port; +} + void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng) @@ -1264,12 +1431,16 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth; u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced; void *cookie = hwfn->cdev->ops_cookie; + u16 vxlan_port, geneve_port; + qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port); is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, &is_mac_forced); if (is_mac_exist && cookie) ops->force_mac(cookie, mac, !!is_mac_forced); + ops->ports_update(cookie, vxlan_port, geneve_port); + /* Always update link configuration according to bulletin */ qed_link_update(hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 7da0b165d8bc2718d28fbbd108040cc4d948a03a..34ac70b0e5fe8a8c52c28b73532d059c2ee28763 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -275,6 +275,8 @@ struct vfpf_stop_rxqs_tlv { struct vfpf_first_tlv first_tlv; u16 rx_qid; + + /* this field is deprecated and should *always* be set to '1' */ u8 num_rxqs; u8 cqe_completion; u8 padding[4]; @@ -285,6 +287,8 @@ struct vfpf_stop_txqs_tlv { struct vfpf_first_tlv first_tlv; u16 tx_qid; + + /* this field is deprecated and should *always* be set to '1' */ u8 num_txqs; u8 padding[5]; }; @@ -425,6 +429,43 @@ struct vfpf_ucast_filter_tlv { u16 padding[3]; }; +/* tunnel update param tlv */ +struct vfpf_update_tunn_param_tlv { + struct vfpf_first_tlv first_tlv; + + u8 tun_mode_update_mask; + u8 tunn_mode; + u8 update_tun_cls; + u8 vxlan_clss; + u8 l2gre_clss; + u8 ipgre_clss; + u8 l2geneve_clss; + u8 ipgeneve_clss; + u8 update_geneve_port; + u8 update_vxlan_port; + u16 geneve_port; + u16 vxlan_port; + u8 padding[2]; +}; + +struct pfvf_update_tunn_param_tlv { + struct pfvf_tlv hdr; + + u16 tunn_feature_mask; + u8 vxlan_mode; + u8 l2geneve_mode; + u8 ipgeneve_mode; + u8 l2gre_mode; + u8 ipgre_mode; + u8 vxlan_clss; + u8 l2gre_clss; + u8 ipgre_clss; + u8 l2geneve_clss; + u8 ipgeneve_clss; + u16 vxlan_udp_port; + u16 geneve_udp_port; +}; + struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; }; @@ -440,6 +481,7 @@ union vfpf_tlvs { struct vfpf_vport_start_tlv start_vport; struct vfpf_vport_update_tlv vport_update; struct vfpf_ucast_filter_tlv ucast_filter; + struct vfpf_update_tunn_param_tlv tunn_param_update; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; @@ -449,6 +491,7 @@ union pfvf_tlvs { struct pfvf_acquire_resp_tlv acquire_resp; struct tlv_buffer_size tlv_buf_size; struct pfvf_start_queue_resp_tlv queue_start; + struct pfvf_update_tunn_param_tlv tunn_param_resp; }; enum qed_bulletin_bit { @@ -509,7 +552,9 @@ struct qed_bulletin_content { u8 partner_rx_flow_ctrl_en; u8 partner_adv_pause; u8 sfp_tx_fault; - u8 padding4[6]; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 padding4[2]; u32 speed; u32 partner_adv_speed; 
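
The mailbox structures defined above always travel as a chain of type/length records terminated by a list-end marker. A simplified sketch of appending such a chain to a flat buffer, assuming a 2-byte-aligned buffer (the real driver does this with qed_add_tlv()):

#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t length;
};

static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t len)
{
	struct tlv_hdr *tlv = (struct tlv_hdr *)*offset;

	memset(tlv, 0, len);	/* zeroed payload, header filled in */
	tlv->type = type;
	tlv->length = len;
	*offset += len;		/* the next TLV lands right behind us */
	return tlv;
}

A request is then "add the payload TLV, add the LIST_END TLV, send the buffer", which is exactly the shape of qed_vf_pf_tunnel_param_update() above.
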
@@ -551,6 +596,7 @@ enum { CHANNEL_TLV_VPORT_UPDATE_RSS, CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + CHANNEL_TLV_UPDATE_TUNN_PARAM, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. @@ -868,6 +914,9 @@ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_bulletin_content *p_bulletin); void qed_iov_vf_task(struct work_struct *work); +void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun); +int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tunn); #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params) @@ -1029,6 +1078,17 @@ __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, static inline void qed_iov_vf_task(struct work_struct *work) { } + +static inline void +qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) +{ +} + +static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tunn) +{ + return -EINVAL; +} #endif #endif diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index f2aaef2cfb86d7a31c5fdc6f5d5940e1c9459a70..9b4f08b6f9b9b64aaaf880d6ac03a0d26178d987 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -41,6 +41,9 @@ #include #include #include +#ifdef CONFIG_RFS_ACCEL +#include +#endif #include #include #include @@ -50,7 +53,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 #define QEDE_REVISION_VERSION 10 -#define QEDE_ENGINEERING_VERSION 20 +#define QEDE_ENGINEERING_VERSION 21 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ __stringify(QEDE_REVISION_VERSION) "." 
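
The stubs above follow the usual compile-out pattern: when the feature is configured out, the same entry points become inert inlines so callers never need their own #ifdefs. A generic sketch (CONFIG_MY_FEATURE is a placeholder, not a real option):

#include <errno.h>

#ifdef CONFIG_MY_FEATURE
int my_feature_op(int arg);		/* real version built elsewhere */
#else
static inline int my_feature_op(int arg)
{
	return -EINVAL;			/* feature not compiled in */
}
#endif
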
\ @@ -58,7 +61,7 @@ #define DRV_MODULE_SYM qede -struct qede_stats { +struct qede_stats_common { u64 no_buff_discards; u64 packet_too_big_discard; u64 ttl0_discard; @@ -90,11 +93,6 @@ struct qede_stats { u64 rx_256_to_511_byte_packets; u64 rx_512_to_1023_byte_packets; u64 rx_1024_to_1518_byte_packets; - u64 rx_1519_to_1522_byte_packets; - u64 rx_1519_to_2047_byte_packets; - u64 rx_2048_to_4095_byte_packets; - u64 rx_4096_to_9216_byte_packets; - u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; @@ -111,17 +109,39 @@ struct qede_stats { u64 tx_256_to_511_byte_packets; u64 tx_512_to_1023_byte_packets; u64 tx_1024_to_1518_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 brb_truncates; + u64 brb_discards; + u64 tx_mac_ctrl_frames; +}; + +struct qede_stats_bb { + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 tx_1519_to_2047_byte_packets; u64 tx_2048_to_4095_byte_packets; u64 tx_4096_to_9216_byte_packets; u64 tx_9217_to_16383_byte_packets; - u64 tx_pause_frames; - u64 tx_pfc_frames; u64 tx_lpi_entry_count; u64 tx_total_collisions; - u64 brb_truncates; - u64 brb_discards; - u64 tx_mac_ctrl_frames; +}; + +struct qede_stats_ah { + u64 rx_1519_to_max_byte_packets; + u64 tx_1519_to_max_byte_packets; +}; + +struct qede_stats { + struct qede_stats_common common; + + union { + struct qede_stats_bb bb; + struct qede_stats_ah ah; + }; }; struct qede_vlan { @@ -147,10 +167,11 @@ struct qede_dev { u32 dp_module; u8 dp_level; - u32 flags; -#define QEDE_FLAG_IS_VF BIT(0) + unsigned long flags; +#define QEDE_FLAG_IS_VF BIT(0) #define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF)) #define QEDE_TX_TIMESTAMPING_EN BIT(1) +#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2) const struct qed_eth_ops *ops; struct qede_ptp *ptp; @@ -158,6 +179,10 @@ struct qede_dev { struct qed_dev_eth_info dev_info; #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) #define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues) +#define QEDE_IS_BB(edev) \ + ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB) +#define QEDE_IS_AH(edev) \ + ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH) struct qede_fastpath *fp_array; u8 req_num_tx; @@ -216,7 +241,10 @@ struct qede_dev { u16 vxlan_dst_port; u16 geneve_dst_port; - bool wol_enabled; +#ifdef CONFIG_RFS_ACCEL + struct qede_arfs *arfs; +#endif + bool wol_enabled; struct qede_rdma_dev rdma_info; @@ -292,21 +320,24 @@ struct qede_rx_queue { u8 data_direction; u8 rxq_id; + /* Used once per each NAPI run */ + u16 num_rx_buffers; + + u16 rx_headroom; + u32 rx_buf_size; u32 rx_buf_seg_size; - u64 rcv_pkts; - struct sw_rx_data *sw_rx_ring; struct qed_chain rx_bd_ring; struct qed_chain rx_comp_ring ____cacheline_aligned; - /* Used once per each NAPI run */ - u16 num_rx_buffers; - /* GRO */ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; + /* Used once per each NAPI run */ + u64 rcv_pkts; + u64 rx_hw_errors; u64 rx_alloc_errors; u64 rx_ip_frags; @@ -328,6 +359,11 @@ struct sw_tx_bd { #define QEDE_TSO_SPLIT_BD BIT(0) }; +struct sw_tx_xdp { + struct page *page; + dma_addr_t mapping; +}; + struct qede_tx_queue { u8 is_xdp; bool is_legacy; @@ -351,11 +387,11 @@ struct qede_tx_queue { #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev)) /* Regular Tx requires skb + metadata for release purpose, - * while XDP requires only the pages themselves. 
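
The statistics split above keeps ASIC-agnostic counters in a common block and overlays the BB-only and AH-only MAC counters in a union, since a given device is only ever one of the two. A minimal sketch of reading such a layout (C11 anonymous union, illustrative members):

#include <stdbool.h>
#include <stdint.h>

struct stats_bb { uint64_t tx_total_collisions; };
struct stats_ah { uint64_t tx_1519_to_max_byte_packets; };

struct stats {
	uint64_t rx_ucast_pkts;		/* meaningful on both ASICs */
	union {
		struct stats_bb bb;	/* valid only on BB devices */
		struct stats_ah ah;	/* valid only on AH devices */
	};
};

static uint64_t collisions(const struct stats *s, bool is_bb)
{
	return is_bb ? s->bb.tx_total_collisions : 0;	/* AH: none */
}
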
+ * while XDP requires the pages and the mapped address. */ union { struct sw_tx_bd *skbs; - struct page **pages; + struct sw_tx_xdp *xdp; } sw_tx_ring; struct qed_chain tx_pbl; @@ -407,8 +443,20 @@ struct qede_fastpath { #define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) #define QEDE_SP_RX_MODE 1 -#define QEDE_SP_VXLAN_PORT_CONFIG 2 -#define QEDE_SP_GENEVE_PORT_CONFIG 3 + +#ifdef CONFIG_RFS_ACCEL +int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr); +void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev); +void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc); +void qede_free_arfs(struct qede_dev *edev); +int qede_alloc_arfs(struct qede_dev *edev); + +#define QEDE_SP_ARFS_CONFIG 4 +#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) +#define QEDE_RFS_MAX_FLTR 256 +#endif struct qede_reload_args { void (*func)(struct qede_dev *edev, struct qede_reload_args *args); @@ -433,6 +481,7 @@ irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie); /* Filtering function definitions */ void qede_force_mac(void *dev, u8 *mac, bool forced); +void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port); int qede_set_mac_addr(struct net_device *ndev, void *p); int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid); diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c index 03e8c021243339eb78e3891cc81b4ba7cb582453..a9e7379313db7bdff8032605f2811c3d210c2152 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c +++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c @@ -281,6 +281,11 @@ static int qede_dcbnl_ieee_setapp(struct net_device *netdev, struct dcb_app *app) { struct qede_dev *edev = netdev_priv(netdev); + int err; + + err = dcb_ieee_setapp(netdev, app); + if (err) + return err; return edev->ops->dcb->ieee_setapp(edev->cdev, app); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 8979531332455453f4fcb54b4a1a16017005c61c..4dcfe9614731db70d673cc15aec6da26ff673743 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -75,16 +75,33 @@ static const struct { QEDE_TQSTAT(stopped_cnt), }; -#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name)) -#define QEDE_STAT_STRING(stat_name) (#stat_name) -#define _QEDE_STAT(stat_name, pf_only) \ - {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only} -#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true) -#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false) +#define QEDE_STAT_OFFSET(stat_name, type, base) \ + (offsetof(type, stat_name) + (base)) +#define QEDE_STAT_STRING(stat_name) (#stat_name) +#define _QEDE_STAT(stat_name, type, base, attr) \ + {QEDE_STAT_OFFSET(stat_name, type, base), \ + QEDE_STAT_STRING(stat_name), \ + attr} +#define QEDE_STAT(stat_name) \ + _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0) +#define QEDE_PF_STAT(stat_name) \ + _QEDE_STAT(stat_name, struct qede_stats_common, 0, \ + BIT(QEDE_STAT_PF_ONLY)) +#define QEDE_PF_BB_STAT(stat_name) \ + _QEDE_STAT(stat_name, struct qede_stats_bb, \ + offsetof(struct qede_stats, bb), \ + BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY)) +#define QEDE_PF_AH_STAT(stat_name) \ + _QEDE_STAT(stat_name, struct qede_stats_ah, \ + offsetof(struct qede_stats, ah), \ + BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY)) static const struct { 
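
With the union in place, a single offsetof() no longer reaches every counter; each table entry above therefore stores the sub-struct base plus the member offset, and one generic read walks them all. A hedged sketch of that lookup:

#include <stddef.h>
#include <stdint.h>

struct stat_def {
	size_t offset;		/* byte offset into the stats aggregate */
	const char *name;
};

static uint64_t read_stat(const void *stats, const struct stat_def *d)
{
	return *(const uint64_t *)((const char *)stats + d->offset);
}

/* e.g. { offsetof(struct stats, bb) +
 *        offsetof(struct stats_bb, tx_total_collisions), "..." } */
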
u64 offset; char string[ETH_GSTRING_LEN]; - bool pf_only; + unsigned long attr; +#define QEDE_STAT_PF_ONLY 0 +#define QEDE_STAT_BB_ONLY 1 +#define QEDE_STAT_AH_ONLY 2 } qede_stats_arr[] = { QEDE_STAT(rx_ucast_bytes), QEDE_STAT(rx_mcast_bytes), @@ -106,22 +123,23 @@ static const struct { QEDE_PF_STAT(rx_256_to_511_byte_packets), QEDE_PF_STAT(rx_512_to_1023_byte_packets), QEDE_PF_STAT(rx_1024_to_1518_byte_packets), - QEDE_PF_STAT(rx_1519_to_1522_byte_packets), - QEDE_PF_STAT(rx_1519_to_2047_byte_packets), - QEDE_PF_STAT(rx_2048_to_4095_byte_packets), - QEDE_PF_STAT(rx_4096_to_9216_byte_packets), - QEDE_PF_STAT(rx_9217_to_16383_byte_packets), + QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets), + QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets), + QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets), + QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets), + QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets), + QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets), QEDE_PF_STAT(tx_64_byte_packets), QEDE_PF_STAT(tx_65_to_127_byte_packets), QEDE_PF_STAT(tx_128_to_255_byte_packets), QEDE_PF_STAT(tx_256_to_511_byte_packets), QEDE_PF_STAT(tx_512_to_1023_byte_packets), QEDE_PF_STAT(tx_1024_to_1518_byte_packets), - QEDE_PF_STAT(tx_1519_to_2047_byte_packets), - QEDE_PF_STAT(tx_2048_to_4095_byte_packets), - QEDE_PF_STAT(tx_4096_to_9216_byte_packets), - QEDE_PF_STAT(tx_9217_to_16383_byte_packets), - + QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets), + QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets), + QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets), + QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets), + QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets), QEDE_PF_STAT(rx_mac_crtl_frames), QEDE_PF_STAT(tx_mac_ctrl_frames), QEDE_PF_STAT(rx_pause_frames), @@ -136,8 +154,8 @@ static const struct { QEDE_PF_STAT(rx_jabbers), QEDE_PF_STAT(rx_undersize_packets), QEDE_PF_STAT(rx_fragments), - QEDE_PF_STAT(tx_lpi_entry_count), - QEDE_PF_STAT(tx_total_collisions), + QEDE_PF_BB_STAT(tx_lpi_entry_count), + QEDE_PF_BB_STAT(tx_total_collisions), QEDE_PF_STAT(brb_truncates), QEDE_PF_STAT(brb_discards), QEDE_STAT(no_buff_discards), @@ -155,6 +173,12 @@ static const struct { }; #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) +#define QEDE_STAT_IS_PF_ONLY(i) \ + test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr) +#define QEDE_STAT_IS_BB_ONLY(i) \ + test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr) +#define QEDE_STAT_IS_AH_ONLY(i) \ + test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr) enum { QEDE_PRI_FLAG_CMT, @@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev, } } +static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index) +{ + return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) || + (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) || + (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index)); +} + static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { struct qede_fastpath *fp; @@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) /* Account for non-queue statistics */ for (i = 0; i < QEDE_NUM_STATS; i++) { - if (IS_VF(edev) && qede_stats_arr[i].pf_only) + if (qede_is_irrelevant_stat(edev, i)) continue; strcpy(buf, qede_stats_arr[i].string); buf += ETH_GSTRING_LEN; @@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev, } for (i = 0; i < QEDE_NUM_STATS; i++) { - if (IS_VF(edev) && qede_stats_arr[i].pf_only) + if (qede_is_irrelevant_stat(edev, i)) continue; *buf = *((u64 *)(((void *)&edev->stats) + qede_stats_arr[i].offset)); @@ -323,17 
+354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev, static int qede_get_sset_count(struct net_device *dev, int stringset) { struct qede_dev *edev = netdev_priv(dev); - int num_stats = QEDE_NUM_STATS; + int num_stats = QEDE_NUM_STATS, i; switch (stringset) { case ETH_SS_STATS: - if (IS_VF(edev)) { - int i; - - for (i = 0; i < QEDE_NUM_STATS; i++) - if (qede_stats_arr[i].pf_only) - num_stats--; - } + for (i = 0; i < QEDE_NUM_STATS; i++) + if (qede_is_irrelevant_stat(edev, i)) + num_stats--; /* Account for the Regular Tx statistics */ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS; diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 107c3fda4792c3b7f2fb83168f192b1aef35fc70..eb5652073ca86dfcbbbdf5c73cfb998a72e09917 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -38,6 +38,459 @@ #include #include "qede.h" +#ifdef CONFIG_RFS_ACCEL +struct qede_arfs_tuple { + union { + __be32 src_ipv4; + struct in6_addr src_ipv6; + }; + union { + __be32 dst_ipv4; + struct in6_addr dst_ipv6; + }; + __be16 src_port; + __be16 dst_port; + __be16 eth_proto; + u8 ip_proto; +}; + +struct qede_arfs_fltr_node { +#define QEDE_FLTR_VALID 0 + unsigned long state; + + /* pointer to aRFS packet buffer */ + void *data; + + /* dma map address of aRFS packet buffer */ + dma_addr_t mapping; + + /* length of aRFS packet buffer */ + int buf_len; + + /* tuples to hold from aRFS packet buffer */ + struct qede_arfs_tuple tuple; + + u32 flow_id; + u16 sw_id; + u16 rxq_id; + u16 next_rxq_id; + bool filter_op; + bool used; + struct hlist_node node; +}; + +struct qede_arfs { +#define QEDE_ARFS_POLL_COUNT 100 +#define QEDE_RFS_FLW_BITSHIFT (4) +#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1) + struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT]; + + /* lock for filter list access */ + spinlock_t arfs_list_lock; + unsigned long *arfs_fltr_bmap; + int filter_count; + bool enable; +}; + +static void qede_configure_arfs_fltr(struct qede_dev *edev, + struct qede_arfs_fltr_node *n, + u16 rxq_id, bool add_fltr) +{ + const struct qed_eth_ops *op = edev->ops; + + if (n->used) + return; + + DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, + "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", + add_fltr ? 
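
The filter table above hashes each flow into one of 16 buckets (a 4-bit fold of the RX hash), which keeps per-bucket searches short for the 256 supported filters. The arithmetic, stand-alone:

#include <stdint.h>

#define FLW_BITSHIFT	4
#define FLW_MASK	((1 << FLW_BITSHIFT) - 1)	/* 16 buckets */

static unsigned int flow_bucket(uint32_t rx_hash)
{
	return rx_hash & FLW_MASK;	/* index into arfs_hl_head[] */
}
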
"Adding" : "Deleting", + n->flow_id, n->sw_id, ntohs(n->tuple.src_port), + ntohs(n->tuple.dst_port), rxq_id); + + n->used = true; + n->filter_op = add_fltr; + op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0, + rxq_id, add_fltr); +} + +static void +qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) +{ + kfree(fltr->data); + clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap); + kfree(fltr); +} + +void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) +{ + struct qede_arfs_fltr_node *fltr = filter; + struct qede_dev *edev = dev; + + if (fw_rc) { + DP_NOTICE(edev, + "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", + fw_rc, fltr->flow_id, fltr->sw_id, + ntohs(fltr->tuple.src_port), + ntohs(fltr->tuple.dst_port), fltr->rxq_id); + + spin_lock_bh(&edev->arfs->arfs_list_lock); + + fltr->used = false; + clear_bit(QEDE_FLTR_VALID, &fltr->state); + + spin_unlock_bh(&edev->arfs->arfs_list_lock); + return; + } + + spin_lock_bh(&edev->arfs->arfs_list_lock); + + fltr->used = false; + + if (fltr->filter_op) { + set_bit(QEDE_FLTR_VALID, &fltr->state); + if (fltr->rxq_id != fltr->next_rxq_id) + qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, + false); + } else { + clear_bit(QEDE_FLTR_VALID, &fltr->state); + if (fltr->rxq_id != fltr->next_rxq_id) { + fltr->rxq_id = fltr->next_rxq_id; + qede_configure_arfs_fltr(edev, fltr, + fltr->rxq_id, true); + } + } + + spin_unlock_bh(&edev->arfs->arfs_list_lock); +} + +/* Should be called while qede_lock is held */ +void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) +{ + int i; + + for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) { + struct hlist_node *temp; + struct hlist_head *head; + struct qede_arfs_fltr_node *fltr; + + head = &edev->arfs->arfs_hl_head[i]; + + hlist_for_each_entry_safe(fltr, temp, head, node) { + bool del = false; + + if (edev->state != QEDE_STATE_OPEN) + del = true; + + spin_lock_bh(&edev->arfs->arfs_list_lock); + + if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) && + !fltr->used) || free_fltr) { + hlist_del(&fltr->node); + dma_unmap_single(&edev->pdev->dev, + fltr->mapping, + fltr->buf_len, DMA_TO_DEVICE); + qede_free_arfs_filter(edev, fltr); + edev->arfs->filter_count--; + } else { + if ((rps_may_expire_flow(edev->ndev, + fltr->rxq_id, + fltr->flow_id, + fltr->sw_id) || del) && + !free_fltr) + qede_configure_arfs_fltr(edev, fltr, + fltr->rxq_id, + false); + } + + spin_unlock_bh(&edev->arfs->arfs_list_lock); + } + } + + spin_lock_bh(&edev->arfs->arfs_list_lock); + + if (!edev->arfs->filter_count) { + if (edev->arfs->enable) { + edev->arfs->enable = false; + edev->ops->configure_arfs_searcher(edev->cdev, false); + } + } else { + set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, + QEDE_SP_TASK_POLL_DELAY); + } + + spin_unlock_bh(&edev->arfs->arfs_list_lock); +} + +/* This function waits until all aRFS filters get deleted and freed. + * On timeout it frees all filters forcefully. 
+ */ +void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev) +{ + int count = QEDE_ARFS_POLL_COUNT; + + while (count) { + qede_process_arfs_filters(edev, false); + + if (!edev->arfs->filter_count) + break; + + msleep(100); + count--; + } + + if (!count) { + DP_NOTICE(edev, "Timeout in polling for arfs filter free\n"); + + /* Something is terribly wrong, free forcefully */ + qede_process_arfs_filters(edev, true); + } +} + +int qede_alloc_arfs(struct qede_dev *edev) +{ + int i; + + edev->arfs = vzalloc(sizeof(*edev->arfs)); + if (!edev->arfs) + return -ENOMEM; + + spin_lock_init(&edev->arfs->arfs_list_lock); + + for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) + INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]); + + edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); + if (!edev->ndev->rx_cpu_rmap) { + vfree(edev->arfs); + edev->arfs = NULL; + return -ENOMEM; + } + + edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * + sizeof(long)); + if (!edev->arfs->arfs_fltr_bmap) { + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + edev->ndev->rx_cpu_rmap = NULL; + vfree(edev->arfs); + edev->arfs = NULL; + return -ENOMEM; + } + + return 0; +} + +void qede_free_arfs(struct qede_dev *edev) +{ + if (!edev->arfs) + return; + + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; + vfree(edev->arfs->arfs_fltr_bmap); + edev->arfs->arfs_fltr_bmap = NULL; + vfree(edev->arfs); + edev->arfs = NULL; +} + +static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos, + const struct sk_buff *skb) +{ + if (skb->protocol == htons(ETH_P_IP)) { + if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr && + tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr) + return true; + else + return false; + } else { + struct in6_addr *src = &tpos->tuple.src_ipv6; + u8 size = sizeof(struct in6_addr); + + if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) && + !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size)) + return true; + else + return false; + } +} + +static struct qede_arfs_fltr_node * +qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb, + __be16 src_port, __be16 dst_port, u8 ip_proto) +{ + struct qede_arfs_fltr_node *tpos; + + hlist_for_each_entry(tpos, h, node) + if (tpos->tuple.ip_proto == ip_proto && + tpos->tuple.eth_proto == skb->protocol && + qede_compare_ip_addr(tpos, skb) && + tpos->tuple.src_port == src_port && + tpos->tuple.dst_port == dst_port) + return tpos; + + return NULL; +} + +static struct qede_arfs_fltr_node * +qede_alloc_filter(struct qede_dev *edev, int min_hlen) +{ + struct qede_arfs_fltr_node *n; + int bit_id; + + bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap, + QEDE_RFS_MAX_FLTR); + + if (bit_id >= QEDE_RFS_MAX_FLTR) + return NULL; + + n = kzalloc(sizeof(*n), GFP_ATOMIC); + if (!n) + return NULL; + + n->data = kzalloc(min_hlen, GFP_ATOMIC); + if (!n->data) { + kfree(n); + return NULL; + } + + n->sw_id = (u16)bit_id; + set_bit(bit_id, edev->arfs->arfs_fltr_bmap); + return n; +} + +int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_arfs_fltr_node *n; + int min_hlen, rc, tp_offset; + struct ethhdr *eth; + __be16 *ports; + u16 tbl_idx; + u8 ip_proto; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + if (skb->protocol == htons(ETH_P_IP)) { + ip_proto = 
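
qede_alloc_filter() above hands out software IDs from a bitmap via find_first_zero_bit(); the same allocation in plain C, assuming 64-bit words:

#include <stdint.h>

#define MAX_FLTR	256

static int alloc_sw_id(uint64_t bmap[MAX_FLTR / 64])
{
	int id;

	for (id = 0; id < MAX_FLTR; id++) {
		if (!(bmap[id / 64] & (1ULL << (id % 64)))) {
			bmap[id / 64] |= 1ULL << (id % 64);
			return id;	/* first free ID, now claimed */
		}
	}
	return -1;			/* table exhausted */
}
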
ip_hdr(skb)->protocol; + tp_offset = sizeof(struct iphdr); + } else { + ip_proto = ipv6_hdr(skb)->nexthdr; + tp_offset = sizeof(struct ipv6hdr); + } + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) + return -EPROTONOSUPPORT; + + ports = (__be16 *)(skb->data + tp_offset); + tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK; + + spin_lock_bh(&edev->arfs->arfs_list_lock); + + n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx], + skb, ports[0], ports[1], ip_proto); + + if (n) { + /* Filter match */ + n->next_rxq_id = rxq_index; + + if (test_bit(QEDE_FLTR_VALID, &n->state)) { + if (n->rxq_id != rxq_index) + qede_configure_arfs_fltr(edev, n, n->rxq_id, + false); + } else { + if (!n->used) { + n->rxq_id = rxq_index; + qede_configure_arfs_fltr(edev, n, n->rxq_id, + true); + } + } + + rc = n->sw_id; + goto ret_unlock; + } + + min_hlen = ETH_HLEN + skb_headlen(skb); + + n = qede_alloc_filter(edev, min_hlen); + if (!n) { + rc = -ENOMEM; + goto ret_unlock; + } + + n->buf_len = min_hlen; + n->rxq_id = rxq_index; + n->next_rxq_id = rxq_index; + n->tuple.src_port = ports[0]; + n->tuple.dst_port = ports[1]; + n->flow_id = flow_id; + + if (skb->protocol == htons(ETH_P_IP)) { + n->tuple.src_ipv4 = ip_hdr(skb)->saddr; + n->tuple.dst_ipv4 = ip_hdr(skb)->daddr; + } else { + memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + } + + eth = (struct ethhdr *)n->data; + eth->h_proto = skb->protocol; + n->tuple.eth_proto = skb->protocol; + n->tuple.ip_proto = ip_proto; + memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); + + n->mapping = dma_map_single(&edev->pdev->dev, n->data, + n->buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&edev->pdev->dev, n->mapping)) { + DP_NOTICE(edev, "Failed to map DMA memory for arfs\n"); + qede_free_arfs_filter(edev, n); + rc = -ENOMEM; + goto ret_unlock; + } + + INIT_HLIST_NODE(&n->node); + hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]); + edev->arfs->filter_count++; + + if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { + edev->ops->configure_arfs_searcher(edev->cdev, true); + edev->arfs->enable = true; + } + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + + spin_unlock_bh(&edev->arfs->arfs_list_lock); + + set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); + return n->sw_id; + +ret_unlock: + spin_unlock_bh(&edev->arfs->arfs_list_lock); + return rc; +} +#endif + +void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port) +{ + struct qede_dev *edev = dev; + + if (edev->vxlan_dst_port != vxlan_port) + edev->vxlan_dst_port = 0; + + if (edev->geneve_dst_port != geneve_port) + edev->geneve_dst_port = 0; +} + void qede_force_mac(void *dev, u8 *mac, bool forced) { struct qede_dev *edev = dev; @@ -441,69 +894,112 @@ int qede_set_features(struct net_device *dev, netdev_features_t features) void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); + struct qed_tunn_params tunn_params; u16 t_port = ntohs(ti->port); + int rc; + + memset(&tunn_params, 0, sizeof(tunn_params)); switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: + if (!edev->dev_info.common.vxlan_enable) + return; + if (edev->vxlan_dst_port) return; - edev->vxlan_dst_port = t_port; + tunn_params.update_vxlan_port = 1; + tunn_params.vxlan_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", - t_port); + __qede_lock(edev); + rc = 
edev->ops->tunn_config(edev->cdev, &tunn_params); + __qede_unlock(edev); + + if (!rc) { + edev->vxlan_dst_port = t_port; + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", + t_port); + } else { + DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n", + t_port); + } - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); break; case UDP_TUNNEL_TYPE_GENEVE: + if (!edev->dev_info.common.geneve_enable) + return; + if (edev->geneve_dst_port) return; - edev->geneve_dst_port = t_port; + tunn_params.update_geneve_port = 1; + tunn_params.geneve_port = t_port; + + __qede_lock(edev); + rc = edev->ops->tunn_config(edev->cdev, &tunn_params); + __qede_unlock(edev); + + if (!rc) { + edev->geneve_dst_port = t_port; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Added geneve port=%d\n", t_port); + } else { + DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n", + t_port); + } - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", - t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; default: return; } - - schedule_delayed_work(&edev->sp_task, 0); } -void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) +void qede_udp_tunnel_del(struct net_device *dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); + struct qed_tunn_params tunn_params; u16 t_port = ntohs(ti->port); + memset(&tunn_params, 0, sizeof(tunn_params)); + switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: if (t_port != edev->vxlan_dst_port) return; + tunn_params.update_vxlan_port = 1; + tunn_params.vxlan_port = 0; + + __qede_lock(edev); + edev->ops->tunn_config(edev->cdev, &tunn_params); + __qede_unlock(edev); + edev->vxlan_dst_port = 0; DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", t_port); - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); break; case UDP_TUNNEL_TYPE_GENEVE: if (t_port != edev->geneve_dst_port) return; + tunn_params.update_geneve_port = 1; + tunn_params.geneve_port = 0; + + __qede_lock(edev); + edev->ops->tunn_config(edev->cdev, &tunn_params); + __qede_unlock(edev); + edev->geneve_dst_port = 0; DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; default: return; } - - schedule_delayed_work(&edev->sp_task, 0); } static void qede_xdp_reload_func(struct qede_dev *edev, @@ -520,11 +1016,6 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) { struct qede_reload_args args; - if (prog && prog->xdp_adjust_head) { - DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - /* If we're called, there was already a bpf reference increment */ args.func = &qede_xdp_reload_func; args.u.new_prog = prog; @@ -537,6 +1028,11 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) { struct qede_dev *edev = netdev_priv(dev); + if (IS_VF(edev)) { + DP_NOTICE(edev, "VFs don't support XDP\n"); + return -EOPNOTSUPP; + } + switch (xdp->command) { case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1e65038c8fc06cb62172f0a144729e51ecf8faaf..7b6f41d062451efcbc6bea0d654e02df428dfcd3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); WARN_ON(!rx_bd); rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd->addr.lo = 
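
The add path above only caches the new port once the device op returns success, so a failed hardware update cannot leave the driver believing a port is offloaded. A minimal sketch of that commit-on-success shape (hw_config stands in for the tunn_config callback):

#include <stdint.h>

static int add_tunnel_port(uint16_t *cached_port, uint16_t new_port,
			   int (*hw_config)(uint16_t port))
{
	int rc;

	if (*cached_port)
		return 0;		/* one offloaded port at a time */

	rc = hw_config(new_port);	/* runs under the device lock */
	if (!rc)
		*cached_port = new_port;	/* commit only on success */
	return rc;
}
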
cpu_to_le32(lower_32_bits(mapping)); + rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + + rxq->rx_headroom); rxq->sw_rx_prod++; rxq->filled_buffers++; @@ -360,7 +361,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, metadata->mapping + padding, length, PCI_DMA_TODEVICE); - txq->sw_tx_ring.pages[idx] = metadata->data; + txq->sw_tx_ring.xdp[idx].page = metadata->data; + txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping; txq->sw_tx_prod++; /* Mark the fastpath for future XDP doorbell */ @@ -384,19 +386,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq) static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { - struct eth_tx_1st_bd *bd; - u16 hw_bd_cons; + u16 hw_bd_cons, idx; hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); barrier(); while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { - bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + qed_chain_consume(&txq->tx_pbl); + idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; - dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd), - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons & - NUM_TX_BDS_MAX]); + dma_unmap_page(&edev->pdev->dev, + txq->sw_tx_ring.xdp[idx].mapping, + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(txq->sw_tx_ring.xdp[idx].page); txq->sw_tx_cons++; txq->xmit_pkts++; @@ -508,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq, new_mapping = curr_prod->mapping + curr_prod->page_offset; rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + + rxq->rx_headroom); rxq->sw_rx_prod++; curr_cons->data = NULL; @@ -624,7 +627,6 @@ static inline void qede_skb_receive(struct qede_dev *edev, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); - rxq->rcv_pkts++; } static void qede_set_gro_params(struct qede_dev *edev, @@ -884,9 +886,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev, "Strange - TPA cont with more than a single len_list entry\n"); } -static void qede_tpa_end(struct qede_dev *edev, - struct qede_fastpath *fp, - struct eth_fast_path_rx_tpa_end_cqe *cqe) +static int qede_tpa_end(struct qede_dev *edev, + struct qede_fastpath *fp, + struct eth_fast_path_rx_tpa_end_cqe *cqe) { struct qede_rx_queue *rxq = fp->rxq; struct qede_agg_info *tpa_info; @@ -934,11 +936,12 @@ static void qede_tpa_end(struct qede_dev *edev, tpa_info->state = QEDE_AGG_STATE_NONE; - return; + return 1; err: tpa_info->state = QEDE_AGG_STATE_NONE; dev_kfree_skb_any(tpa_info->skb); tpa_info->skb = NULL; + return 0; } static u8 qede_check_notunn_csum(u16 flag) @@ -990,14 +993,15 @@ static bool qede_rx_xdp(struct qede_dev *edev, struct qede_rx_queue *rxq, struct bpf_prog *prog, struct sw_rx_data *bd, - struct eth_fast_path_rx_reg_cqe *cqe) + struct eth_fast_path_rx_reg_cqe *cqe, + u16 *data_offset, u16 *len) { - u16 len = le16_to_cpu(cqe->len_on_first_bd); struct xdp_buff xdp; enum xdp_action act; - xdp.data = page_address(bd->data) + cqe->placement_offset; - xdp.data_end = xdp.data + len; + xdp.data_hard_start = page_address(bd->data); + xdp.data = xdp.data_hard_start + *data_offset; + xdp.data_end = xdp.data + *len; /* Queues always have a full reset currently, so for the time * being until there's atomic program replace just mark read @@ -1007,6 +1011,10 @@ static bool qede_rx_xdp(struct qede_dev *edev, act = bpf_prog_run_xdp(prog, &xdp); 
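
Since the program may now move the packet start with bpf_xdp_adjust_head(), offset and length must be recomputed from the xdp_buff after it runs, exactly as the next lines do. The pointer arithmetic in isolation:

#include <stdint.h>

struct xdp_view {
	uint8_t *hard_start;	/* buffer start, headroom included */
	uint8_t *data;		/* packet start after the program ran */
	uint8_t *data_end;	/* packet end after the program ran */
};

static void xdp_recalc(const struct xdp_view *x,
		       uint16_t *offset, uint16_t *len)
{
	*offset = x->data - x->hard_start;	/* head may have moved */
	*len = x->data_end - x->data;		/* tail, too */
}
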
rcu_read_unlock(); + /* Recalculate, as XDP might have changed the headers */ + *data_offset = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; + if (act == XDP_PASS) return true; @@ -1025,7 +1033,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, /* Now if there's a transmission problem, we'd still have to * throw current buffer, as replacement was already allocated. */ - if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) { + if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) { dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(bd->data); @@ -1052,7 +1060,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, struct sw_rx_data *bd, u16 len, u16 pad) { - unsigned int offset = bd->page_offset; + unsigned int offset = bd->page_offset + pad; struct skb_frag_struct *frag; struct page *page = bd->data; unsigned int pull_len; @@ -1069,7 +1077,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, */ if (len + pad <= edev->rx_copybreak) { memcpy(skb_put(skb, len), - page_address(page) + pad + offset, len); + page_address(page) + offset, len); qede_reuse_page(rxq, bd); goto out; } @@ -1077,7 +1085,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, frag = &skb_shinfo(skb)->frags[0]; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, pad + offset, len, rxq->rx_buf_seg_size); + page, offset, len, rxq->rx_buf_seg_size); va = skb_frag_address(frag); pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); @@ -1178,8 +1186,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev, qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); return 0; case ETH_RX_CQE_TYPE_TPA_END: - qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); - return 1; + return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); default: return 0; } @@ -1224,12 +1231,13 @@ static int qede_rx_process_cqe(struct qede_dev *edev, fp_cqe = &cqe->fast_path_regular; len = le16_to_cpu(fp_cqe->len_on_first_bd); - pad = fp_cqe->placement_offset; + pad = fp_cqe->placement_offset + rxq->rx_headroom; /* Run eBPF program if one is attached */ if (xdp_prog) - if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe)) - return 1; + if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe, + &pad, &len)) + return 0; /* If this is an error packet then drop it */ flags = cqe->fast_path_regular.pars_flags.flags; @@ -1290,8 +1298,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) { struct qede_rx_queue *rxq = fp->rxq; struct qede_dev *edev = fp->edev; + int work_done = 0, rcv_pkts = 0; u16 hw_comp_cons, sw_comp_cons; - int work_done = 0; hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); @@ -1305,12 +1313,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) /* Loop to complete all indicated BDs */ while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { - qede_rx_process_cqe(edev, fp, rxq); + rcv_pkts += qede_rx_process_cqe(edev, fp, rxq); qed_chain_recycle_consumed(&rxq->rx_comp_ring); sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); work_done++; } + rxq->rcv_pkts += rcv_pkts; + /* Allocate replacement buffers */ while (rxq->num_rx_buffers - rxq->filled_buffers) if (qede_alloc_rx_buffer(rxq, false)) @@ -1687,13 +1697,24 @@ netdev_features_t qede_features_check(struct sk_buff *skb, } /* Disable offloads for geneve tunnels, as HW can't parse - * the geneve header which has option length greater than 32B. 
+ * the geneve header which has option length greater than 32b + * and disable offloads for the ports which are not offloaded. */ - if ((l4_proto == IPPROTO_UDP) && - ((skb_inner_mac_header(skb) - - skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) - return features & ~(NETIF_F_CSUM_MASK | - NETIF_F_GSO_MASK); + if (l4_proto == IPPROTO_UDP) { + struct qede_dev *edev = netdev_priv(dev); + u16 hdrlen, vxln_port, gnv_port; + + hdrlen = QEDE_MAX_TUN_HDR_LEN; + vxln_port = edev->vxlan_dst_port; + gnv_port = edev->geneve_dst_port; + + if ((skb_inner_mac_header(skb) - + skb_transport_header(skb)) > hdrlen || + (ntohs(udp_hdr(skb)->dest) != vxln_port && + ntohs(udp_hdr(skb)->dest) != gnv_port)) + return features & ~(NETIF_F_CSUM_MASK | + NETIF_F_GSO_MASK); + } } return features; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 3a78c3f2515748ca882f89d7da2c3b4e5cfad79e..b9ba23d71c61a6f7cf7edc1a2ff9e5fd6e252514 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops; #define CHIP_NUM_57980S_50 0x1654 #define CHIP_NUM_57980S_25 0x1656 #define CHIP_NUM_57980S_IOV 0x1664 +#define CHIP_NUM_AH 0x8070 +#define CHIP_NUM_AH_IOV 0x8090 #ifndef PCI_DEVICE_ID_NX2_57980E #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 @@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops; #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 #define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV +#define PCI_DEVICE_ID_AH CHIP_NUM_AH +#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV + #endif enum qede_pci_private { @@ -109,6 +114,10 @@ static const struct pci_device_id qede_pci_tbl[] = { {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, #ifdef CONFIG_QED_SRIOV {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, +#endif + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF}, +#ifdef CONFIG_QED_SRIOV + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF}, #endif { 0 } }; @@ -216,9 +225,13 @@ static struct pci_driver qede_pci_driver = { static struct qed_eth_cb_ops qede_ll_ops = { { +#ifdef CONFIG_RFS_ACCEL + .arfs_filter_op = qede_arfs_filter_op, +#endif .link_update = qede_link_update, }, .force_mac = qede_force_mac, + .ports_update = qede_udp_ports_update, }; static int qede_netdev_event(struct notifier_block *this, unsigned long event, @@ -314,122 +327,135 @@ static int qede_close(struct net_device *ndev); void qede_fill_by_demand_stats(struct qede_dev *edev) { + struct qede_stats_common *p_common = &edev->stats.common; struct qed_eth_stats stats; edev->ops->get_vport_stats(edev->cdev, &stats); - edev->stats.no_buff_discards = stats.no_buff_discards; - edev->stats.packet_too_big_discard = stats.packet_too_big_discard; - edev->stats.ttl0_discard = stats.ttl0_discard; - edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; - edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; - edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; - edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts; - edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts; - edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts; - edev->stats.mftag_filter_discards = stats.mftag_filter_discards; - edev->stats.mac_filter_discards = stats.mac_filter_discards; - - edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes; - edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes; - edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes; - 
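
The qede_features_check() change above now also strips checksum/GSO offload for UDP tunnels whose destination port was never offloaded to the device, in addition to the existing header-length limit. The decision, reduced to a predicate (names illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool tun_offload_ok(uint16_t dst_port, size_t inner_hdr_len,
			   uint16_t vxlan_port, uint16_t geneve_port,
			   size_t max_hdr_len)
{
	if (inner_hdr_len > max_hdr_len)
		return false;	/* header too long for the HW parser */
	return dst_port == vxlan_port || dst_port == geneve_port;
}
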
edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts; - edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts; - edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts; - edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts; - edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts; - edev->stats.coalesced_events = stats.tpa_coalesced_events; - edev->stats.coalesced_aborts_num = stats.tpa_aborts_num; - edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts; - edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes; - - edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets; - edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets; - edev->stats.rx_128_to_255_byte_packets = - stats.rx_128_to_255_byte_packets; - edev->stats.rx_256_to_511_byte_packets = - stats.rx_256_to_511_byte_packets; - edev->stats.rx_512_to_1023_byte_packets = - stats.rx_512_to_1023_byte_packets; - edev->stats.rx_1024_to_1518_byte_packets = - stats.rx_1024_to_1518_byte_packets; - edev->stats.rx_1519_to_1522_byte_packets = - stats.rx_1519_to_1522_byte_packets; - edev->stats.rx_1519_to_2047_byte_packets = - stats.rx_1519_to_2047_byte_packets; - edev->stats.rx_2048_to_4095_byte_packets = - stats.rx_2048_to_4095_byte_packets; - edev->stats.rx_4096_to_9216_byte_packets = - stats.rx_4096_to_9216_byte_packets; - edev->stats.rx_9217_to_16383_byte_packets = - stats.rx_9217_to_16383_byte_packets; - edev->stats.rx_crc_errors = stats.rx_crc_errors; - edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames; - edev->stats.rx_pause_frames = stats.rx_pause_frames; - edev->stats.rx_pfc_frames = stats.rx_pfc_frames; - edev->stats.rx_align_errors = stats.rx_align_errors; - edev->stats.rx_carrier_errors = stats.rx_carrier_errors; - edev->stats.rx_oversize_packets = stats.rx_oversize_packets; - edev->stats.rx_jabbers = stats.rx_jabbers; - edev->stats.rx_undersize_packets = stats.rx_undersize_packets; - edev->stats.rx_fragments = stats.rx_fragments; - edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets; - edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets; - edev->stats.tx_128_to_255_byte_packets = - stats.tx_128_to_255_byte_packets; - edev->stats.tx_256_to_511_byte_packets = - stats.tx_256_to_511_byte_packets; - edev->stats.tx_512_to_1023_byte_packets = - stats.tx_512_to_1023_byte_packets; - edev->stats.tx_1024_to_1518_byte_packets = - stats.tx_1024_to_1518_byte_packets; - edev->stats.tx_1519_to_2047_byte_packets = - stats.tx_1519_to_2047_byte_packets; - edev->stats.tx_2048_to_4095_byte_packets = - stats.tx_2048_to_4095_byte_packets; - edev->stats.tx_4096_to_9216_byte_packets = - stats.tx_4096_to_9216_byte_packets; - edev->stats.tx_9217_to_16383_byte_packets = - stats.tx_9217_to_16383_byte_packets; - edev->stats.tx_pause_frames = stats.tx_pause_frames; - edev->stats.tx_pfc_frames = stats.tx_pfc_frames; - edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count; - edev->stats.tx_total_collisions = stats.tx_total_collisions; - edev->stats.brb_truncates = stats.brb_truncates; - edev->stats.brb_discards = stats.brb_discards; - edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; + + p_common->no_buff_discards = stats.common.no_buff_discards; + p_common->packet_too_big_discard = stats.common.packet_too_big_discard; + p_common->ttl0_discard = stats.common.ttl0_discard; + p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes; + p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes; + p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes; + p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts; + 
p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts; + p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; + p_common->mftag_filter_discards = stats.common.mftag_filter_discards; + p_common->mac_filter_discards = stats.common.mac_filter_discards; + + p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; + p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; + p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes; + p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts; + p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts; + p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts; + p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts; + p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts; + p_common->coalesced_events = stats.common.tpa_coalesced_events; + p_common->coalesced_aborts_num = stats.common.tpa_aborts_num; + p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts; + p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes; + + p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets; + p_common->rx_65_to_127_byte_packets = + stats.common.rx_65_to_127_byte_packets; + p_common->rx_128_to_255_byte_packets = + stats.common.rx_128_to_255_byte_packets; + p_common->rx_256_to_511_byte_packets = + stats.common.rx_256_to_511_byte_packets; + p_common->rx_512_to_1023_byte_packets = + stats.common.rx_512_to_1023_byte_packets; + p_common->rx_1024_to_1518_byte_packets = + stats.common.rx_1024_to_1518_byte_packets; + p_common->rx_crc_errors = stats.common.rx_crc_errors; + p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames; + p_common->rx_pause_frames = stats.common.rx_pause_frames; + p_common->rx_pfc_frames = stats.common.rx_pfc_frames; + p_common->rx_align_errors = stats.common.rx_align_errors; + p_common->rx_carrier_errors = stats.common.rx_carrier_errors; + p_common->rx_oversize_packets = stats.common.rx_oversize_packets; + p_common->rx_jabbers = stats.common.rx_jabbers; + p_common->rx_undersize_packets = stats.common.rx_undersize_packets; + p_common->rx_fragments = stats.common.rx_fragments; + p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets; + p_common->tx_65_to_127_byte_packets = + stats.common.tx_65_to_127_byte_packets; + p_common->tx_128_to_255_byte_packets = + stats.common.tx_128_to_255_byte_packets; + p_common->tx_256_to_511_byte_packets = + stats.common.tx_256_to_511_byte_packets; + p_common->tx_512_to_1023_byte_packets = + stats.common.tx_512_to_1023_byte_packets; + p_common->tx_1024_to_1518_byte_packets = + stats.common.tx_1024_to_1518_byte_packets; + p_common->tx_pause_frames = stats.common.tx_pause_frames; + p_common->tx_pfc_frames = stats.common.tx_pfc_frames; + p_common->brb_truncates = stats.common.brb_truncates; + p_common->brb_discards = stats.common.brb_discards; + p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; + + if (QEDE_IS_BB(edev)) { + struct qede_stats_bb *p_bb = &edev->stats.bb; + + p_bb->rx_1519_to_1522_byte_packets = + stats.bb.rx_1519_to_1522_byte_packets; + p_bb->rx_1519_to_2047_byte_packets = + stats.bb.rx_1519_to_2047_byte_packets; + p_bb->rx_2048_to_4095_byte_packets = + stats.bb.rx_2048_to_4095_byte_packets; + p_bb->rx_4096_to_9216_byte_packets = + stats.bb.rx_4096_to_9216_byte_packets; + p_bb->rx_9217_to_16383_byte_packets = + stats.bb.rx_9217_to_16383_byte_packets; + p_bb->tx_1519_to_2047_byte_packets = + stats.bb.tx_1519_to_2047_byte_packets; + p_bb->tx_2048_to_4095_byte_packets = + stats.bb.tx_2048_to_4095_byte_packets; + p_bb->tx_4096_to_9216_byte_packets = + 
stats.bb.tx_4096_to_9216_byte_packets; + p_bb->tx_9217_to_16383_byte_packets = + stats.bb.tx_9217_to_16383_byte_packets; + p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count; + p_bb->tx_total_collisions = stats.bb.tx_total_collisions; + } else { + struct qede_stats_ah *p_ah = &edev->stats.ah; + + p_ah->rx_1519_to_max_byte_packets = + stats.ah.rx_1519_to_max_byte_packets; + p_ah->tx_1519_to_max_byte_packets = + stats.ah.tx_1519_to_max_byte_packets; + } } static void qede_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); + struct qede_stats_common *p_common; qede_fill_by_demand_stats(edev); + p_common = &edev->stats.common; - stats->rx_packets = edev->stats.rx_ucast_pkts + - edev->stats.rx_mcast_pkts + - edev->stats.rx_bcast_pkts; - stats->tx_packets = edev->stats.tx_ucast_pkts + - edev->stats.tx_mcast_pkts + - edev->stats.tx_bcast_pkts; - - stats->rx_bytes = edev->stats.rx_ucast_bytes + - edev->stats.rx_mcast_bytes + - edev->stats.rx_bcast_bytes; + stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + + p_common->rx_bcast_pkts; + stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + + p_common->tx_bcast_pkts; - stats->tx_bytes = edev->stats.tx_ucast_bytes + - edev->stats.tx_mcast_bytes + - edev->stats.tx_bcast_bytes; + stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + + p_common->rx_bcast_bytes; + stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + + p_common->tx_bcast_bytes; - stats->tx_errors = edev->stats.tx_err_drop_pkts; - stats->multicast = edev->stats.rx_mcast_pkts + - edev->stats.rx_bcast_pkts; + stats->tx_errors = p_common->tx_err_drop_pkts; + stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; - stats->rx_fifo_errors = edev->stats.no_buff_discards; + stats->rx_fifo_errors = p_common->no_buff_discards; - stats->collisions = edev->stats.tx_total_collisions; - stats->rx_crc_errors = edev->stats.rx_crc_errors; - stats->rx_frame_errors = edev->stats.rx_align_errors; + if (QEDE_IS_BB(edev)) + stats->collisions = edev->stats.bb.tx_total_collisions; + stats->rx_crc_errors = p_common->rx_crc_errors; + stats->rx_frame_errors = p_common->rx_align_errors; } #ifdef CONFIG_QED_SRIOV @@ -532,6 +558,9 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_udp_tunnel_del = qede_udp_tunnel_del, .ndo_features_check = qede_features_check, .ndo_xdp = qede_xdp, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = qede_rx_flow_steer, +#endif }; /* ------------------------------------------------------------------------- @@ -581,7 +610,8 @@ static void qede_init_ndev(struct qede_dev *edev) { struct net_device *ndev = edev->ndev; struct pci_dev *pdev = edev->pdev; - u32 hw_features; + bool udp_tunnel_enable = false; + netdev_features_t hw_features; pci_set_drvdata(pdev, ndev); @@ -603,16 +633,33 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - /* Encap features*/ - hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_GRE_CSUM; - ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | - NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_GRE_CSUM; + if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) + hw_features |= NETIF_F_NTUPLE; + + if (edev->dev_info.common.vxlan_enable || 
+ edev->dev_info.common.geneve_enable) + udp_tunnel_enable = true; + + if (udp_tunnel_enable || edev->dev_info.common.gre_enable) { + hw_features |= NETIF_F_TSO_ECN; + ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_TSO | + NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_RXCSUM; + } + + if (udp_tunnel_enable) { + hw_features |= (NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + } + + if (edev->dev_info.common.gre_enable) { + hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM); + ndev->hw_enc_features |= (NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM); + } ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; @@ -750,7 +797,6 @@ static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); - struct qed_dev *cdev = edev->cdev; __qede_lock(edev); @@ -758,24 +804,12 @@ static void qede_sp_task(struct work_struct *work) if (edev->state == QEDE_STATE_OPEN) qede_config_rx_mode(edev->ndev); - if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) { - struct qed_tunn_params tunn_params; - - memset(&tunn_params, 0, sizeof(tunn_params)); - tunn_params.update_vxlan_port = 1; - tunn_params.vxlan_port = edev->vxlan_dst_port; - qed_ops->tunn_config(cdev, &tunn_params); - } - - if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) { - struct qed_tunn_params tunn_params; - - memset(&tunn_params, 0, sizeof(tunn_params)); - tunn_params.update_geneve_port = 1; - tunn_params.geneve_port = edev->geneve_dst_port; - qed_ops->tunn_config(cdev, &tunn_params); +#ifdef CONFIG_RFS_ACCEL + if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) { + if (edev->state == QEDE_STATE_OPEN) + qede_process_arfs_filters(edev, false); } - +#endif __qede_unlock(edev); } @@ -786,6 +820,9 @@ static void qede_update_pf_params(struct qed_dev *cdev) /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3; +#ifdef CONFIG_RFS_ACCEL + pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; +#endif qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -870,13 +907,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); /* PTP not supported on VFs */ - if (!is_vf) { - rc = qede_ptp_register_phc(edev); - if (rc) { - DP_NOTICE(edev, "Cannot register PHC\n"); - goto err5; - } - } + if (!is_vf) + qede_ptp_enable(edev, true); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -891,8 +923,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; -err5: - unregister_netdev(edev->ndev); err4: qede_roce_dev_remove(edev); err3: @@ -940,11 +970,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) DP_INFO(edev, "Starting qede_remove\n"); - cancel_delayed_work_sync(&edev->sp_task); - unregister_netdev(ndev); + cancel_delayed_work_sync(&edev->sp_task); - qede_ptp_remove(edev); + qede_ptp_disable(edev); qede_roce_dev_remove(edev); @@ -1165,9 +1194,11 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->num_rx_buffers = edev->q_num_rx_buffers; rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; + rxq->rx_headroom = edev->xdp_prog ? 
XDP_PACKET_HEADROOM : 0; - if (rxq->rx_buf_size > PAGE_SIZE) - rxq->rx_buf_size = PAGE_SIZE; + /* Make sure that the headroom and payload fit in a single page */ + if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE) + rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom; /* Segment size to spilt a page in multiple equal parts, * unless XDP is used in which case we'd use the entire page. @@ -1229,7 +1260,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { /* Free the parallel SW ring */ if (txq->is_xdp) - kfree(txq->sw_tx_ring.pages); + kfree(txq->sw_tx_ring.xdp); else kfree(txq->sw_tx_ring.skbs); @@ -1247,9 +1278,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) /* Allocate the parallel driver ring for Tx buffers */ if (txq->is_xdp) { - size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE; - txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL); - if (!txq->sw_tx_ring.pages) + size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE; + txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL); + if (!txq->sw_tx_ring.xdp) goto err; } else { size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE; @@ -1466,6 +1497,18 @@ static int qede_req_msix_irqs(struct qede_dev *edev) } for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { +#ifdef CONFIG_RFS_ACCEL + struct qede_fastpath *fp = &edev->fp_array[i]; + + if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) { + rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap, + edev->int_info.msix[i].vector); + if (rc) { + DP_ERR(edev, "Failed to add CPU rmap\n"); + qede_free_arfs(edev); + } + } +#endif rc = request_irq(edev->int_info.msix[i].vector, qede_msix_fp_int, 0, edev->fp_array[i].name, &edev->fp_array[i]); @@ -1827,8 +1870,6 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_roce_dev_event_close(edev); edev->state = QEDE_STATE_CLOSED; - qede_ptp_stop(edev); - /* Close OS Tx */ netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); @@ -1847,7 +1888,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); - +#ifdef CONFIG_RFS_ACCEL + if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { + qede_poll_for_freeing_arfs_filters(edev); + qede_free_arfs(edev); + } +#endif /* Release the interrupts */ qede_sync_free_irqs(edev); edev->ops->common->set_fp_int(edev->cdev, 0); @@ -1899,6 +1945,13 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, if (rc) goto err2; +#ifdef CONFIG_RFS_ACCEL + if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { + rc = qede_alloc_arfs(edev); + if (rc) + DP_NOTICE(edev, "aRFS memory allocation failed\n"); + } +#endif qede_napi_add_enable(edev); DP_INFO(edev, "Napi added and enabled\n"); @@ -1925,13 +1978,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, qede_roce_dev_event_open(edev); - qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL)); - edev->state = QEDE_STATE_OPEN; DP_INFO(edev, "Ending successfully qede load\n"); - goto out; err4: qede_sync_free_irqs(edev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 2e62dec09bd7be7f8639d5c8b01c9f020a349750..24f06e2ef43e8c16c8b29f35a87d952e71da5dd6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -181,6 +181,7 @@ static void qede_ptp_task(struct work_struct *work) skb_tstamp_tx(ptp->tx_skb, &shhwtstamps); dev_kfree_skb_any(ptp->tx_skb); 
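The qede_ptp.c hunk resuming below pairs clear_bit_unlock() in the timestamp-completion work with the test_and_set_bit_lock() added to qede_ptp_tx_ts() further down, so that at most one Tx skb ever awaits a hardware timestamp. A minimal sketch of that claim/release idiom, using hypothetical names (struct my_dev, MY_TS_BUSY) rather than qede's own:

#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#define MY_TS_BUSY	0	/* bit number inside dev->flags */

struct my_dev {
	unsigned long flags;
	struct sk_buff *tx_skb;
};

/* Transmit path: claim the slot; acquire semantics on success */
static bool my_ts_claim(struct my_dev *dev, struct sk_buff *skb)
{
	if (test_and_set_bit_lock(MY_TS_BUSY, &dev->flags))
		return false;	/* a timestamp is already pending */
	dev->tx_skb = skb_get(skb);
	return true;
}

/* Completion path: consume the skb, then release the slot */
static void my_ts_complete(struct my_dev *dev)
{
	dev_kfree_skb_any(dev->tx_skb);
	dev->tx_skb = NULL;
	clear_bit_unlock(MY_TS_BUSY, &dev->flags);
}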
ptp->tx_skb = NULL; + clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); DP_VERBOSE(edev, QED_MSG_DEBUG, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", @@ -206,23 +207,10 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc) return phc_cycles; } -static void qede_ptp_init_cc(struct qede_dev *edev) -{ - struct qede_ptp *ptp; - - ptp = edev->ptp; - if (!ptp) - return; - - memset(&ptp->cc, 0, sizeof(ptp->cc)); - ptp->cc.read = qede_ptp_read_cc; - ptp->cc.mask = CYCLECOUNTER_MASK(64); - ptp->cc.shift = 0; - ptp->cc.mult = 1; -} - static int qede_ptp_cfg_filters(struct qede_dev *edev) { + enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON; + enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE; struct qede_ptp *ptp = edev->ptp; if (!ptp) @@ -236,7 +224,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev) switch (ptp->tx_type) { case HWTSTAMP_TX_ON: edev->flags |= QEDE_TX_TIMESTAMPING_EN; - ptp->ops->hwtstamp_tx_on(edev->cdev); + tx_type = QED_PTP_HWTSTAMP_TX_ON; + break; + + case HWTSTAMP_TX_OFF: + edev->flags &= ~QEDE_TX_TIMESTAMPING_EN; + tx_type = QED_PTP_HWTSTAMP_TX_OFF; break; case HWTSTAMP_TX_ONESTEP_SYNC: @@ -247,42 +240,57 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev) spin_lock_bh(&ptp->lock); switch (ptp->rx_filter) { case HWTSTAMP_FILTER_NONE: + rx_filter = QED_PTP_FILTER_NONE; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: ptp->rx_filter = HWTSTAMP_FILTER_NONE; + rx_filter = QED_PTP_FILTER_ALL; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + rx_filter = QED_PTP_FILTER_V1_L4_EVENT; + break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 events */ - ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4); + rx_filter = QED_PTP_FILTER_V1_L4_GEN; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + rx_filter = QED_PTP_FILTER_V2_L4_EVENT; + break; case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ - ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6); + rx_filter = QED_PTP_FILTER_V2_L4_GEN; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + rx_filter = QED_PTP_FILTER_V2_L2_EVENT; + break; case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; /* Initialize PTP detection L2 events */ - ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2); + rx_filter = QED_PTP_FILTER_V2_L2_GEN; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + rx_filter = QED_PTP_FILTER_V2_EVENT; + break; case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ - ptp->ops->cfg_rx_filters(edev->cdev, - QED_PTP_FILTER_L2_IPV4_IPV6); + rx_filter = QED_PTP_FILTER_V2_GEN; break; } + ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type); + spin_unlock_bh(&ptp->lock); return 0; @@ -324,61 +332,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) sizeof(config)) ? 
-EFAULT : 0; } -/* Called during load, to initialize PTP-related stuff */ -static void qede_ptp_init(struct qede_dev *edev, bool init_tc) -{ - struct qede_ptp *ptp; - int rc; - - ptp = edev->ptp; - if (!ptp) - return; - - spin_lock_init(&ptp->lock); - - /* Configure PTP in HW */ - rc = ptp->ops->enable(edev->cdev); - if (rc) { - DP_ERR(edev, "Stopping PTP initialization\n"); - return; - } - - /* Init work queue for Tx timestamping */ - INIT_WORK(&ptp->work, qede_ptp_task); - - /* Init cyclecounter and timecounter. This is done only in the first - * load. If done in every load, PTP application will fail when doing - * unload / load (e.g. MTU change) while it is running. - */ - if (init_tc) { - qede_ptp_init_cc(edev); - timecounter_init(&ptp->tc, &ptp->cc, - ktime_to_ns(ktime_get_real())); - } - - DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n"); -} - -void qede_ptp_start(struct qede_dev *edev, bool init_tc) -{ - qede_ptp_init(edev, init_tc); - qede_ptp_cfg_filters(edev); -} - -void qede_ptp_remove(struct qede_dev *edev) -{ - struct qede_ptp *ptp; - - ptp = edev->ptp; - if (ptp && ptp->clock) { - ptp_clock_unregister(ptp->clock); - ptp->clock = NULL; - } - - kfree(ptp); - edev->ptp = NULL; -} - int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; @@ -417,8 +370,7 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) return 0; } -/* Called during unload, to stop PTP-related stuff */ -void qede_ptp_stop(struct qede_dev *edev) +void qede_ptp_disable(struct qede_dev *edev) { struct qede_ptp *ptp; @@ -426,6 +378,11 @@ void qede_ptp_stop(struct qede_dev *edev) if (!ptp) return; + if (ptp->clock) { + ptp_clock_unregister(ptp->clock); + ptp->clock = NULL; + } + /* Cancel PTP work queue. Should be done after the Tx queues are * drained to prevent additional scheduling. */ @@ -439,11 +396,54 @@ void qede_ptp_stop(struct qede_dev *edev) spin_lock_bh(&ptp->lock); ptp->ops->disable(edev->cdev); spin_unlock_bh(&ptp->lock); + + kfree(ptp); + edev->ptp = NULL; +} + +static int qede_ptp_init(struct qede_dev *edev, bool init_tc) +{ + struct qede_ptp *ptp; + int rc; + + ptp = edev->ptp; + if (!ptp) + return -EINVAL; + + spin_lock_init(&ptp->lock); + + /* Configure PTP in HW */ + rc = ptp->ops->enable(edev->cdev); + if (rc) { + DP_INFO(edev, "PTP HW enable failed\n"); + return rc; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&ptp->work, qede_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. 
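The comment above describes the cyclecounter/timecounter pair that the init_tc branch just below fills in: read(), mask, mult and shift tell the timecounter how raw counter deltas become nanoseconds (mult = 1 and shift = 0 here because the PHC already counts in nanoseconds), and timecounter_init() anchors the counter to wall-clock time exactly once. A generic sketch of that wiring, with a hypothetical my_read_cycles() standing in for the driver's register read:

#include <linux/clocksource.h>
#include <linux/ktime.h>
#include <linux/string.h>
#include <linux/timecounter.h>

static struct cyclecounter my_cc;
static struct timecounter my_tc;

static u64 my_read_cycles(const struct cyclecounter *cc)
{
	return 0;	/* a real driver reads its free-running PHC here */
}

static void my_tc_setup(void)
{
	memset(&my_cc, 0, sizeof(my_cc));
	my_cc.read = my_read_cycles;
	my_cc.mask = CYCLECOUNTER_MASK(64);
	my_cc.mult = 1;		/* counter ticks are already nanoseconds */
	my_cc.shift = 0;

	/* Anchor once per device lifetime, not on every load */
	timecounter_init(&my_tc, &my_cc, ktime_to_ns(ktime_get_real()));
}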
+ */ + if (init_tc) { + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; + + timecounter_init(&ptp->tc, &ptp->cc, + ktime_to_ns(ktime_get_real())); + } + + return rc; } -int qede_ptp_register_phc(struct qede_dev *edev) +int qede_ptp_enable(struct qede_dev *edev, bool init_tc) { struct qede_ptp *ptp; + int rc; ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); if (!ptp) { @@ -454,14 +454,19 @@ int qede_ptp_register_phc(struct qede_dev *edev) ptp->edev = edev; ptp->ops = edev->ops->ptp; if (!ptp->ops) { - kfree(ptp); - edev->ptp = NULL; - DP_ERR(edev, "PTP clock registeration failed\n"); - return -EIO; + DP_INFO(edev, "PTP enable failed\n"); + rc = -EIO; + goto err1; } edev->ptp = ptp; + rc = qede_ptp_init(edev, init_tc); + if (rc) + goto err1; + + qede_ptp_cfg_filters(edev); + /* Fill the ptp_clock_info struct and register PTP clock */ ptp->clock_info.owner = THIS_MODULE; snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name); @@ -478,13 +483,21 @@ int qede_ptp_register_phc(struct qede_dev *edev) ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { - ptp->clock = NULL; - kfree(ptp); - edev->ptp = NULL; + rc = -EINVAL; DP_ERR(edev, "PTP clock registeration failed\n"); + goto err2; } return 0; + +err2: + qede_ptp_disable(edev); + ptp->clock = NULL; +err1: + kfree(ptp); + edev->ptp = NULL; + + return rc; } void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) @@ -495,6 +508,9 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) if (!ptp) return; + if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags)) + return; + if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) { DP_NOTICE(edev, "Tx timestamping was not enabled, this packet will not be timestamped\n"); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index f328f9bba53a7a3363514a3495720954724c3bc5..691a14c4b2c5a790e43968623ca9645b3139060f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -40,10 +40,8 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); -void qede_ptp_start(struct qede_dev *edev, bool init_tc); -void qede_ptp_stop(struct qede_dev *edev); -void qede_ptp_remove(struct qede_dev *edev); -int qede_ptp_register_phc(struct qede_dev *edev); +void qede_ptp_disable(struct qede_dev *edev); +int qede_ptp_enable(struct qede_dev *edev, bool init_tc); int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index d7107055ec6035bf206380d98bbd4670d06660cd..2f656f395f39699e4cec53f4ff25ea7e745d1041 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -128,6 +128,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) return 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c 
b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index e9e647072596d5c6e3c6206fb003b66bfe3cdb81..1188d420fe539912e6b2eb8656124625f1ae6f34 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4686,7 +4686,8 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, /* * Set up the operating parameters. */ - qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM); + qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + ndev->name); INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c index f62c215be779853ad76cf71f02b09dc4f7d62a1a..7116be485e6129090b2a60f6a8397a37be3b1897 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c @@ -26,6 +26,7 @@ /* SGMII digital lane registers */ #define EMAC_SGMII_LN_DRVR_CTRL0 0x000C +#define EMAC_SGMII_LN_DRVR_CTRL1 0x0010 #define EMAC_SGMII_LN_DRVR_TAP_EN 0x0018 #define EMAC_SGMII_LN_TX_MARGINING 0x001C #define EMAC_SGMII_LN_TX_PRE 0x0020 @@ -48,6 +49,7 @@ #define EMAC_SGMII_LN_RX_EN_SIGNAL 0x02AC #define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x02B8 #define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x02C8 +#define EMAC_SGMII_LN_RX_RESECODE_OFFSET 0x02CC /* SGMII digital lane register values */ #define UCDR_STEP_BY_TWO_MODE0 BIT(7) @@ -73,6 +75,8 @@ #define CML_GEAR_MODE(x) (((x) & 7) << 3) #define CML2CMOS_IBOOST_MODE(x) ((x) & 7) +#define RESCODE_OFFSET(x) ((x) & 0x1f) + #define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2) #define MIXER_DATARATE_MODE(x) ((x) & 3) @@ -159,6 +163,8 @@ static const struct emac_reg_write sgmii_laned[] = { {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)}, {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)}, {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)}, + {EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)}, + {EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)}, {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)}, {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) | EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0}, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 040b28977ee74c8cbd9a3f83c73a14597677ddbd..18c184ee1f3c0ee9e7ec66d2fafba4da22b9d676 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -13,6 +13,7 @@ /* Qualcomm Technologies, Inc. EMAC SGMII Controller driver. 
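The qlge_main.c hunk above is a small hardening fix: alloc_ordered_workqueue() takes a printf-style format string, so passing ndev->name directly would let a '%' in an interface name be parsed as a conversion specifier. Passing a literal "%s" with the name as an argument avoids that. A sketch of the safe form, using a hypothetical wrapper:

#include <linux/workqueue.h>

static struct workqueue_struct *my_alloc_wq(const char *ifname)
{
	/* "%s" is the format; ifname is only ever data */
	return alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, ifname);
}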
*/ +#include #include #include #include diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 672f6b696069ad8b47989ecb1dd03d3a81fde565..72233ab9474b1263271c772691c5c35cc245a63e 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1406,27 +1406,29 @@ static int cp_get_sset_count (struct net_device *dev, int sset) } } -static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int cp_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct cp_private *cp = netdev_priv(dev); int rc; unsigned long flags; spin_lock_irqsave(&cp->lock, flags); - rc = mii_ethtool_gset(&cp->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&cp->mii_if, cmd); spin_unlock_irqrestore(&cp->lock, flags); return rc; } -static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int cp_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct cp_private *cp = netdev_priv(dev); int rc; unsigned long flags; spin_lock_irqsave(&cp->lock, flags); - rc = mii_ethtool_sset(&cp->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd); spin_unlock_irqrestore(&cp->lock, flags); return rc; @@ -1578,8 +1580,6 @@ static const struct ethtool_ops cp_ethtool_ops = { .get_drvinfo = cp_get_drvinfo, .get_regs_len = cp_get_regs_len, .get_sset_count = cp_get_sset_count, - .get_settings = cp_get_settings, - .set_settings = cp_set_settings, .nway_reset = cp_nway_reset, .get_link = ethtool_op_get_link, .get_msglevel = cp_get_msglevel, @@ -1593,6 +1593,8 @@ static const struct ethtool_ops cp_ethtool_ops = { .get_eeprom = cp_get_eeprom, .set_eeprom = cp_set_eeprom, .get_ringparam = cp_get_ringparam, + .get_link_ksettings = cp_get_link_ksettings, + .set_link_ksettings = cp_set_link_ksettings, }; static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 89631753e79962d91456d93b71929af768917da1..ca22f2898664617656f7fa6e4e98df1e7aa3bc26 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2384,21 +2384,23 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); } -static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rtl8139_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct rtl8139_private *tp = netdev_priv(dev); spin_lock_irq(&tp->lock); - mii_ethtool_gset(&tp->mii, cmd); + mii_ethtool_get_link_ksettings(&tp->mii, cmd); spin_unlock_irq(&tp->lock); return 0; } -static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rtl8139_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct rtl8139_private *tp = netdev_priv(dev); int rc; spin_lock_irq(&tp->lock); - rc = mii_ethtool_sset(&tp->mii, cmd); + rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd); spin_unlock_irq(&tp->lock); return rc; } @@ -2480,8 +2482,6 @@ static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data) static const struct ethtool_ops rtl8139_ethtool_ops = { .get_drvinfo = rtl8139_get_drvinfo, - .get_settings = rtl8139_get_settings, - .set_settings = rtl8139_set_settings, .get_regs_len = rtl8139_get_regs_len, .get_regs = rtl8139_get_regs, 
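The 8139cp and 8139too conversions above, like the sis190, sis900, epic100 and ioc3 ones later in the series, follow one recipe: wrap mii_ethtool_get_link_ksettings()/mii_ethtool_set_link_ksettings() in the driver's existing lock and register the pair as .get_link_ksettings/.set_link_ksettings in place of the removed legacy hooks. A condensed sketch of that recipe, with a hypothetical struct my_priv:

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;
	struct mii_if_info mii;
};

static int my_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct my_priv *p = netdev_priv(dev);

	spin_lock_irq(&p->lock);
	mii_ethtool_get_link_ksettings(&p->mii, cmd);
	spin_unlock_irq(&p->lock);

	return 0;
}

static int my_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct my_priv *p = netdev_priv(dev);
	int rc;

	spin_lock_irq(&p->lock);
	rc = mii_ethtool_set_link_ksettings(&p->mii, cmd);
	spin_unlock_irq(&p->lock);

	return rc;
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_link_ksettings	= my_get_link_ksettings,
	.set_link_ksettings	= my_set_link_ksettings,
};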
.nway_reset = rtl8139_nway_reset, @@ -2493,6 +2493,8 @@ static const struct ethtool_ops rtl8139_ethtool_ops = { .get_strings = rtl8139_get_strings, .get_sset_count = rtl8139_get_sset_count, .get_ethtool_stats = rtl8139_get_ethtool_stats, + .get_link_ksettings = rtl8139_get_link_ksettings, + .set_link_ksettings = rtl8139_set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 81f18a8335276495a59fa93219c4607c2b8a47aa..0a8f2817ea60f2172eb28177473a4879f85bd18a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -817,7 +817,8 @@ struct rtl8169_private { } csi_ops; int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*get_link_ksettings)(struct net_device *, + struct ethtool_link_ksettings *); void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct net_device *); unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); @@ -2115,41 +2116,49 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } -static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) +static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u32 status; + u32 supported, advertising; - cmd->supported = + supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_INTERNAL; + cmd->base.port = PORT_FIBRE; status = RTL_R32(TBICSR); - cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; - cmd->autoneg = !!(status & TBINwEnable); + advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; + cmd->base.autoneg = !!(status & TBINwEnable); - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; /* Always set */ + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; /* Always set */ + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) +static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - return mii_ethtool_gset(&tp->mii, cmd); + return mii_ethtool_get_link_ksettings(&tp->mii, cmd); } -static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rtl8169_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); int rc; rtl_lock_work(tp); - rc = tp->get_settings(dev, cmd); + rc = tp->get_link_ksettings(dev, cmd); rtl_unlock_work(tp); return rc; @@ -2356,7 +2365,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, .get_link = ethtool_op_get_link, - .get_settings = rtl8169_get_settings, .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = rtl8169_set_msglevel, @@ -2368,6 +2376,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, .nway_reset = rtl8169_nway_reset, + .get_link_ksettings = rtl8169_get_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -8351,14 +8360,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rtl_tbi_enabled(tp)) { tp->set_speed = rtl8169_set_speed_tbi; - tp->get_settings = rtl8169_gset_tbi; + tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi; tp->phy_reset_enable = rtl8169_tbi_reset_enable; tp->phy_reset_pending = rtl8169_tbi_reset_pending; tp->link_ok = rtl8169_tbi_link_ok; tp->do_ioctl = rtl_tbi_ioctl; } else { tp->set_speed = rtl8169_set_speed_xmii; - tp->get_settings = rtl8169_gset_xmii; + tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii; tp->phy_reset_enable = rtl8169_xmii_reset_enable; tp->phy_reset_pending = rtl8169_xmii_reset_pending; tp->link_ok = rtl8169_xmii_link_ok; @@ -8444,9 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? 
~(RxBOVF | RxFOVF) : ~0; - init_timer(&tp->timer); - tp->timer.data = (unsigned long) dev; - tp->timer.function = rtl8169_phy_timer; + setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev); tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 0f63a44a955deb4de7b8da6c69b5dd6125d19dad..bab13613b138cc15c734d9e9fff5f465ef480a44 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -1115,7 +1116,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { - struct ethtool_cmd *ecmd = priv; + struct ethtool_link_ksettings *ecmd = priv; const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; u32 speed; @@ -1137,13 +1138,14 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = SUPPORTED_TP; - ecmd->phy_address = 0xff; - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; - ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); + + ecmd->base.phy_address = 0xff; + ecmd->base.port = PORT_TP; + ecmd->base.speed = speed; + ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->base.autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } @@ -1250,7 +1252,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { - struct ethtool_cmd *ecmd = priv; + struct ethtool_link_ksettings *ecmd = priv; struct rocker_tlv *cmd_info; if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, @@ -1263,13 +1265,13 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, - ethtool_cmd_speed(ecmd))) + ecmd->base.speed)) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, - ecmd->duplex)) + ecmd->base.duplex)) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, - ecmd->autoneg)) + ecmd->base.autoneg)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; @@ -1347,8 +1349,9 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, return 0; } -static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) +static int +rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_link_ksettings *ecmd) { return rocker_cmd_exec(rocker_port, false, rocker_cmd_get_port_settings_prep, NULL, @@ -1373,12 +1376,17 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, rocker_cmd_get_port_settings_mode_proc, p_mode); } -static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) +static int +rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + const struct ethtool_link_ksettings *ecmd) { + struct ethtool_link_ksettings copy_ecmd; + + memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd)); + + return rocker_cmd_exec(rocker_port, false, rocker_cmd_set_port_settings_ethtool_prep, - ecmd, NULL, NULL); + &copy_ecmd, NULL, NULL); } static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, @@ -2168,7 +2176,10 @@ static const struct switchdev_ops rocker_port_switchdev_ops = { struct rocker_fib_event_work { struct work_struct work; - struct fib_entry_notifier_info fen_info; + union { + struct fib_entry_notifier_info fen_info; + struct fib_rule_notifier_info fr_info; + }; struct rocker *rocker; unsigned long event; }; @@ -2178,6 +2189,7 @@ static void rocker_router_fib_event_work(struct work_struct *work) { struct rocker_fib_event_work *fib_work = container_of(work, struct rocker_fib_event_work, work); struct rocker *rocker = fib_work->rocker; + struct fib_rule *rule; int err; /* Protect internal structures from changes */ @@ -2195,7 +2207,10 @@ static void rocker_router_fib_event_work(struct work_struct *work) break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - rocker_world_fib4_abort(rocker); + rule = fib_work->fr_info.rule; + if (!fib4_rule_default(rule)) + rocker_world_fib4_abort(rocker); + fib_rule_put(rule); break; } rtnl_unlock(); @@ -2226,6 +2241,11 @@ static int rocker_router_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; } queue_work(rocker->rocker_owq, &fib_work->work); @@ -2237,16 +2257,18 @@ static int rocker_router_fib_event(struct notifier_block *nb, * ethtool interface
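In the rocker hunks above, FIB notifier events arrive in atomic context and are deferred to a workqueue, so the rule is pinned across the handoff: fib_rule_get() before queue_work(), fib_rule_put() once the work item has used it (here, to skip the fib4 abort when the rule is a default one). A stripped-down sketch of that pin-and-defer pattern, under hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/fib_rules.h>

struct my_fib_work {
	struct work_struct work;
	struct fib_rule *rule;
};

static void my_fib_work_fn(struct work_struct *work)
{
	struct my_fib_work *fw = container_of(work, struct my_fib_work, work);

	/* ... act on fw->rule, typically under rtnl_lock() ... */
	fib_rule_put(fw->rule);	/* drop the reference taken at queue time */
	kfree(fw);
}

/* Called from the (atomic) notifier: pin the rule, defer the work */
static int my_fib_event(struct fib_rule *rule)
{
	struct my_fib_work *fw = kzalloc(sizeof(*fw), GFP_ATOMIC);

	if (!fw)
		return -ENOMEM;

	fib_rule_get(rule);
	fw->rule = rule;
	INIT_WORK(&fw->work, my_fib_work_fn);
	schedule_work(&fw->work);

	return 0;
}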
********************/ -static int rocker_port_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int +rocker_port_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct rocker_port *rocker_port = netdev_priv(dev); return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); } -static int rocker_port_set_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int +rocker_port_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { struct rocker_port *rocker_port = netdev_priv(dev); @@ -2388,13 +2410,13 @@ static int rocker_port_get_sset_count(struct net_device *netdev, int sset) } static const struct ethtool_ops rocker_port_ethtool_ops = { - .get_settings = rocker_port_get_settings, - .set_settings = rocker_port_set_settings, .get_drvinfo = rocker_port_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = rocker_port_get_strings, .get_ethtool_stats = rocker_port_get_stats, .get_sset_count = rocker_port_get_sset_count, + .get_link_ksettings = rocker_port_get_link_ksettings, + .set_link_ksettings = rocker_port_set_link_ksettings, }; /***************** diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index c60c2d4c646a89610edd35ef40e6ef337d045d79..78efb2822b8648c6e6f02ffa764aad89570ffbd0 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -119,6 +119,7 @@ struct efx_ef10_filter_table { bool mc_promisc; /* Whether in multicast promiscuous mode when last changed */ bool mc_promisc_last; + bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */ bool vlan_filter; struct list_head vlan_list; }; @@ -5058,6 +5059,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) struct netdev_hw_addr *mc; unsigned int i, addr_count; + table->mc_overflow = false; table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); addr_count = netdev_mc_count(net_dev); @@ -5065,6 +5067,7 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) netdev_for_each_mc_addr(mc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { table->mc_promisc = true; + table->mc_overflow = true; break; } ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); @@ -5469,12 +5472,15 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, } } else { /* If we failed to insert promiscuous filters, don't - * rollback. Regardless, also insert the mc_list + * rollback. 
Regardless, also insert the mc_list, + * unless it's incomplete due to overflow */ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, true, false); - efx_ef10_filter_insert_addr_list(efx, vlan, true, false); + if (!table->mc_overflow) + efx_ef10_filter_insert_addr_list(efx, vlan, + true, false); } } else { /* If any filters failed to insert, rollback and fall back to diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 104fb15a73f2074c145878f0466d8ff1fc650167..f6daf09b86272397d35bc72d59c5269b4644db1c 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -437,11 +437,13 @@ int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, if (ntc->type != TC_SETUP_MQPRIO) return -EINVAL; - num_tc = ntc->tc; + num_tc = ntc->mqprio->num_tc; if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC) return -EINVAL; + ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index ff88d60aa6d5650d04f46938eaf6abc63c4ff568..3bdf87f310877a31fee219afa3de3dea91e40521 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -665,11 +665,13 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, if (ntc->type != TC_SETUP_MQPRIO) return -EINVAL; - num_tc = ntc->tc; + num_tc = ntc->mqprio->num_tc; if (num_tc > EFX_MAX_TX_TC) return -EINVAL; + ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 57e6cef81ebec9d566a860424f4f83d7bb602663..52ead5524de76b8de6581c2fea8bc69d6ccebcb7 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1558,25 +1558,27 @@ static void ioc3_get_drvinfo (struct net_device *dev, strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info)); } -static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ioc3_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct ioc3_private *ip = netdev_priv(dev); int rc; spin_lock_irq(&ip->ioc3_lock); - rc = mii_ethtool_gset(&ip->mii, cmd); + rc = mii_ethtool_get_link_ksettings(&ip->mii, cmd); spin_unlock_irq(&ip->ioc3_lock); return rc; } -static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ioc3_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct ioc3_private *ip = netdev_priv(dev); int rc; spin_lock_irq(&ip->ioc3_lock); - rc = mii_ethtool_sset(&ip->mii, cmd); + rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd); spin_unlock_irq(&ip->ioc3_lock); return rc; @@ -1608,10 +1610,10 @@ static u32 ioc3_get_link(struct net_device *dev) static const struct ethtool_ops ioc3_ethtool_ops = { .get_drvinfo = ioc3_get_drvinfo, - .get_settings = ioc3_get_settings, - .set_settings = ioc3_set_settings, .nway_reset = ioc3_nway_reset, .get_link = ioc3_get_link, + .get_link_ksettings = ioc3_get_link_ksettings, + .set_link_ksettings = ioc3_set_link_ksettings, }; static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index 6c2e2b311c16d396f3969a90183648d7e77a7858..751c81848f3557c2c019f3d3f59e14a9fb247034 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ 
-1122,14 +1122,16 @@ static void sc92031_poll_controller(struct net_device *dev) } #endif -static int sc92031_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +sc92031_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; u8 phy_address; u32 phy_ctrl; u16 output_status; + u32 supported, advertising; spin_lock_bh(&priv->lock); @@ -1142,68 +1144,77 @@ static int sc92031_ethtool_get_settings(struct net_device *dev, spin_unlock_bh(&priv->lock); - cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII; - cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + advertising = ADVERTISED_TP | ADVERTISED_MII; if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) - cmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10) - cmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux)) == (PhyCtrlSpd10 | PhyCtrlDux)) - cmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100) - cmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux)) == (PhyCtrlSpd100 | PhyCtrlDux)) - cmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (phy_ctrl & PhyCtrlAne) - cmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; - ethtool_cmd_speed_set(cmd, - (output_status & 0x2) ? SPEED_100 : SPEED_10); - cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; - cmd->port = PORT_MII; - cmd->phy_address = phy_address; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->base.speed = (output_status & 0x2) ? SPEED_100 : SPEED_10; + cmd->base.duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; + cmd->base.port = PORT_MII; + cmd->base.phy_address = phy_address; + cmd->base.autoneg = (phy_ctrl & PhyCtrlAne) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int sc92031_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +sc92031_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct sc92031_priv *priv = netdev_priv(dev); void __iomem *port_base = priv->port_base; - u32 speed = ethtool_cmd_speed(cmd); + u32 speed = cmd->base.speed; u32 phy_ctrl; u32 old_phy_ctrl; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); if (!(speed == SPEED_10 || speed == SPEED_100)) return -EINVAL; - if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL)) - return -EINVAL; - if (!(cmd->port == PORT_MII)) + if (!(cmd->base.duplex == DUPLEX_HALF || + cmd->base.duplex == DUPLEX_FULL)) return -EINVAL; - if (!(cmd->phy_address == 0x1f)) + if (!(cmd->base.port == PORT_MII)) return -EINVAL; - if (!(cmd->transceiver == XCVR_INTERNAL)) + if (!(cmd->base.phy_address == 0x1f)) return -EINVAL; - if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE)) + if (!(cmd->base.autoneg == AUTONEG_DISABLE || + cmd->base.autoneg == AUTONEG_ENABLE)) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE) { - if (!(cmd->advertising & (ADVERTISED_Autoneg + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (!(advertising & (ADVERTISED_Autoneg | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_10baseT_Full @@ -1213,15 +1224,15 @@ static int sc92031_ethtool_set_settings(struct net_device *dev, phy_ctrl = PhyCtrlAne; // FIXME: I'm not sure what the original code was trying to do - if (cmd->advertising & ADVERTISED_Autoneg) + if (advertising & ADVERTISED_Autoneg) phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; - if (cmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; - if (cmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) phy_ctrl |= PhyCtrlSpd100; - if (cmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux; - if (cmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) phy_ctrl |= PhyCtrlSpd10; } else { // FIXME: Whole branch guessed @@ -1232,7 +1243,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev, else /* cmd->speed == SPEED_100 */ phy_ctrl |= PhyCtrlSpd100; - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) phy_ctrl |= PhyCtrlDux; } @@ -1368,8 +1379,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops sc92031_ethtool_ops = { - .get_settings = sc92031_ethtool_get_settings, - .set_settings = sc92031_ethtool_set_settings, .get_wol = sc92031_ethtool_get_wol, .set_wol = sc92031_ethtool_set_wol, .nway_reset = sc92031_ethtool_nway_reset, @@ -1377,6 +1386,8 @@ static const struct ethtool_ops sc92031_ethtool_ops = { .get_strings = sc92031_ethtool_get_strings, .get_sset_count = sc92031_ethtool_get_sset_count, .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, + .get_link_ksettings = sc92031_ethtool_get_link_ksettings, + .set_link_ksettings = sc92031_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 
210e35d079dd88672fe5c419955a04913d49ad62..02da106c6e04e8a59ff001575fd29cda851435cf 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1734,18 +1734,20 @@ static void sis190_set_speed_auto(struct net_device *dev) BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); } -static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int sis190_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct sis190_private *tp = netdev_priv(dev); - return mii_ethtool_gset(&tp->mii_if, cmd); + return mii_ethtool_get_link_ksettings(&tp->mii_if, cmd); } -static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int sis190_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct sis190_private *tp = netdev_priv(dev); - return mii_ethtool_sset(&tp->mii_if, cmd); + return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd); } static void sis190_get_drvinfo(struct net_device *dev, @@ -1797,8 +1799,6 @@ static void sis190_set_msglevel(struct net_device *dev, u32 value) } static const struct ethtool_ops sis190_ethtool_ops = { - .get_settings = sis190_get_settings, - .set_settings = sis190_set_settings, .get_drvinfo = sis190_get_drvinfo, .get_regs_len = sis190_get_regs_len, .get_regs = sis190_get_regs, @@ -1806,6 +1806,8 @@ static const struct ethtool_ops sis190_ethtool_ops = { .get_msglevel = sis190_get_msglevel, .set_msglevel = sis190_set_msglevel, .nway_reset = sis190_nway_reset, + .get_link_ksettings = sis190_get_link_ksettings, + .set_link_ksettings = sis190_set_link_ksettings, }; static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 1b6f6171d0788e74b939622d284880f5c083742e..40bd88362e3d94650f7eb9273efe2e5850b61312 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2035,23 +2035,23 @@ static u32 sis900_get_link(struct net_device *net_dev) return mii_link_ok(&sis_priv->mii_info); } -static int sis900_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +static int sis900_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { struct sis900_private *sis_priv = netdev_priv(net_dev); spin_lock_irq(&sis_priv->lock); - mii_ethtool_gset(&sis_priv->mii_info, cmd); + mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd); spin_unlock_irq(&sis_priv->lock); return 0; } -static int sis900_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +static int sis900_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { struct sis900_private *sis_priv = netdev_priv(net_dev); int rt; spin_lock_irq(&sis_priv->lock); - rt = mii_ethtool_sset(&sis_priv->mii_info, cmd); + rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd); spin_unlock_irq(&sis_priv->lock); return rt; } @@ -2129,11 +2129,11 @@ static const struct ethtool_ops sis900_ethtool_ops = { .get_msglevel = sis900_get_msglevel, .set_msglevel = sis900_set_msglevel, .get_link = sis900_get_link, - .get_settings = sis900_get_settings, - .set_settings = sis900_set_settings, .nway_reset = sis900_nway_reset, .get_wol = sis900_get_wol, - .set_wol = sis900_set_wol + .set_wol = sis900_set_wol, + .get_link_ksettings = sis900_get_link_ksettings, + .set_link_ksettings = sis900_set_link_ksettings, }; /** diff --git a/drivers/net/ethernet/smsc/epic100.c 
b/drivers/net/ethernet/smsc/epic100.c index 5f2737189c724eea48ffdd8d8a48f520a209f3e7..db6dcb06193d3dbce7243287800f54c28b198d7d 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1387,25 +1387,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct epic_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_gset(&np->mii, cmd); + rc = mii_ethtool_get_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct epic_private *np = netdev_priv(dev); int rc; spin_lock_irq(&np->lock); - rc = mii_ethtool_sset(&np->mii, cmd); + rc = mii_ethtool_set_link_ksettings(&np->mii, cmd); spin_unlock_irq(&np->lock); return rc; @@ -1460,14 +1462,14 @@ static void ethtool_complete(struct net_device *dev) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, .begin = ethtool_begin, - .complete = ethtool_complete + .complete = ethtool_complete, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 4f19c6166182e780b77222c566390c5acdf2ad3e..36307d34f64181d03e744352d56681b33b111b1f 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1446,40 +1446,40 @@ static int smc911x_close(struct net_device *dev) * Ethtool support */ static int -smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +smc911x_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct smc911x_local *lp = netdev_priv(dev); int ret, status; unsigned long flags; + u32 supported; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; if (lp->phy_type != 0) { spin_lock_irqsave(&lp->lock, flags); - ret = mii_ethtool_gset(&lp->mii, cmd); + ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd); spin_unlock_irqrestore(&lp->lock, flags); } else { - cmd->supported = SUPPORTED_10baseT_Half | + supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_TP | SUPPORTED_AUI; if (lp->ctl_rspeed == 10) - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; else if (lp->ctl_rspeed == 100) - ethtool_cmd_speed_set(cmd, SPEED_100); - - cmd->autoneg = AUTONEG_DISABLE; - if (lp->mii.phy_id==1) - cmd->transceiver = XCVR_INTERNAL; - else - cmd->transceiver = XCVR_EXTERNAL; - cmd->port = 0; + cmd->base.speed = SPEED_100; + + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = 0; SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status); - cmd->duplex = + cmd->base.duplex = (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? 
DUPLEX_FULL : DUPLEX_HALF; + + ethtool_convert_legacy_u32_to_link_mode( + cmd->link_modes.supported, supported); + ret = 0; } @@ -1487,7 +1487,8 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) } static int -smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +smc911x_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct smc911x_local *lp = netdev_priv(dev); int ret; @@ -1495,16 +1496,18 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) if (lp->phy_type != 0) { spin_lock_irqsave(&lp->lock, flags); - ret = mii_ethtool_sset(&lp->mii, cmd); + ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd); spin_unlock_irqrestore(&lp->lock, flags); } else { - if (cmd->autoneg != AUTONEG_DISABLE || - cmd->speed != SPEED_10 || - (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || - (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + if (cmd->base.autoneg != AUTONEG_DISABLE || + cmd->base.speed != SPEED_10 || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL) || + (cmd->base.port != PORT_TP && + cmd->base.port != PORT_AUI)) return -EINVAL; - lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; + lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL; ret = 0; } @@ -1686,8 +1689,6 @@ static int smc911x_ethtool_geteeprom_len(struct net_device *dev) } static const struct ethtool_ops smc911x_ethtool_ops = { - .get_settings = smc911x_ethtool_getsettings, - .set_settings = smc911x_ethtool_setsettings, .get_drvinfo = smc911x_ethtool_getdrvinfo, .get_msglevel = smc911x_ethtool_getmsglevel, .set_msglevel = smc911x_ethtool_setmsglevel, @@ -1698,6 +1699,8 @@ static const struct ethtool_ops smc911x_ethtool_ops = { .get_eeprom_len = smc911x_ethtool_geteeprom_len, .get_eeprom = smc911x_ethtool_geteeprom, .set_eeprom = smc911x_ethtool_seteeprom, + .get_link_ksettings = smc911x_ethtool_get_link_ksettings, + .set_link_ksettings = smc911x_ethtool_set_link_ksettings, }; /* diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 97280daba27f7f8349eaaabc71e4514c33d9fcf3..976aa876789a5731df5f50136ff7284dd2ab1258 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1843,56 +1843,60 @@ static int smc_link_ok(struct net_device *dev) } } -static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +static int smc_netdev_get_ecmd(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { - u16 tmp; - unsigned int ioaddr = dev->base_addr; + u16 tmp; + unsigned int ioaddr = dev->base_addr; + u32 supported; - ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | - SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); - - SMC_SELECT_BANK(1); - tmp = inw(ioaddr + CONFIG); - ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; - ethtool_cmd_speed_set(ecmd, SPEED_10); - ecmd->phy_address = ioaddr + MGMT; + supported = (SUPPORTED_TP | SUPPORTED_AUI | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); - SMC_SELECT_BANK(0); - tmp = inw(ioaddr + TCR); - ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF; + SMC_SELECT_BANK(1); + tmp = inw(ioaddr + CONFIG); + ecmd->base.port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP; + ecmd->base.speed = SPEED_10; + ecmd->base.phy_address = ioaddr + MGMT; - return 0; + SMC_SELECT_BANK(0); + tmp = inw(ioaddr + TCR); + ecmd->base.duplex = (tmp & TCR_FDUPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + + return 0; } -static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +static int smc_netdev_set_ecmd(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { - u16 tmp; - unsigned int ioaddr = dev->base_addr; + u16 tmp; + unsigned int ioaddr = dev->base_addr; - if (ethtool_cmd_speed(ecmd) != SPEED_10) - return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) - return -EINVAL; - if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI) - return -EINVAL; - if (ecmd->transceiver != XCVR_INTERNAL) - return -EINVAL; + if (ecmd->base.speed != SPEED_10) + return -EINVAL; + if (ecmd->base.duplex != DUPLEX_HALF && + ecmd->base.duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->base.port != PORT_TP && ecmd->base.port != PORT_AUI) + return -EINVAL; - if (ecmd->port == PORT_AUI) - smc_set_xcvr(dev, 1); - else - smc_set_xcvr(dev, 0); + if (ecmd->base.port == PORT_AUI) + smc_set_xcvr(dev, 1); + else + smc_set_xcvr(dev, 0); - SMC_SELECT_BANK(0); - tmp = inw(ioaddr + TCR); - if (ecmd->duplex == DUPLEX_FULL) - tmp |= TCR_FDUPLX; - else - tmp &= ~TCR_FDUPLX; - outw(tmp, ioaddr + TCR); - - return 0; + SMC_SELECT_BANK(0); + tmp = inw(ioaddr + TCR); + if (ecmd->base.duplex == DUPLEX_FULL) + tmp |= TCR_FDUPLX; + else + tmp &= ~TCR_FDUPLX; + outw(tmp, ioaddr + TCR); + + return 0; } static int check_if_running(struct net_device *dev) @@ -1908,7 +1912,8 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } -static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int smc_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { struct smc_private *smc = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; @@ -1919,7 +1924,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) - ret = mii_ethtool_gset(&smc->mii_if, ecmd); + ret = mii_ethtool_get_link_ksettings(&smc->mii_if, ecmd); else ret = smc_netdev_get_ecmd(dev, ecmd); SMC_SELECT_BANK(saved_bank); @@ -1927,7 +1932,8 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) return ret; } -static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int smc_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { struct smc_private *smc = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; @@ -1938,7 +1944,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) - ret = mii_ethtool_sset(&smc->mii_if, ecmd); + ret = mii_ethtool_set_link_ksettings(&smc->mii_if, ecmd); else ret = smc_netdev_set_ecmd(dev, ecmd); SMC_SELECT_BANK(saved_bank); @@ -1982,10 +1988,10 @@ static int smc_nway_reset(struct net_device *dev) static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = smc_get_drvinfo, - .get_settings = smc_get_settings, - .set_settings = smc_set_settings, .get_link = smc_get_link, .nway_reset = smc_nway_reset, + .get_link_ksettings = smc_get_link_ksettings, + .set_link_ksettings = smc_set_link_ksettings, }; static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) diff --git 
a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 01a8c020d6db193a67c2a74bdb522b245ea5ac99..e93c40b4631ecfd706cb0b7c16d58fa5089aa35f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -26,12 +26,15 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { - struct stmmac_priv *priv = (struct stmmac_priv *)p; - unsigned int entry = priv->cur_tx; - struct dma_desc *desc = priv->dma_tx + entry; + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; unsigned int nopaged_len = skb_headlen(skb); + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->cur_tx; unsigned int bmax, des2; unsigned int i = 1, len; + struct dma_desc *desc; + + desc = tx_q->dma_tx + entry; if (priv->plat->enh_desc) bmax = BUF_SIZE_8KiB; @@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = cpu_to_le32(des2); if (dma_mapping_error(priv->device, des2)) return -1; - priv->tx_skbuff_dma[entry].buf = des2; - priv->tx_skbuff_dma[entry].len = bmax; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; /* do not close the descriptor and do not set own bit */ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, - 0, false); + 0, false, skb->len); while (len != 0) { - priv->tx_skbuff[entry] = NULL; + tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); - desc = priv->dma_tx + entry; + desc = tx_q->dma_tx + entry; if (len > bmax) { des2 = dma_map_single(priv->device, @@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = cpu_to_le32(des2); if (dma_mapping_error(priv->device, des2)) return -1; - priv->tx_skbuff_dma[entry].buf = des2; - priv->tx_skbuff_dma[entry].len = bmax; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, STMMAC_CHAIN_MODE, 1, - false); + false, skb->len); len -= bmax; i++; } else { @@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = cpu_to_le32(des2); if (dma_mapping_error(priv->device, des2)) return -1; - priv->tx_skbuff_dma[entry].buf = des2; - priv->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = len; /* last descriptor can be set now */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_CHAIN_MODE, 1, - true); + true, skb->len); len = 0; } } - priv->cur_tx = entry; + tx_q->cur_tx = entry; return entry; } @@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) { - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; + struct stmmac_priv *priv = rx_q->priv_data; if (priv->hwts_rx_en && !priv->extend_desc) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. 
*/ - p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy + - (((priv->dirty_rx) + 1) % + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + + (((rx_q->dirty_rx) + 1) % DMA_RX_SIZE) * sizeof(struct dma_desc))); } static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; - unsigned int entry = priv->dirty_tx; + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->dirty_tx; - if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && priv->hwts_tx_en) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. */ - p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy + - ((priv->dirty_tx + 1) % DMA_TX_SIZE)) + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + + ((tx_q->dirty_tx + 1) % DMA_TX_SIZE)) * sizeof(struct dma_desc))); } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 04d9245b7149ce663b0827a158901a10b58ea701..b7ce3fbb53757a4cb4dd7405e1683d0fc784870c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -246,6 +246,15 @@ struct stmmac_extra_stats { #define STMMAC_TX_MAX_FRAMES 256 #define STMMAC_TX_FRAMES 64 +/* Packets types */ +enum packets_types { + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */ + PACKET_PTPQ = 0x2, /* PTP Packets */ + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */ + PACKET_UPQ = 0x4, /* Untagged Packets */ + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */ +}; + /* Rx IPC status */ enum rx_frame_status { good_frame = 0x0, @@ -324,6 +333,9 @@ struct dma_features { unsigned int number_tx_queues; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; + /* TX and RX FIFO sizes */ + unsigned int tx_fifo_size; + unsigned int rx_fifo_size; }; /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ @@ -361,7 +373,7 @@ struct stmmac_desc_ops { /* Invoked by the xmit function to prepare the tx descriptor */ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls); + bool ls, unsigned int tot_pkt_len); void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, int len2, bool tx_own, bool ls, unsigned int tcphdrlen, @@ -413,6 +425,14 @@ struct stmmac_dma_ops { int (*reset)(void __iomem *ioaddr); void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, u32 dma_tx, u32 dma_rx, int atds); + void (*init_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan); + void (*init_rx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan); + void (*init_tx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan); /* Configure the AXI Bus Mode Register */ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); /* Dump DMA registers */ @@ -421,25 +441,28 @@ struct stmmac_dma_ops { * An invalid value enables the store-and-forward mode */ void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, int rxfifosz); + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz); + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel); /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr) (void *data, 
struct stmmac_extra_stats *x, void __iomem *ioaddr); void (*enable_dma_transmission) (void __iomem *ioaddr); - void (*enable_dma_irq) (void __iomem *ioaddr); - void (*disable_dma_irq) (void __iomem *ioaddr); - void (*start_tx) (void __iomem *ioaddr); - void (*stop_tx) (void __iomem *ioaddr); - void (*start_rx) (void __iomem *ioaddr); - void (*stop_rx) (void __iomem *ioaddr); + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); + void (*start_tx)(void __iomem *ioaddr, u32 chan); + void (*stop_tx)(void __iomem *ioaddr, u32 chan); + void (*start_rx)(void __iomem *ioaddr, u32 chan); + void (*stop_rx)(void __iomem *ioaddr, u32 chan); int (*dma_interrupt) (void __iomem *ioaddr, - struct stmmac_extra_stats *x); + struct stmmac_extra_stats *x, u32 chan); /* If supported then get the optional core features */ void (*get_hw_feature)(void __iomem *ioaddr, struct dma_features *dma_cap); /* Program the HW RX Watchdog */ - void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len); - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len); + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); @@ -451,20 +474,44 @@ struct mac_device_info; struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, int mtu); + /* Enable the MAC RX/TX */ + void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ int (*rx_ipc)(struct mac_device_info *hw); /* Enable RX Queues */ - void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue); + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); + /* RX Queues Priority */ + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* TX Queues Priority */ + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* RX Queues Routing */ + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, + u32 queue); + /* Program RX Algorithms */ + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); + /* Program TX Algorithms */ + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); + /* Set MTL TX queues weight */ + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, + u32 weight, u32 queue); + /* RX MTL queue to RX dma mapping */ + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); + /* Configure AV Algorithm */ + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, + u32 idle_slope, u32 high_credit, u32 low_credit, + u32 queue); /* Dump MAC registers */ void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); /* Handle extra events on specific interrupts hw dependent */ int (*host_irq_status)(struct mac_device_info *hw, struct stmmac_extra_stats *x); + /* Handle MTL interrupts */ + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); /* Multicast filter setting */ void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); /* Flow control setting */ void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, - unsigned int fc, unsigned int pause_time); + 
unsigned int fc, unsigned int pause_time, u32 tx_cnt); /* Set power management mode (e.g. magic frame) */ void (*pmt)(struct mac_device_info *hw, unsigned long mode); /* Set/Get Unicast MAC addresses */ @@ -477,7 +524,8 @@ struct stmmac_ops { void (*reset_eee_mode)(struct mac_device_info *hw); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues); /* PCS calls */ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, bool loopback); @@ -547,6 +595,11 @@ struct mac_device_info { unsigned int ps; }; +struct stmmac_rx_routing { + u32 reg_mask; + u32 reg_shift; +}; + struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, int perfect_uc_entries, int *synopsys_id); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 1a3fa3d9f85549c9b5cc10064c6aaac799184adc..dd6a2f9791cc11a390d71bcb5a1b071cd1bca068 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -14,16 +14,34 @@ #include #include #include +#include #include #include +#include #include #include +#include #include #include #include +#include #include #include "stmmac_platform.h" +#include "dwmac4.h" + +struct tegra_eqos { + struct device *dev; + void __iomem *regs; + + struct reset_control *rst; + struct clk *clk_master; + struct clk *clk_slave; + struct clk *clk_tx; + struct clk *clk_rx; + + struct gpio_desc *reset; +}; static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat) @@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, return 0; } +static void *dwc_qos_probe(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *stmmac_res) +{ + int err; + + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(plat_dat->stmmac_clk)) { + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); + return ERR_CAST(plat_dat->stmmac_clk); + } + + err = clk_prepare_enable(plat_dat->stmmac_clk); + if (err < 0) { + dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n", + err); + return ERR_PTR(err); + } + + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); + if (IS_ERR(plat_dat->pclk)) { + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); + err = PTR_ERR(plat_dat->pclk); + goto disable; + } + + err = clk_prepare_enable(plat_dat->pclk); + if (err < 0) { + dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", + err); + goto disable; + } + + return NULL; + +disable: + clk_disable_unprepare(plat_dat->stmmac_clk); + return ERR_PTR(err); +} + +static int dwc_qos_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); + + return 0; +} + +#define SDMEMCOMPPADCTRL 0x8800 +#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31) + +#define AUTO_CAL_CONFIG 0x8804 +#define AUTO_CAL_CONFIG_START BIT(31) +#define AUTO_CAL_CONFIG_ENABLE BIT(29) + +#define AUTO_CAL_STATUS 0x880c +#define AUTO_CAL_STATUS_ACTIVE BIT(31) + +static void tegra_eqos_fix_speed(void *priv, unsigned int speed) +{ + struct tegra_eqos 
*eqos = priv; + unsigned long rate = 125000000; + bool needs_calibration = false; + u32 value; + int err; + + switch (speed) { + case SPEED_1000: + needs_calibration = true; + rate = 125000000; + break; + + case SPEED_100: + needs_calibration = true; + rate = 25000000; + break; + + case SPEED_10: + rate = 2500000; + break; + + default: + dev_err(eqos->dev, "invalid speed %u\n", speed); + break; + } + + if (needs_calibration) { + /* calibrate */ + value = readl(eqos->regs + SDMEMCOMPPADCTRL); + value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; + writel(value, eqos->regs + SDMEMCOMPPADCTRL); + + udelay(1); + + value = readl(eqos->regs + AUTO_CAL_CONFIG); + value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE; + writel(value, eqos->regs + AUTO_CAL_CONFIG); + + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, + value, + value & AUTO_CAL_STATUS_ACTIVE, + 1, 10); + if (err < 0) { + dev_err(eqos->dev, "calibration did not start\n"); + goto failed; + } + + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS, + value, + (value & AUTO_CAL_STATUS_ACTIVE) == 0, + 20, 200); + if (err < 0) { + dev_err(eqos->dev, "calibration didn't finish\n"); + goto failed; + } + + failed: + value = readl(eqos->regs + SDMEMCOMPPADCTRL); + value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; + writel(value, eqos->regs + SDMEMCOMPPADCTRL); + } else { + value = readl(eqos->regs + AUTO_CAL_CONFIG); + value &= ~AUTO_CAL_CONFIG_ENABLE; + writel(value, eqos->regs + AUTO_CAL_CONFIG); + } + + err = clk_set_rate(eqos->clk_tx, rate); + if (err < 0) + dev_err(eqos->dev, "failed to set TX rate: %d\n", err); +} + +static int tegra_eqos_init(struct platform_device *pdev, void *priv) +{ + struct tegra_eqos *eqos = priv; + unsigned long rate; + u32 value; + + rate = clk_get_rate(eqos->clk_slave); + + value = (rate / 1000000) - 1; + writel(value, eqos->regs + GMAC_1US_TIC_COUNTER); + + return 0; +} + +static void *tegra_eqos_probe(struct platform_device *pdev, + struct plat_stmmacenet_data *data, + struct stmmac_resources *res) +{ + struct tegra_eqos *eqos; + int err; + + eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL); + if (!eqos) { + err = -ENOMEM; + goto error; + } + + eqos->dev = &pdev->dev; + eqos->regs = res->addr; + + eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus"); + if (IS_ERR(eqos->clk_master)) { + err = PTR_ERR(eqos->clk_master); + goto error; + } + + err = clk_prepare_enable(eqos->clk_master); + if (err < 0) + goto error; + + eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus"); + if (IS_ERR(eqos->clk_slave)) { + err = PTR_ERR(eqos->clk_slave); + goto disable_master; + } + + data->stmmac_clk = eqos->clk_slave; + + err = clk_prepare_enable(eqos->clk_slave); + if (err < 0) + goto disable_master; + + eqos->clk_rx = devm_clk_get(&pdev->dev, "rx"); + if (IS_ERR(eqos->clk_rx)) { + err = PTR_ERR(eqos->clk_rx); + goto disable_slave; + } + + err = clk_prepare_enable(eqos->clk_rx); + if (err < 0) + goto disable_slave; + + eqos->clk_tx = devm_clk_get(&pdev->dev, "tx"); + if (IS_ERR(eqos->clk_tx)) { + err = PTR_ERR(eqos->clk_tx); + goto disable_rx; + } + + err = clk_prepare_enable(eqos->clk_tx); + if (err < 0) + goto disable_rx; + + eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH); + if (IS_ERR(eqos->reset)) { + err = PTR_ERR(eqos->reset); + goto disable_tx; + } + + usleep_range(2000, 4000); + gpiod_set_value(eqos->reset, 0); + + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos"); + if (IS_ERR(eqos->rst)) { + err = PTR_ERR(eqos->rst); + goto reset_phy; + } + + err = 
reset_control_assert(eqos->rst); + if (err < 0) + goto reset_phy; + + usleep_range(2000, 4000); + + err = reset_control_deassert(eqos->rst); + if (err < 0) + goto reset_phy; + + usleep_range(2000, 4000); + + data->fix_mac_speed = tegra_eqos_fix_speed; + data->init = tegra_eqos_init; + data->bsp_priv = eqos; + + err = tegra_eqos_init(pdev, eqos); + if (err < 0) + goto reset; + +out: + return eqos; + +reset: + reset_control_assert(eqos->rst); +reset_phy: + gpiod_set_value(eqos->reset, 1); +disable_tx: + clk_disable_unprepare(eqos->clk_tx); +disable_rx: + clk_disable_unprepare(eqos->clk_rx); +disable_slave: + clk_disable_unprepare(eqos->clk_slave); +disable_master: + clk_disable_unprepare(eqos->clk_master); +error: + eqos = ERR_PTR(err); + goto out; +} + +static int tegra_eqos_remove(struct platform_device *pdev) +{ + struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev); + + reset_control_assert(eqos->rst); + gpiod_set_value(eqos->reset, 1); + clk_disable_unprepare(eqos->clk_tx); + clk_disable_unprepare(eqos->clk_rx); + clk_disable_unprepare(eqos->clk_slave); + clk_disable_unprepare(eqos->clk_master); + + return 0; +} + +struct dwc_eth_dwmac_data { + void *(*probe)(struct platform_device *pdev, + struct plat_stmmacenet_data *data, + struct stmmac_resources *res); + int (*remove)(struct platform_device *pdev); +}; + +static const struct dwc_eth_dwmac_data dwc_qos_data = { + .probe = dwc_qos_probe, + .remove = dwc_qos_remove, +}; + +static const struct dwc_eth_dwmac_data tegra_eqos_data = { + .probe = tegra_eqos_probe, + .remove = tegra_eqos_remove, +}; + static int dwc_eth_dwmac_probe(struct platform_device *pdev) { + const struct dwc_eth_dwmac_data *data; struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; struct resource *res; + void *priv; int ret; + data = of_device_get_match_data(&pdev->dev); + memset(&stmmac_res, 0, sizeof(struct stmmac_resources)); /** @@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); - plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(plat_dat->stmmac_clk)) { - dev_err(&pdev->dev, "apb_pclk clock not found.\n"); - ret = PTR_ERR(plat_dat->stmmac_clk); - plat_dat->stmmac_clk = NULL; - goto err_remove_config_dt; - } - clk_prepare_enable(plat_dat->stmmac_clk); - - plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(plat_dat->pclk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - ret = PTR_ERR(plat_dat->pclk); - plat_dat->pclk = NULL; - goto err_out_clk_dis_phy; + priv = data->probe(pdev, plat_dat, &stmmac_res); + if (IS_ERR(priv)) { + ret = PTR_ERR(priv); + dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret); + goto remove_config; } - clk_prepare_enable(plat_dat->pclk); ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); if (ret) - goto err_out_clk_dis_aper; + goto remove; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - goto err_out_clk_dis_aper; + goto remove; - return 0; + return ret; -err_out_clk_dis_aper: - clk_disable_unprepare(plat_dat->pclk); -err_out_clk_dis_phy: - clk_disable_unprepare(plat_dat->stmmac_clk); -err_remove_config_dt: +remove: + data->remove(pdev); +remove_config: stmmac_remove_config_dt(pdev, plat_dat); return ret; @@ -178,11 +479,29 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) static int dwc_eth_dwmac_remove(struct platform_device *pdev) { - return stmmac_pltfr_remove(pdev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct 
stmmac_priv *priv = netdev_priv(ndev); + const struct dwc_eth_dwmac_data *data; + int err; + + data = of_device_get_match_data(&pdev->dev); + + err = stmmac_dvr_remove(&pdev->dev); + if (err < 0) + dev_err(&pdev->dev, "failed to remove platform: %d\n", err); + + err = data->remove(pdev); + if (err < 0) + dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err); + + stmmac_remove_config_dt(pdev, priv->plat); + + return err; } static const struct of_device_id dwc_eth_dwmac_match[] = { - { .compatible = "snps,dwc-qos-ethernet-4.10", }, + { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data }, + { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data }, { } }; MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index e5db6ac362354317bff2ecdf0d0344a26831752a..f0df5193f047ba534ace8e012df673f5c6cad83f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -74,6 +74,10 @@ struct rk_priv_data { #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) #define GRF_CLR_BIT(nr) (BIT(nr+16)) +#define DELAY_ENABLE(soc, tx, rx) \ + (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ + ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) + #define RK3228_GRF_MAC_CON0 0x0900 #define RK3228_GRF_MAC_CON1 0x0904 @@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, RK3228_GMAC_PHY_INTF_SEL_RGMII | RK3228_GMAC_RMII_MODE_CLR | - RK3228_GMAC_RXCLK_DLY_ENABLE | - RK3228_GMAC_TXCLK_DLY_ENABLE); + DELAY_ENABLE(RK3228, tx_delay, rx_delay)); regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0, RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) | @@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv, RK3288_GMAC_PHY_INTF_SEL_RGMII | RK3288_GMAC_RMII_MODE_CLR); regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3, - RK3288_GMAC_RXCLK_DLY_ENABLE | - RK3288_GMAC_TXCLK_DLY_ENABLE | + DELAY_ENABLE(RK3288, tx_delay, rx_delay) | RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) | RK3288_GMAC_CLK_TX_DL_CFG(tx_delay)); } @@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv, RK3366_GMAC_PHY_INTF_SEL_RGMII | RK3366_GMAC_RMII_MODE_CLR); regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7, - RK3366_GMAC_RXCLK_DLY_ENABLE | - RK3366_GMAC_TXCLK_DLY_ENABLE | + DELAY_ENABLE(RK3366, tx_delay, rx_delay) | RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) | RK3366_GMAC_CLK_TX_DL_CFG(tx_delay)); } @@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv, RK3368_GMAC_PHY_INTF_SEL_RGMII | RK3368_GMAC_RMII_MODE_CLR); regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16, - RK3368_GMAC_RXCLK_DLY_ENABLE | - RK3368_GMAC_TXCLK_DLY_ENABLE | + DELAY_ENABLE(RK3368, tx_delay, rx_delay) | RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) | RK3368_GMAC_CLK_TX_DL_CFG(tx_delay)); } @@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv, RK3399_GMAC_PHY_INTF_SEL_RGMII | RK3399_GMAC_RMII_MODE_CLR); regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6, - RK3399_GMAC_RXCLK_DLY_ENABLE | - RK3399_GMAC_TXCLK_DLY_ENABLE | + DELAY_ENABLE(RK3399, tx_delay, rx_delay) | RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) | RK3399_GMAC_CLK_TX_DL_CFG(tx_delay)); } @@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) return ret; /*rmii or rgmii*/ - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) { + switch (bsp_priv->phy_iface) { + case 
PHY_INTERFACE_MODE_RGMII: dev_info(dev, "init for RGMII\n"); bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay); - } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) { + break; + case PHY_INTERFACE_MODE_RGMII_ID: + dev_info(dev, "init for RGMII_ID\n"); + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0); + break; + case PHY_INTERFACE_MODE_RGMII_RXID: + dev_info(dev, "init for RGMII_RXID\n"); + bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0); + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + dev_info(dev, "init for RGMII_TXID\n"); + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay); + break; + case PHY_INTERFACE_MODE_RMII: dev_info(dev, "init for RMII\n"); bsp_priv->ops->set_to_rmii(bsp_priv); - } else { + break; + default: dev_err(dev, "NO interface defined!\n"); } @@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, unsigned int speed) struct rk_priv_data *bsp_priv = priv; struct device *dev = &bsp_priv->pdev->dev; - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) + switch (bsp_priv->phy_iface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: bsp_priv->ops->set_rgmii_speed(bsp_priv, speed); - else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + break; + case PHY_INTERFACE_MODE_RMII: bsp_priv->ops->set_rmii_speed(bsp_priv, speed); - else + break; + default: dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); + } } static int rk_gmac_probe(struct platform_device *pdev)
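The powerup and fix_speed paths now cover all four RGMII phy-mode flavours. The split follows the usual RGMII convention: plain "rgmii" means the MAC side (here, the GRF registers) inserts both clock delays, while the _ID/_RXID/_TXID variants declare that the PHY already inserts the delay on both/the RX/the TX path, so the matching MAC-side delay is forced to 0 above. A standalone sketch of that decision table, with hypothetical names, assuming the board-provided delays only apply when the MAC owns them:

/* Decide which delays the MAC should insert for a given RGMII variant.
 * The PHY owns a path's delay in the _ID modes, so the MAC must not add
 * a second delay there. Illustrative helper, not the driver's code. */
enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

static void pick_mac_delays(enum rgmii_mode mode,
			    int board_tx_delay, int board_rx_delay,
			    int *mac_tx_delay, int *mac_rx_delay)
{
	int phy_does_tx = (mode == RGMII_ID || mode == RGMII_TXID);
	int phy_does_rx = (mode == RGMII_ID || mode == RGMII_RXID);

	*mac_tx_delay = phy_does_tx ? 0 : board_tx_delay;
	*mac_rx_delay = phy_does_rx ? 0 : board_rx_delay;
}

For example, PHY_INTERFACE_MODE_RGMII_RXID above ends up as set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0): the MAC still provides the TX delay, and the PHY provides the RX one.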
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 19b9b308709953cc9327961d3eb3bea527848bb7..f3d9305e5f706bc407c51a5d98af6a28b6404f42 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, - unsigned int fc, unsigned int pause_time) + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) { void __iomem *ioaddr = hw->pcsr; /* Set flow such that DZPQ in Mac Register 6 is 0, @@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); } -static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues) { u32 value = readl(ioaddr + GMAC_DEBUG); @@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, + .set_mac = stmmac_set_mac, .rx_ipc = dwmac1000_rx_ipc_enable, .dump_regs = dwmac1000_dump_regs, .host_irq_status = dwmac1000_irq_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index d3654a4470461e1f44282fac163dbcd9b6827df6..471a9aa6ac94c14d46d4dcf2d956965948193c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr, dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; } -static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, + u32 number_chan) { writel(riwt, ioaddr + DMA_RX_WATCHDOG); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index e370ccec6176671d1717d24d88917b88f69b1bd2..1b360910548473a486372835b3e167456528d508 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct mac_device_info *hw, } static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, - unsigned int fc, unsigned int pause_time) + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) { void __iomem *ioaddr = hw->pcsr; unsigned int flow = MAC_FLOW_CTRL_ENABLE; @@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode) } static const struct stmmac_ops dwmac100_ops = { .core_init = dwmac100_core_init, + .set_mac = stmmac_set_mac, .rx_ipc = dwmac100_rx_ipc_enable, .dump_regs = dwmac100_dump_mac_regs, .host_irq_status = dwmac100_irq_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index db45134fddf04e50c703254b8570b744004123f8..d74cedf2a397580aeb6c62737a35030e65053367 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -22,9 +22,15 @@ #define GMAC_HASH_TAB_32_63 0x00000014 #define GMAC_RX_FLOW_CTRL 0x00000090 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_TXQ_PRTY_MAP0 0x98 +#define GMAC_TXQ_PRTY_MAP1 0x9C #define GMAC_RXQ_CTRL0 0x000000a0 +#define GMAC_RXQ_CTRL1 0x000000a4 +#define GMAC_RXQ_CTRL2 0x000000a8 +#define GMAC_RXQ_CTRL3 0x000000ac #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 +#define GMAC_1US_TIC_COUNTER 0x000000dc #define GMAC_PCS_BASE 0x000000e0 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 #define GMAC_PMT 0x000000c0 @@ -38,6 +44,22 @@ #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) +/* RX Queues Routing */ +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0) +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0 +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4) +#define GMAC_RXQCTRL_PTPQ_SHIFT 4 +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8) +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8 +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12) +#define GMAC_RXQCTRL_UPQ_SHIFT 12 +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16) +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16 +#define GMAC_RXQCTRL_MCBCQEN BIT(20) +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20 +#define GMAC_RXQCTRL_TACPQE BIT(21) +#define GMAC_RXQCTRL_TACPQE_SHIFT 21 + /* MAC Packet Filtering */ #define GMAC_PACKET_FILTER_PR BIT(0) #define GMAC_PACKET_FILTER_HMC BIT(2) @@ -53,6 +75,14 @@ /* MAC Flow Control RX */ #define GMAC_RX_FLOW_CTRL_RFE BIT(0) +/* RX Queues Priorities */ +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8) + +/* TX Queues Priorities */ +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8)) +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8) + /* MAC Flow Control TX */ #define GMAC_TX_FLOW_CTRL_TFE BIT(1) #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 @@ -148,6 +178,8 @@ enum power_event { /* MAC HW features1 bitmap */ #define GMAC_HW_FEAT_AVSEL BIT(20) #define GMAC_HW_TSOEN BIT(18) +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) @@ -161,8 +193,25 @@ enum
power_event { #define GMAC_HI_REG_AE BIT(31) /* MTL registers */ +#define MTL_OPERATION_MODE 0x00000c00 +#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) +#define MTL_OPERATION_SCHALG_WRR (0x0 << 5) +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5) +#define MTL_OPERATION_SCHALG_SP (0x3 << 5) +#define MTL_OPERATION_RAA BIT(2) +#define MTL_OPERATION_RAA_SP (0x0 << 2) +#define MTL_OPERATION_RAA_WSP (0x1 << 2) + #define MTL_INT_STATUS 0x00000c20 -#define MTL_INT_Q0 BIT(0) +#define MTL_INT_QX(x) BIT(x) + +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */ +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */ +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0) +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0) +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x)) +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q))) #define MTL_CHAN_BASE_ADDR 0x00000d00 #define MTL_CHAN_BASE_OFFSET 0x40 @@ -180,6 +229,7 @@ enum power_event { #define MTL_OP_MODE_TSF BIT(1) #define MTL_OP_MODE_TQS_MASK GENMASK(24, 16) +#define MTL_OP_MODE_TQS_SHIFT 16 #define MTL_OP_MODE_TTC_MASK 0x70 #define MTL_OP_MODE_TTC_SHIFT 4 @@ -193,6 +243,17 @@ enum power_event { #define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT) #define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT) +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20) +#define MTL_OP_MODE_RQS_SHIFT 20 + +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14) +#define MTL_OP_MODE_RFD_SHIFT 14 + +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8) +#define MTL_OP_MODE_RFA_SHIFT 8 + +#define MTL_OP_MODE_EHFC BIT(7) + #define MTL_OP_MODE_RTC_MASK 0x18 #define MTL_OP_MODE_RTC_SHIFT 3 @@ -201,6 +262,46 @@ enum power_event { #define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT) #define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT) +/* MTL ETS Control register */ +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10 +#define MTL_ETS_CTRL_BASE_OFFSET 0x40 +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \ + ((x) * MTL_ETS_CTRL_BASE_OFFSET)) + +#define MTL_ETS_CTRL_CC BIT(3) +#define MTL_ETS_CTRL_AVALG BIT(2) + +/* MTL Queue Quantum Weight */ +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18 +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40 +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \ + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET)) +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0) + +/* MTL sendSlopeCredit register */ +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c +#define MTL_SEND_SLP_CRED_OFFSET 0x40 +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \ + ((x) * MTL_SEND_SLP_CRED_OFFSET)) + +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0) + +/* MTL hiCredit register */ +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20 +#define MTL_HIGH_CRED_OFFSET 0x40 +#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \ + ((x) * MTL_HIGH_CRED_OFFSET)) + +#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0) + +/* MTL loCredit register */ +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24 +#define MTL_LOW_CRED_OFFSET 0x40 +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \ + ((x) * MTL_LOW_CRED_OFFSET)) + +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0) + /* MTL debug */ #define MTL_DEBUG_TXSTSFSTS BIT(5) #define MTL_DEBUG_TXFSTS BIT(4) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 1e79e6529c4a79a805663e2d65f2cec558f362e3..48793f2e93075a9cabd57b3a4e0ee86765368bae 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu) writel(value, ioaddr + GMAC_INT_EN); } -static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue) +static void dwmac4_rx_queue_enable(struct mac_device_info *hw, + u8 mode, u32 queue) { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_RXQ_CTRL0); value &= GMAC_RX_QUEUE_CLEAR(queue); - value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + if (mode == MTL_QUEUE_AVB) + value |= GMAC_RX_AV_QUEUE_ENABLE(queue); + else if (mode == MTL_QUEUE_DCB) + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); writel(value, ioaddr + GMAC_RXQ_CTRL0); } +static void dwmac4_rx_queue_priority(struct mac_device_info *hw, + u32 prio, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 base_register; + u32 value; + + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; + + value = readl(ioaddr + base_register); + + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue); + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & + GMAC_RXQCTRL_PSRQX_MASK(queue); + writel(value, ioaddr + base_register); +} + +static void dwmac4_tx_queue_priority(struct mac_device_info *hw, + u32 prio, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 base_register; + u32 value; + + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; + + value = readl(ioaddr + base_register); + + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue); + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) & + GMAC_TXQCTRL_PSTQX_MASK(queue); + + writel(value, ioaddr + base_register); +} + +static void dwmac4_tx_queue_routing(struct mac_device_info *hw, + u8 packet, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + const struct stmmac_rx_routing route_possibilities[] = { + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT }, + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT }, + }; + + value = readl(ioaddr + GMAC_RXQ_CTRL1); + + /* routing configuration */ + value &= ~route_possibilities[packet - 1].reg_mask; + value |= (queue << route_possibilities[packet-1].reg_shift) & + route_possibilities[packet - 1].reg_mask; + + /* some packets require extra ops */ + if (packet == PACKET_AVCPQ) { + value &= ~GMAC_RXQCTRL_TACPQE; + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT; + } else if (packet == PACKET_MCBCQ) { + value &= ~GMAC_RXQCTRL_MCBCQEN; + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT; + } + + writel(value, ioaddr + GMAC_RXQ_CTRL1); +} + +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw, + u32 rx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_OPERATION_MODE); + + value &= ~MTL_OPERATION_RAA; + switch (rx_alg) { + case MTL_RX_ALGORITHM_SP: + value |= MTL_OPERATION_RAA_SP; + break; + case MTL_RX_ALGORITHM_WSP: + value |= MTL_OPERATION_RAA_WSP; + break; + default: + break; + } + + writel(value, ioaddr + MTL_OPERATION_MODE); +} + +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw, + u32 tx_alg) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_OPERATION_MODE); + + value &= ~MTL_OPERATION_SCHALG_MASK; + switch (tx_alg) { + case MTL_TX_ALGORITHM_WRR: + value |= MTL_OPERATION_SCHALG_WRR; + break; + case MTL_TX_ALGORITHM_WFQ: + value |= MTL_OPERATION_SCHALG_WFQ; + break; + case MTL_TX_ALGORITHM_DWRR: + value |= MTL_OPERATION_SCHALG_DWRR; + 
break; + case MTL_TX_ALGORITHM_SP: + value |= MTL_OPERATION_SCHALG_SP; + break; + default: + break; + } + + writel(value, ioaddr + MTL_OPERATION_MODE); +} + +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw, + u32 weight, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); + + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK; + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK; + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue)); +} + +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + if (queue < 4) + value = readl(ioaddr + MTL_RXQ_DMA_MAP0); + else + value = readl(ioaddr + MTL_RXQ_DMA_MAP1); + + if (queue == 0 || queue == 4) { + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; + value |= MTL_RXQ_DMA_Q04MDMACH(chan); + } else { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); + } + + if (queue < 4) + writel(value, ioaddr + MTL_RXQ_DMA_MAP0); + else + writel(value, ioaddr + MTL_RXQ_DMA_MAP1); +} + +static void dwmac4_config_cbs(struct mac_device_info *hw, + u32 send_slope, u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + pr_debug("Queue %d configured as AVB. Parameters:\n", queue); + pr_debug("\tsend_slope: 0x%08x\n", send_slope); + pr_debug("\tidle_slope: 0x%08x\n", idle_slope); + pr_debug("\thigh_credit: 0x%08x\n", high_credit); + pr_debug("\tlow_credit: 0x%08x\n", low_credit); + + /* enable AV algorithm */ + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); + value |= MTL_ETS_CTRL_AVALG; + value |= MTL_ETS_CTRL_CC; + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue)); + + /* configure send slope */ + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); + value &= ~MTL_SEND_SLP_CRED_SSC_MASK; + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK; + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue)); + + /* configure idle slope (same register as tx weight) */ + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue); + + /* configure high credit */ + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); + value &= ~MTL_HIGH_CRED_HC_MASK; + value |= high_credit & MTL_HIGH_CRED_HC_MASK; + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue)); + + /* configure low credit */ + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); + value &= ~MTL_HIGH_CRED_LC_MASK; + value |= low_credit & MTL_HIGH_CRED_LC_MASK; + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue)); +} +
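dwmac4_config_cbs() above programs the four knobs of an IEEE 802.1Qav credit-based shaper for an AVB queue: credit accrues at idle_slope while the queue has to wait, drains at send_slope while it transmits, is clamped to the [low_credit, high_credit] window, and a queue may only start a new frame while its credit is non-negative. A toy model of those semantics, in arbitrary units (the real registers encode the slopes relative to the port rate, and the names here are illustrative):

/* Toy model of the 802.1Qav credit-based shaper configured above.
 * Units are abstract "credit per time unit"; hardware scaling differs. */
struct cbs_state {
	long credit;		/* current credit */
	long send_slope;	/* credit drained per unit time while sending */
	long idle_slope;	/* credit gained per unit time while waiting */
	long hi_credit;		/* upper clamp */
	long lo_credit;		/* lower clamp (typically negative) */
};

/* Advance the shaper by dt time units; 'sending' says whether the queue
 * was transmitting during that interval. */
static void cbs_advance(struct cbs_state *s, long dt, int sending)
{
	s->credit += (sending ? -s->send_slope : s->idle_slope) * dt;
	if (s->credit > s->hi_credit)
		s->credit = s->hi_credit;
	if (s->credit < s->lo_credit)
		s->credit = s->lo_credit;
}

/* A queue may start a new frame only while its credit is non-negative. */
static int cbs_may_transmit(const struct cbs_state *s)
{
	return s->credit >= 0;
}

Note that idle_slope is written through dwmac4_set_mtl_tx_queue_weight(): on this core the queue weight register doubles as the idle slope register once the queue runs in AVB mode.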
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space) { void __iomem *ioaddr = hw->pcsr; @@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw, } static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, - unsigned int fc, unsigned int pause_time) + unsigned int fc, unsigned int pause_time, + u32 tx_cnt) { void __iomem *ioaddr = hw->pcsr; - u32 channel = STMMAC_CHAN0; /* FIXME */ unsigned int flow = 0; + u32 queue = 0; pr_debug("GMAC Flow-Control:\n"); if (fc & FLOW_RX) { @@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, } if (fc & FLOW_TX) { pr_debug("\tTransmit Flow-Control ON\n"); - flow |= GMAC_TX_FLOW_CTRL_TFE; - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); - if (duplex) { + if (duplex) pr_debug("\tduplex mode: PAUSE %d\n", pause_time); - flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel)); + + for (queue = 0; queue < tx_cnt; queue++) { + flow |= GMAC_TX_FLOW_CTRL_TFE; + + if (duplex) + flow |= + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT); + + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue)); } } } @@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) } } +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan) +{ + void __iomem *ioaddr = hw->pcsr; + u32 mtl_int_qx_status; + int ret = 0; + + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); + + /* Check MTL Interrupt */ + if (mtl_int_qx_status & MTL_INT_QX(chan)) { + /* read Queue x Interrupt status */ + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan)); + + if (status & MTL_RX_OVERFLOW_INT) { + /* clear Interrupt */ + writel(status | MTL_RX_OVERFLOW_INT, + ioaddr + MTL_CHAN_INT_CTRL(chan)); + ret = CORE_IRQ_MTL_RX_OVERFLOW; + } + } + + return ret; +} + static int dwmac4_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { void __iomem *ioaddr = hw->pcsr; - u32 mtl_int_qx_status; u32 intr_status; int ret = 0; @@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw, x->irq_receive_pmt_irq_n++; } - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); - /* Check MTL Interrupt: Currently only one queue is used: Q0. */ - if (mtl_int_qx_status & MTL_INT_Q0) { - /* read Queue 0 Interrupt status */ - u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); - - if (status & MTL_RX_OVERFLOW_INT) { - /* clear Interrupt */ - writel(status | MTL_RX_OVERFLOW_INT, - ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0)); - ret = CORE_IRQ_MTL_RX_OVERFLOW; - } - } - dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); if (intr_status & PCS_RGSMIIIS_IRQ) dwmac4_phystatus(ioaddr, x); @@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_device_info *hw, return ret; } -static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues) { u32 value; - - /* Currently only channel 0 is supported */ - value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0)); - - if (value & MTL_DEBUG_TXSTSFSTS) - x->mtl_tx_status_fifo_full++; - if (value & MTL_DEBUG_TXFSTS) - x->mtl_tx_fifo_not_empty++; - if (value & MTL_DEBUG_TWCSTS) - x->mmtl_fifo_ctrl++; - if (value & MTL_DEBUG_TRCSTS_MASK) { - u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) - >> MTL_DEBUG_TRCSTS_SHIFT; - if (trcsts == MTL_DEBUG_TRCSTS_WRITE) - x->mtl_tx_fifo_read_ctrl_write++; - else if (trcsts == MTL_DEBUG_TRCSTS_TXW) - x->mtl_tx_fifo_read_ctrl_wait++; - else if (trcsts == MTL_DEBUG_TRCSTS_READ) - x->mtl_tx_fifo_read_ctrl_read++; - else - x->mtl_tx_fifo_read_ctrl_idle++; + u32 queue; + + for (queue = 0; queue < tx_queues; queue++) { + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue)); + + if (value & MTL_DEBUG_TXSTSFSTS) + x->mtl_tx_status_fifo_full++; + if (value & MTL_DEBUG_TXFSTS) + x->mtl_tx_fifo_not_empty++; + if (value & MTL_DEBUG_TWCSTS) + x->mmtl_fifo_ctrl++; + if (value & MTL_DEBUG_TRCSTS_MASK) { + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK) + >> MTL_DEBUG_TRCSTS_SHIFT; + if (trcsts == MTL_DEBUG_TRCSTS_WRITE) + x->mtl_tx_fifo_read_ctrl_write++; + else if (trcsts == MTL_DEBUG_TRCSTS_TXW) + x->mtl_tx_fifo_read_ctrl_wait++; + else if (trcsts == MTL_DEBUG_TRCSTS_READ) + x->mtl_tx_fifo_read_ctrl_read++; + else + x->mtl_tx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_TXPAUSED) + x->mac_tx_in_pause++; } - if (value & MTL_DEBUG_TXPAUSED) - x->mac_tx_in_pause++; - value = readl(ioaddr +
MTL_CHAN_RX_DEBUG(STMMAC_CHAN0)); + for (queue = 0; queue < rx_queues; queue++) { + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue)); - if (value & MTL_DEBUG_RXFSTS_MASK) { - u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) - >> MTL_DEBUG_RRCSTS_SHIFT; + if (value & MTL_DEBUG_RXFSTS_MASK) { + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK) + >> MTL_DEBUG_RRCSTS_SHIFT; - if (rxfsts == MTL_DEBUG_RXFSTS_FULL) - x->mtl_rx_fifo_fill_level_full++; - else if (rxfsts == MTL_DEBUG_RXFSTS_AT) - x->mtl_rx_fifo_fill_above_thresh++; - else if (rxfsts == MTL_DEBUG_RXFSTS_BT) - x->mtl_rx_fifo_fill_below_thresh++; - else - x->mtl_rx_fifo_fill_level_empty++; - } - if (value & MTL_DEBUG_RRCSTS_MASK) { - u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> - MTL_DEBUG_RRCSTS_SHIFT; - - if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) - x->mtl_rx_fifo_read_ctrl_flush++; - else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) - x->mtl_rx_fifo_read_ctrl_read_data++; - else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) - x->mtl_rx_fifo_read_ctrl_status++; - else - x->mtl_rx_fifo_read_ctrl_idle++; + if (rxfsts == MTL_DEBUG_RXFSTS_FULL) + x->mtl_rx_fifo_fill_level_full++; + else if (rxfsts == MTL_DEBUG_RXFSTS_AT) + x->mtl_rx_fifo_fill_above_thresh++; + else if (rxfsts == MTL_DEBUG_RXFSTS_BT) + x->mtl_rx_fifo_fill_below_thresh++; + else + x->mtl_rx_fifo_fill_level_empty++; + } + if (value & MTL_DEBUG_RRCSTS_MASK) { + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >> + MTL_DEBUG_RRCSTS_SHIFT; + + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH) + x->mtl_rx_fifo_read_ctrl_flush++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT) + x->mtl_rx_fifo_read_ctrl_read_data++; + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA) + x->mtl_rx_fifo_read_ctrl_status++; + else + x->mtl_rx_fifo_read_ctrl_idle++; + } + if (value & MTL_DEBUG_RWCSTS) + x->mtl_rx_fifo_ctrl_active++; } - if (value & MTL_DEBUG_RWCSTS) - x->mtl_rx_fifo_ctrl_active++; /* GMAC debug */ value = readl(ioaddr + GMAC_DEBUG); @@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) static const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, + .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_tx_queue_routing, + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, .dump_regs = dwmac4_dump_regs, .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, + .flow_ctrl = dwmac4_flow_ctrl, + .pmt = dwmac4_pmt, + .set_umac_addr = dwmac4_set_umac_addr, + .get_umac_addr = dwmac4_get_umac_addr, + .set_eee_mode = dwmac4_set_eee_mode, + .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_timer = dwmac4_set_eee_timer, + .set_eee_pls = dwmac4_set_eee_pls, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, + .debug = dwmac4_debug, + .set_filter = dwmac4_set_filter, +}; + +static const struct stmmac_ops dwmac410_ops = { + .core_init = dwmac4_core_init, + .set_mac = stmmac_dwmac4_set_mac, + .rx_ipc = dwmac4_rx_ipc_enable, + .rx_queue_enable = dwmac4_rx_queue_enable, + .rx_queue_prio = dwmac4_rx_queue_priority, + .tx_queue_prio = dwmac4_tx_queue_priority, + .rx_queue_routing = dwmac4_tx_queue_routing, + .prog_mtl_rx_algorithms = 
dwmac4_prog_mtl_rx_algorithms, + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms, + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight, + .map_mtl_to_dma = dwmac4_map_mtl_dma, + .config_cbs = dwmac4_config_cbs, + .dump_regs = dwmac4_dump_regs, + .host_irq_status = dwmac4_irq_status, + .host_mtl_irq_status = dwmac4_irq_mtl_status, .flow_ctrl = dwmac4_flow_ctrl, .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, @@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, if (mac->multicast_filter_bins) mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); - mac->mac = &dwmac4_ops; - mac->link.port = GMAC_CONFIG_PS; mac->link.duplex = GMAC_CONFIG_DM; mac->link.speed = GMAC_CONFIG_FES; @@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, else mac->dma = &dwmac4_dma_ops; + if (*synopsys_id >= DWMAC_CORE_4_00) + mac->mac = &dwmac410_ops; + else + mac->mac = &dwmac4_ops; + return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 843ec69222eacd69f6107a0f6f1cceac83b2667f..aa6476439aee7f4c65784af4a451a8ffe5173561 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -304,12 +304,13 @@ static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls) + bool ls, unsigned int tot_pkt_len) { unsigned int tdes3 = le32_to_cpu(p->des3); p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK); + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK; if (is_fs) tdes3 |= TDES3_FIRST_DESCRIPTOR; else diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index f97b0d5d998742efcad71972bd74ce40cc02afad..eec8463057fd7573b54019298dfa894d27fc33e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) writel(value, ioaddr + DMA_SYS_BUS_MODE); } -static void dwmac4_dma_init_channel(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx_phy, u32 dma_rx_phy, - u32 channel) +void dwmac4_dma_init_rx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) { u32 value; - int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; - int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; - /* set PBL for each channels. 
Currently we affect same configuration - * on each channel - */ - value = readl(ioaddr + DMA_CHAN_CONTROL(channel)); - if (dma_cfg->pblx8) - value = value | DMA_BUS_MODE_PBL; - writel(value, ioaddr + DMA_CHAN_CONTROL(channel)); + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan)); +} - value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)); +void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + u32 value; + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel)); + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); - value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)); - value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel)); + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); +} - /* Mask interrupts by writing to CSR7 */ - writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel)); +void dwmac4_dma_init_channel(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan) +{ + u32 value; + + /* common channel control register config */ + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (dma_cfg->pblx8) + value = value | DMA_BUS_MODE_PBL; + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); - writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)); - writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)); + /* Mask interrupts by writing to CSR7 */ + writel(DMA_CHAN_INTR_DEFAULT_MASK, + ioaddr + DMA_CHAN_INTR_ENA(chan)); } static void dwmac4_dma_init(void __iomem *ioaddr, @@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr, u32 dma_tx, u32 dma_rx, int atds) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); - int i; /* Set the Fixed burst mode */ if (dma_cfg->fixed_burst) @@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem *ioaddr, value |= DMA_SYS_BUS_AAL; writel(value, ioaddr + DMA_SYS_BUS_MODE); - - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) - dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i); } static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel, @@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) _dwmac4_dump_dma_regs(ioaddr, i, reg_space); } -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt) +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) { - int i; + u32 chan; - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++) - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i)); + for (chan = 0; chan < number_chan; chan++) + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan)); } -static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode, - int rxmode, u32 channel) +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, + u32 channel, int fifosz) { - u32 mtl_tx_op, mtl_rx_op, mtl_rx_int; + unsigned int rqs = fifosz / 256 - 1; + u32 mtl_rx_op, mtl_rx_int; - /* Following code only done for channel 0, other channels not yet - * supported. 
- */ - mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + mtl_rx_op |= MTL_OP_MODE_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + mtl_rx_op &= ~MTL_OP_MODE_RSF; + mtl_rx_op &= MTL_OP_MODE_RTC_MASK; + if (mode <= 32) + mtl_rx_op |= MTL_OP_MODE_RTC_32; + else if (mode <= 64) + mtl_rx_op |= MTL_OP_MODE_RTC_64; + else if (mode <= 96) + mtl_rx_op |= MTL_OP_MODE_RTC_96; + else + mtl_rx_op |= MTL_OP_MODE_RTC_128; + } + + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; + + /* enable flow control only if each channel gets 4 KiB or more FIFO */ + if (fifosz >= 4096) { + unsigned int rfd, rfa; + + mtl_rx_op |= MTL_OP_MODE_EHFC; + + /* Set Threshold for Activating Flow Control to min 2 frames, + * i.e. 1500 * 2 = 3000 bytes. + * + * Set Threshold for Deactivating Flow Control to min 1 frame, + * i.e. 1500 bytes. + */ + switch (fifosz) { + case 4096: + /* This violates the above formula because of FIFO size + * limit therefore overflow may occur in spite of this. + */ + rfd = 0x03; /* Full-2.5K */ + rfa = 0x01; /* Full-1.5K */ + break; + + case 8192: + rfd = 0x06; /* Full-4K */ + rfa = 0x0a; /* Full-6K */ + break; + + case 16384: + rfd = 0x06; /* Full-4K */ + rfa = 0x12; /* Full-10K */ + break; + + default: + rfd = 0x06; /* Full-4K */ + rfa = 0x1e; /* Full-16K */ + break; + } + + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK; + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT; - if (txmode == SF_DMA_MODE) { + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK; + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT; + } + + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); + + /* Enable MTL RX overflow */ + mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); + writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, + ioaddr + MTL_CHAN_INT_CTRL(channel)); +} + +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, + u32 channel) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. 
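Illustrative aside, not part of the patch: the RQS value above stores the queue size in 256-byte blocks minus one, and the "Full-xK" comments imply that the RFA/RFD fields encode a free-space threshold of (field + 2) * 512 bytes below a full FIFO. A self-contained C sketch of that arithmetic, with the decode formula inferred from the in-line comments rather than quoted from the Synopsys databook:

#include <assert.h>

/* queue size in 256-byte blocks, minus one, as in "rqs = fifosz / 256 - 1" */
static unsigned int mtl_rqs(unsigned int fifosz)
{
	return fifosz / 256 - 1;
}

/* free-space threshold implied by the "Full-xK" comments above */
static unsigned int mtl_thresh_below_full(unsigned int field)
{
	return (field + 2) * 512;	/* 0x01 -> 1.5K, 0x06 -> 4K, 0x1e -> 16K */
}

int main(void)
{
	assert(mtl_rqs(4096) == 15);
	assert(mtl_thresh_below_full(0x01) == 1536);	/* "Full-1.5K" */
	assert(mtl_thresh_below_full(0x06) == 4096);	/* "Full-4K"   */
	assert(mtl_thresh_below_full(0x1e) == 16384);	/* "Full-16K"  */
	return 0;
}

The same formula reproduces every case in the switch above (0x03 -> 2.5K, 0x0a -> 6K, 0x12 -> 10K), which is why the 4096-byte case needs its "violates the formula" caveat: the FIFO is simply too small for the preferred 4K/6K pair.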
*/ mtl_tx_op |= MTL_OP_MODE_TSF; } else { - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); mtl_tx_op &= ~MTL_OP_MODE_TSF; mtl_tx_op &= MTL_OP_MODE_TTC_MASK; /* Set the transmit threshold */ - if (txmode <= 32) + if (mode <= 32) mtl_tx_op |= MTL_OP_MODE_TTC_32; - else if (txmode <= 64) + else if (mode <= 64) mtl_tx_op |= MTL_OP_MODE_TTC_64; - else if (txmode <= 96) + else if (mode <= 96) mtl_tx_op |= MTL_OP_MODE_TTC_96; - else if (txmode <= 128) + else if (mode <= 128) mtl_tx_op |= MTL_OP_MODE_TTC_128; - else if (txmode <= 192) + else if (mode <= 192) mtl_tx_op |= MTL_OP_MODE_TTC_192; - else if (txmode <= 256) + else if (mode <= 256) mtl_tx_op |= MTL_OP_MODE_TTC_256; - else if (txmode <= 384) + else if (mode <= 384) mtl_tx_op |= MTL_OP_MODE_TTC_384; else mtl_tx_op |= MTL_OP_MODE_TTC_512; @@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode, */ mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); - - mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel)); - - if (rxmode == SF_DMA_MODE) { - pr_debug("GMAC: enable RX store and forward mode\n"); - mtl_rx_op |= MTL_OP_MODE_RSF; - } else { - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); - mtl_rx_op &= ~MTL_OP_MODE_RSF; - mtl_rx_op &= MTL_OP_MODE_RTC_MASK; - if (rxmode <= 32) - mtl_rx_op |= MTL_OP_MODE_RTC_32; - else if (rxmode <= 64) - mtl_rx_op |= MTL_OP_MODE_RTC_64; - else if (rxmode <= 96) - mtl_rx_op |= MTL_OP_MODE_RTC_96; - else - mtl_rx_op |= MTL_OP_MODE_RTC_128; - } - - writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel)); - - /* Enable MTL RX overflow */ - mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel)); - writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN, - ioaddr + MTL_CHAN_INT_CTRL(channel)); -} - -static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) -{ - /* Only Channel 0 is actually configured and used */ - dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0); } static void dwmac4_get_hw_feature(void __iomem *ioaddr, @@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, hw_cap = readl(ioaddr + GMAC_HW_FEATURE1); dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by + * shifting and store the sizes in bytes. 
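A worked example of the log2(n / 128) encoding mentioned above (illustrative only; the masks and shifts are the ones used in the two lines just below):

/* standalone sketch: rebuild a FIFO size in bytes from its encoded field */
static unsigned int fifo_size_bytes(unsigned int encoded)
{
	return 128u << encoded;	/* 0 -> 128 B, 7 -> 16 KiB, 9 -> 64 KiB */
}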
+ */ + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6); + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0); /* MAC HW feature2 */ hw_cap = readl(ioaddr + GMAC_HW_FEATURE2); /* TX and RX number of channels */ @@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) const struct stmmac_dma_ops dwmac4_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, + .init_chan = dwmac4_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, .axi = dwmac4_dma_axi, .dump_regs = dwmac4_dump_dma_regs, - .dma_mode = dwmac4_dma_operation_mode, + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, .enable_dma_irq = dwmac4_enable_dma_irq, .disable_dma_irq = dwmac4_disable_dma_irq, .start_tx = dwmac4_dma_start_tx, @@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { const struct stmmac_dma_ops dwmac410_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, + .init_chan = dwmac4_dma_init_channel, + .init_rx_chan = dwmac4_dma_init_rx_chan, + .init_tx_chan = dwmac4_dma_init_tx_chan, .axi = dwmac4_dma_axi, .dump_regs = dwmac4_dump_dma_regs, - .dma_mode = dwmac4_dma_operation_mode, + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode, + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode, .enable_dma_irq = dwmac410_enable_dma_irq, .disable_dma_irq = dwmac4_disable_dma_irq, .start_tx = dwmac4_dma_start_tx, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 1b06df749e2bbab63c25dbbc9b4ef814fc9d5d46..8474bf961dd0c60a409ba4c1201557cdef7580dc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -185,17 +185,17 @@ int dwmac4_dma_reset(void __iomem *ioaddr); void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); -void dwmac4_enable_dma_irq(void __iomem *ioaddr); -void dwmac410_enable_dma_irq(void __iomem *ioaddr); -void dwmac4_disable_dma_irq(void __iomem *ioaddr); -void dwmac4_dma_start_tx(void __iomem *ioaddr); -void dwmac4_dma_stop_tx(void __iomem *ioaddr); -void dwmac4_dma_start_rx(void __iomem *ioaddr); -void dwmac4_dma_stop_rx(void __iomem *ioaddr); +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan); +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan); +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan); +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan); int dwmac4_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x); -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len); -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len); + struct stmmac_extra_stats *x, u32 chan); +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan); void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index c7326d5b2f432b00ea35042097adfe6d92063441..49f5687879df241f01ccf7d7befa3ac7dfd8f1dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioaddr) void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) { - writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0)); + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan)); } void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) { - writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0)); + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan)); } -void dwmac4_dma_start_tx(void __iomem *ioaddr) +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); value |= DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); value = readl(ioaddr + GMAC_CONFIG); value |= GMAC_CONFIG_TE; writel(value, ioaddr + GMAC_CONFIG); } -void dwmac4_dma_stop_tx(void __iomem *ioaddr) +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); value &= ~DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); value = readl(ioaddr + GMAC_CONFIG); value &= ~GMAC_CONFIG_TE; writel(value, ioaddr + GMAC_CONFIG); } -void dwmac4_dma_start_rx(void __iomem *ioaddr) +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); value |= DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); value = readl(ioaddr + GMAC_CONFIG); value |= GMAC_CONFIG_RE; writel(value, ioaddr + GMAC_CONFIG); } -void dwmac4_dma_stop_rx(void __iomem *ioaddr) +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); value &= ~DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); value = readl(ioaddr + GMAC_CONFIG); value &= ~GMAC_CONFIG_RE; writel(value, ioaddr + GMAC_CONFIG); } -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len) +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) { - writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0)); + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan)); } -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len) +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan) { - writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0)); + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan)); } -void dwmac4_enable_dma_irq(void __iomem *ioaddr) +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan) { writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + - DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); + DMA_CHAN_INTR_ENA(chan)); } -void dwmac410_enable_dma_irq(void __iomem *ioaddr) +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan) { writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, - ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); + ioaddr + DMA_CHAN_INTR_ENA(chan)); } -void dwmac4_disable_dma_irq(void __iomem *ioaddr) +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan) { - writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); + writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan)); } int 
dwmac4_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x) + struct stmmac_extra_stats *x, u32 chan) { int ret = 0; - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0)); + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { @@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, if (likely(intr_status & DMA_CHAN_STATUS_RI)) { u32 value; - value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); + value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); /* to schedule NAPI on real RIE event. */ if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { x->rx_normal_irq_n++; @@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, * status [21-0] expect reserved bits [5-3] */ writel((intr_status & 0x3fffc7), - ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0)); + ioaddr + DMA_CHAN_STATUS(chan)); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 56e485f79077374a9e19859a7953b3f18f5c42f3..9091df86723a3988075cbda535d5d6ba21826b7b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -137,13 +137,14 @@ #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ void dwmac_enable_dma_transmission(void __iomem *ioaddr); -void dwmac_enable_dma_irq(void __iomem *ioaddr); -void dwmac_disable_dma_irq(void __iomem *ioaddr); -void dwmac_dma_start_tx(void __iomem *ioaddr); -void dwmac_dma_stop_tx(void __iomem *ioaddr); -void dwmac_dma_start_rx(void __iomem *ioaddr); -void dwmac_dma_stop_rx(void __iomem *ioaddr); -int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan); +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan); +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 chan); int dwmac_dma_reset(void __iomem *ioaddr); #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index e60bfca2a763325880215bab4592d9dbe5056fbb..38f94305aab53116a74d533728d25cb750da66a1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr) writel(1, ioaddr + DMA_XMT_POLL_DEMAND); } -void dwmac_enable_dma_irq(void __iomem *ioaddr) +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan) { writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); } -void dwmac_disable_dma_irq(void __iomem *ioaddr) +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan) { writel(0, ioaddr + DMA_INTR_ENA); } -void dwmac_dma_start_tx(void __iomem *ioaddr) +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan) { u32 value = readl(ioaddr + DMA_CONTROL); value |= DMA_CONTROL_ST; writel(value, ioaddr + DMA_CONTROL); } -void dwmac_dma_stop_tx(void __iomem *ioaddr) +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan) { u32 value = readl(ioaddr + DMA_CONTROL); value &= ~DMA_CONTROL_ST; writel(value, ioaddr + DMA_CONTROL); } -void dwmac_dma_start_rx(void __iomem *ioaddr) +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan) { u32 value = readl(ioaddr + 
DMA_CONTROL); value |= DMA_CONTROL_SR; writel(value, ioaddr + DMA_CONTROL); } -void dwmac_dma_stop_rx(void __iomem *ioaddr) +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan) { u32 value = readl(ioaddr + DMA_CONTROL); value &= ~DMA_CONTROL_SR; @@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status) #endif int dwmac_dma_interrupt(void __iomem *ioaddr, - struct stmmac_extra_stats *x) + struct stmmac_extra_stats *x, u32 chan) { int ret = 0; /* read the status register (CSR5) */ diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 323b59ec74a35f4f318060f02d74c3b17af40d5d..7546b3664113a3d776fe19094df71b2adfb99e98 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls) + bool ls, unsigned int tot_pkt_len) { unsigned int tdes0 = le32_to_cpu(p->des0); diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index efb818ebd55e26c13b899c2180faf0ba0ed64609..f817f8f365696d3388e73f85710d30dde43a7d41 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode) static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls) + bool ls, unsigned int tot_pkt_len) { unsigned int tdes1 = le32_to_cpu(p->des1); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 452f256ff03f04bb6ee846966eb8961ebbc40c05..28e4b5d50ce6c0afc9c0e03dd5dc312009acda02 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -26,16 +26,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { - struct stmmac_priv *priv = (struct stmmac_priv *)p; - unsigned int entry = priv->cur_tx; - struct dma_desc *desc; + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; unsigned int nopaged_len = skb_headlen(skb); + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->cur_tx; unsigned int bmax, len, des2; + struct dma_desc *desc; if (priv->extend_desc) - desc = (struct dma_desc *)(priv->dma_etx + entry); + desc = (struct dma_desc *)(tx_q->dma_etx + entry); else - desc = priv->dma_tx + entry; + desc = tx_q->dma_tx + entry; if (priv->plat->enh_desc) bmax = BUF_SIZE_8KiB; @@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, des2)) return -1; - priv->tx_skbuff_dma[entry].buf = des2; - priv->tx_skbuff_dma[entry].len = bmax; - priv->tx_skbuff_dma[entry].is_jumbo = true; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = bmax; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, - STMMAC_RING_MODE, 0, false); - priv->tx_skbuff[entry] = NULL; + STMMAC_RING_MODE, 0, + false, skb->len); + tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (priv->extend_desc) - desc = (struct dma_desc *)(priv->dma_etx + entry); + desc = (struct dma_desc *)(tx_q->dma_etx + entry); else - desc = 
priv->dma_tx + entry; + desc = tx_q->dma_tx + entry; des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); desc->des2 = cpu_to_le32(des2); if (dma_mapping_error(priv->device, des2)) return -1; - priv->tx_skbuff_dma[entry].buf = des2; - priv->tx_skbuff_dma[entry].len = len; - priv->tx_skbuff_dma[entry].is_jumbo = true; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_RING_MODE, 1, true); + STMMAC_RING_MODE, 1, + true, skb->len); } else { des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); desc->des2 = cpu_to_le32(des2); if (dma_mapping_error(priv->device, des2)) return -1; - priv->tx_skbuff_dma[entry].buf = des2; - priv->tx_skbuff_dma[entry].len = nopaged_len; - priv->tx_skbuff_dma[entry].is_jumbo = true; + tx_q->tx_skbuff_dma[entry].buf = des2; + tx_q->tx_skbuff_dma[entry].len = nopaged_len; + tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, - STMMAC_RING_MODE, 0, true); + STMMAC_RING_MODE, 0, + true, skb->len); } - priv->cur_tx = entry; + tx_q->cur_tx = entry; return entry; } @@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma_desc *p) static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; - unsigned int entry = priv->dirty_tx; + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; + struct stmmac_priv *priv = tx_q->priv_data; + unsigned int entry = tx_q->dirty_tx; /* des3 is only used for jumbo frames tx or time stamping */ - if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo || - (priv->tx_skbuff_dma[entry].last_segment && + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo || + (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && priv->hwts_tx_en))) p->des3 = 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index cd8fb619b1e977cd2b51aa3cfbb9b242fe94510d..33efe7038cabf7740c359b6633bc963032f02165 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -46,38 +46,51 @@ struct stmmac_tx_info { bool is_jumbo; }; -struct stmmac_priv { - /* Frequently used values are kept adjacent for cache effect */ +/* Frequently used values are kept adjacent for cache effect */ +struct stmmac_tx_queue { + u32 queue_index; + struct stmmac_priv *priv_data; struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; struct dma_desc *dma_tx; struct sk_buff **tx_skbuff; + struct stmmac_tx_info *tx_skbuff_dma; unsigned int cur_tx; unsigned int dirty_tx; + dma_addr_t dma_tx_phy; + u32 tx_tail_addr; +}; + +struct stmmac_rx_queue { + u32 queue_index; + struct stmmac_priv *priv_data; + struct dma_extended_desc *dma_erx; + struct dma_desc *dma_rx ____cacheline_aligned_in_smp; + struct sk_buff **rx_skbuff; + dma_addr_t *rx_skbuff_dma; + unsigned int cur_rx; + unsigned int dirty_rx; + u32 rx_zeroc_thresh; + dma_addr_t dma_rx_phy; + u32 rx_tail_addr; + struct napi_struct napi ____cacheline_aligned_in_smp; +}; + +struct stmmac_priv { + /* Frequently used values are kept adjacent for cache effect */ u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; - struct stmmac_tx_info *tx_skbuff_dma; - dma_addr_t dma_tx_phy; + int tx_coalesce; int hwts_tx_en; bool 
tx_path_in_lpi_mode; struct timer_list txtimer; bool tso; - struct dma_desc *dma_rx ____cacheline_aligned_in_smp; - struct dma_extended_desc *dma_erx; - struct sk_buff **rx_skbuff; - unsigned int cur_rx; - unsigned int dirty_rx; unsigned int dma_buf_sz; unsigned int rx_copybreak; - unsigned int rx_zeroc_thresh; u32 rx_riwt; int hwts_rx_en; - dma_addr_t *rx_skbuff_dma; - dma_addr_t dma_rx_phy; - - struct napi_struct napi ____cacheline_aligned_in_smp; void __iomem *ioaddr; struct net_device *dev; @@ -85,6 +98,12 @@ struct stmmac_priv { struct mac_device_info *hw; spinlock_t lock; + /* RX Queue */ + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; + + /* TX Queue */ + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; + int oldlink; int speed; int oldduplex; @@ -119,8 +138,6 @@ struct stmmac_priv { spinlock_t ptp_lock; void __iomem *mmcaddr; void __iomem *ptpaddr; - u32 rx_tail_addr; - u32 tx_tail_addr; u32 mss; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 85d64114e159e6d76a03fe5cca839fb246a6e0e6..16808e48ca1cf7bbd4237967ce0497caffc52808 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); + u32 tx_cnt = priv->plat->tx_queues_to_use; struct phy_device *phy = netdev->phydev; int new_pause = FLOW_OFF; @@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device *netdev, } priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, - priv->pause); + priv->pause, tx_cnt); return 0; } @@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 tx_queues_count = priv->plat->tx_queues_to_use; int i, j = 0; /* Update the DMA HW counters for dwmac10/100 */ @@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, if ((priv->hw->mac->debug) && (priv->synopsys_id >= DWMAC_CORE_3_50)) priv->hw->mac->debug(priv->ioaddr, - (void *)&priv->xstats); + (void *)&priv->xstats, + rx_queues_count, tx_queues_count); } for (i = 0; i < STMMAC_STATS_LEN; i++) { char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; @@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int rx_riwt; /* Check not supported parameters */ @@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct net_device *dev, priv->tx_coal_frames = ec->tx_max_coalesced_frames; priv->tx_coal_timer = ec->tx_coalesce_usecs; priv->rx_riwt = rx_riwt; - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4498a3861aa3ad09460e922bd7f38e3506889dcb..cd8c601323905a7ded2504fa69faa54a1c728066 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -138,6 +138,64 @@ static void stmmac_verify_args(void) eee_timer = STMMAC_DEFAULT_LPI_TIMER; } +/** + * stmmac_disable_all_queues - Disable all queues + * @priv: driver private 
structure + */ +static void stmmac_disable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 queue; + + for (queue = 0; queue < rx_queues_cnt; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + napi_disable(&rx_q->napi); + } +} + +/** + * stmmac_enable_all_queues - Enable all queues + * @priv: driver private structure + */ +static void stmmac_enable_all_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 queue; + + for (queue = 0; queue < rx_queues_cnt; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + napi_enable(&rx_q->napi); + } +} + +/** + * stmmac_stop_all_queues - Stop all queues + * @priv: driver private structure + */ +static void stmmac_stop_all_queues(struct stmmac_priv *priv) +{ + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < tx_queues_cnt; queue++) + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); +} + +/** + * stmmac_start_all_queues - Start all queues + * @priv: driver private structure + */ +static void stmmac_start_all_queues(struct stmmac_priv *priv) +{ + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < tx_queues_cnt; queue++) + netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); +} + /** * stmmac_clk_csr_set - dynamically set the MDC clock * @priv: driver private structure @@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf, int len) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); } -static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; u32 avail; - if (priv->dirty_tx > priv->cur_tx) - avail = priv->dirty_tx - priv->cur_tx - 1; + if (tx_q->dirty_tx > tx_q->cur_tx) + avail = tx_q->dirty_tx - tx_q->cur_tx - 1; else - avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; + avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; return avail; } -static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) +/** + * stmmac_rx_dirty - Get RX queue dirty + * @priv: driver private structure + * @queue: RX queue index + */ +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; u32 dirty; - if (priv->dirty_rx <= priv->cur_rx) - dirty = priv->cur_rx - priv->dirty_rx; + if (rx_q->dirty_rx <= rx_q->cur_rx) + dirty = rx_q->cur_rx - rx_q->dirty_rx; else - dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; + dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; return dirty; } @@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) */ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) { + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + /* check if all TX queues have the work finished */ + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + if (tx_q->dirty_tx != tx_q->cur_tx) + return; /* still unfinished work */ + } + /* Check and enter in LPI mode */ - if ((priv->dirty_tx == priv->cur_tx) && - (priv->tx_path_in_lpi_mode == false)) + if (!priv->tx_path_in_lpi_mode) priv->hw->mac->set_eee_mode(priv->hw, priv->plat->en_tx_lpi_clockgating); } @@ -672,6 +747,19 @@ static void stmmac_release_ptp(struct stmmac_priv *priv) stmmac_ptp_unregister(priv); } +/** + * stmmac_mac_flow_ctrl - Configure flow control in all queues 
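The ring-occupancy helpers above are easy to get wrong by one; here is a standalone model of the same arithmetic (illustrative only, with 512 standing in for DMA_TX_SIZE, whose real value is defined elsewhere in the driver):

#include <assert.h>

static unsigned int tx_avail(unsigned int dirty, unsigned int cur)
{
	const unsigned int size = 512;

	/* one slot is sacrificed so that cur == dirty means "empty" */
	if (dirty > cur)
		return dirty - cur - 1;
	return size - cur + dirty - 1;
}

int main(void)
{
	assert(tx_avail(0, 0) == 511);	/* empty ring */
	assert(tx_avail(10, 9) == 0);	/* full ring  */
	assert(tx_avail(100, 200) == 411);
	return 0;
}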
+ * @priv: driver private structure + * Description: It is used for configuring the flow control in all queues + */ +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + + priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, + priv->pause, tx_cnt); +} + /** * stmmac_adjust_link - adjusts the link parameters * @dev: net device structure @@ -687,7 +775,6 @@ static void stmmac_adjust_link(struct net_device *dev) struct phy_device *phydev = dev->phydev; unsigned long flags; int new_state = 0; - unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; if (!phydev) return; @@ -709,8 +796,7 @@ static void stmmac_adjust_link(struct net_device *dev) } /* Flow Control operation */ if (phydev->pause) - priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex, - fc, pause_time); + stmmac_mac_flow_ctrl(priv, phydev->duplex); if (phydev->speed != priv->speed) { new_state = 1; @@ -878,22 +964,56 @@ static int stmmac_init_phy(struct net_device *dev) return 0; } -static void stmmac_display_rings(struct stmmac_priv *priv) +static void stmmac_display_rx_rings(struct stmmac_priv *priv) { - void *head_rx, *head_tx; + u32 rx_cnt = priv->plat->rx_queues_to_use; + void *head_rx; + u32 queue; - if (priv->extend_desc) { - head_rx = (void *)priv->dma_erx; - head_tx = (void *)priv->dma_etx; - } else { - head_rx = (void *)priv->dma_rx; - head_tx = (void *)priv->dma_tx; + /* Display RX rings */ + for (queue = 0; queue < rx_cnt; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + pr_info("\tRX Queue %u rings\n", queue); + + if (priv->extend_desc) + head_rx = (void *)rx_q->dma_erx; + else + head_rx = (void *)rx_q->dma_rx; + + /* Display RX ring */ + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); } +} - /* Display Rx ring */ - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); - /* Display Tx ring */ - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); +static void stmmac_display_tx_rings(struct stmmac_priv *priv) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + void *head_tx; + u32 queue; + + /* Display TX rings */ + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + pr_info("\tTX Queue %d rings\n", queue); + + if (priv->extend_desc) + head_tx = (void *)tx_q->dma_etx; + else + head_tx = (void *)tx_q->dma_tx; + + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); + } +} + +static void stmmac_display_rings(struct stmmac_priv *priv) +{ + /* Display RX ring */ + stmmac_display_rx_rings(priv); + + /* Display TX ring */ + stmmac_display_tx_rings(priv); } static int stmmac_set_bfsize(int mtu, int bufsize) @@ -913,48 +1033,88 @@ static int stmmac_set_bfsize(int mtu, int bufsize) } /** - * stmmac_clear_descriptors - clear descriptors + * stmmac_clear_rx_descriptors - clear RX descriptors * @priv: driver private structure - * Description: this function is called to clear the tx and rx descriptors + * @queue: RX queue index + * Description: this function is called to clear the RX descriptors * in case of both basic and extended descriptors are used. 
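The per-queue clear helpers below all share one shape: reset every slot and flag only the final one as the end of the ring, so the DMA engine wraps back to the base address. A minimal model (illustrative, with the descriptor reduced to its end-of-ring flag):

static void clear_ring(unsigned char *ring_end, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		ring_end[i] = (i == len - 1);	/* mirrors (i == DMA_RX_SIZE - 1) */
}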
*/ -static void stmmac_clear_descriptors(struct stmmac_priv *priv) +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; int i; - /* Clear the Rx/Tx descriptors */ + /* Clear the RX descriptors */ for (i = 0; i < DMA_RX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, + priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, (i == DMA_RX_SIZE - 1)); else - priv->hw->desc->init_rx_desc(&priv->dma_rx[i], + priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], priv->use_riwt, priv->mode, (i == DMA_RX_SIZE - 1)); +} + +/** + * stmmac_clear_tx_descriptors - clear tx descriptors + * @priv: driver private structure + * @queue: TX queue index. + * Description: this function is called to clear the TX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + int i; + + /* Clear the TX descriptors */ for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, priv->mode, (i == DMA_TX_SIZE - 1)); else - priv->hw->desc->init_tx_desc(&priv->dma_tx[i], + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], priv->mode, (i == DMA_TX_SIZE - 1)); } +/** + * stmmac_clear_descriptors - clear descriptors + * @priv: driver private structure + * Description: this function is called to clear the TX and RX descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_descriptors(struct stmmac_priv *priv) +{ + u32 rx_queue_cnt = priv->plat->rx_queues_to_use; + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + /* Clear the RX descriptors */ + for (queue = 0; queue < rx_queue_cnt; queue++) + stmmac_clear_rx_descriptors(priv, queue); + + /* Clear the TX descriptors */ + for (queue = 0; queue < tx_queue_cnt; queue++) + stmmac_clear_tx_descriptors(priv, queue); +} + /** * stmmac_init_rx_buffers - init the RX descriptor buffer. * @priv: driver private structure * @p: descriptor pointer * @i: descriptor index - * @flags: gfp flag. + * @flags: gfp flag + * @queue: RX queue index * Description: this function is called to allocate a receive buffer, perform * the DMA mapping and init the descriptor. 
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-				  int i, gfp_t flags)
+				  int i, gfp_t flags, u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct sk_buff *skb;
 
 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
@@ -963,20 +1123,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 			   "%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
 	}
-	priv->rx_skbuff[i] = skb;
-	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+	rx_q->rx_skbuff[i] = skb;
+	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz, DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
 
 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
-		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
 	else
-		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
 
 	if ((priv->hw->mode->init_desc3) &&
 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -985,30 +1145,71 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	return 0;
 }
 
-static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+/**
+ * stmmac_free_rx_buffer - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 {
-	if (priv->rx_skbuff[i]) {
-		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+	if (rx_q->rx_skbuff[i]) {
+		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
 	}
-	priv->rx_skbuff[i] = NULL;
+	rx_q->rx_skbuff[i] = NULL;
 }
 
 /**
- * init_dma_desc_rings - init the RX/TX descriptor rings
+ * stmmac_free_tx_buffer - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+	if (tx_q->tx_skbuff_dma[i].buf) {
+		if (tx_q->tx_skbuff_dma[i].map_as_page)
+			dma_unmap_page(priv->device,
+				       tx_q->tx_skbuff_dma[i].buf,
+				       tx_q->tx_skbuff_dma[i].len,
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(priv->device,
+					 tx_q->tx_skbuff_dma[i].buf,
+					 tx_q->tx_skbuff_dma[i].len,
+					 DMA_TO_DEVICE);
+	}
+
+	if (tx_q->tx_skbuff[i]) {
+		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+		tx_q->tx_skbuff[i] = NULL;
+		tx_q->tx_skbuff_dma[i].buf = 0;
+		tx_q->tx_skbuff_dma[i].map_as_page = false;
+	}
+}
+
+/**
+ * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
- * Description: this function initializes the DMA RX/TX descriptors
+ * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 {
-	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_count = priv->plat->rx_queues_to_use;
 	unsigned int bfsize = 0;
 	int ret = -ENOMEM;
+	u32 queue;
+	int i;
 
 	if (priv->hw->mode->set_16kib_bfsize)
 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1018,257 +1219,516 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 
 	priv->dma_buf_sz = bfsize;
 
-	netif_dbg(priv, probe, priv->dev,
-		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
-		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
-
 	/* RX INITIALIZATION */
 	netif_dbg(priv, probe, priv->dev,
 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
 
-	for (i = 0; i < DMA_RX_SIZE; i++) {
-		struct dma_desc *p;
-		if (priv->extend_desc)
-			p = &((priv->dma_erx + i)->basic);
-		else
-			p = priv->dma_rx + i;
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		netif_dbg(priv, probe, priv->dev,
+			  "(%s) dma_rx_phy=0x%08x\n", __func__,
+			  (u32)rx_q->dma_rx_phy);
 
-		ret = stmmac_init_rx_buffers(priv, p, i, flags);
-		if (ret)
-			goto err_init_rx_buffers;
+		for (i = 0; i < DMA_RX_SIZE; i++) {
+			struct dma_desc *p;
 
-		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
-			  (unsigned int)priv->rx_skbuff_dma[i]);
+			if (priv->extend_desc)
+				p = &((rx_q->dma_erx + i)->basic);
+			else
+				p = rx_q->dma_rx + i;
+
+			ret = stmmac_init_rx_buffers(priv, p, i, flags,
+						     queue);
+			if (ret)
+				goto err_init_rx_buffers;
+
+			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+				  (unsigned int)rx_q->rx_skbuff_dma[i]);
+		}
+
+		rx_q->cur_rx = 0;
+		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+		stmmac_clear_rx_descriptors(priv, queue);
+
+		/* Setup the chained descriptor addresses */
+		if (priv->mode == STMMAC_CHAIN_MODE) {
+			if (priv->extend_desc)
+				priv->hw->mode->init(rx_q->dma_erx,
+						     rx_q->dma_rx_phy,
+						     DMA_RX_SIZE, 1);
+			else
+				priv->hw->mode->init(rx_q->dma_rx,
+						     rx_q->dma_rx_phy,
+						     DMA_RX_SIZE, 0);
+		}
 	}
-	priv->cur_rx = 0;
-	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+	buf_sz = bfsize;
 
-	/* Setup the chained descriptor addresses */
-	if (priv->mode == STMMAC_CHAIN_MODE) {
-		if (priv->extend_desc) {
-			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
-					     DMA_RX_SIZE, 1);
-			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
-					     DMA_TX_SIZE, 1);
-		} else {
-			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
-					     DMA_RX_SIZE, 0);
-			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
-					     DMA_TX_SIZE, 0);
-		}
+	return 0;
+
+err_init_rx_buffers:
+	while (queue >= 0) {
+		while (--i >= 0)
+			stmmac_free_rx_buffer(priv, queue, i);
+
+		if (queue == 0)
+			break;
+
+		i = DMA_RX_SIZE;
+		queue--;
 	}
 
-	/* TX INITIALIZATION */
-	for (i = 0; i < DMA_TX_SIZE; i++) {
-		struct dma_desc *p;
-		if (priv->extend_desc)
-			p = &((priv->dma_etx + i)->basic);
-		else
-			p = priv->dma_tx + i;
+	return ret;
+}
 
-		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-			p->des0 = 0;
-			p->des1 = 0;
-			p->des2 = 0;
-			p->des3 = 0;
-		} else {
-			p->des2 = 0;
+/**
+ * init_dma_tx_desc_rings - init the TX descriptor rings
+ * @dev: net device structure.
+ * Description: this function initializes the DMA TX descriptors.
+ * It supports the chained and ring modes.
+ */ +static int init_dma_tx_desc_rings(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + int i; + + for (queue = 0; queue < tx_queue_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + netif_dbg(priv, probe, priv->dev, + "(%s) dma_tx_phy=0x%08x\n", __func__, + (u32)tx_q->dma_tx_phy); + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) + priv->hw->mode->init(tx_q->dma_etx, + tx_q->dma_tx_phy, + DMA_TX_SIZE, 1); + else + priv->hw->mode->init(tx_q->dma_tx, + tx_q->dma_tx_phy, + DMA_TX_SIZE, 0); } - priv->tx_skbuff_dma[i].buf = 0; - priv->tx_skbuff_dma[i].map_as_page = false; - priv->tx_skbuff_dma[i].len = 0; - priv->tx_skbuff_dma[i].last_segment = false; - priv->tx_skbuff[i] = NULL; + for (i = 0; i < DMA_TX_SIZE; i++) { + struct dma_desc *p; + if (priv->extend_desc) + p = &((tx_q->dma_etx + i)->basic); + else + p = tx_q->dma_tx + i; + + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + } else { + p->des2 = 0; + } + + tx_q->tx_skbuff_dma[i].buf = 0; + tx_q->tx_skbuff_dma[i].map_as_page = false; + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; + } + + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); } - priv->dirty_tx = 0; - priv->cur_tx = 0; - netdev_reset_queue(priv->dev); + return 0; +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * @flags: gfp flag. + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. It supports the chained and ring + * modes. 
+ */
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	ret = init_dma_rx_desc_rings(dev, flags);
+	if (ret)
+		return ret;
+
+	ret = init_dma_tx_desc_rings(dev);
 
 	stmmac_clear_descriptors(priv);
 
 	if (netif_msg_hw(priv))
 		stmmac_display_rings(priv);
 
-	return 0;
-err_init_rx_buffers:
-	while (--i >= 0)
-		stmmac_free_rx_buffers(priv, i);
 	return ret;
 }
 
-static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
 	for (i = 0; i < DMA_RX_SIZE; i++)
-		stmmac_free_rx_buffers(priv, i);
+		stmmac_free_rx_buffer(priv, queue, i);
 }
 
-static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
-	for (i = 0; i < DMA_TX_SIZE; i++) {
-		if (priv->tx_skbuff_dma[i].buf) {
-			if (priv->tx_skbuff_dma[i].map_as_page)
-				dma_unmap_page(priv->device,
-					       priv->tx_skbuff_dma[i].buf,
-					       priv->tx_skbuff_dma[i].len,
-					       DMA_TO_DEVICE);
-			else
-				dma_unmap_single(priv->device,
-						 priv->tx_skbuff_dma[i].buf,
-						 priv->tx_skbuff_dma[i].len,
-						 DMA_TO_DEVICE);
+	for (i = 0; i < DMA_TX_SIZE; i++)
+		stmmac_free_tx_buffer(priv, queue, i);
+}
+
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		/* Release the DMA RX socket buffers */
+		dma_free_rx_skbufs(priv, queue);
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_RX_SIZE * sizeof(struct dma_desc),
+					  rx_q->dma_rx, rx_q->dma_rx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_RX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  rx_q->dma_erx, rx_q->dma_rx_phy);
+
+		kfree(rx_q->rx_skbuff_dma);
+		kfree(rx_q->rx_skbuff);
+	}
+}
+
+/**
+ * free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue = 0;
+
+	/* Free TX queue resources */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		/* Release the DMA TX socket buffers */
+		dma_free_tx_skbufs(priv, queue);
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_TX_SIZE * sizeof(struct dma_desc),
+					  tx_q->dma_tx, tx_q->dma_tx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_TX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  tx_q->dma_etx, tx_q->dma_tx_phy);
+
+		kfree(tx_q->tx_skbuff_dma);
+		kfree(tx_q->tx_skbuff);
+	}
+}
+
+/**
+ * alloc_dma_rx_desc_resources - alloc RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the RX path. It pre-allocates
+ * the RX socket buffers in order to allow the zero-copy mechanism.
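Before the per-queue allocators themselves, a hedged standalone sketch of their cleanup-on-failure shape: allocate queue by queue and, on the first failure, fall into a single error path that releases everything obtained so far. calloc() and free() stand in for kmalloc_array()/dma_zalloc_coherent() and their matching release helpers; the sizes are illustrative only:

#include <stdlib.h>

static int alloc_all(void *rings[], unsigned int count)
{
	unsigned int q;

	for (q = 0; q < count; q++) {
		rings[q] = calloc(512, 32);	/* ring length * descriptor size */
		if (!rings[q])
			goto err;
	}
	return 0;
err:
	while (q--)		/* unwind only the queues already allocated */
		free(rings[q]);
	return -1;
}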
+ */
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		rx_q->queue_index = queue;
+		rx_q->priv_data = priv;
+
+		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+						    sizeof(dma_addr_t),
+						    GFP_KERNEL);
+		if (!rx_q->rx_skbuff_dma)
+			return -ENOMEM;
+
+		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+						sizeof(struct sk_buff *),
+						GFP_KERNEL);
+		if (!rx_q->rx_skbuff)
+			goto err_dma;
+
+		if (priv->extend_desc) {
+			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+							    DMA_RX_SIZE *
+							    sizeof(struct
+							    dma_extended_desc),
+							    &rx_q->dma_rx_phy,
+							    GFP_KERNEL);
+			if (!rx_q->dma_erx)
+				goto err_dma;
+
+		} else {
+			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+							   DMA_RX_SIZE *
+							   sizeof(struct
+							   dma_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
+			if (!rx_q->dma_rx)
+				goto err_dma;
+		}
+	}
+
+	return 0;
+
+err_dma:
+	free_dma_rx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * alloc_dma_tx_desc_resources - alloc TX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for the TX path.
+ */
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	int ret = -ENOMEM;
+	u32 queue;
+
+	/* TX queues buffers and DMA */
+	for (queue = 0; queue < tx_count; queue++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		tx_q->queue_index = queue;
+		tx_q->priv_data = priv;
+
+		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+						    sizeof(*tx_q->tx_skbuff_dma),
+						    GFP_KERNEL);
+		if (!tx_q->tx_skbuff_dma)
+			return -ENOMEM;
+
+		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
+						sizeof(struct sk_buff *),
+						GFP_KERNEL);
+		if (!tx_q->tx_skbuff)
+			goto err_dma_buffers;
+
+		if (priv->extend_desc) {
+			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+							    DMA_TX_SIZE *
+							    sizeof(struct
+							    dma_extended_desc),
+							    &tx_q->dma_tx_phy,
+							    GFP_KERNEL);
+			if (!tx_q->dma_etx)
+				goto err_dma_buffers;
+		} else {
+			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+							   DMA_TX_SIZE *
+							   sizeof(struct
+							   dma_desc),
+							   &tx_q->dma_tx_phy,
+							   GFP_KERNEL);
+			if (!tx_q->dma_tx)
+				goto err_dma_buffers;
+		}
+	}
+
+	return 0;
+
+err_dma_buffers:
+	free_dma_tx_desc_resources(priv);
+
+	return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffers in order to
+ * allow the zero-copy mechanism.
+ */ +static int alloc_dma_desc_resources(struct stmmac_priv *priv) +{ + /* RX Allocation */ + int ret = alloc_dma_rx_desc_resources(priv); + + if (ret) + return ret; + + ret = alloc_dma_tx_desc_resources(priv); + + return ret; +} + +/** + * free_dma_desc_resources - free dma desc resources + * @priv: private structure + */ +static void free_dma_desc_resources(struct stmmac_priv *priv) +{ + /* Release the DMA RX socket buffers */ + free_dma_rx_desc_resources(priv); + + /* Release the DMA TX socket buffers */ + free_dma_tx_desc_resources(priv); +} + +/** + * stmmac_mac_enable_rx_queues - Enable MAC rx queues + * @priv: driver private structure + * Description: It is used for enabling the rx queues in the MAC + */ +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + int queue; + u8 mode; - if (priv->tx_skbuff[i]) { - dev_kfree_skb_any(priv->tx_skbuff[i]); - priv->tx_skbuff[i] = NULL; - priv->tx_skbuff_dma[i].buf = 0; - priv->tx_skbuff_dma[i].map_as_page = false; - } + for (queue = 0; queue < rx_queues_count; queue++) { + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); } } /** - * alloc_dma_desc_resources - alloc TX/RX resources. - * @priv: private structure - * Description: according to which descriptor can be used (extend or basic) - * this function allocates the resources for TX and RX paths. In case of - * reception, for example, it pre-allocated the RX socket buffer in order to - * allow zero-copy mechanism. + * stmmac_start_rx_dma - start RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This starts a RX DMA channel */ -static int alloc_dma_desc_resources(struct stmmac_priv *priv) +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) { - int ret = -ENOMEM; - - priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), - GFP_KERNEL); - if (!priv->rx_skbuff_dma) - return -ENOMEM; - - priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), - GFP_KERNEL); - if (!priv->rx_skbuff) - goto err_rx_skbuff; - - priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, - sizeof(*priv->tx_skbuff_dma), - GFP_KERNEL); - if (!priv->tx_skbuff_dma) - goto err_tx_skbuff_dma; - - priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), - GFP_KERNEL); - if (!priv->tx_skbuff) - goto err_tx_skbuff; - - if (priv->extend_desc) { - priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * - sizeof(struct - dma_extended_desc), - &priv->dma_rx_phy, - GFP_KERNEL); - if (!priv->dma_erx) - goto err_dma; - - priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * - sizeof(struct - dma_extended_desc), - &priv->dma_tx_phy, - GFP_KERNEL); - if (!priv->dma_etx) { - dma_free_coherent(priv->device, DMA_RX_SIZE * - sizeof(struct dma_extended_desc), - priv->dma_erx, priv->dma_rx_phy); - goto err_dma; - } - } else { - priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * - sizeof(struct dma_desc), - &priv->dma_rx_phy, - GFP_KERNEL); - if (!priv->dma_rx) - goto err_dma; + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); + priv->hw->dma->start_rx(priv->ioaddr, chan); +} - priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * - sizeof(struct dma_desc), - &priv->dma_tx_phy, - GFP_KERNEL); - if (!priv->dma_tx) { - dma_free_coherent(priv->device, DMA_RX_SIZE * - sizeof(struct dma_desc), - priv->dma_rx, priv->dma_rx_phy); - goto err_dma; - } - } +/** + 
* stmmac_start_tx_dma - start TX DMA channel + * @priv: driver private structure + * @chan: TX channel index + * Description: + * This starts a TX DMA channel + */ +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); + priv->hw->dma->start_tx(priv->ioaddr, chan); +} - return 0; +/** + * stmmac_stop_rx_dma - stop RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This stops a RX DMA channel + */ +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_rx(priv->ioaddr, chan); +} -err_dma: - kfree(priv->tx_skbuff); -err_tx_skbuff: - kfree(priv->tx_skbuff_dma); -err_tx_skbuff_dma: - kfree(priv->rx_skbuff); -err_rx_skbuff: - kfree(priv->rx_skbuff_dma); - return ret; +/** + * stmmac_stop_tx_dma - stop TX DMA channel + * @priv: driver private structure + * @chan: TX channel index + * Description: + * This stops a TX DMA channel + */ +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) +{ + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_tx(priv->ioaddr, chan); } -static void free_dma_desc_resources(struct stmmac_priv *priv) +/** + * stmmac_start_all_dma - start all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This starts all the RX and TX DMA channels + */ +static void stmmac_start_all_dma(struct stmmac_priv *priv) { - /* Release the DMA TX/RX socket buffers */ - dma_free_rx_skbufs(priv); - dma_free_tx_skbufs(priv); - - /* Free DMA regions of consistent memory previously allocated */ - if (!priv->extend_desc) { - dma_free_coherent(priv->device, - DMA_TX_SIZE * sizeof(struct dma_desc), - priv->dma_tx, priv->dma_tx_phy); - dma_free_coherent(priv->device, - DMA_RX_SIZE * sizeof(struct dma_desc), - priv->dma_rx, priv->dma_rx_phy); - } else { - dma_free_coherent(priv->device, DMA_TX_SIZE * - sizeof(struct dma_extended_desc), - priv->dma_etx, priv->dma_tx_phy); - dma_free_coherent(priv->device, DMA_RX_SIZE * - sizeof(struct dma_extended_desc), - priv->dma_erx, priv->dma_rx_phy); - } - kfree(priv->rx_skbuff_dma); - kfree(priv->rx_skbuff); - kfree(priv->tx_skbuff_dma); - kfree(priv->tx_skbuff); + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; + + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_start_rx_dma(priv, chan); + + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_start_tx_dma(priv, chan); } /** - * stmmac_mac_enable_rx_queues - Enable MAC rx queues - * @priv: driver private structure - * Description: It is used for enabling the rx queues in the MAC + * stmmac_stop_all_dma - stop all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This stops the RX and TX DMA channels */ -static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) +static void stmmac_stop_all_dma(struct stmmac_priv *priv) { - int rx_count = priv->dma_cap.number_rx_queues; - int queue = 0; + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; - /* If GMAC does not have multiple queues, then this is not necessary*/ - if (rx_count == 1) - return; + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_stop_rx_dma(priv, chan); - /** - * If the core is synthesized with multiple rx 
queues / multiple - * dma channels, then rx queues will be disabled by default. - * For now only rx queue 0 is enabled. - */ - priv->hw->mac->rx_queue_enable(priv->hw, queue); + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_stop_tx_dma(priv, chan); } /** @@ -1279,11 +1739,20 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) */ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) { + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; int rxfifosz = priv->plat->rx_fifo_size; + u32 txmode = 0; + u32 rxmode = 0; + u32 chan = 0; + + if (rxfifosz == 0) + rxfifosz = priv->dma_cap.rx_fifo_size; - if (priv->plat->force_thresh_dma_mode) - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); - else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { + if (priv->plat->force_thresh_dma_mode) { + txmode = tc; + rxmode = tc; + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { /* * In case of GMAC, SF mode can be enabled * to perform the TX COE in HW. This depends on: @@ -1291,37 +1760,53 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) * 2) There is no bugged Jumbo frame support * that needs to not insert csum in the TDES. */ - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, - rxfifosz); + txmode = SF_DMA_MODE; + rxmode = SF_DMA_MODE; priv->xstats.threshold = SF_DMA_MODE; - } else - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, + } else { + txmode = tc; + rxmode = SF_DMA_MODE; + } + + /* configure all channels */ + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + for (chan = 0; chan < rx_channels_count; chan++) + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, + rxfifosz); + + for (chan = 0; chan < tx_channels_count; chan++) + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); + } else { + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, rxfifosz); + } } /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure + * @queue: TX queue index * Description: it reclaims the transmit resources after transmission completes. 
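/*
 * A stand-alone sketch of the txmode/rxmode selection performed by
 * stmmac_dma_operation_mode() above; once chosen, the driver programs the
 * pair per channel on cores >= 4.00 and globally otherwise. SF_DMA_MODE and
 * tc below are stand-in values for the example, not the driver's definitions.
 */
#include <stdio.h>
#include <stdbool.h>

#define SF_DMA_MODE (-1)	/* stand-in: store-and-forward */

static int tc = 64;		/* stand-in: threshold count */

struct plat_cfg {
	bool force_thresh_dma_mode;
	bool force_sf_dma_mode;
	bool tx_coe;
};

static void pick_dma_modes(const struct plat_cfg *plat, int *txmode,
			   int *rxmode)
{
	if (plat->force_thresh_dma_mode) {
		/* Both paths forced to threshold mode. */
		*txmode = tc;
		*rxmode = tc;
	} else if (plat->force_sf_dma_mode || plat->tx_coe) {
		/* Store-and-forward so TX checksum offload can run in HW. */
		*txmode = SF_DMA_MODE;
		*rxmode = SF_DMA_MODE;
	} else {
		/* Default: threshold TX, store-and-forward RX. */
		*txmode = tc;
		*rxmode = SF_DMA_MODE;
	}
}

int main(void)
{
	struct plat_cfg plat = { .tx_coe = true };
	int txmode, rxmode;

	pick_dma_modes(&plat, &txmode, &rxmode);
	printf("txmode=%d rxmode=%d\n", txmode, rxmode);
	return 0;
}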
*/ -static void stmmac_tx_clean(struct stmmac_priv *priv) +static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry = priv->dirty_tx; + unsigned int entry = tx_q->dirty_tx; netif_tx_lock(priv->dev); priv->xstats.tx_clean++; - while (entry != priv->cur_tx) { - struct sk_buff *skb = priv->tx_skbuff[entry]; + while (entry != tx_q->cur_tx) { + struct sk_buff *skb = tx_q->tx_skbuff[entry]; struct dma_desc *p; int status; if (priv->extend_desc) - p = (struct dma_desc *)(priv->dma_etx + entry); + p = (struct dma_desc *)(tx_q->dma_etx + entry); else - p = priv->dma_tx + entry; + p = tx_q->dma_tx + entry; status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, @@ -1342,48 +1827,51 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) stmmac_get_tx_hwtstamp(priv, p, skb); } - if (likely(priv->tx_skbuff_dma[entry].buf)) { - if (priv->tx_skbuff_dma[entry].map_as_page) + if (likely(tx_q->tx_skbuff_dma[entry].buf)) { + if (tx_q->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, - priv->tx_skbuff_dma[entry].buf, - priv->tx_skbuff_dma[entry].len, + tx_q->tx_skbuff_dma[entry].buf, + tx_q->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, - priv->tx_skbuff_dma[entry].buf, - priv->tx_skbuff_dma[entry].len, + tx_q->tx_skbuff_dma[entry].buf, + tx_q->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry].buf = 0; - priv->tx_skbuff_dma[entry].len = 0; - priv->tx_skbuff_dma[entry].map_as_page = false; + tx_q->tx_skbuff_dma[entry].buf = 0; + tx_q->tx_skbuff_dma[entry].len = 0; + tx_q->tx_skbuff_dma[entry].map_as_page = false; } if (priv->hw->mode->clean_desc3) - priv->hw->mode->clean_desc3(priv, p); + priv->hw->mode->clean_desc3(tx_q, p); - priv->tx_skbuff_dma[entry].last_segment = false; - priv->tx_skbuff_dma[entry].is_jumbo = false; + tx_q->tx_skbuff_dma[entry].last_segment = false; + tx_q->tx_skbuff_dma[entry].is_jumbo = false; if (likely(skb != NULL)) { pkts_compl++; bytes_compl += skb->len; dev_consume_skb_any(skb); - priv->tx_skbuff[entry] = NULL; + tx_q->tx_skbuff[entry] = NULL; } priv->hw->desc->release_tx_desc(p, priv->mode); entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); } - priv->dirty_tx = entry; + tx_q->dirty_tx = entry; + + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), + pkts_compl, bytes_compl); - netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, + queue))) && + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { - if (unlikely(netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { netif_dbg(priv, tx_done, priv->dev, "%s: restart transmit\n", __func__); - netif_wake_queue(priv->dev); + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); } if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { @@ -1393,45 +1881,76 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) netif_tx_unlock(priv->dev); } -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) +static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) { - priv->hw->dma->enable_dma_irq(priv->ioaddr); + priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); } -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) +static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) { - priv->hw->dma->disable_dma_irq(priv->ioaddr); + 
priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); } /** * stmmac_tx_err - to manage the tx error * @priv: driver private structure + * @chan: channel index * Description: it cleans the descriptors and restarts the transmission * in case of transmission errors. */ -static void stmmac_tx_err(struct stmmac_priv *priv) +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; int i; - netif_stop_queue(priv->dev); - priv->hw->dma->stop_tx(priv->ioaddr); - dma_free_tx_skbufs(priv); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); + + stmmac_stop_tx_dma(priv, chan); + dma_free_tx_skbufs(priv, chan); for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, priv->mode, (i == DMA_TX_SIZE - 1)); else - priv->hw->desc->init_tx_desc(&priv->dma_tx[i], + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], priv->mode, (i == DMA_TX_SIZE - 1)); - priv->dirty_tx = 0; - priv->cur_tx = 0; - netdev_reset_queue(priv->dev); - priv->hw->dma->start_tx(priv->ioaddr); + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); + stmmac_start_tx_dma(priv, chan); priv->dev->stats.tx_errors++; - netif_wake_queue(priv->dev); + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); +} + +/** + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel + * @priv: driver private structure + * @txmode: TX operating mode + * @rxmode: RX operating mode + * @chan: channel index + * Description: it is used for configuring the DMA operation mode at + * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward + * mode. + */ +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan) +{ + int rxfifosz = priv->plat->rx_fifo_size; + + if (rxfifosz == 0) + rxfifosz = priv->dma_cap.rx_fifo_size; + + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, + rxfifosz); + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); + } else { + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, + rxfifosz); + } } /** @@ -1443,31 +1962,43 @@ static void stmmac_tx_err(struct stmmac_priv *priv) */ static void stmmac_dma_interrupt(struct stmmac_priv *priv) { + u32 tx_channel_count = priv->plat->tx_queues_to_use; int status; - int rxfifosz = priv->plat->rx_fifo_size; + u32 chan; + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); - if (likely((status & handle_rx)) || (status & handle_tx)) { - if (likely(napi_schedule_prep(&priv->napi))) { - stmmac_disable_dma_irq(priv); - __napi_schedule(&priv->napi); + status = priv->hw->dma->dma_interrupt(priv->ioaddr, + &priv->xstats, chan); + if (likely((status & handle_rx)) || (status & handle_tx)) { + if (likely(napi_schedule_prep(&rx_q->napi))) { + stmmac_disable_dma_irq(priv, chan); + __napi_schedule(&rx_q->napi); + } } - } - if (unlikely(status & tx_hard_error_bump_tc)) { - /* Try to bump up the dma threshold on this failure */ - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && - (tc <= 256)) { - tc += 64; - if (priv->plat->force_thresh_dma_mode) - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, - rxfifosz); - else - priv->hw->dma->dma_mode(priv->ioaddr, tc, - SF_DMA_MODE, rxfifosz); - priv->xstats.threshold = tc; + + if 
(unlikely(status & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && + (tc <= 256)) { + tc += 64; + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, + tc, + tc, + chan); + else + stmmac_set_dma_operation_mode(priv, + tc, + SF_DMA_MODE, + chan); + priv->xstats.threshold = tc; + } + } else if (unlikely(status == tx_hard_error)) { + stmmac_tx_err(priv, chan); } - } else if (unlikely(status == tx_hard_error)) - stmmac_tx_err(priv); + } } /** @@ -1574,6 +2105,13 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 dummy_dma_rx_phy = 0; + u32 dummy_dma_tx_phy = 0; + u32 chan = 0; int atds = 0; int ret = 0; @@ -1591,19 +2129,49 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) return ret; } - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, - priv->dma_tx_phy, priv->dma_rx_phy, atds); - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->rx_tail_addr = priv->dma_rx_phy + - (DMA_RX_SIZE * sizeof(struct dma_desc)); - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, - STMMAC_CHAN0); + /* DMA Configuration */ + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, + dummy_dma_tx_phy, dummy_dma_rx_phy, atds); + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_channels_count; chan++) { + rx_q = &priv->rx_queue[chan]; + + priv->hw->dma->init_rx_chan(priv->ioaddr, + priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (DMA_RX_SIZE * sizeof(struct dma_desc)); + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, + rx_q->rx_tail_addr, + chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_channels_count; chan++) { + tx_q = &priv->tx_queue[chan]; + + priv->hw->dma->init_chan(priv->ioaddr, + priv->plat->dma_cfg, + chan); - priv->tx_tail_addr = priv->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, - STMMAC_CHAN0); + priv->hw->dma->init_tx_chan(priv->ioaddr, + priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + + (DMA_TX_SIZE * sizeof(struct dma_desc)); + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, + tx_q->tx_tail_addr, + chan); + } + } else { + rx_q = &priv->rx_queue[chan]; + tx_q = &priv->tx_queue[chan]; + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); } if (priv->plat->axi && priv->hw->dma->axi) @@ -1621,8 +2189,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) static void stmmac_tx_timer(unsigned long data) { struct stmmac_priv *priv = (struct stmmac_priv *)data; + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue; - stmmac_tx_clean(priv); + /* let's scan all the tx queues */ + for (queue = 0; queue < tx_queues_count; queue++) + stmmac_tx_clean(priv, queue); } /** @@ -1644,6 +2216,196 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) add_timer(&priv->txtimer); } +static void stmmac_set_rings_length(struct stmmac_priv *priv) +{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan; + + /* set TX ring length */ + if (priv->hw->dma->set_tx_ring_len) { + 
for (chan = 0; chan < tx_channels_count; chan++) + priv->hw->dma->set_tx_ring_len(priv->ioaddr, + (DMA_TX_SIZE - 1), chan); + } + + /* set RX ring length */ + if (priv->hw->dma->set_rx_ring_len) { + for (chan = 0; chan < rx_channels_count; chan++) + priv->hw->dma->set_rx_ring_len(priv->ioaddr, + (DMA_RX_SIZE - 1), chan); + } +} + +/** + * stmmac_set_tx_queue_weight - Set TX queue weight + * @priv: driver private structure + * Description: It is used for setting TX queues weight + */ +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 weight; + u32 queue; + + for (queue = 0; queue < tx_queues_count; queue++) { + weight = priv->plat->tx_queues_cfg[queue].weight; + priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); + } +} + +/** + * stmmac_configure_cbs - Configure CBS in TX queue + * @priv: driver private structure + * Description: It is used for configuring CBS in AVB TX queues + */ +static void stmmac_configure_cbs(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 mode_to_use; + u32 queue; + + /* queue 0 is reserved for legacy traffic */ + for (queue = 1; queue < tx_queues_count; queue++) { + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB) + continue; + + priv->hw->mac->config_cbs(priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + } +} + +/** + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel + * @priv: driver private structure + * Description: It is used for mapping RX queues to RX dma channels + */ +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u32 chan; + + for (queue = 0; queue < rx_queues_count; queue++) { + chan = priv->plat->rx_queues_cfg[queue].chan; + priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); + } +} + +/** + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority + * @priv: driver private structure + * Description: It is used for configuring the RX Queue Priority + */ +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + u32 prio; + + for (queue = 0; queue < rx_queues_count; queue++) { + if (!priv->plat->rx_queues_cfg[queue].use_prio) + continue; + + prio = priv->plat->rx_queues_cfg[queue].prio; + priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); + } +} + +/** + * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority + * @priv: driver private structure + * Description: It is used for configuring the TX Queue Priority + */ +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue; + u32 prio; + + for (queue = 0; queue < tx_queues_count; queue++) { + if (!priv->plat->tx_queues_cfg[queue].use_prio) + continue; + + prio = priv->plat->tx_queues_cfg[queue].prio; + priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); + } +} + +/** + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing + * @priv: driver private structure + * Description: It is used for configuring the RX queue routing + */ +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 queue; + 
u8 packet; + + for (queue = 0; queue < rx_queues_count; queue++) { + /* no specific packet type routing specified for the queue */ + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) + continue; + + packet = priv->plat->rx_queues_cfg[queue].pkt_route; + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); + } +} + +/** + * stmmac_mtl_configuration - Configure MTL + * @priv: driver private structure + * Description: It is used for configuring MTL + */ +static void stmmac_mtl_configuration(struct stmmac_priv *priv) +{ + u32 rx_queues_count = priv->plat->rx_queues_to_use; + u32 tx_queues_count = priv->plat->tx_queues_to_use; + + if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) + stmmac_set_tx_queue_weight(priv); + + /* Configure MTL RX algorithms */ + if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) + priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, + priv->plat->rx_sched_algorithm); + + /* Configure MTL TX algorithms */ + if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) + priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, + priv->plat->tx_sched_algorithm); + + /* Configure CBS in AVB TX queues */ + if (tx_queues_count > 1 && priv->hw->mac->config_cbs) + stmmac_configure_cbs(priv); + + /* Map RX MTL to DMA channels */ + if (priv->hw->mac->map_mtl_to_dma) + stmmac_rx_queue_dma_chan_map(priv); + + /* Enable MAC RX Queues */ + if (priv->hw->mac->rx_queue_enable) + stmmac_mac_enable_rx_queues(priv); + + /* Set RX priorities */ + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) + stmmac_mac_config_rx_queues_prio(priv); + + /* Set TX priorities */ + if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) + stmmac_mac_config_tx_queues_prio(priv); + + /* Set RX routing */ + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) + stmmac_mac_config_rx_queues_routing(priv); +} + /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. 
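/*
 * stmmac_tx_clean() above walks one queue's ring from dirty_tx to cur_tx,
 * reclaiming completed slots and wrapping with STMMAC_GET_ENTRY(). A
 * minimal user-space model of that index arithmetic; the ring size and the
 * ownership flag are simplified stand-ins for the real descriptor state.
 */
#include <stdio.h>
#include <stdbool.h>

#define DMA_TX_SIZE 8	/* must stay a power of two for the mask to work */
#define GET_ENTRY(x, size) (((x) + 1) & ((size) - 1))

static bool hw_owns[DMA_TX_SIZE];	/* true while the DMA still owns the slot */

static unsigned int tx_clean(unsigned int dirty, unsigned int cur)
{
	unsigned int entry = dirty;

	while (entry != cur) {
		if (hw_owns[entry])
			break;	/* not completed yet; resume here next time */
		printf("reclaim slot %u\n", entry);
		entry = GET_ENTRY(entry, DMA_TX_SIZE);
	}
	return entry;	/* becomes the new dirty_tx */
}

int main(void)
{
	unsigned int dirty = 6, cur = 2;	/* producer wrapped past the end */

	hw_owns[1] = true;			/* slot 1 still in flight */
	printf("dirty_tx is now %u\n", tx_clean(dirty, cur));	/* 6, 7, 0 reclaimed */
	return 0;
}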
@@ -1659,6 +2421,9 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) { struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 chan; int ret; /* DMA initialization and SW reset */ @@ -1688,9 +2453,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); - /* Initialize MAC RX Queues */ - if (priv->hw->mac->rx_queue_enable) - stmmac_mac_enable_rx_queues(priv); + /* Initialize MTL*/ + if (priv->synopsys_id >= DWMAC_CORE_4_00) + stmmac_mtl_configuration(priv); ret = priv->hw->mac->rx_ipc(priv->hw); if (!ret) { @@ -1700,10 +2465,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Enable the MAC Rx/Tx */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) - stmmac_dwmac4_set_mac(priv->ioaddr, true); - else - stmmac_set_mac(priv->ioaddr, true); + priv->hw->mac->set_mac(priv->ioaddr, true); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -1711,6 +2473,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv); if (init_ptp) { + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) + netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); + ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); @@ -1725,35 +2491,37 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) __func__); #endif /* Start the ball rolling... */ - netdev_dbg(priv->dev, "DMA RX/TX processes started...\n"); - priv->hw->dma->start_tx(priv->ioaddr); - priv->hw->dma->start_rx(priv->ioaddr); + stmmac_start_all_dma(priv); priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { priv->rx_riwt = MAX_DMA_RIWT; - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); + priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); } if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); - /* set TX ring length */ - if (priv->hw->dma->set_tx_ring_len) - priv->hw->dma->set_tx_ring_len(priv->ioaddr, - (DMA_TX_SIZE - 1)); - /* set RX ring length */ - if (priv->hw->dma->set_rx_ring_len) - priv->hw->dma->set_rx_ring_len(priv->ioaddr, - (DMA_RX_SIZE - 1)); + /* set TX and RX rings length */ + stmmac_set_rings_length(priv); + /* Enable TSO */ - if (priv->tso) - priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0); + if (priv->tso) { + for (chan = 0; chan < tx_cnt; chan++) + priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); + } return 0; } +static void stmmac_hw_teardown(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + clk_disable_unprepare(priv->plat->clk_ptp_ref); +} + /** * stmmac_open - open entry point of the driver * @dev : pointer to the device structure. 
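/*
 * stmmac_mtl_configuration() only touches a feature when the MAC ops table
 * provides the callback, so one setup path serves every core generation. A
 * small sketch of that guarded-dispatch pattern; the ops struct and queue
 * counts below are invented for the example, not the driver's stmmac_ops.
 */
#include <stdio.h>

struct mac_ops {
	void (*config_cbs)(int queue);		/* NULL on cores without CBS */
	void (*map_mtl_to_dma)(int queue, int chan);
};

static void cbs_v5(int queue)
{
	printf("CBS configured on TX queue %d\n", queue);
}

static void configure_mtl(const struct mac_ops *ops, int tx_queues)
{
	int q;

	/* An absent op means "feature not present", not an error. */
	if (tx_queues > 1 && ops->config_cbs)
		for (q = 1; q < tx_queues; q++)	/* queue 0 stays legacy */
			ops->config_cbs(q);

	if (ops->map_mtl_to_dma)
		for (q = 0; q < tx_queues; q++)
			ops->map_mtl_to_dma(q, q);
}

int main(void)
{
	struct mac_ops gen = { .config_cbs = cbs_v5 };	/* no mapping op */

	configure_mtl(&gen, 3);
	return 0;
}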
@@ -1821,7 +2589,7 @@ static int stmmac_open(struct net_device *dev) netdev_err(priv->dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); - goto init_error; + goto irq_error; } /* Request the Wake IRQ in case of another line is used for WoL */ @@ -1848,8 +2616,8 @@ static int stmmac_open(struct net_device *dev) } } - napi_enable(&priv->napi); - netif_start_queue(dev); + stmmac_enable_all_queues(priv); + stmmac_start_all_queues(priv); return 0; @@ -1858,7 +2626,12 @@ static int stmmac_open(struct net_device *dev) free_irq(priv->wol_irq, dev); wolirq_error: free_irq(dev->irq, dev); +irq_error: + if (dev->phydev) + phy_stop(dev->phydev); + del_timer_sync(&priv->txtimer); + stmmac_hw_teardown(dev); init_error: free_dma_desc_resources(priv); dma_desc_error: @@ -1887,9 +2660,9 @@ static int stmmac_release(struct net_device *dev) phy_disconnect(dev->phydev); } - netif_stop_queue(dev); + stmmac_stop_all_queues(priv); - napi_disable(&priv->napi); + stmmac_disable_all_queues(priv); del_timer_sync(&priv->txtimer); @@ -1901,14 +2674,13 @@ static int stmmac_release(struct net_device *dev) free_irq(priv->lpi_irq, dev); /* Stop TX/RX DMA and clear the descriptors */ - priv->hw->dma->stop_tx(priv->ioaddr); - priv->hw->dma->stop_rx(priv->ioaddr); + stmmac_stop_all_dma(priv); /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv); /* Disable the MAC Rx/Tx */ - stmmac_set_mac(priv->ioaddr, false); + priv->hw->mac->set_mac(priv->ioaddr, false); netif_carrier_off(dev); @@ -1927,22 +2699,24 @@ static int stmmac_release(struct net_device *dev) * @des: buffer start address * @total_len: total length to fill in descriptors * @last_segmant: condition for the last descriptor + * @queue: TX queue index * Description: * This function fills descriptor and request new descriptors according to * buffer length to fill */ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, - int total_len, bool last_segment) + int total_len, bool last_segment, u32 queue) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; struct dma_desc *desc; - int tmp_len; u32 buff_size; + int tmp_len; tmp_len = total_len; while (tmp_len > 0) { - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); - desc = priv->dma_tx + priv->cur_tx; + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); + desc = tx_q->dma_tx + tx_q->cur_tx; desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
@@ -1986,23 +2760,28 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, */ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 pay_len, mss; - int tmp_pay_len = 0; + struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); int nfrags = skb_shinfo(skb)->nr_frags; + u32 queue = skb_get_queue_mapping(skb); unsigned int first_entry, des; - struct dma_desc *desc, *first, *mss_desc = NULL; + struct stmmac_tx_queue *tx_q; + int tmp_pay_len = 0; + u32 pay_len, mss; u8 proto_hdr_len; int i; + tx_q = &priv->tx_queue[queue]; + /* Compute header lengths */ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); /* Desc availability based on threshold should be enough safe */ - if (unlikely(stmmac_tx_avail(priv) < + if (unlikely(stmmac_tx_avail(priv, queue) < (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, + queue)); /* This is a hard error, log it. */ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", @@ -2017,10 +2796,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* set new MSS value if needed */ if (mss != priv->mss) { - mss_desc = priv->dma_tx + priv->cur_tx; + mss_desc = tx_q->dma_tx + tx_q->cur_tx; priv->hw->desc->set_mss(mss_desc, mss); priv->mss = mss; - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); } if (netif_msg_tx_queued(priv)) { @@ -2030,9 +2809,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) skb->data_len); } - first_entry = priv->cur_tx; + first_entry = tx_q->cur_tx; - desc = priv->dma_tx + first_entry; + desc = tx_q->dma_tx + first_entry; first = desc; /* first descriptor: fill Headers on Buf1 */ @@ -2041,9 +2820,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; - priv->tx_skbuff_dma[first_entry].buf = des; - priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb); - priv->tx_skbuff[first_entry] = skb; + tx_q->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); + tx_q->tx_skbuff[first_entry] = skb; first->des0 = cpu_to_le32(des); @@ -2054,7 +2833,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* If needed take extra descriptors to fill the remaining payload */ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0)); + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); /* Prepare fragments */ for (i = 0; i < nfrags; i++) { @@ -2063,24 +2842,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) des = skb_frag_dma_map(priv->device, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, des)) + goto dma_map_err; stmmac_tso_allocator(priv, des, skb_frag_size(frag), - (i == nfrags - 1)); + (i == nfrags - 1), queue); - priv->tx_skbuff_dma[priv->cur_tx].buf = des; - priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag); - priv->tx_skbuff[priv->cur_tx] = NULL; - priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); + 
tx_q->tx_skbuff[tx_q->cur_tx] = NULL; + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; } - priv->tx_skbuff_dma[priv->cur_tx].last_segment = true; + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", __func__); - netif_stop_queue(dev); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } dev->stats.tx_bytes += skb->len; @@ -2112,7 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) priv->hw->desc->prepare_tso_tx_desc(first, 1, proto_hdr_len, pay_len, - 1, priv->tx_skbuff_dma[first_entry].last_segment, + 1, tx_q->tx_skbuff_dma[first_entry].last_segment, tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ @@ -2127,20 +2908,20 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (netif_msg_pktdata(priv)) { pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", - __func__, priv->cur_tx, priv->dirty_tx, first_entry, - priv->cur_tx, first, nfrags); + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, + tx_q->cur_tx, first, nfrags); - priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE, + priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, 0); pr_info(">>> frame to be transmitted: "); print_pkt(skb->data, skb_headlen(skb)); } - netdev_sent_queue(dev, skb->len); + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, - STMMAC_CHAN0); + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, + queue); return NETDEV_TX_OK; @@ -2164,21 +2945,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; + u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; unsigned int entry, first_entry; struct dma_desc *desc, *first; + struct stmmac_tx_queue *tx_q; unsigned int enh_desc; unsigned int des; + tx_q = &priv->tx_queue[queue]; + /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { if (ip_hdr(skb)->protocol == IPPROTO_TCP) return stmmac_tso_xmit(skb, dev); } - if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { - if (!netif_queue_stopped(dev)) { - netif_stop_queue(dev); + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, + queue)); /* This is a hard error, log it. 
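/*
 * stmmac_tso_allocator() above splits the TSO payload into descriptors of
 * at most TSO_MAX_BUFF_SIZE bytes, advancing the buffer address by what has
 * already been consumed. A user-space model of that loop; the size constant
 * is a stand-in for the example.
 */
#include <stdio.h>

#define TSO_MAX_BUFF_SIZE 4096	/* stand-in value */

static void tso_chunks(unsigned int des, int total_len)
{
	int tmp_len = total_len;

	while (tmp_len > 0) {
		unsigned int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
					 TSO_MAX_BUFF_SIZE : tmp_len;

		printf("desc: addr=0x%x len=%u last=%d\n",
		       des + (total_len - tmp_len), buff_size,
		       tmp_len <= TSO_MAX_BUFF_SIZE);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

int main(void)
{
	tso_chunks(0x1000, 10000);	/* emits 4096 + 4096 + 1808 */
	return 0;
}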
*/ netdev_err(priv->dev, "%s: Tx Ring full when queue awake\n", @@ -2190,19 +2976,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); - entry = priv->cur_tx; + entry = tx_q->cur_tx; first_entry = entry; csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); if (likely(priv->extend_desc)) - desc = (struct dma_desc *)(priv->dma_etx + entry); + desc = (struct dma_desc *)(tx_q->dma_etx + entry); else - desc = priv->dma_tx + entry; + desc = tx_q->dma_tx + entry; first = desc; - priv->tx_skbuff[first_entry] = skb; + tx_q->tx_skbuff[first_entry] = skb; enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ @@ -2211,7 +2997,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(is_jumbo) && likely(priv->synopsys_id < DWMAC_CORE_4_00)) { - entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); + entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); if (unlikely(entry < 0)) goto dma_map_err; } @@ -2224,48 +3010,49 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (likely(priv->extend_desc)) - desc = (struct dma_desc *)(priv->dma_etx + entry); + desc = (struct dma_desc *)(tx_q->dma_etx + entry); else - desc = priv->dma_tx + entry; + desc = tx_q->dma_tx + entry; des = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, des)) goto dma_map_err; /* should reuse desc w/o issues */ - priv->tx_skbuff[entry] = NULL; + tx_q->tx_skbuff[entry] = NULL; - priv->tx_skbuff_dma[entry].buf = des; + tx_q->tx_skbuff_dma[entry].buf = des; if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) desc->des0 = cpu_to_le32(des); else desc->des2 = cpu_to_le32(des); - priv->tx_skbuff_dma[entry].map_as_page = true; - priv->tx_skbuff_dma[entry].len = len; - priv->tx_skbuff_dma[entry].last_segment = last_segment; + tx_q->tx_skbuff_dma[entry].map_as_page = true; + tx_q->tx_skbuff_dma[entry].len = len; + tx_q->tx_skbuff_dma[entry].last_segment = last_segment; /* Prepare the descriptor and set the own bit too */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, - priv->mode, 1, last_segment); + priv->mode, 1, last_segment, + skb->len); } entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); - priv->cur_tx = entry; + tx_q->cur_tx = entry; if (netif_msg_pktdata(priv)) { void *tx_head; netdev_dbg(priv->dev, "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", - __func__, priv->cur_tx, priv->dirty_tx, first_entry, + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, entry, first, nfrags); if (priv->extend_desc) - tx_head = (void *)priv->dma_etx; + tx_head = (void *)tx_q->dma_etx; else - tx_head = (void *)priv->dma_tx; + tx_head = (void *)tx_q->dma_tx; priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); @@ -2273,10 +3060,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) print_pkt(skb->data, skb->len); } - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", __func__); - netif_stop_queue(dev); + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } dev->stats.tx_bytes += skb->len; @@ -2311,14 +3098,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto 
dma_map_err; - priv->tx_skbuff_dma[first_entry].buf = des; + tx_q->tx_skbuff_dma[first_entry].buf = des; if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) first->des0 = cpu_to_le32(des); else first->des2 = cpu_to_le32(des); - priv->tx_skbuff_dma[first_entry].len = nopaged_len; - priv->tx_skbuff_dma[first_entry].last_segment = last_segment; + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { @@ -2330,7 +3117,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Prepare the first descriptor setting the OWN bit too */ priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, csum_insertion, priv->mode, 1, - last_segment); + last_segment, skb->len); /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that @@ -2339,13 +3126,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dma_wmb(); } - netdev_sent_queue(dev, skb->len); + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); if (priv->synopsys_id < DWMAC_CORE_4_00) priv->hw->dma->enable_dma_transmission(priv->ioaddr); else - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, - STMMAC_CHAN0); + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, + queue); return NETDEV_TX_OK; @@ -2373,9 +3160,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) } -static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) +static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) { - if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) + if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) return 0; return 1; @@ -2384,30 +3171,33 @@ static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure + * @queue: RX queue index * Description : this is to reallocate the skb for the reception process * that is based on zero-copy. */ -static inline void stmmac_rx_refill(struct stmmac_priv *priv) +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + int dirty = stmmac_rx_dirty(priv, queue); + unsigned int entry = rx_q->dirty_rx; + int bfsize = priv->dma_buf_sz; - unsigned int entry = priv->dirty_rx; - int dirty = stmmac_rx_dirty(priv); while (dirty-- > 0) { struct dma_desc *p; if (priv->extend_desc) - p = (struct dma_desc *)(priv->dma_erx + entry); + p = (struct dma_desc *)(rx_q->dma_erx + entry); else - p = priv->dma_rx + entry; + p = rx_q->dma_rx + entry; - if (likely(priv->rx_skbuff[entry] == NULL)) { + if (likely(!rx_q->rx_skbuff[entry])) { struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); if (unlikely(!skb)) { /* so for a while no zero-copy! 
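/*
 * The xmit paths above stop only the queue that ran out of descriptors, and
 * the per-queue cleanup wakes it once enough slots are free again. A model
 * of that availability check and stop/wake hysteresis, using the same
 * dirty/cur arithmetic as stmmac_tx_avail(); the ring size and thresholds
 * are stand-ins for DMA_TX_SIZE, MAX_SKB_FRAGS + 1 and STMMAC_TX_THRESH.
 */
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE	64
#define STOP_THRESH	17	/* stand-in for MAX_SKB_FRAGS + 1 */
#define WAKE_THRESH	28	/* stand-in for STMMAC_TX_THRESH */

struct txq {
	unsigned int cur, dirty;
	bool stopped;
};

/* Free slots between producer (cur) and consumer (dirty). */
static unsigned int tx_avail(const struct txq *q)
{
	if (q->dirty > q->cur)
		return q->dirty - q->cur - 1;
	return RING_SIZE - q->cur + q->dirty - 1;
}

int main(void)
{
	struct txq q = { .cur = 50, .dirty = 3 };

	if (tx_avail(&q) <= STOP_THRESH) {	/* xmit: ring nearly full */
		q.stopped = true;
		printf("queue stopped, avail=%u\n", tx_avail(&q));
	}

	q.dirty = 30;				/* clean: descriptors reclaimed */
	if (q.stopped && tx_avail(&q) > WAKE_THRESH) {
		q.stopped = false;
		printf("queue woken, avail=%u\n", tx_avail(&q));
	}
	return 0;
}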
*/ - priv->rx_zeroc_thresh = STMMAC_RX_THRESH; + rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; if (unlikely(net_ratelimit())) dev_err(priv->device, "fail to alloc skb entry %d\n", @@ -2415,28 +3205,28 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) break; } - priv->rx_skbuff[entry] = skb; - priv->rx_skbuff_dma[entry] = + rx_q->rx_skbuff[entry] = skb; + rx_q->rx_skbuff_dma[entry] = dma_map_single(priv->device, skb->data, bfsize, DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, - priv->rx_skbuff_dma[entry])) { + rx_q->rx_skbuff_dma[entry])) { netdev_err(priv->dev, "Rx DMA map failed\n"); dev_kfree_skb(skb); break; } if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]); + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); p->des1 = 0; } else { - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]); + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); } if (priv->hw->mode->refill_desc3) - priv->hw->mode->refill_desc3(priv, p); + priv->hw->mode->refill_desc3(rx_q, p); - if (priv->rx_zeroc_thresh > 0) - priv->rx_zeroc_thresh--; + if (rx_q->rx_zeroc_thresh > 0) + rx_q->rx_zeroc_thresh--; netif_dbg(priv, rx_status, priv->dev, "refill entry #%d\n", entry); @@ -2452,31 +3242,33 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } - priv->dirty_rx = entry; + rx_q->dirty_rx = entry; } /** * stmmac_rx - manage the receive process * @priv: driver private structure - * @limit: napi bugget. + * @limit: napi budget + * @queue: RX queue index. * Description : this the function called by the napi poll method. * It gets all the frames inside the ring. */ -static int stmmac_rx(struct stmmac_priv *priv, int limit) +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { - unsigned int entry = priv->cur_rx; + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + unsigned int entry = rx_q->cur_rx; + int coe = priv->hw->rx_csum; unsigned int next_entry; unsigned int count = 0; - int coe = priv->hw->rx_csum; if (netif_msg_rx_status(priv)) { void *rx_head; netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); if (priv->extend_desc) - rx_head = (void *)priv->dma_erx; + rx_head = (void *)rx_q->dma_erx; else - rx_head = (void *)priv->dma_rx; + rx_head = (void *)rx_q->dma_rx; priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); } @@ -2486,9 +3278,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) struct dma_desc *np; if (priv->extend_desc) - p = (struct dma_desc *)(priv->dma_erx + entry); + p = (struct dma_desc *)(rx_q->dma_erx + entry); else - p = priv->dma_rx + entry; + p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ status = priv->hw->desc->rx_status(&priv->dev->stats, @@ -2499,20 +3291,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) count++; - priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); - next_entry = priv->cur_rx; + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); + next_entry = rx_q->cur_rx; if (priv->extend_desc) - np = (struct dma_desc *)(priv->dma_erx + next_entry); + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); else - np = priv->dma_rx + next_entry; + np = rx_q->dma_rx + next_entry; prefetch(np); if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) priv->hw->desc->rx_extended_status(&priv->dev->stats, &priv->xstats, - priv->dma_erx + + rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; @@ -2522,9 +3314,9 @@ static int 
stmmac_rx(struct stmmac_priv *priv, int limit) * them in stmmac_rx_refill() function so that * device can reuse it. */ - priv->rx_skbuff[entry] = NULL; + rx_q->rx_skbuff[entry] = NULL; dma_unmap_single(priv->device, - priv->rx_skbuff_dma[entry], + rx_q->rx_skbuff_dma[entry], priv->dma_buf_sz, DMA_FROM_DEVICE); } @@ -2572,7 +3364,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) */ if (unlikely(!priv->plat->has_gmac4 && ((frame_len < priv->rx_copybreak) || - stmmac_rx_threshold_count(priv)))) { + stmmac_rx_threshold_count(rx_q)))) { skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { @@ -2584,21 +3376,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) } dma_sync_single_for_cpu(priv->device, - priv->rx_skbuff_dma + rx_q->rx_skbuff_dma [entry], frame_len, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, - priv-> + rx_q-> rx_skbuff[entry]->data, frame_len); skb_put(skb, frame_len); dma_sync_single_for_device(priv->device, - priv->rx_skbuff_dma + rx_q->rx_skbuff_dma [entry], frame_len, DMA_FROM_DEVICE); } else { - skb = priv->rx_skbuff[entry]; + skb = rx_q->rx_skbuff[entry]; if (unlikely(!skb)) { netdev_err(priv->dev, "%s: Inconsistent Rx chain\n", @@ -2607,12 +3399,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) break; } prefetch(skb->data - NET_IP_ALIGN); - priv->rx_skbuff[entry] = NULL; - priv->rx_zeroc_thresh++; + rx_q->rx_skbuff[entry] = NULL; + rx_q->rx_zeroc_thresh++; skb_put(skb, frame_len); dma_unmap_single(priv->device, - priv->rx_skbuff_dma[entry], + rx_q->rx_skbuff_dma[entry], priv->dma_buf_sz, DMA_FROM_DEVICE); } @@ -2634,7 +3426,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) else skb->ip_summed = CHECKSUM_UNNECESSARY; - napi_gro_receive(&priv->napi, skb); + napi_gro_receive(&rx_q->napi, skb); priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; @@ -2642,7 +3434,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) entry = next_entry; } - stmmac_rx_refill(priv); + stmmac_rx_refill(priv, queue); priv->xstats.rx_pkt_n += count; @@ -2659,16 +3451,24 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) */ static int stmmac_poll(struct napi_struct *napi, int budget) { - struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); + struct stmmac_rx_queue *rx_q = + container_of(napi, struct stmmac_rx_queue, napi); + struct stmmac_priv *priv = rx_q->priv_data; + u32 tx_count = priv->plat->tx_queues_to_use; + u32 chan = rx_q->queue_index; int work_done = 0; + u32 queue; priv->xstats.napi_poll++; - stmmac_tx_clean(priv); - work_done = stmmac_rx(priv, budget); + /* check all the queues */ + for (queue = 0; queue < tx_count; queue++) + stmmac_tx_clean(priv, queue); + + work_done = stmmac_rx(priv, budget, rx_q->queue_index); if (work_done < budget) { napi_complete_done(napi, work_done); - stmmac_enable_dma_irq(priv); + stmmac_enable_dma_irq(priv, chan); } return work_done; } @@ -2684,9 +3484,12 @@ static int stmmac_poll(struct napi_struct *napi, int budget) static void stmmac_tx_timeout(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_count = priv->plat->tx_queues_to_use; + u32 chan; /* Clear Tx resources and restart transmitting again */ - stmmac_tx_err(priv); + for (chan = 0; chan < tx_count; chan++) + stmmac_tx_err(priv, chan); } /** @@ -2795,6 +3598,12 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = 
priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queues_count; + u32 queue; + + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; if (priv->irq_wake) pm_wakeup_event(priv->device, 0); @@ -2808,16 +3617,30 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, &priv->xstats); + if (unlikely(status)) { /* For LPI we need to save the tx status */ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) priv->tx_path_in_lpi_mode = true; if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) priv->tx_path_in_lpi_mode = false; - if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr) - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, - priv->rx_tail_addr, - STMMAC_CHAN0); + } + + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + for (queue = 0; queue < queues_count; queue++) { + struct stmmac_rx_queue *rx_q = + &priv->rx_queue[queue]; + + status |= + priv->hw->mac->host_mtl_irq_status(priv->hw, + queue); + + if (status & CORE_IRQ_MTL_RX_OVERFLOW && + priv->hw->dma->set_rx_tail_ptr) + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, + rx_q->rx_tail_addr, + queue); + } } /* PCS link status */ @@ -2915,17 +3738,40 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_count = priv->plat->rx_queues_to_use; + u32 tx_count = priv->plat->tx_queues_to_use; + u32 queue; - if (priv->extend_desc) { - seq_printf(seq, "Extended RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); - seq_printf(seq, "Extended TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); - } else { - seq_printf(seq, "RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); - seq_printf(seq, "TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + seq_printf(seq, "RX Queue %d:\n", queue); + + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_erx, + DMA_RX_SIZE, 1, seq); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)rx_q->dma_rx, + DMA_RX_SIZE, 0, seq); + } + } + + for (queue = 0; queue < tx_count; queue++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + seq_printf(seq, "TX Queue %d:\n", queue); + + if (priv->extend_desc) { + seq_printf(seq, "Extended descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_etx, + DMA_TX_SIZE, 1, seq); + } else { + seq_printf(seq, "Descriptor ring:\n"); + sysfs_display_ring((void *)tx_q->dma_tx, + DMA_TX_SIZE, 0, seq); + } } return 0; @@ -3208,11 +4054,14 @@ int stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) { - int ret = 0; struct net_device *ndev = NULL; struct stmmac_priv *priv; + int ret = 0; + u32 queue; - ndev = alloc_etherdev(sizeof(struct stmmac_priv)); + ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), + MTL_MAX_TX_QUEUES, + MTL_MAX_RX_QUEUES); if (!ndev) return -ENOMEM; @@ -3254,6 +4103,10 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_hw_init; + /* Configure real RX and TX queues */ + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); + netif_set_real_num_tx_queues(ndev, 
priv->plat->tx_queues_to_use); + ndev->netdev_ops = &stmmac_netdev_ops; ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -3303,7 +4156,12 @@ int stmmac_dvr_probe(struct device *device, "Enable RX Mitigation via HW Watchdog Timer\n"); } - netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + netif_napi_add(ndev, &rx_q->napi, stmmac_poll, + (8 * priv->plat->rx_queues_to_use)); + } spin_lock_init(&priv->lock); @@ -3348,7 +4206,11 @@ int stmmac_dvr_probe(struct device *device, priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: - netif_napi_del(&priv->napi); + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + netif_napi_del(&rx_q->napi); + } error_hw_init: free_netdev(ndev); @@ -3369,10 +4231,9 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); - priv->hw->dma->stop_rx(priv->ioaddr); - priv->hw->dma->stop_tx(priv->ioaddr); + stmmac_stop_all_dma(priv); - stmmac_set_mac(priv->ioaddr, false); + priv->hw->mac->set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->plat->stmmac_rst) @@ -3411,20 +4272,19 @@ int stmmac_suspend(struct device *dev) spin_lock_irqsave(&priv->lock, flags); netif_device_detach(ndev); - netif_stop_queue(ndev); + stmmac_stop_all_queues(priv); - napi_disable(&priv->napi); + stmmac_disable_all_queues(priv); /* Stop TX/RX DMA */ - priv->hw->dma->stop_tx(priv->ioaddr); - priv->hw->dma->stop_rx(priv->ioaddr); + stmmac_stop_all_dma(priv); /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) { priv->hw->mac->pmt(priv->hw, priv->wolopts); priv->irq_wake = 1; } else { - stmmac_set_mac(priv->ioaddr, false); + priv->hw->mac->set_mac(priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable(priv->plat->pclk); @@ -3439,6 +4299,31 @@ int stmmac_suspend(struct device *dev) } EXPORT_SYMBOL_GPL(stmmac_suspend); +/** + * stmmac_reset_queues_param - reset queue parameters + * @dev: device pointer + */ +static void stmmac_reset_queues_param(struct stmmac_priv *priv) +{ + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < rx_cnt; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; + } + + for (queue = 0; queue < tx_cnt; queue++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + tx_q->cur_tx = 0; + tx_q->dirty_tx = 0; + } +} + /** * stmmac_resume - resume callback * @dev: device pointer @@ -3479,10 +4364,8 @@ int stmmac_resume(struct device *dev) spin_lock_irqsave(&priv->lock, flags); - priv->cur_rx = 0; - priv->dirty_rx = 0; - priv->dirty_tx = 0; - priv->cur_tx = 0; + stmmac_reset_queues_param(priv); + /* reset private mss value to force mss context settings at * next tso xmit (only used for gmac4). 
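/*
 * With one NAPI context embedded in each RX queue, stmmac_poll() recovers
 * the queue from the napi pointer via container_of() and reaches the
 * private data through a back-pointer. A trimmed-down sketch of that
 * pattern; the structures here are invented stand-ins, not the driver's.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct {
	int weight;
};

struct rx_queue {
	unsigned int queue_index;
	void *priv_data;		/* back-pointer to driver private data */
	struct napi_struct napi;	/* one NAPI context per RX queue */
};

struct priv {
	const char *name;
};

static void poll(struct napi_struct *napi)
{
	struct rx_queue *rx_q = container_of(napi, struct rx_queue, napi);
	struct priv *priv = rx_q->priv_data;

	printf("%s: polling RX queue %u\n", priv->name, rx_q->queue_index);
}

int main(void)
{
	struct priv p = { .name = "eth0" };
	struct rx_queue q = { .queue_index = 2, .priv_data = &p };

	poll(&q.napi);	/* only the embedded napi pointer is passed around */
	return 0;
}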
*/ @@ -3494,9 +4377,9 @@ int stmmac_resume(struct device *dev) stmmac_init_tx_coalesce(priv); stmmac_set_rx_mode(ndev); - napi_enable(&priv->napi); + stmmac_enable_all_queues(priv); - netif_start_queue(ndev); + stmmac_start_all_queues(priv); spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 5c9e462276b9cbc25a8b5c2748988286fe17884f..39be9677914559e18829c64a4df0fb31e35b4de5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -32,6 +32,7 @@ */ struct stmmac_pci_dmi_data { const char *name; + const char *asset_tag; unsigned int func; int phy_addr; }; @@ -46,6 +47,7 @@ struct stmmac_pci_info { static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) { const char *name = dmi_get_system_info(DMI_BOARD_NAME); + const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG); unsigned int func = PCI_FUNC(info->pdev->devfn); struct stmmac_pci_dmi_data *dmi; @@ -57,8 +59,12 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) return 1; for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) { - if (!strcmp(dmi->name, name) && dmi->func == func) + if (!strcmp(dmi->name, name) && dmi->func == func) { + /* If asset tag is provided, match on it as well. */ + if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag)) + continue; return dmi->phy_addr; + } } return -ENODEV; @@ -88,6 +94,17 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat) /* Set the maxmtu to a default of JUMBO_LEN */ plat->maxmtu = JUMBO_LEN; + + /* Set default number of RX and TX queues to use */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + + /* Disable Priority config by default */ + plat->tx_queues_cfg[0].use_prio = false; + plat->rx_queues_cfg[0].use_prio = false; + + /* Disable RX queues routing by default */ + plat->rx_queues_cfg[0].pkt_route = 0x0; } static int quark_default_data(struct plat_stmmacenet_data *plat, @@ -142,6 +159,24 @@ static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = { .func = 6, .phy_addr = 1, }, + { + .name = "SIMATIC IOT2000", + .asset_tag = "6ES7647-0AA00-0YA2", + .func = 6, + .phy_addr = 1, + }, + { + .name = "SIMATIC IOT2000", + .asset_tag = "6ES7647-0AA00-1YA2", + .func = 6, + .phy_addr = 1, + }, + { + .name = "SIMATIC IOT2000", + .asset_tag = "6ES7647-0AA00-1YA2", + .func = 7, + .phy_addr = 1, + }, {} }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 433a84239a687bab4ff0572978d7c0eaf849cb46..7fc3a1ef395ab2e99060355d1c47e5b5f1f9d9f1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) if (!np) return NULL; - axi = kzalloc(sizeof(*axi), GFP_KERNEL); + axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL); if (!axi) { of_node_put(np); return ERR_PTR(-ENOMEM); @@ -131,6 +131,155 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) return axi; } +/** + * stmmac_mtl_setup - parse DT parameters for multiple queues configuration + * @pdev: platform device + */ +static void stmmac_mtl_setup(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) +{ + struct device_node *q_node; + struct device_node *rx_node; + struct device_node *tx_node; + u8 queue = 0; + + /* For backwards-compatibility with 
device trees that don't have any + * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back + * to one RX and TX queues each. + */ + plat->rx_queues_to_use = 1; + plat->tx_queues_to_use = 1; + + rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); + if (!rx_node) + return; + + tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0); + if (!tx_node) { + of_node_put(rx_node); + return; + } + + /* Processing RX queues common config */ + if (of_property_read_u8(rx_node, "snps,rx-queues-to-use", + &plat->rx_queues_to_use)) + plat->rx_queues_to_use = 1; + + if (of_property_read_bool(rx_node, "snps,rx-sched-sp")) + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp")) + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP; + else + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; + + /* Processing individual RX queue config */ + for_each_child_of_node(rx_node, q_node) { + if (queue >= plat->rx_queues_to_use) + break; + + if (of_property_read_bool(q_node, "snps,dcb-algorithm")) + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; + else if (of_property_read_bool(q_node, "snps,avb-algorithm")) + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + else + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; + + if (of_property_read_u8(q_node, "snps,map-to-dma-channel", + &plat->rx_queues_cfg[queue].chan)) + plat->rx_queues_cfg[queue].chan = queue; + /* TODO: Dynamic mapping to be included in the future */ + + if (of_property_read_u32(q_node, "snps,priority", + &plat->rx_queues_cfg[queue].prio)) { + plat->rx_queues_cfg[queue].prio = 0; + plat->rx_queues_cfg[queue].use_prio = false; + } else { + plat->rx_queues_cfg[queue].use_prio = true; + } + + /* RX queue specific packet type routing */ + if (of_property_read_bool(q_node, "snps,route-avcp")) + plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ; + else if (of_property_read_bool(q_node, "snps,route-ptp")) + plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ; + else if (of_property_read_bool(q_node, "snps,route-dcbcp")) + plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ; + else if (of_property_read_bool(q_node, "snps,route-up")) + plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ; + else if (of_property_read_bool(q_node, "snps,route-multi-broad")) + plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ; + else + plat->rx_queues_cfg[queue].pkt_route = 0x0; + + queue++; + } + + /* Processing TX queues common config */ + if (of_property_read_u8(tx_node, "snps,tx-queues-to-use", + &plat->tx_queues_to_use)) + plat->tx_queues_to_use = 1; + + if (of_property_read_bool(tx_node, "snps,tx-sched-wrr")) + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; + else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq")) + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ; + else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr")) + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR; + else if (of_property_read_bool(tx_node, "snps,tx-sched-sp")) + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; + else + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP; + + queue = 0; + + /* Processing individual TX queue config */ + for_each_child_of_node(tx_node, q_node) { + if (queue >= plat->tx_queues_to_use) + break; + + if (of_property_read_u8(q_node, "snps,weight", + &plat->tx_queues_cfg[queue].weight)) + plat->tx_queues_cfg[queue].weight = 0x10 + queue; + + if (of_property_read_bool(q_node, "snps,dcb-algorithm")) { + plat->tx_queues_cfg[queue].mode_to_use = 
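Every of_property_read_*() call in this setup path follows the same read-with-default idiom, which is what keeps older device trees working: a missing property simply leaves the documented default in place. A condensed sketch of the idiom, using a hypothetical helper name:

/* Hypothetical helper mirroring the pattern repeated in stmmac_mtl_setup():
 * read a u8 property, fall back to a default when it is absent.
 */
static u8 dt_read_u8_default(struct device_node *np, const char *prop, u8 def)
{
	u8 val;

	if (of_property_read_u8(np, prop, &val))
		return def;	/* property missing: keep the default */
	return val;
}

/* e.g.: plat->rx_queues_to_use =
 *		dt_read_u8_default(rx_node, "snps,rx-queues-to-use", 1);
 */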
MTL_QUEUE_DCB; + } else if (of_property_read_bool(q_node, + "snps,avb-algorithm")) { + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + + /* Credit Base Shaper parameters used by AVB */ + if (of_property_read_u32(q_node, "snps,send_slope", + &plat->tx_queues_cfg[queue].send_slope)) + plat->tx_queues_cfg[queue].send_slope = 0x0; + if (of_property_read_u32(q_node, "snps,idle_slope", + &plat->tx_queues_cfg[queue].idle_slope)) + plat->tx_queues_cfg[queue].idle_slope = 0x0; + if (of_property_read_u32(q_node, "snps,high_credit", + &plat->tx_queues_cfg[queue].high_credit)) + plat->tx_queues_cfg[queue].high_credit = 0x0; + if (of_property_read_u32(q_node, "snps,low_credit", + &plat->tx_queues_cfg[queue].low_credit)) + plat->tx_queues_cfg[queue].low_credit = 0x0; + } else { + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; + } + + if (of_property_read_u32(q_node, "snps,priority", + &plat->tx_queues_cfg[queue].prio)) { + plat->tx_queues_cfg[queue].prio = 0; + plat->tx_queues_cfg[queue].use_prio = false; + } else { + plat->tx_queues_cfg[queue].use_prio = true; + } + + queue++; + } + + of_node_put(rx_node); + of_node_put(tx_node); + of_node_put(q_node); +} + /** * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources * @plat: driver data platform structure @@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->axi = stmmac_axi_setup(pdev); + stmmac_mtl_setup(pdev, plat); + /* clock setup */ plat->stmmac_clk = devm_clk_get(&pdev->dev, STMMAC_RESOURCE_NAME); @@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) clk_prepare_enable(plat->pclk); /* Fall-back to main clock in case of no PTP ref is passed */ - plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref"); + plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref"); if (IS_ERR(plat->clk_ptp_ref)) { plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk); plat->clk_ptp_ref = NULL; dev_warn(&pdev->dev, "PTP uses main clock\n"); } else { - clk_prepare_enable(plat->clk_ptp_ref); plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref); dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 0e8e89f17dbb1128c6b562b17ae736622b6cf45b..382993c1561c5c9b071138d0f8ca0def10743e83 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -691,7 +691,8 @@ static void cas_mif_poll(struct cas *cp, const int enable) } /* Must be invoked under cp->lock */ -static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) +static void cas_begin_auto_negotiation(struct cas *cp, + const struct ethtool_link_ksettings *ep) { u16 ctl; #if 1 @@ -704,16 +705,16 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) if (!ep) goto start_aneg; lcntl = cp->link_cntl; - if (ep->autoneg == AUTONEG_ENABLE) + if (ep->base.autoneg == AUTONEG_ENABLE) { cp->link_cntl = BMCR_ANENABLE; - else { - u32 speed = ethtool_cmd_speed(ep); + } else { + u32 speed = ep->base.speed; cp->link_cntl = 0; if (speed == SPEED_100) cp->link_cntl |= BMCR_SPEED100; else if (speed == SPEED_1000) cp->link_cntl |= CAS_BMCR_SPEED1000; - if (ep->duplex == DUPLEX_FULL) + if (ep->base.duplex == DUPLEX_FULL) cp->link_cntl |= BMCR_FULLDPLX; } #if 1 @@ -4528,19 +4529,21 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); } -static int 
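The cassini conversion here, like the niu, sungem and sunhme ones further down, follows one recipe: keep building the legacy u32 supported/advertising masks in locals, fill cmd->base directly, and translate the masks into link-mode bitmaps at the end. A minimal sketch of that shape, using only the in-tree ethtool conversion helpers; the device details are illustrative:

/* Sketch of the get_link_ksettings() conversion shape used in this series. */
static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_Autoneg | SUPPORTED_100baseT_Full;
	u32 advertising = ADVERTISED_Autoneg | ADVERTISED_100baseT_Full;

	cmd->base.speed = SPEED_100;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* Legacy u32 masks become ETHTOOL_LINK_MODE_* bitmaps. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}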
cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int cas_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct cas *cp = netdev_priv(dev); u16 bmcr; int full_duplex, speed, pause; unsigned long flags; enum link_state linkstate = link_up; + u32 supported, advertising; - cmd->advertising = 0; - cmd->supported = SUPPORTED_Autoneg; + advertising = 0; + supported = SUPPORTED_Autoneg; if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { - cmd->supported |= SUPPORTED_1000baseT_Full; - cmd->advertising |= ADVERTISED_1000baseT_Full; + supported |= SUPPORTED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; } /* Record PHY settings if HW is on. */ @@ -4548,17 +4551,15 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) bmcr = 0; linkstate = cp->lstate; if (CAS_PHY_MII(cp->phy_type)) { - cmd->port = PORT_MII; - cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? - XCVR_INTERNAL : XCVR_EXTERNAL; - cmd->phy_address = cp->phy_addr; - cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | + cmd->base.port = PORT_MII; + cmd->base.phy_address = cp->phy_addr; + advertising |= ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - cmd->supported |= + supported |= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | @@ -4574,11 +4575,10 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) } } else { - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_INTERNAL; - cmd->phy_address = 0; - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; if (cp->hw_running) { /* pcs uses the same bits as mii */ @@ -4590,21 +4590,20 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) spin_unlock_irqrestore(&cp->lock, flags); if (bmcr & BMCR_ANENABLE) { - cmd->advertising |= ADVERTISED_Autoneg; - cmd->autoneg = AUTONEG_ENABLE; - ethtool_cmd_speed_set(cmd, ((speed == 10) ? + advertising |= ADVERTISED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.speed = ((speed == 10) ? SPEED_10 : ((speed == 1000) ? - SPEED_1000 : SPEED_100))); - cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + SPEED_1000 : SPEED_100)); + cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; } else { - cmd->autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ? + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? - SPEED_100 : SPEED_10))); - cmd->duplex = - (bmcr & BMCR_FULLDPLX) ? + SPEED_100 : SPEED_10)); + cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } if (linkstate != link_up) { @@ -4619,39 +4618,46 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) * settings that we configured. */ if (cp->link_cntl & BMCR_ANENABLE) { - ethtool_cmd_speed_set(cmd, 0); - cmd->duplex = 0xff; + cmd->base.speed = 0; + cmd->base.duplex = 0xff; } else { - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; if (cp->link_cntl & BMCR_SPEED100) { - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { - ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->base.speed = SPEED_1000; } - cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 
+ cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } } + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } -static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int cas_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct cas *cp = netdev_priv(dev); unsigned long flags; - u32 speed = ethtool_cmd_speed(cmd); + u32 speed = cmd->base.speed; /* Verify the settings we care about. */ - if (cmd->autoneg != AUTONEG_ENABLE && - cmd->autoneg != AUTONEG_DISABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE && + cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_DISABLE && + if (cmd->base.autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || - (cmd->duplex != DUPLEX_HALF && - cmd->duplex != DUPLEX_FULL))) + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. */ @@ -4753,8 +4759,6 @@ static void cas_get_ethtool_stats(struct net_device *dev, static const struct ethtool_ops cas_ethtool_ops = { .get_drvinfo = cas_get_drvinfo, - .get_settings = cas_get_settings, - .set_settings = cas_set_settings, .nway_reset = cas_nway_reset, .get_link = cas_get_link, .get_msglevel = cas_get_msglevel, @@ -4764,6 +4768,8 @@ static const struct ethtool_ops cas_ethtool_ops = { .get_sset_count = cas_get_sset_count, .get_strings = cas_get_strings, .get_ethtool_stats = cas_get_ethtool_stats, + .get_link_ksettings = cas_get_link_ksettings, + .set_link_ksettings = cas_set_link_ksettings, }; static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 89952deae47fc813b66c6d856f16dd648a803a0c..5a90fed0626065613ba59fa7c5f8ca3c2dff6ef3 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -1,6 +1,6 @@ /* ldmvsw.c: Sun4v LDOM Virtual Switch Driver. * - * Copyright (C) 2016 Oracle. All rights reserved. + * Copyright (C) 2016-2017 Oracle. All rights reserved. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -41,8 +41,8 @@ static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; #define DRV_MODULE_NAME "ldmvsw" -#define DRV_MODULE_VERSION "1.1" -#define DRV_MODULE_RELDATE "February 3, 2017" +#define DRV_MODULE_VERSION "1.2" +#define DRV_MODULE_RELDATE "March 4, 2017" static char version[] = DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; @@ -123,6 +123,20 @@ static void vsw_set_rx_mode(struct net_device *dev) return sunvnet_set_rx_mode_common(dev, port->vp); } +int ldmvsw_open(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + struct vio_driver_state *vio = &port->vio; + + /* reset the channel */ + vio_link_state_change(vio, LDC_EVENT_RESET); + vnet_port_reset(port); + vio_port_up(vio); + + return 0; +} +EXPORT_SYMBOL_GPL(ldmvsw_open); + #ifdef CONFIG_NET_POLL_CONTROLLER static void vsw_poll_controller(struct net_device *dev) { @@ -133,7 +147,7 @@ static void vsw_poll_controller(struct net_device *dev) #endif static const struct net_device_ops vsw_ops = { - .ndo_open = sunvnet_open_common, + .ndo_open = ldmvsw_open, .ndo_stop = sunvnet_close_common, .ndo_set_rx_mode = vsw_set_rx_mode, .ndo_set_mac_address = sunvnet_set_mac_addr_common, @@ -365,6 +379,11 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) napi_enable(&port->napi); vio_port_up(&port->vio); + /* assure no carrier until we receive an LDC_EVENT_UP, + * even if the vsw config script tries to force us up + */ + netif_carrier_off(dev); + netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr); pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 57978056b3366f0ecb0410f96dbfbde228ea44f1..2dcca249eb9c732e48a563f22d4e98bc79429e24 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6813,7 +6813,8 @@ static void niu_get_drvinfo(struct net_device *dev, sizeof(info->bus_info)); } -static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int niu_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct niu *np = netdev_priv(dev); struct niu_link_config *lp; @@ -6821,28 +6822,30 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) lp = &np->link_config; memset(cmd, 0, sizeof(*cmd)); - cmd->phy_address = np->phy_addr; - cmd->supported = lp->supported; - cmd->advertising = lp->active_advertising; - cmd->autoneg = lp->active_autoneg; - ethtool_cmd_speed_set(cmd, lp->active_speed); - cmd->duplex = lp->active_duplex; - cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; - cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? - XCVR_EXTERNAL : XCVR_INTERNAL; + cmd->base.phy_address = np->phy_addr; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + lp->supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + lp->active_advertising); + cmd->base.autoneg = lp->active_autoneg; + cmd->base.speed = lp->active_speed; + cmd->base.duplex = lp->active_duplex; + cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? 
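The ldmvsw hunks above gate the carrier on the LDC channel state: the port is probed with the carrier forced off, and only the LDC_EVENT_UP path (see the sunvnet_common changes below) turns it back on, so the vsw config script cannot make the interface look up before the channel actually is. A condensed sketch of the pattern, with illustrative names:

/* Probe: no carrier until the transport reports link-up. */
netif_carrier_off(dev);

/* LDC event path (sketch): */
if (event == LDC_EVENT_UP) {
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
} else if (event == LDC_EVENT_RESET) {
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
}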
PORT_FIBRE : PORT_TP; return 0; } -static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int niu_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct niu *np = netdev_priv(dev); struct niu_link_config *lp = &np->link_config; - lp->advertising = cmd->advertising; - lp->speed = ethtool_cmd_speed(cmd); - lp->duplex = cmd->duplex; - lp->autoneg = cmd->autoneg; + ethtool_convert_link_mode_to_legacy_u32(&lp->advertising, + cmd->link_modes.advertising); + lp->speed = cmd->base.speed; + lp->duplex = cmd->base.duplex; + lp->autoneg = cmd->base.autoneg; return niu_init_link(np); } @@ -7902,14 +7905,14 @@ static const struct ethtool_ops niu_ethtool_ops = { .nway_reset = niu_nway_reset, .get_eeprom_len = niu_get_eeprom_len, .get_eeprom = niu_get_eeprom, - .get_settings = niu_get_settings, - .set_settings = niu_set_settings, .get_strings = niu_get_strings, .get_sset_count = niu_get_sset_count, .get_ethtool_stats = niu_get_ethtool_stats, .set_phys_id = niu_set_phys_id, .get_rxnfc = niu_get_nfc, .set_rxnfc = niu_set_nfc, + .get_link_ksettings = niu_get_link_ksettings, + .set_link_ksettings = niu_set_link_ksettings, }; static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index c4caf486cbeffee85ca7ca829ae3fef0d6ad9fe4..3189722110c262fc4a59c1fb67aef953500e0ff6 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -169,7 +169,7 @@ static void bigmac_stop(struct bigmac *bp) static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) { - struct net_device_stats *stats = &bp->enet_stats; + struct net_device_stats *stats = &bp->dev->stats; stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR); sbus_writel(0, bregs + BMAC_RCRCECTR); @@ -774,8 +774,8 @@ static void bigmac_tx(struct bigmac *bp) if (this->tx_flags & TXD_OWN) break; skb = bp->tx_skbs[elem]; - bp->enet_stats.tx_packets++; - bp->enet_stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; dma_unmap_single(&bp->bigmac_op->dev, this->tx_addr, skb->len, DMA_TO_DEVICE); @@ -811,12 +811,12 @@ static void bigmac_rx(struct bigmac *bp) /* Check for errors. */ if (len < ETH_ZLEN) { - bp->enet_stats.rx_errors++; - bp->enet_stats.rx_length_errors++; + bp->dev->stats.rx_errors++; + bp->dev->stats.rx_length_errors++; drop_it: /* Return it to the BigMAC. 
*/ - bp->enet_stats.rx_dropped++; + bp->dev->stats.rx_dropped++; this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); goto next; @@ -875,8 +875,8 @@ static void bigmac_rx(struct bigmac *bp) /* No checksums done by the BigMAC ;-( */ skb->protocol = eth_type_trans(skb, bp->dev); netif_rx(skb); - bp->enet_stats.rx_packets++; - bp->enet_stats.rx_bytes += len; + bp->dev->stats.rx_packets++; + bp->dev->stats.rx_bytes += len; next: elem = NEXT_RX(elem); this = &rxbase[elem]; @@ -987,7 +987,7 @@ static struct net_device_stats *bigmac_get_stats(struct net_device *dev) struct bigmac *bp = netdev_priv(dev); bigmac_get_counters(bp, bp->bregs); - return &bp->enet_stats; + return &dev->stats; } static void bigmac_set_multicast(struct net_device *dev) diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h index 532fc56830cf319b3067b3caa6c8149f1ac115d6..ee56930475a8e5941c468b09d84da8dbf5314a94 100644 --- a/drivers/net/ethernet/sun/sunbmac.h +++ b/drivers/net/ethernet/sun/sunbmac.h @@ -311,7 +311,6 @@ struct bigmac { enum bigmac_timer_state timer_state; unsigned int timer_ticks; - struct net_device_stats enet_stats; struct platform_device *qec_op; struct platform_device *bigmac_op; struct net_device *dev; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 5c5952e782cd223c0aee844d414ee6c749c35804..fa607d062cb3130eff15295f61a8efce7c6a969c 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1250,12 +1250,18 @@ static void gem_stop_dma(struct gem *gp) // XXX dbl check what that function should do when called on PCS PHY -static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) +static void gem_begin_auto_negotiation(struct gem *gp, + const struct ethtool_link_ksettings *ep) { u32 advertise, features; int autoneg; int speed; int duplex; + u32 advertising; + + if (ep) + ethtool_convert_link_mode_to_legacy_u32( + &advertising, ep->link_modes.advertising); if (gp->phy_type != phy_mii_mdio0 && gp->phy_type != phy_mii_mdio1) @@ -1278,13 +1284,13 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) /* Setup link parameters */ if (!ep) goto start_aneg; - if (ep->autoneg == AUTONEG_ENABLE) { - advertise = ep->advertising; + if (ep->base.autoneg == AUTONEG_ENABLE) { + advertise = advertising; autoneg = 1; } else { autoneg = 0; - speed = ethtool_cmd_speed(ep); - duplex = ep->duplex; + speed = ep->base.speed; + duplex = ep->base.duplex; } start_aneg: @@ -2515,85 +2521,96 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); } -static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int gem_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct gem *gp = netdev_priv(dev); + u32 supported, advertising; if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { if (gp->phy_mii.def) - cmd->supported = gp->phy_mii.def->features; + supported = gp->phy_mii.def->features; else - cmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); /* XXX hardcoded stuff for now */ - cmd->port = PORT_MII; - cmd->transceiver = XCVR_EXTERNAL; - cmd->phy_address = 0; /* XXX fixed PHYAD */ + cmd->base.port = PORT_MII; + cmd->base.phy_address = 0; /* XXX fixed PHYAD */ /* Return current PHY settings */ - cmd->autoneg = gp->want_autoneg; - 
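The sunbmac conversion above (and the matching sunhme one below) drops the driver-private struct net_device_stats copy and counts straight into the stats embedded in struct net_device, so ndo_get_stats can simply return &dev->stats. The shape of the migration, reduced to its essentials:

/* Before: a private copy, kept in sync and returned from ndo_get_stats.
 *	struct net_device_stats enet_stats;	(field now removed)
 * After: update the counters that already live in struct net_device.
 */
static void count_rx_packet(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}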
ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); - cmd->duplex = gp->phy_mii.duplex; - cmd->advertising = gp->phy_mii.advertising; + cmd->base.autoneg = gp->want_autoneg; + cmd->base.speed = gp->phy_mii.speed; + cmd->base.duplex = gp->phy_mii.duplex; + advertising = gp->phy_mii.advertising; /* If we started with a forced mode, we don't have a default * advertise set, we need to return something sensible so * userland can re-enable autoneg properly. */ - if (cmd->advertising == 0) - cmd->advertising = cmd->supported; + if (advertising == 0) + advertising = supported; } else { // XXX PCS ? - cmd->supported = + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg); - cmd->advertising = cmd->supported; - ethtool_cmd_speed_set(cmd, 0); - cmd->duplex = cmd->port = cmd->phy_address = - cmd->transceiver = cmd->autoneg = 0; + advertising = supported; + cmd->base.speed = 0; + cmd->base.duplex = 0; + cmd->base.port = 0; + cmd->base.phy_address = 0; + cmd->base.autoneg = 0; /* serdes means usually a Fibre connector, with most fixed */ if (gp->phy_type == phy_serdes) { - cmd->port = PORT_FIBRE; - cmd->supported = (SUPPORTED_1000baseT_Half | + cmd->base.port = PORT_FIBRE; + supported = (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause); - cmd->advertising = cmd->supported; - cmd->transceiver = XCVR_INTERNAL; + advertising = supported; if (gp->lstate == link_up) - ethtool_cmd_speed_set(cmd, SPEED_1000); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = 1; + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = 1; } } - cmd->maxtxpkt = cmd->maxrxpkt = 0; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int gem_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct gem *gp = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(cmd); + u32 speed = cmd->base.speed; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); /* Verify the settings we care about. */ - if (cmd->autoneg != AUTONEG_ENABLE && - cmd->autoneg != AUTONEG_DISABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE && + cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE && - cmd->advertising == 0) + if (cmd->base.autoneg == AUTONEG_ENABLE && + advertising == 0) return -EINVAL; - if (cmd->autoneg == AUTONEG_DISABLE && + if (cmd->base.autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || - (cmd->duplex != DUPLEX_HALF && - cmd->duplex != DUPLEX_FULL))) + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. 
*/ @@ -2666,13 +2683,13 @@ static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static const struct ethtool_ops gem_ethtool_ops = { .get_drvinfo = gem_get_drvinfo, .get_link = ethtool_op_get_link, - .get_settings = gem_get_settings, - .set_settings = gem_set_settings, .nway_reset = gem_nway_reset, .get_msglevel = gem_get_msglevel, .set_msglevel = gem_set_msglevel, .get_wol = gem_get_wol, .set_wol = gem_set_wol, + .get_link_ksettings = gem_get_link_ksettings, + .set_link_ksettings = gem_set_link_ksettings, }; static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 72ff05cd3ed80c883d2bc34a969e8bdb1b61992a..9e983e1d824984d5f1023478f2680eec41aeac53 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -933,7 +933,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs) /* hp->happy_lock must be held */ static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs) { - struct net_device_stats *stats = &hp->net_stats; + struct net_device_stats *stats = &hp->dev->stats; stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR); hme_write32(hp, bregs + BMAC_RCRCECTR, 0); @@ -1294,9 +1294,10 @@ static void happy_meal_init_rings(struct happy_meal *hp) } /* hp->happy_lock must be held */ -static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, - void __iomem *tregs, - struct ethtool_cmd *ep) +static void +happy_meal_begin_auto_negotiation(struct happy_meal *hp, + void __iomem *tregs, + const struct ethtool_link_ksettings *ep) { int timeout; @@ -1309,7 +1310,7 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); - if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { + if (!ep || ep->base.autoneg == AUTONEG_ENABLE) { /* Advertise everything we can support. */ if (hp->sw_bmsr & BMSR_10HALF) hp->sw_advertise |= (ADVERTISE_10HALF); @@ -1384,14 +1385,14 @@ static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, /* Disable auto-negotiation in BMCR, enable the duplex and * speed setting, init the timer state machine, and fire it off. 
*/ - if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { + if (!ep || ep->base.autoneg == AUTONEG_ENABLE) { hp->sw_bmcr = BMCR_SPEED100; } else { - if (ethtool_cmd_speed(ep) == SPEED_100) + if (ep->base.speed == SPEED_100) hp->sw_bmcr = BMCR_SPEED100; else hp->sw_bmcr = 0; - if (ep->duplex == DUPLEX_FULL) + if (ep->base.duplex == DUPLEX_FULL) hp->sw_bmcr |= BMCR_FULLDPLX; } happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); @@ -1856,7 +1857,7 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status) if (status & GREG_STAT_TXLERR) printk("LateError "); if (status & GREG_STAT_TXPERR) - printk("ParityErro "); + printk("ParityError "); if (status & GREG_STAT_TXTERR) printk("TagBotch "); printk("]\n"); @@ -1946,7 +1947,7 @@ static void happy_meal_tx(struct happy_meal *hp) break; } hp->tx_skbs[elem] = NULL; - hp->net_stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { dma_addr = hme_read_desc32(hp, &this->tx_addr); @@ -1963,7 +1964,7 @@ static void happy_meal_tx(struct happy_meal *hp) } dev_kfree_skb_irq(skb); - hp->net_stats.tx_packets++; + dev->stats.tx_packets++; } hp->tx_old = elem; TXD((">")); @@ -2008,17 +2009,17 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) /* Check for errors. */ if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) { RXD(("ERR(%08x)]", flags)); - hp->net_stats.rx_errors++; + dev->stats.rx_errors++; if (len < ETH_ZLEN) - hp->net_stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (len & (RXFLAG_OVERFLOW >> 16)) { - hp->net_stats.rx_over_errors++; - hp->net_stats.rx_fifo_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_fifo_errors++; } /* Return it to the Happy meal. */ drop_it: - hp->net_stats.rx_dropped++; + dev->stats.rx_dropped++; hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), dma_addr); @@ -2083,8 +2084,8 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - hp->net_stats.rx_packets++; - hp->net_stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next: elem = NEXT_RX(elem); this = &rxbase[elem]; @@ -2395,7 +2396,7 @@ static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) happy_meal_get_counters(hp, hp->bigmacregs); spin_unlock_irq(&hp->happy_lock); - return &hp->net_stats; + return &dev->stats; } static void happy_meal_set_multicast(struct net_device *dev) @@ -2434,20 +2435,21 @@ static void happy_meal_set_multicast(struct net_device *dev) } /* Ethtool support... */ -static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int hme_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct happy_meal *hp = netdev_priv(dev); u32 speed; + u32 supported; - cmd->supported = + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); /* XXX hardcoded stuff for now */ - cmd->port = PORT_TP; /* XXX no MII support */ - cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */ - cmd->phy_address = 0; /* XXX fixed PHYAD */ + cmd->base.port = PORT_TP; /* XXX no MII support */ + cmd->base.phy_address = 0; /* XXX fixed PHYAD */ /* Record PHY settings. 
*/ spin_lock_irq(&hp->happy_lock); @@ -2456,41 +2458,45 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) spin_unlock_irq(&hp->happy_lock); if (hp->sw_bmcr & BMCR_ANENABLE) { - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); if (speed == SPEED_100) - cmd->duplex = + cmd->base.duplex = (hp->sw_lpa & (LPA_100FULL)) ? DUPLEX_FULL : DUPLEX_HALF; else - cmd->duplex = + cmd->base.duplex = (hp->sw_lpa & (LPA_10FULL)) ? DUPLEX_FULL : DUPLEX_HALF; } else { - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; - cmd->duplex = + cmd->base.duplex = (hp->sw_bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } - ethtool_cmd_speed_set(cmd, speed); + cmd->base.speed = speed; + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + return 0; } -static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int hme_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct happy_meal *hp = netdev_priv(dev); /* Verify the settings we care about. */ - if (cmd->autoneg != AUTONEG_ENABLE && - cmd->autoneg != AUTONEG_DISABLE) + if (cmd->base.autoneg != AUTONEG_ENABLE && + cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_DISABLE && - ((ethtool_cmd_speed(cmd) != SPEED_100 && - ethtool_cmd_speed(cmd) != SPEED_10) || - (cmd->duplex != DUPLEX_HALF && - cmd->duplex != DUPLEX_FULL))) + if (cmd->base.autoneg == AUTONEG_DISABLE && + ((cmd->base.speed != SPEED_100 && + cmd->base.speed != SPEED_10) || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Ok, do it to it. */ @@ -2537,10 +2543,10 @@ static u32 hme_get_link(struct net_device *dev) } static const struct ethtool_ops hme_ethtool_ops = { - .get_settings = hme_get_settings, - .set_settings = hme_set_settings, .get_drvinfo = hme_get_drvinfo, .get_link = hme_get_link, + .get_link_ksettings = hme_get_link_ksettings, + .set_link_ksettings = hme_set_link_ksettings, }; static int hme_version_printed; diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h index 4a8d5b18dfd5738d9254706b84174eb801f6c562..3af540adb3c58e83b4265c77fa436d7d44da06c7 100644 --- a/drivers/net/ethernet/sun/sunhme.h +++ b/drivers/net/ethernet/sun/sunhme.h @@ -418,8 +418,6 @@ struct happy_meal { int rx_new, tx_new, rx_old, tx_old; - struct net_device_stats net_stats; /* Statistical counters */ - #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) u32 (*read32)(void __iomem *); void (*write32)(void __iomem *, u32); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 4cc2571f71c6b65a076071789cef36f537bc0ddd..0b95105f706007ae9e0ff4325ea9983f9d514ee4 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1,7 +1,7 @@ /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller - * Copyright (C) 2016 Oracle. All rights reserved. + * Copyright (C) 2016-2017 Oracle. All rights reserved. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -77,11 +77,125 @@ static void vnet_set_msglevel(struct net_device *dev, u32 value) vp->msg_enable = value; } +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx_packets" }, + { "tx_packets" }, + { "rx_bytes" }, + { "tx_bytes" }, + { "rx_errors" }, + { "tx_errors" }, + { "rx_dropped" }, + { "tx_dropped" }, + { "multicast" }, + { "rx_length_errors" }, + { "rx_frame_errors" }, + { "rx_missed_errors" }, + { "tx_carrier_errors" }, + { "nports" }, +}; + +static int vnet_get_sset_count(struct net_device *dev, int sset) +{ + struct vnet *vp = (struct vnet *)netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys) + + (NUM_VNET_PORT_STATS * vp->nports); + default: + return -EOPNOTSUPP; + } +} + +static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct vnet *vp = (struct vnet *)netdev_priv(dev); + struct vnet_port *port; + char *p = (char *)buf; + + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys)); + p += sizeof(ethtool_stats_keys); + + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { + snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM", + port->q_index, port->switch_port ? "s" : "q", + port->raddr); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets", + port->q_index); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets", + port->q_index); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes", + port->q_index); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes", + port->q_index); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "p%u.event_up", + port->q_index); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset", + port->q_index); + p += ETH_GSTRING_LEN; + } + rcu_read_unlock(); + break; + default: + WARN_ON(1); + break; + } +} + +static void vnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *data) +{ + struct vnet *vp = (struct vnet *)netdev_priv(dev); + struct vnet_port *port; + int i = 0; + + data[i++] = dev->stats.rx_packets; + data[i++] = dev->stats.tx_packets; + data[i++] = dev->stats.rx_bytes; + data[i++] = dev->stats.tx_bytes; + data[i++] = dev->stats.rx_errors; + data[i++] = dev->stats.tx_errors; + data[i++] = dev->stats.rx_dropped; + data[i++] = dev->stats.tx_dropped; + data[i++] = dev->stats.multicast; + data[i++] = dev->stats.rx_length_errors; + data[i++] = dev->stats.rx_frame_errors; + data[i++] = dev->stats.rx_missed_errors; + data[i++] = dev->stats.tx_carrier_errors; + data[i++] = vp->nports; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { + data[i++] = port->q_index; + data[i++] = port->stats.rx_packets; + data[i++] = port->stats.tx_packets; + data[i++] = port->stats.rx_bytes; + data[i++] = port->stats.tx_bytes; + data[i++] = port->stats.event_up; + data[i++] = port->stats.event_reset; + } + rcu_read_unlock(); +} + static const struct ethtool_ops vnet_ethtool_ops = { .get_drvinfo = vnet_get_drvinfo, .get_msglevel = vnet_get_msglevel, .set_msglevel = vnet_set_msglevel, .get_link = ethtool_op_get_link, + .get_sset_count = vnet_get_sset_count, + .get_strings = vnet_get_strings, + .get_ethtool_stats = vnet_get_ethtool_stats, }; static LIST_HEAD(vnet_list); diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 
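A quick consistency check on the counts above: NUM_VNET_PORT_STATS is sizeof(struct vnet_port_stats) / sizeof(u32) == 7, i.e. six live counters plus q_placeholder, which pads the struct so the per-port value count matches the seven per-port strings (the "p%u.%s-%pM" identity string pairs with the q_index value). So for a two-port vnet device:

/* vnet_get_sset_count(dev, ETH_SS_STATS)
 *	= ARRAY_SIZE(ethtool_stats_keys) + NUM_VNET_PORT_STATS * nports
 *	= 14 + 7 * 2 = 28 entries reported by "ethtool -S".
 */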
fa2d11ca9b81e49d84a0b66dbe262cd3977c0560..9e86833249d48beed95fe8c4d19774582f29dc75 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1,7 +1,7 @@ /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller - * Copyright (C) 2016 Oracle. All rights reserved. + * Copyright (C) 2016-2017 Oracle. All rights reserved. */ #include @@ -43,7 +43,6 @@ MODULE_LICENSE("GPL"); MODULE_VERSION("1.1"); static int __vnet_tx_trigger(struct vnet_port *port, u32 start); -static void vnet_port_reset(struct vnet_port *port); static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) { @@ -410,8 +409,12 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; + if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest))) + dev->stats.multicast++; dev->stats.rx_packets++; dev->stats.rx_bytes += len; + port->stats.rx_packets++; + port->stats.rx_bytes += len; napi_gro_receive(&port->napi, skb); return 0; @@ -747,6 +750,13 @@ static int vnet_event_napi(struct vnet_port *port, int budget) /* RESET takes precedent over any other event */ if (port->rx_event & LDC_EVENT_RESET) { + /* a link went down */ + + if (port->vsw == 1) { + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + } + vio_link_state_change(vio, LDC_EVENT_RESET); vnet_port_reset(port); vio_port_up(vio); @@ -762,12 +772,21 @@ static int vnet_event_napi(struct vnet_port *port, int budget) maybe_tx_wakeup(port); port->rx_event = 0; + port->stats.event_reset++; return 0; } if (port->rx_event & LDC_EVENT_UP) { + /* a link came up */ + + if (port->vsw == 1) { + netif_carrier_on(port->dev); + netif_tx_start_all_queues(port->dev); + } + vio_link_state_change(vio, LDC_EVENT_UP); port->rx_event = 0; + port->stats.event_up++; return 0; } @@ -1417,6 +1436,8 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, dev->stats.tx_packets++; dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; + port->stats.tx_packets++; + port->stats.tx_bytes += port->tx_bufs[txi].skb->len; dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 1)) { @@ -1631,7 +1652,7 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) } EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); -static void vnet_port_reset(struct vnet_port *port) +void vnet_port_reset(struct vnet_port *port) { del_timer(&port->clean_timer); sunvnet_port_free_tx_bufs_common(port); @@ -1639,6 +1660,7 @@ static void vnet_port_reset(struct vnet_port *port) port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ port->tsolen = 0; } +EXPORT_SYMBOL_GPL(vnet_port_reset); static int vnet_port_alloc_tx_ring(struct vnet_port *port) { @@ -1708,20 +1730,32 @@ EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); void sunvnet_port_add_txq_common(struct vnet_port *port) { struct vnet *vp = port->vp; - int n; + int smallest = 0; + int i; + + /* find the first least-used q + * When there are more ldoms than q's, we start to + * double up on ports per queue. 
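The scan that follows replaces the old "port number modulo VNET_MAX_TXQS" assignment with a least-used search: take the first completely free queue, otherwise the queue with the fewest ports, so queues only double up once every queue already has a port. Two worked cases of the loop below:

/* q_used = {2, 0, 1, ...}: i == 1 is free, the loop breaks, smallest == 1.
 * q_used = {2, 1, 1, ...}: no free slot; smallest settles on index 1,
 *			    the first queue holding the minimum count.
 */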
+ */ + for (i = 0; i < VNET_MAX_TXQS; i++) { + if (vp->q_used[i] == 0) { + smallest = i; + break; + } + if (vp->q_used[i] < vp->q_used[smallest]) + smallest = i; + } - n = vp->nports++; - n = n & (VNET_MAX_TXQS - 1); - port->q_index = n; - netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), - port->q_index)); + vp->nports++; + vp->q_used[smallest]++; + port->q_index = smallest; } EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); void sunvnet_port_rm_txq_common(struct vnet_port *port) { port->vp->nports--; - netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), - port->q_index)); + port->vp->q_used[port->q_index]--; + port->q_index = 0; } EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h index ce5c824128a3698acb31ddbac42555657863b10f..b20d6fa7ef25b798401e24a59398c893a026c462 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.h +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -35,6 +35,19 @@ struct vnet_tx_entry { struct vnet; +struct vnet_port_stats { + /* keep them all the same size */ + u32 rx_bytes; + u32 tx_bytes; + u32 rx_packets; + u32 tx_packets; + u32 event_up; + u32 event_reset; + u32 q_placeholder; +}; + +#define NUM_VNET_PORT_STATS (sizeof(struct vnet_port_stats) / sizeof(u32)) + /* Structure to describe a vnet-port or vsw-port in the MD. * If the vsw bit is set, this structure represents a vswitch * port, and the net_device can be found from ->dev. If the @@ -44,6 +57,8 @@ struct vnet; struct vnet_port { struct vio_driver_state vio; + struct vnet_port_stats stats; + struct hlist_node hash; u8 raddr[ETH_ALEN]; unsigned switch_port:1; @@ -97,22 +112,15 @@ struct vnet_mcast_entry { }; struct vnet { - /* Protects port_list and port_hash. */ - spinlock_t lock; - + spinlock_t lock; /* Protects port_list and port_hash. */ struct net_device *dev; - u32 msg_enable; - + u8 q_used[VNET_MAX_TXQS]; struct list_head port_list; - struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; - struct vnet_mcast_entry *mcast_list; - struct list_head list; u64 local_mac; - int nports; }; @@ -139,6 +147,7 @@ int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg); void sunvnet_handshake_complete_common(struct vio_driver_state *vio); int sunvnet_poll_common(struct napi_struct *napi, int budget); void sunvnet_port_free_tx_bufs_common(struct vnet_port *port); +void vnet_port_reset(struct vnet_port *port); bool sunvnet_port_is_up_common(struct vnet_port *vnet); void sunvnet_port_add_txq_common(struct vnet_port *port); void sunvnet_port_rm_txq_common(struct vnet_port *port); diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..a9503884e1c2bea12320ce87894a0a549a035bb8 --- /dev/null +++ b/drivers/net/ethernet/synopsys/Kconfig @@ -0,0 +1,41 @@ +# +# Synopsys network device configuration +# + +config NET_VENDOR_SYNOPSYS + bool "Synopsys devices" + default y + ---help--- + If you have a network (Ethernet) device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Synopsys devices. If you say Y, you will be asked + for your specific device in the following questions. 
+ +if NET_VENDOR_SYNOPSYS + +config DWC_XLGMAC + tristate "Synopsys DWC Enterprise Ethernet (XLGMAC) driver support" + depends on HAS_IOMEM && HAS_DMA + select BITREVERSE + select CRC32 + ---help--- + This driver supports the Synopsys DesignWare Cores Enterprise + Ethernet (dwc-xlgmac). + +if DWC_XLGMAC + +config DWC_XLGMAC_PCI + tristate "XLGMAC PCI bus support" + depends on DWC_XLGMAC && PCI + ---help--- + This selects the pci bus support for the dwc-xlgmac driver. + This driver was tested on Synopsys XLGMAC IP Prototyping Kit. + + If you have a controller with this interface, say Y or M here. + If unsure, say N. + +endif # DWC_XLGMAC + +endif # NET_VENDOR_SYNOPSYS diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0ad01916f11e4c60ec1fc88a16ecd241eb119c17 --- /dev/null +++ b/drivers/net/ethernet/synopsys/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Synopsys network device drivers. +# + +obj-$(CONFIG_DWC_XLGMAC) += dwc-xlgmac.o +dwc-xlgmac-objs := dwc-xlgmac-net.o dwc-xlgmac-desc.o \ + dwc-xlgmac-hw.o dwc-xlgmac-common.o \ + dwc-xlgmac-ethtool.o + +dwc-xlgmac-$(CONFIG_DWC_XLGMAC_PCI) += dwc-xlgmac-pci.o diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c new file mode 100644 index 0000000000000000000000000000000000000000..d655a4261e982921a291cbccd6ccae39827fee0f --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c @@ -0,0 +1,737 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. 
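For reference, a .config fragment that builds the new driver with its PCI glue as modules, assuming the usual vendor-guard defaults:

CONFIG_NET_VENDOR_SYNOPSYS=y
CONFIG_DWC_XLGMAC=m
CONFIG_DWC_XLGMAC_PCI=m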
+ */ + +#include +#include + +#include "dwc-xlgmac.h" +#include "dwc-xlgmac-reg.h" + +MODULE_LICENSE("Dual BSD/GPL"); + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)"); +static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP); + +static unsigned char dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7}; + +static void xlgmac_read_mac_addr(struct xlgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + + /* Currently it uses a static mac address for test */ + memcpy(pdata->mac_addr, dev_addr, netdev->addr_len); +} + +static void xlgmac_default_config(struct xlgmac_pdata *pdata) +{ + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->rx_sf_mode = MTL_RSF_DISABLE; + pdata->pblx8 = DMA_PBL_X8_ENABLE; + pdata->tx_pbl = DMA_PBL_32; + pdata->rx_pbl = DMA_PBL_32; + pdata->tx_threshold = MTL_TX_THRESHOLD_128; + pdata->rx_threshold = MTL_RX_THRESHOLD_128; + pdata->tx_pause = 1; + pdata->rx_pause = 1; + pdata->phy_speed = SPEED_25000; + pdata->sysclk_rate = XLGMAC_SYSCLOCK; + + strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name)); + strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver)); +} + +static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata) +{ + xlgmac_init_desc_ops(&pdata->desc_ops); + xlgmac_init_hw_ops(&pdata->hw_ops); +} + +static int xlgmac_init(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + unsigned int i; + int ret; + + /* Set default configuration data */ + xlgmac_default_config(pdata); + + /* Set irq, base_addr, MAC address, */ + netdev->irq = pdata->dev_irq; + netdev->base_addr = (unsigned long)pdata->mac_regs; + xlgmac_read_mac_addr(pdata); + memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len); + + /* Set all the function pointers */ + xlgmac_init_all_ops(pdata); + + /* Issue software reset to device */ + hw_ops->exit(pdata); + + /* Populate the hardware features */ + xlgmac_get_all_hw_features(pdata); + xlgmac_print_all_hw_features(pdata); + + /* TODO: Set the PHY mode to XLGMII */ + + /* Set the DMA mask */ + ret = dma_set_mask_and_coherent(pdata->dev, + DMA_BIT_MASK(pdata->hw_feat.dma_width)); + if (ret) { + dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n"); + return ret; + } + + /* Channel and ring params initializtion + * pdata->channel_count; + * pdata->tx_ring_count; + * pdata->rx_ring_count; + * pdata->tx_desc_count; + * pdata->rx_desc_count; + */ + BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_TX_DESC_CNT); + pdata->tx_desc_count = XLGMAC_TX_DESC_CNT; + if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { + dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n", + pdata->tx_desc_count); + ret = -EINVAL; + return ret; + } + BUILD_BUG_ON_NOT_POWER_OF_2(XLGMAC_RX_DESC_CNT); + pdata->rx_desc_count = XLGMAC_RX_DESC_CNT; + if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) { + dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n", + pdata->rx_desc_count); + ret = -EINVAL; + return ret; + } + + pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), + pdata->hw_feat.tx_ch_cnt); + pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, + pdata->hw_feat.tx_q_cnt); + pdata->tx_q_count = pdata->tx_ring_count; + ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count); + if (ret) { + dev_err(pdata->dev, "error setting real tx queue count\n"); + return ret; + } + + 
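The descriptor-count validation above is belt and braces: BUILD_BUG_ON_NOT_POWER_OF_2() rejects a bad constant at compile time, and the (n & (n - 1)) test repeats the check at run time. The runtime idiom is the classic single-bit test, equivalent to is_power_of_2() from <linux/log2.h>:

/* n is a power of two iff exactly one bit is set: clearing the lowest
 * set bit via (n & (n - 1)) must leave zero.
 * 256 & 255 == 0 (valid); 384 & 383 == 256 (rejected with -EINVAL).
 */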
pdata->rx_ring_count = min_t(unsigned int, + netif_get_num_default_rss_queues(), + pdata->hw_feat.rx_ch_cnt); + pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, + pdata->hw_feat.rx_q_cnt); + pdata->rx_q_count = pdata->rx_ring_count; + ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count); + if (ret) { + dev_err(pdata->dev, "error setting real rx queue count\n"); + return ret; + } + + pdata->channel_count = + max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + /* Initialize RSS hash key and lookup table */ + netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); + + for (i = 0; i < XLGMAC_RSS_MAX_TABLE_SIZE; i++) + pdata->rss_table[i] = XLGMAC_SET_REG_BITS( + pdata->rss_table[i], + MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, + i % pdata->rx_ring_count); + + pdata->rss_options = XLGMAC_SET_REG_BITS( + pdata->rss_options, + MAC_RSSCR_IP2TE_POS, + MAC_RSSCR_IP2TE_LEN, 1); + pdata->rss_options = XLGMAC_SET_REG_BITS( + pdata->rss_options, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN, 1); + pdata->rss_options = XLGMAC_SET_REG_BITS( + pdata->rss_options, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN, 1); + + /* Set device operations */ + netdev->netdev_ops = xlgmac_get_netdev_ops(); + netdev->ethtool_ops = xlgmac_get_ethtool_ops(); + + /* Set device features */ + if (pdata->hw_feat.tso) { + netdev->hw_features = NETIF_F_TSO; + netdev->hw_features |= NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_SG; + netdev->hw_features |= NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + } else if (pdata->hw_feat.tx_coe) { + netdev->hw_features = NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + } + + if (pdata->hw_feat.rx_coe) { + netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_GRO; + } + + if (pdata->hw_feat.rss) + netdev->hw_features |= NETIF_F_RXHASH; + + netdev->vlan_features |= netdev->hw_features; + + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (pdata->hw_feat.sa_vlan_ins) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + if (pdata->hw_feat.vlhash) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->features |= netdev->hw_features; + pdata->netdev_features = netdev->features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* Use default watchdog timeout */ + netdev->watchdog_timeo = 0; + + /* Tx coalesce parameters initialization */ + pdata->tx_usecs = XLGMAC_INIT_DMA_TX_USECS; + pdata->tx_frames = XLGMAC_INIT_DMA_TX_FRAMES; + + /* Rx coalesce parameters initialization */ + pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, XLGMAC_INIT_DMA_RX_USECS); + pdata->rx_usecs = XLGMAC_INIT_DMA_RX_USECS; + pdata->rx_frames = XLGMAC_INIT_DMA_RX_FRAMES; + + return 0; +} + +int xlgmac_drv_probe(struct device *dev, struct xlgmac_resources *res) +{ + struct xlgmac_pdata *pdata; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct xlgmac_pdata), + XLGMAC_MAX_DMA_CHANNELS); + + if (!netdev) { + dev_err(dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(netdev, dev); + dev_set_drvdata(dev, netdev); + pdata = netdev_priv(netdev); + pdata->dev = dev; + pdata->netdev = netdev; + + pdata->dev_irq = res->irq; + pdata->mac_regs = res->addr; + + mutex_init(&pdata->rss_mutex); + pdata->msg_enable = netif_msg_init(debug, default_msg_level); + + ret = xlgmac_init(pdata); + if (ret) { + dev_err(dev, "xlgmac init failed\n"); + goto err_free_netdev; + } + + ret = register_netdev(netdev); + if (ret) { + dev_err(dev, "net device registration failed\n"); + goto 
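The RSS initialization above fills the indirection table round-robin via i % rx_ring_count, so the XLGMAC_RSS_MAX_TABLE_SIZE hash buckets spread evenly over the active RX rings; with four rings the DMCH fields read 0, 1, 2, 3, 0, 1, 2, 3, and so on. The same idea, stripped of the register plumbing (names illustrative):

/* Round-robin indirection fill, as above: */
for (i = 0; i < table_size; i++)
	rss_table[i] = i % rx_ring_count;	/* ring for hash bucket i */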
err_free_netdev; + } + + return 0; + +err_free_netdev: + free_netdev(netdev); + + return ret; +} + +int xlgmac_drv_remove(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + + unregister_netdev(netdev); + free_netdev(netdev); + + return 0; +} + +void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + unsigned int idx, + unsigned int count, + unsigned int flag) +{ + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + + while (count--) { + desc_data = XLGMAC_GET_DESC_DATA(ring, idx); + dma_desc = desc_data->dma_desc; + + netdev_dbg(pdata->netdev, "TX: dma_desc=%p, dma_desc_addr=%pad\n", + desc_data->dma_desc, &desc_data->dma_desc_addr); + netdev_dbg(pdata->netdev, + "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, + (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", + le32_to_cpu(dma_desc->desc0), + le32_to_cpu(dma_desc->desc1), + le32_to_cpu(dma_desc->desc2), + le32_to_cpu(dma_desc->desc3)); + + idx++; + } +} + +void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + unsigned int idx) +{ + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + + desc_data = XLGMAC_GET_DESC_DATA(ring, idx); + dma_desc = desc_data->dma_desc; + + netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n", + desc_data->dma_desc, &desc_data->dma_desc_addr); + netdev_dbg(pdata->netdev, + "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", + idx, + le32_to_cpu(dma_desc->desc0), + le32_to_cpu(dma_desc->desc1), + le32_to_cpu(dma_desc->desc2), + le32_to_cpu(dma_desc->desc3)); +} + +void xlgmac_print_pkt(struct net_device *netdev, + struct sk_buff *skb, bool tx_rx) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + unsigned char *buf = skb->data; + unsigned char buffer[128]; + unsigned int i, j; + + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); + + netdev_dbg(netdev, "%s packet of %d bytes\n", + (tx_rx ? 
"TX" : "RX"), skb->len); + + netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); + netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); + netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + + for (i = 0, j = 0; i < skb->len;) { + j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", + buf[i++]); + + if ((i % 32) == 0) { + netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer); + j = 0; + } else if ((i % 16) == 0) { + buffer[j++] = ' '; + buffer[j++] = ' '; + } else if ((i % 4) == 0) { + buffer[j++] = ' '; + } + } + if (i % 32) + netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer); + + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); +} + +void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_features *hw_feat = &pdata->hw_feat; + unsigned int mac_hfr0, mac_hfr1, mac_hfr2; + + mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R); + mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R); + mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R); + + memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = readl(pdata->mac_regs + MAC_VR); + + /* Hardware feature register 0 */ + hw_feat->phyifsel = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_PHYIFSEL_POS, + MAC_HWF0R_PHYIFSEL_LEN); + hw_feat->vlhash = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_VLHASH_POS, + MAC_HWF0R_VLHASH_LEN); + hw_feat->sma = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_SMASEL_POS, + MAC_HWF0R_SMASEL_LEN); + hw_feat->rwk = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_RWKSEL_POS, + MAC_HWF0R_RWKSEL_LEN); + hw_feat->mgk = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_MGKSEL_POS, + MAC_HWF0R_MGKSEL_LEN); + hw_feat->mmc = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_MMCSEL_POS, + MAC_HWF0R_MMCSEL_LEN); + hw_feat->aoe = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_ARPOFFSEL_POS, + MAC_HWF0R_ARPOFFSEL_LEN); + hw_feat->ts = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_TSSEL_POS, + MAC_HWF0R_TSSEL_LEN); + hw_feat->eee = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_EEESEL_POS, + MAC_HWF0R_EEESEL_LEN); + hw_feat->tx_coe = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_TXCOESEL_POS, + MAC_HWF0R_TXCOESEL_LEN); + hw_feat->rx_coe = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_RXCOESEL_POS, + MAC_HWF0R_RXCOESEL_LEN); + hw_feat->addn_mac = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_ADDMACADRSEL_POS, + MAC_HWF0R_ADDMACADRSEL_LEN); + hw_feat->ts_src = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_TSSTSSEL_POS, + MAC_HWF0R_TSSTSSEL_LEN); + hw_feat->sa_vlan_ins = XLGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_SAVLANINS_POS, + MAC_HWF0R_SAVLANINS_LEN); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_RXFIFOSIZE_POS, + MAC_HWF1R_RXFIFOSIZE_LEN); + hw_feat->tx_fifo_size = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_TXFIFOSIZE_POS, + MAC_HWF1R_TXFIFOSIZE_LEN); + hw_feat->adv_ts_hi = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_ADVTHWORD_POS, + MAC_HWF1R_ADVTHWORD_LEN); + hw_feat->dma_width = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_ADDR64_POS, + MAC_HWF1R_ADDR64_LEN); + hw_feat->dcb = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_DCBEN_POS, + MAC_HWF1R_DCBEN_LEN); + hw_feat->sph = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_SPHEN_POS, + MAC_HWF1R_SPHEN_LEN); + hw_feat->tso = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_TSOEN_POS, + MAC_HWF1R_TSOEN_LEN); + hw_feat->dma_debug = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_DBGMEMA_POS, + MAC_HWF1R_DBGMEMA_LEN); + hw_feat->rss = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_RSSEN_POS, + MAC_HWF1R_RSSEN_LEN); + hw_feat->tc_cnt = XLGMAC_GET_REG_BITS(mac_hfr1, 
+ MAC_HWF1R_NUMTC_POS, + MAC_HWF1R_NUMTC_LEN); + hw_feat->hash_table_size = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_HASHTBLSZ_POS, + MAC_HWF1R_HASHTBLSZ_LEN); + hw_feat->l3l4_filter_num = XLGMAC_GET_REG_BITS(mac_hfr1, + MAC_HWF1R_L3L4FNUM_POS, + MAC_HWF1R_L3L4FNUM_LEN); + + /* Hardware feature register 2 */ + hw_feat->rx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, + MAC_HWF2R_RXQCNT_POS, + MAC_HWF2R_RXQCNT_LEN); + hw_feat->tx_q_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, + MAC_HWF2R_TXQCNT_POS, + MAC_HWF2R_TXQCNT_LEN); + hw_feat->rx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, + MAC_HWF2R_RXCHCNT_POS, + MAC_HWF2R_RXCHCNT_LEN); + hw_feat->tx_ch_cnt = XLGMAC_GET_REG_BITS(mac_hfr2, + MAC_HWF2R_TXCHCNT_POS, + MAC_HWF2R_TXCHCNT_LEN); + hw_feat->pps_out_num = XLGMAC_GET_REG_BITS(mac_hfr2, + MAC_HWF2R_PPSOUTNUM_POS, + MAC_HWF2R_PPSOUTNUM_LEN); + hw_feat->aux_snap_num = XLGMAC_GET_REG_BITS(mac_hfr2, + MAC_HWF2R_AUXSNAPNUM_POS, + MAC_HWF2R_AUXSNAPNUM_LEN); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; +} + +void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata) +{ + char *str = NULL; + + XLGMAC_PR("\n"); + XLGMAC_PR("=====================================================\n"); + XLGMAC_PR("\n"); + XLGMAC_PR("HW supports the following features\n"); + XLGMAC_PR("\n"); + /* HW Feature Register0 */ + XLGMAC_PR("VLAN Hash Filter Selected : %s\n", + pdata->hw_feat.vlhash ? "YES" : "NO"); + XLGMAC_PR("SMA (MDIO) Interface : %s\n", + pdata->hw_feat.sma ? "YES" : "NO"); + XLGMAC_PR("PMT Remote Wake-up Packet Enable : %s\n", + pdata->hw_feat.rwk ? "YES" : "NO"); + XLGMAC_PR("PMT Magic Packet Enable : %s\n", + pdata->hw_feat.mgk ? "YES" : "NO"); + XLGMAC_PR("RMON/MMC Module Enable : %s\n", + pdata->hw_feat.mmc ? "YES" : "NO"); + XLGMAC_PR("ARP Offload Enabled : %s\n", + pdata->hw_feat.aoe ? "YES" : "NO"); + XLGMAC_PR("IEEE 1588-2008 Timestamp Enabled : %s\n", + pdata->hw_feat.ts ? "YES" : "NO"); + XLGMAC_PR("Energy Efficient Ethernet Enabled : %s\n", + pdata->hw_feat.eee ? "YES" : "NO"); + XLGMAC_PR("Transmit Checksum Offload Enabled : %s\n", + pdata->hw_feat.tx_coe ? "YES" : "NO"); + XLGMAC_PR("Receive Checksum Offload Enabled : %s\n", + pdata->hw_feat.rx_coe ? "YES" : "NO"); + XLGMAC_PR("Additional MAC Addresses 1-31 Selected : %s\n", + pdata->hw_feat.addn_mac ? "YES" : "NO"); + + switch (pdata->hw_feat.ts_src) { + case 0: + str = "RESERVED"; + break; + case 1: + str = "INTERNAL"; + break; + case 2: + str = "EXTERNAL"; + break; + case 3: + str = "BOTH"; + break; + } + XLGMAC_PR("Timestamp System Time Source : %s\n", str); + + XLGMAC_PR("Source Address or VLAN Insertion Enable : %s\n", + pdata->hw_feat.sa_vlan_ins ?
"YES" : "NO"); + + /* HW Feature Register1 */ + switch (pdata->hw_feat.rx_fifo_size) { + case 0: + str = "128 bytes"; + break; + case 1: + str = "256 bytes"; + break; + case 2: + str = "512 bytes"; + break; + case 3: + str = "1 KBytes"; + break; + case 4: + str = "2 KBytes"; + break; + case 5: + str = "4 KBytes"; + break; + case 6: + str = "8 KBytes"; + break; + case 7: + str = "16 KBytes"; + break; + case 8: + str = "32 kBytes"; + break; + case 9: + str = "64 KBytes"; + break; + case 10: + str = "128 KBytes"; + break; + case 11: + str = "256 KBytes"; + break; + default: + str = "RESERVED"; + } + XLGMAC_PR("MTL Receive FIFO Size : %s\n", str); + + switch (pdata->hw_feat.tx_fifo_size) { + case 0: + str = "128 bytes"; + break; + case 1: + str = "256 bytes"; + break; + case 2: + str = "512 bytes"; + break; + case 3: + str = "1 KBytes"; + break; + case 4: + str = "2 KBytes"; + break; + case 5: + str = "4 KBytes"; + break; + case 6: + str = "8 KBytes"; + break; + case 7: + str = "16 KBytes"; + break; + case 8: + str = "32 kBytes"; + break; + case 9: + str = "64 KBytes"; + break; + case 10: + str = "128 KBytes"; + break; + case 11: + str = "256 KBytes"; + break; + default: + str = "RESERVED"; + } + XLGMAC_PR("MTL Transmit FIFO Size : %s\n", str); + + XLGMAC_PR("IEEE 1588 High Word Register Enable : %s\n", + pdata->hw_feat.adv_ts_hi ? "YES" : "NO"); + XLGMAC_PR("Address width : %u\n", + pdata->hw_feat.dma_width); + XLGMAC_PR("DCB Feature Enable : %s\n", + pdata->hw_feat.dcb ? "YES" : "NO"); + XLGMAC_PR("Split Header Feature Enable : %s\n", + pdata->hw_feat.sph ? "YES" : "NO"); + XLGMAC_PR("TCP Segmentation Offload Enable : %s\n", + pdata->hw_feat.tso ? "YES" : "NO"); + XLGMAC_PR("DMA Debug Registers Enabled : %s\n", + pdata->hw_feat.dma_debug ? "YES" : "NO"); + XLGMAC_PR("RSS Feature Enabled : %s\n", + pdata->hw_feat.rss ? 
"YES" : "NO"); + XLGMAC_PR("Number of Traffic classes : %u\n", + (pdata->hw_feat.tc_cnt)); + XLGMAC_PR("Hash Table Size : %u\n", + pdata->hw_feat.hash_table_size); + XLGMAC_PR("Total number of L3 or L4 Filters : %u\n", + pdata->hw_feat.l3l4_filter_num); + + /* HW Feature Register2 */ + XLGMAC_PR("Number of MTL Receive Queues : %u\n", + pdata->hw_feat.rx_q_cnt); + XLGMAC_PR("Number of MTL Transmit Queues : %u\n", + pdata->hw_feat.tx_q_cnt); + XLGMAC_PR("Number of DMA Receive Channels : %u\n", + pdata->hw_feat.rx_ch_cnt); + XLGMAC_PR("Number of DMA Transmit Channels : %u\n", + pdata->hw_feat.tx_ch_cnt); + + switch (pdata->hw_feat.pps_out_num) { + case 0: + str = "No PPS output"; + break; + case 1: + str = "1 PPS output"; + break; + case 2: + str = "2 PPS output"; + break; + case 3: + str = "3 PPS output"; + break; + case 4: + str = "4 PPS output"; + break; + default: + str = "RESERVED"; + } + XLGMAC_PR("Number of PPS Outputs : %s\n", str); + + switch (pdata->hw_feat.aux_snap_num) { + case 0: + str = "No auxiliary input"; + break; + case 1: + str = "1 auxiliary input"; + break; + case 2: + str = "2 auxiliary input"; + break; + case 3: + str = "3 auxiliary input"; + break; + case 4: + str = "4 auxiliary input"; + break; + default: + str = "RESERVED"; + } + XLGMAC_PR("Number of Auxiliary Snapshot Inputs : %s", str); + + XLGMAC_PR("\n"); + XLGMAC_PR("=====================================================\n"); + XLGMAC_PR("\n"); +} diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c new file mode 100644 index 0000000000000000000000000000000000000000..e9672b1f99680578af142c3ae25582fa2b88fc72 --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c @@ -0,0 +1,644 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. 
+ */ + +#include "dwc-xlgmac.h" +#include "dwc-xlgmac-reg.h" + +static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata, + struct xlgmac_desc_data *desc_data) +{ + if (desc_data->skb_dma) { + if (desc_data->mapped_as_page) { + dma_unmap_page(pdata->dev, desc_data->skb_dma, + desc_data->skb_dma_len, DMA_TO_DEVICE); + } else { + dma_unmap_single(pdata->dev, desc_data->skb_dma, + desc_data->skb_dma_len, DMA_TO_DEVICE); + } + desc_data->skb_dma = 0; + desc_data->skb_dma_len = 0; + } + + if (desc_data->skb) { + dev_kfree_skb_any(desc_data->skb); + desc_data->skb = NULL; + } + + if (desc_data->rx.hdr.pa.pages) + put_page(desc_data->rx.hdr.pa.pages); + + if (desc_data->rx.hdr.pa_unmap.pages) { + dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma, + desc_data->rx.hdr.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(desc_data->rx.hdr.pa_unmap.pages); + } + + if (desc_data->rx.buf.pa.pages) + put_page(desc_data->rx.buf.pa.pages); + + if (desc_data->rx.buf.pa_unmap.pages) { + dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma, + desc_data->rx.buf.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(desc_data->rx.buf.pa_unmap.pages); + } + + memset(&desc_data->tx, 0, sizeof(desc_data->tx)); + memset(&desc_data->rx, 0, sizeof(desc_data->rx)); + + desc_data->mapped_as_page = 0; + + if (desc_data->state_saved) { + desc_data->state_saved = 0; + desc_data->state.skb = NULL; + desc_data->state.len = 0; + desc_data->state.error = 0; + } +} + +static void xlgmac_free_ring(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring) +{ + struct xlgmac_desc_data *desc_data; + unsigned int i; + + if (!ring) + return; + + if (ring->desc_data_head) { + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = XLGMAC_GET_DESC_DATA(ring, i); + xlgmac_unmap_desc_data(pdata, desc_data); + } + + kfree(ring->desc_data_head); + ring->desc_data_head = NULL; + } + + if (ring->rx_hdr_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, + ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_hdr_pa.pages); + + ring->rx_hdr_pa.pages = NULL; + ring->rx_hdr_pa.pages_len = 0; + ring->rx_hdr_pa.pages_offset = 0; + ring->rx_hdr_pa.pages_dma = 0; + } + + if (ring->rx_buf_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, + ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_buf_pa.pages); + + ring->rx_buf_pa.pages = NULL; + ring->rx_buf_pa.pages_len = 0; + ring->rx_buf_pa.pages_offset = 0; + ring->rx_buf_pa.pages_dma = 0; + } + + if (ring->dma_desc_head) { + dma_free_coherent(pdata->dev, + (sizeof(struct xlgmac_dma_desc) * + ring->dma_desc_count), + ring->dma_desc_head, + ring->dma_desc_head_addr); + ring->dma_desc_head = NULL; + } +} + +static int xlgmac_init_ring(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + unsigned int dma_desc_count) +{ + if (!ring) + return 0; + + /* Descriptors */ + ring->dma_desc_count = dma_desc_count; + ring->dma_desc_head = dma_alloc_coherent(pdata->dev, + (sizeof(struct xlgmac_dma_desc) * + dma_desc_count), + &ring->dma_desc_head_addr, + GFP_KERNEL); + if (!ring->dma_desc_head) + return -ENOMEM; + + /* Array of descriptor data */ + ring->desc_data_head = kcalloc(dma_desc_count, + sizeof(struct xlgmac_desc_data), + GFP_KERNEL); + if (!ring->desc_data_head) + return -ENOMEM; + + netif_dbg(pdata, drv, pdata->netdev, + "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n", + ring->dma_desc_head, + &ring->dma_desc_head_addr, + ring->desc_data_head); + + return 0; +} + +static void 
xlgmac_free_rings(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + + if (!pdata->channel_head) + return; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + xlgmac_free_ring(pdata, channel->tx_ring); + xlgmac_free_ring(pdata, channel->rx_ring); + } +} + +static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + int ret; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", + channel->name); + + ret = xlgmac_init_ring(pdata, channel->tx_ring, + pdata->tx_desc_count); + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Tx ring\n"); + goto err_init_ring; + } + + netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n", + channel->name); + + ret = xlgmac_init_ring(pdata, channel->rx_ring, + pdata->rx_desc_count); + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Rx ring\n"); + goto err_init_ring; + } + } + + return 0; + +err_init_ring: + xlgmac_free_rings(pdata); + + return ret; +} + +static void xlgmac_free_channels(struct xlgmac_pdata *pdata) +{ + if (!pdata->channel_head) + return; + + kfree(pdata->channel_head->tx_ring); + pdata->channel_head->tx_ring = NULL; + + kfree(pdata->channel_head->rx_ring); + pdata->channel_head->rx_ring = NULL; + + kfree(pdata->channel_head); + + pdata->channel_head = NULL; + pdata->channel_count = 0; +} + +static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel_head, *channel; + struct xlgmac_ring *tx_ring, *rx_ring; + int ret = -ENOMEM; + unsigned int i; + + channel_head = kcalloc(pdata->channel_count, + sizeof(struct xlgmac_channel), GFP_KERNEL); + if (!channel_head) + return ret; + + netif_dbg(pdata, drv, pdata->netdev, + "channel_head=%p\n", channel_head); + + tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring), + GFP_KERNEL); + if (!tx_ring) + goto err_tx_ring; + + rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring), + GFP_KERNEL); + if (!rx_ring) + goto err_rx_ring; + + for (i = 0, channel = channel_head; i < pdata->channel_count; + i++, channel++) { + snprintf(channel->name, sizeof(channel->name), "channel-%u", i); + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_regs = pdata->mac_regs + DMA_CH_BASE + + (DMA_CH_INC * i); + + if (pdata->per_channel_irq) { + /* Get the per DMA interrupt */ + ret = pdata->channel_irq[i]; + if (ret < 0) { + netdev_err(pdata->netdev, + "get_irq %u failed\n", + i + 1); + goto err_irq; + } + channel->dma_irq = ret; + } + + if (i < pdata->tx_ring_count) + channel->tx_ring = tx_ring++; + + if (i < pdata->rx_ring_count) + channel->rx_ring = rx_ring++; + + netif_dbg(pdata, drv, pdata->netdev, + "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n", + channel->name, channel->dma_regs, + channel->tx_ring, channel->rx_ring); + } + + pdata->channel_head = channel_head; + + return 0; + +err_irq: + kfree(rx_ring); + +err_rx_ring: + kfree(tx_ring); + +err_tx_ring: + kfree(channel_head); + + return ret; +} + +static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata) +{ + xlgmac_free_rings(pdata); + + xlgmac_free_channels(pdata); +} + +static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata) +{ + int ret; + + ret = xlgmac_alloc_channels(pdata); + if (ret) + goto err_alloc; + + ret = xlgmac_alloc_rings(pdata); + if (ret) + goto err_alloc; + + return 0; + +err_alloc: +
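/* A failure in either allocation step lands here: tear down both + * rings and channels together rather than tracking how far each + * loop got. + */ +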
xlgmac_free_channels_and_rings(pdata); + + return ret; +} + +static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata, + struct xlgmac_page_alloc *pa, + gfp_t gfp, int order) +{ + struct page *pages = NULL; + dma_addr_t pages_dma; + + /* Try to obtain pages, decreasing order if necessary */ + gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN; + while (order >= 0) { + pages = alloc_pages(gfp, order); + if (pages) + break; + + order--; + } + if (!pages) + return -ENOMEM; + + /* Map the pages */ + pages_dma = dma_map_page(pdata->dev, pages, 0, + PAGE_SIZE << order, DMA_FROM_DEVICE); + if (dma_mapping_error(pdata->dev, pages_dma)) { + put_page(pages); + return -ENOMEM; + } + + pa->pages = pages; + pa->pages_len = PAGE_SIZE << order; + pa->pages_offset = 0; + pa->pages_dma = pages_dma; + + return 0; +} + +static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd, + struct xlgmac_page_alloc *pa, + unsigned int len) +{ + get_page(pa->pages); + bd->pa = *pa; + + bd->dma_base = pa->pages_dma; + bd->dma_off = pa->pages_offset; + bd->dma_len = len; + + pa->pages_offset += len; + if ((pa->pages_offset + len) > pa->pages_len) { + /* This data descriptor is responsible for unmapping page(s) */ + bd->pa_unmap = *pa; + + /* Get a new allocation next time */ + pa->pages = NULL; + pa->pages_len = 0; + pa->pages_offset = 0; + pa->pages_dma = 0; + } +} + +static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + struct xlgmac_desc_data *desc_data) +{ + int order, ret; + + if (!ring->rx_hdr_pa.pages) { + ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa, + GFP_ATOMIC, 0); + if (ret) + return ret; + } + + if (!ring->rx_buf_pa.pages) { + order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); + ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa, + GFP_ATOMIC, order); + if (ret) + return ret; + } + + /* Set up the header page info */ + xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa, + XLGMAC_SKB_ALLOC_SIZE); + + /* Set up the buffer page info */ + xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa, + pdata->rx_buf_size); + + return 0; +} + +static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + struct xlgmac_channel *channel; + struct xlgmac_ring *ring; + dma_addr_t dma_desc_addr; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + dma_desc = ring->dma_desc_head; + dma_desc_addr = ring->dma_desc_head_addr; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = XLGMAC_GET_DESC_DATA(ring, j); + + desc_data->dma_desc = dma_desc; + desc_data->dma_desc_addr = dma_desc_addr; + + dma_desc++; + dma_desc_addr += sizeof(struct xlgmac_dma_desc); + } + + ring->cur = 0; + ring->dirty = 0; + memset(&ring->tx, 0, sizeof(ring->tx)); + + hw_ops->tx_desc_init(channel); + } +} + +static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + struct xlgmac_channel *channel; + struct xlgmac_ring *ring; + dma_addr_t dma_desc_addr; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + dma_desc = ring->dma_desc_head; + dma_desc_addr = ring->dma_desc_head_addr; + + for (j = 0; j < ring->dma_desc_count; 
j++) { + desc_data = XLGMAC_GET_DESC_DATA(ring, j); + + desc_data->dma_desc = dma_desc; + desc_data->dma_desc_addr = dma_desc_addr; + + if (xlgmac_map_rx_buffer(pdata, ring, desc_data)) + break; + + dma_desc++; + dma_desc_addr += sizeof(struct xlgmac_dma_desc); + } + + ring->cur = 0; + ring->dirty = 0; + + hw_ops->rx_desc_init(channel); + } +} + +static int xlgmac_map_tx_skb(struct xlgmac_channel *channel, + struct sk_buff *skb) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->tx_ring; + unsigned int start_index, cur_index; + struct xlgmac_desc_data *desc_data; + unsigned int offset, datalen, len; + struct xlgmac_pkt_info *pkt_info; + struct skb_frag_struct *frag; + unsigned int tso, vlan; + dma_addr_t skb_dma; + unsigned int i; + + offset = 0; + start_index = ring->cur; + cur_index = ring->cur; + + pkt_info = &ring->pkt_info; + pkt_info->desc_count = 0; + pkt_info->length = 0; + + tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + /* Save space for a context descriptor if needed */ + if ((tso && (pkt_info->mss != ring->tx.cur_mss)) || + (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))) + cur_index++; + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + + if (tso) { + /* Map the TSO header */ + skb_dma = dma_map_single(pdata->dev, skb->data, + pkt_info->header_len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = pkt_info->header_len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb header: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, pkt_info->header_len); + + offset = pkt_info->header_len; + + pkt_info->length += pkt_info->header_len; + + cur_index++; + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + } + + /* Map the (remainder of the) packet */ + for (datalen = skb_headlen(skb) - offset; datalen; ) { + len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE); + + skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, + DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb data: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, len); + + datalen -= len; + offset += len; + + pkt_info->length += len; + + cur_index++; + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + netif_dbg(pdata, tx_queued, pdata->netdev, + "mapping frag %u\n", i); + + frag = &skb_shinfo(skb)->frags[i]; + offset = 0; + + for (datalen = skb_frag_size(frag); datalen; ) { + len = min_t(unsigned int, datalen, + XLGMAC_TX_MAX_BUF_SIZE); + + skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, + len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, + "skb_frag_dma_map failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = len; + desc_data->mapped_as_page = 1; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb frag: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, len); + + datalen -= len; + offset += len; + + pkt_info->length += len; + + 
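/* Advance to the next descriptor entry; a frag larger than + * XLGMAC_TX_MAX_BUF_SIZE spans several entries, one mapped + * piece per entry. + */ +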
cur_index++; + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + } + } + + /* Save the skb address in the last entry. We always have some data + * that has been mapped so desc_data is always advanced past the last + * piece of mapped data - use the entry pointed to by cur_index - 1. + */ + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1); + desc_data->skb = skb; + + /* Save the number of descriptor entries used */ + pkt_info->desc_count = cur_index - start_index; + + return pkt_info->desc_count; + +err_out: + while (start_index < cur_index) { + desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++); + xlgmac_unmap_desc_data(pdata, desc_data); + } + + return 0; +} + +void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops) +{ + desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings; + desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings; + desc_ops->map_tx_skb = xlgmac_map_tx_skb; + desc_ops->map_rx_buffer = xlgmac_map_rx_buffer; + desc_ops->unmap_desc_data = xlgmac_unmap_desc_data; + desc_ops->tx_desc_init = xlgmac_tx_desc_init; + desc_ops->rx_desc_init = xlgmac_rx_desc_init; +} diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..fde722136869a142ec8f103a8189d684c45c8e49 --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c @@ -0,0 +1,275 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. 
+ */ + +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> + +#include "dwc-xlgmac.h" +#include "dwc-xlgmac-reg.h" + +struct xlgmac_stats_desc { + char stat_string[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define XLGMAC_STAT(str, var) \ + { \ + str, \ + offsetof(struct xlgmac_pdata, stats.var), \ + } + +static const struct xlgmac_stats_desc xlgmac_gstring_stats[] = { + /* MMC TX counters */ + XLGMAC_STAT("tx_bytes", txoctetcount_gb), + XLGMAC_STAT("tx_bytes_good", txoctetcount_g), + XLGMAC_STAT("tx_packets", txframecount_gb), + XLGMAC_STAT("tx_packets_good", txframecount_g), + XLGMAC_STAT("tx_unicast_packets", txunicastframes_gb), + XLGMAC_STAT("tx_broadcast_packets", txbroadcastframes_gb), + XLGMAC_STAT("tx_broadcast_packets_good", txbroadcastframes_g), + XLGMAC_STAT("tx_multicast_packets", txmulticastframes_gb), + XLGMAC_STAT("tx_multicast_packets_good", txmulticastframes_g), + XLGMAC_STAT("tx_vlan_packets_good", txvlanframes_g), + XLGMAC_STAT("tx_64_byte_packets", tx64octets_gb), + XLGMAC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), + XLGMAC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), + XLGMAC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), + XLGMAC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + XLGMAC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), + XLGMAC_STAT("tx_underflow_errors", txunderflowerror), + XLGMAC_STAT("tx_pause_frames", txpauseframes), + + /* MMC RX counters */ + XLGMAC_STAT("rx_bytes", rxoctetcount_gb), + XLGMAC_STAT("rx_bytes_good", rxoctetcount_g), + XLGMAC_STAT("rx_packets", rxframecount_gb), + XLGMAC_STAT("rx_unicast_packets_good", rxunicastframes_g), + XLGMAC_STAT("rx_broadcast_packets_good", rxbroadcastframes_g), + XLGMAC_STAT("rx_multicast_packets_good", rxmulticastframes_g), + XLGMAC_STAT("rx_vlan_packets", rxvlanframes_gb), + XLGMAC_STAT("rx_64_byte_packets", rx64octets_gb), + XLGMAC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), + XLGMAC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), + XLGMAC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), + XLGMAC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + XLGMAC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), + XLGMAC_STAT("rx_undersize_packets_good", rxundersize_g), + XLGMAC_STAT("rx_oversize_packets_good", rxoversize_g), + XLGMAC_STAT("rx_crc_errors", rxcrcerror), + XLGMAC_STAT("rx_crc_errors_small_packets", rxrunterror), + XLGMAC_STAT("rx_crc_errors_giant_packets", rxjabbererror), + XLGMAC_STAT("rx_length_errors", rxlengtherror), + XLGMAC_STAT("rx_out_of_range_errors", rxoutofrangetype), + XLGMAC_STAT("rx_fifo_overflow_errors", rxfifooverflow), + XLGMAC_STAT("rx_watchdog_errors", rxwatchdogerror), + XLGMAC_STAT("rx_pause_frames", rxpauseframes), + + /* Extra counters */ + XLGMAC_STAT("tx_tso_packets", tx_tso_packets), + XLGMAC_STAT("rx_split_header_packets", rx_split_header_packets), + XLGMAC_STAT("tx_process_stopped", tx_process_stopped), + XLGMAC_STAT("rx_process_stopped", rx_process_stopped), + XLGMAC_STAT("tx_buffer_unavailable", tx_buffer_unavailable), + XLGMAC_STAT("rx_buffer_unavailable", rx_buffer_unavailable), + XLGMAC_STAT("fatal_bus_error", fatal_bus_error), + XLGMAC_STAT("tx_vlan_packets", tx_vlan_packets), + XLGMAC_STAT("rx_vlan_packets", rx_vlan_packets), + XLGMAC_STAT("napi_poll_isr", napi_poll_isr), + XLGMAC_STAT("napi_poll_txtimer", napi_poll_txtimer), +}; + +#define XLGMAC_STATS_COUNT ARRAY_SIZE(xlgmac_gstring_stats) + +static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo
*drvinfo) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + u32 ver = pdata->hw_feat.version; + u32 snpsver, devid, userver; + + strlcpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(pdata->dev), + sizeof(drvinfo->bus_info)); + /* S|SNPSVER: Synopsys-defined Version + * D|DEVID: Indicates the Device family + * U|USERVER: User-defined Version + */ + snpsver = XLGMAC_GET_REG_BITS(ver, MAC_VR_SNPSVER_POS, + MAC_VR_SNPSVER_LEN); + devid = XLGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS, + MAC_VR_DEVID_LEN); + userver = XLGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS, + MAC_VR_USERVER_LEN); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "S.D.U: %x.%x.%x", snpsver, devid, userver); +} + +static u32 xlgmac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + + return pdata->msg_enable; +} + +static void xlgmac_ethtool_set_msglevel(struct net_device *netdev, + u32 msglevel) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + + pdata->msg_enable = msglevel; +} + +static void xlgmac_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *channel) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + + channel->max_rx = XLGMAC_MAX_DMA_CHANNELS; + channel->max_tx = XLGMAC_MAX_DMA_CHANNELS; + channel->rx_count = pdata->rx_q_count; + channel->tx_count = pdata->tx_q_count; +} + +static int xlgmac_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + + memset(ec, 0, sizeof(struct ethtool_coalesce)); + ec->rx_coalesce_usecs = pdata->rx_usecs; + ec->rx_max_coalesced_frames = pdata->rx_frames; + ec->tx_max_coalesced_frames = pdata->tx_frames; + + return 0; +} + +static int xlgmac_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + unsigned int rx_frames, rx_riwt, rx_usecs; + unsigned int tx_frames; + + /* Check for not supported parameters */ + if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) || + (ec->tx_coalesce_usecs) || (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) || + (ec->stats_block_coalesce_usecs) || (ec->pkt_rate_low) || + (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) || + (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) || + (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) || + (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_max_coalesced_frames_high) || + (ec->rate_sample_interval)) + return -EOPNOTSUPP; + + rx_usecs = ec->rx_coalesce_usecs; + rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs); + rx_frames = ec->rx_max_coalesced_frames; + tx_frames = ec->tx_max_coalesced_frames; + + if ((rx_riwt > XLGMAC_MAX_DMA_RIWT) || + (rx_riwt < XLGMAC_MIN_DMA_RIWT) || + (rx_frames > pdata->rx_desc_count)) + return -EINVAL; + + if (tx_frames > pdata->tx_desc_count) + return -EINVAL; + + pdata->rx_riwt = rx_riwt; + pdata->rx_usecs = rx_usecs; + pdata->rx_frames = rx_frames; + hw_ops->config_rx_coalesce(pdata); + + pdata->tx_frames = tx_frames; + hw_ops->config_tx_coalesce(pdata); + + return 0; +} + +static void xlgmac_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch 
(stringset) { + case ETH_SS_STATS: + for (i = 0; i < XLGMAC_STATS_COUNT; i++) { + memcpy(data, xlgmac_gstring_stats[i].stat_string, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +static int xlgmac_ethtool_get_sset_count(struct net_device *netdev, + int stringset) +{ + int ret; + + switch (stringset) { + case ETH_SS_STATS: + ret = XLGMAC_STATS_COUNT; + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static void xlgmac_ethtool_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + u8 *stat; + int i; + + pdata->hw_ops.read_mmc_stats(pdata); + for (i = 0; i < XLGMAC_STATS_COUNT; i++) { + stat = (u8 *)pdata + xlgmac_gstring_stats[i].stat_offset; + *data++ = *(u64 *)stat; + } +} + +static const struct ethtool_ops xlgmac_ethtool_ops = { + .get_drvinfo = xlgmac_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = xlgmac_ethtool_get_msglevel, + .set_msglevel = xlgmac_ethtool_set_msglevel, + .get_channels = xlgmac_ethtool_get_channels, + .get_coalesce = xlgmac_ethtool_get_coalesce, + .set_coalesce = xlgmac_ethtool_set_coalesce, + .get_strings = xlgmac_ethtool_get_strings, + .get_sset_count = xlgmac_ethtool_get_sset_count, + .get_ethtool_stats = xlgmac_ethtool_get_ethtool_stats, +}; + +const struct ethtool_ops *xlgmac_get_ethtool_ops(void) +{ + return &xlgmac_ethtool_ops; +} diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c new file mode 100644 index 0000000000000000000000000000000000000000..458a7844260aadfeabdeebe05ef42db155bd0287 --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c @@ -0,0 +1,3147 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. 
+ */ + +#include <linux/phy.h> +#include <linux/mdio.h> +#include <linux/clk.h> +#include <linux/bitrev.h> +#include <linux/crc32.h> +#include <linux/dcbnl.h> + +#include "dwc-xlgmac.h" +#include "dwc-xlgmac-reg.h" + +static int xlgmac_tx_complete(struct xlgmac_dma_desc *dma_desc) +{ + return !XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN); +} + +static int xlgmac_disable_rx_csum(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_RCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, + MAC_RCR_IPC_LEN, 0); + writel(regval, pdata->mac_regs + MAC_RCR); + + return 0; +} + +static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_RCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_IPC_POS, + MAC_RCR_IPC_LEN, 1); + writel(regval, pdata->mac_regs + MAC_RCR); + + return 0; +} + +static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr) +{ + unsigned int mac_addr_hi, mac_addr_lo; + + mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); + mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | + (addr[1] << 8) | (addr[0] << 0); + + writel(mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); + writel(mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); + + return 0; +} + +static void xlgmac_set_mac_reg(struct xlgmac_pdata *pdata, + struct netdev_hw_addr *ha, + unsigned int *mac_reg) +{ + unsigned int mac_addr_hi, mac_addr_lo; + u8 *mac_addr; + + mac_addr_lo = 0; + mac_addr_hi = 0; + + if (ha) { + mac_addr = (u8 *)&mac_addr_lo; + mac_addr[0] = ha->addr[0]; + mac_addr[1] = ha->addr[1]; + mac_addr[2] = ha->addr[2]; + mac_addr[3] = ha->addr[3]; + mac_addr = (u8 *)&mac_addr_hi; + mac_addr[0] = ha->addr[4]; + mac_addr[1] = ha->addr[5]; + + netif_dbg(pdata, drv, pdata->netdev, + "adding mac address %pM at %#x\n", + ha->addr, *mac_reg); + + mac_addr_hi = XLGMAC_SET_REG_BITS(mac_addr_hi, + MAC_MACA1HR_AE_POS, + MAC_MACA1HR_AE_LEN, + 1); + } + + writel(mac_addr_hi, pdata->mac_regs + *mac_reg); + *mac_reg += MAC_MACA_INC; + writel(mac_addr_lo, pdata->mac_regs + *mac_reg); + *mac_reg += MAC_MACA_INC; +} + +static int xlgmac_enable_rx_vlan_stripping(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_VLANTR); + /* Put the VLAN tag in the Rx descriptor */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS, + MAC_VLANTR_EVLRXS_LEN, 1); + /* Don't check the VLAN type */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS, + MAC_VLANTR_DOVLTC_LEN, 1); + /* Check only C-TAG (0x8100) packets */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS, + MAC_VLANTR_ERSVLM_LEN, 0); + /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS, + MAC_VLANTR_ESVL_LEN, 0); + /* Enable VLAN tag stripping */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, + MAC_VLANTR_EVLS_LEN, 0x3); + writel(regval, pdata->mac_regs + MAC_VLANTR); + + return 0; +} + +static int xlgmac_disable_rx_vlan_stripping(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_VLANTR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, + MAC_VLANTR_EVLS_LEN, 0); + writel(regval, pdata->mac_regs + MAC_VLANTR); + + return 0; +} + +static int xlgmac_enable_rx_vlan_filtering(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_PFR); + /* Enable VLAN filtering */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, + MAC_PFR_VTFE_LEN, 1); + writel(regval, pdata->mac_regs + MAC_PFR); + + regval =
readl(pdata->mac_regs + MAC_VLANTR); + /* Enable VLAN Hash Table filtering */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS, + MAC_VLANTR_VTHM_LEN, 1); + /* Disable VLAN tag inverse matching */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS, + MAC_VLANTR_VTIM_LEN, 0); + /* Only filter on the lower 12-bits of the VLAN tag */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS, + MAC_VLANTR_ETV_LEN, 1); + /* In order for the VLAN Hash Table filtering to be effective, + * the VLAN tag identifier in the VLAN Tag Register must not + * be zero. Set the VLAN tag identifier to "1" to enable the + * VLAN Hash Table filtering. This implies that a VLAN tag of + * 1 will always pass filtering. + */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, + MAC_VLANTR_VL_LEN, 1); + writel(regval, pdata->mac_regs + MAC_VLANTR); + + return 0; +} + +static int xlgmac_disable_rx_vlan_filtering(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_PFR); + /* Disable VLAN filtering */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, + MAC_PFR_VTFE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_PFR); + + return 0; +} + +static u32 xlgmac_vid_crc32_le(__le16 vid_le) +{ + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + u32 poly = 0xedb88320; + u32 crc = ~0; + u32 temp = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= poly; + } + + return crc; +} + +static int xlgmac_update_vlan_hash_table(struct xlgmac_pdata *pdata) +{ + u16 vlan_hash_table = 0; + __le16 vid_le; + u32 regval; + u32 crc; + u16 vid; + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xlgmac_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } + + regval = readl(pdata->mac_regs + MAC_VLANHTR); + /* Set the VLAN Hash Table filtering register */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS, + MAC_VLANHTR_VLHT_LEN, vlan_hash_table); + writel(regval, pdata->mac_regs + MAC_VLANHTR); + + return 0; +} + +static int xlgmac_set_promiscuous_mode(struct xlgmac_pdata *pdata, + unsigned int enable) +{ + unsigned int val = enable ? 1 : 0; + u32 regval; + + regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), + MAC_PFR_PR_POS, MAC_PFR_PR_LEN); + if (regval == val) + return 0; + + netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", + enable ? "entering" : "leaving"); + + regval = readl(pdata->mac_regs + MAC_PFR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, + MAC_PFR_PR_LEN, val); + writel(regval, pdata->mac_regs + MAC_PFR); + + /* Hardware will still perform VLAN filtering in promiscuous mode */ + if (enable) { + xlgmac_disable_rx_vlan_filtering(pdata); + } else { + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xlgmac_enable_rx_vlan_filtering(pdata); + } + + return 0; +} + +static int xlgmac_set_all_multicast_mode(struct xlgmac_pdata *pdata, + unsigned int enable) +{ + unsigned int val = enable ? 1 : 0; + u32 regval; + + regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_PFR), + MAC_PFR_PM_POS, MAC_PFR_PM_LEN); + if (regval == val) + return 0; + + netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", + enable ? 
"entering" : "leaving"); + + regval = readl(pdata->mac_regs + MAC_PFR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, + MAC_PFR_PM_LEN, val); + writel(regval, pdata->mac_regs + MAC_PFR); + + return 0; +} + +static void xlgmac_set_mac_addn_addrs(struct xlgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + unsigned int addn_macs; + unsigned int mac_reg; + + mac_reg = MAC_MACA1HR; + addn_macs = pdata->hw_feat.addn_mac; + + if (netdev_uc_count(netdev) > addn_macs) { + xlgmac_set_promiscuous_mode(pdata, 1); + } else { + netdev_for_each_uc_addr(ha, netdev) { + xlgmac_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + + if (netdev_mc_count(netdev) > addn_macs) { + xlgmac_set_all_multicast_mode(pdata, 1); + } else { + netdev_for_each_mc_addr(ha, netdev) { + xlgmac_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + } + } + + /* Clear remaining additional MAC address entries */ + while (addn_macs--) + xlgmac_set_mac_reg(pdata, NULL, &mac_reg); +} + +static void xlgmac_set_mac_hash_table(struct xlgmac_pdata *pdata) +{ + unsigned int hash_table_shift, hash_table_count; + u32 hash_table[XLGMAC_MAC_HASH_TABLE_SIZE]; + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + unsigned int hash_reg; + unsigned int i; + u32 crc; + + hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); + hash_table_count = pdata->hw_feat.hash_table_size / 32; + memset(hash_table, 0, sizeof(hash_table)); + + /* Build the MAC Hash Table register values */ + netdev_for_each_uc_addr(ha, netdev) { + crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); + crc >>= hash_table_shift; + hash_table[crc >> 5] |= (1 << (crc & 0x1f)); + } + + netdev_for_each_mc_addr(ha, netdev) { + crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); + crc >>= hash_table_shift; + hash_table[crc >> 5] |= (1 << (crc & 0x1f)); + } + + /* Set the MAC Hash Table registers */ + hash_reg = MAC_HTR0; + for (i = 0; i < hash_table_count; i++) { + writel(hash_table[i], pdata->mac_regs + hash_reg); + hash_reg += MAC_HTR_INC; + } +} + +static int xlgmac_add_mac_addresses(struct xlgmac_pdata *pdata) +{ + if (pdata->hw_feat.hash_table_size) + xlgmac_set_mac_hash_table(pdata); + else + xlgmac_set_mac_addn_addrs(pdata); + + return 0; +} + +static void xlgmac_config_mac_address(struct xlgmac_pdata *pdata) +{ + u32 regval; + + xlgmac_set_mac_address(pdata, pdata->netdev->dev_addr); + + /* Filtering is done using perfect filtering and hash filtering */ + if (pdata->hw_feat.hash_table_size) { + regval = readl(pdata->mac_regs + MAC_PFR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, + MAC_PFR_HPF_LEN, 1); + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, + MAC_PFR_HUC_LEN, 1); + regval = XLGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS, + MAC_PFR_HMC_LEN, 1); + writel(regval, pdata->mac_regs + MAC_PFR); + } +} + +static void xlgmac_config_jumbo_enable(struct xlgmac_pdata *pdata) +{ + unsigned int val; + u32 regval; + + val = (pdata->netdev->mtu > XLGMAC_STD_PACKET_MTU) ? 
1 : 0; + + regval = readl(pdata->mac_regs + MAC_RCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_JE_POS, + MAC_RCR_JE_LEN, val); + writel(regval, pdata->mac_regs + MAC_RCR); +} + +static void xlgmac_config_checksum_offload(struct xlgmac_pdata *pdata) +{ + if (pdata->netdev->features & NETIF_F_RXCSUM) + xlgmac_enable_rx_csum(pdata); + else + xlgmac_disable_rx_csum(pdata); +} + +static void xlgmac_config_vlan_support(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_VLANIR); + /* Indicate that VLAN Tx CTAGs come from context descriptors */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS, + MAC_VLANIR_CSVL_LEN, 0); + regval = XLGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS, + MAC_VLANIR_VLTI_LEN, 1); + writel(regval, pdata->mac_regs + MAC_VLANIR); + + /* Set the current VLAN Hash Table register value */ + xlgmac_update_vlan_hash_table(pdata); + + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xlgmac_enable_rx_vlan_filtering(pdata); + else + xlgmac_disable_rx_vlan_filtering(pdata); + + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + xlgmac_enable_rx_vlan_stripping(pdata); + else + xlgmac_disable_rx_vlan_stripping(pdata); +} + +static int xlgmac_config_rx_mode(struct xlgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + unsigned int pr_mode, am_mode; + + pr_mode = ((netdev->flags & IFF_PROMISC) != 0); + am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); + + xlgmac_set_promiscuous_mode(pdata, pr_mode); + xlgmac_set_all_multicast_mode(pdata, am_mode); + + xlgmac_add_mac_addresses(pdata); + + return 0; +} + +static void xlgmac_prepare_tx_stop(struct xlgmac_pdata *pdata, + struct xlgmac_channel *channel) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned long tx_timeout; + unsigned int tx_status; + + /* Calculate the status register to read and the position within */ + if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) + + DMA_DSR0_TPS_START; + } else { + tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... 
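+ * The loop below re-reads the DMA debug status every 500-1000 us + * and gives up after XLGMAC_DMA_STOP_TIMEOUT seconds.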
+ */ + tx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, tx_timeout)) { + tx_status = readl(pdata->mac_regs + tx_dsr); + tx_status = XLGMAC_GET_REG_BITS(tx_status, tx_pos, + DMA_DSR_TPS_LEN); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, tx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Tx DMA channel %u to stop\n", + channel->queue_index); +} + +static void xlgmac_enable_tx(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + /* Enable each Tx DMA channel */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, + DMA_CH_TCR_ST_LEN, 1); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + } + + /* Enable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, + MTL_Q_TQOMR_TXQEN_LEN, + MTL_Q_ENABLED); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + /* Enable MAC Tx */ + regval = readl(pdata->mac_regs + MAC_TCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, + MAC_TCR_TE_LEN, 1); + writel(regval, pdata->mac_regs + MAC_TCR); +} + +static void xlgmac_disable_tx(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xlgmac_prepare_tx_stop(pdata, channel); + } + + /* Disable MAC Tx */ + regval = readl(pdata->mac_regs + MAC_TCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_TE_POS, + MAC_TCR_TE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_TCR); + + /* Disable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, + MTL_Q_TQOMR_TXQEN_LEN, 0); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + /* Disable each Tx DMA channel */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, + DMA_CH_TCR_ST_LEN, 0); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + } +} + +static void xlgmac_prepare_rx_stop(struct xlgmac_pdata *pdata, + unsigned int queue) +{ + unsigned int rx_status, prxq, rxqsts; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... 
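+ * As on the Tx side, poll every 500-1000 us and give up after + * XLGMAC_DMA_STOP_TIMEOUT seconds.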
+ */ + rx_timeout = jiffies + (XLGMAC_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, rx_timeout)) { + rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); + prxq = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, + MTL_Q_RQDR_PRXQ_LEN); + rxqsts = XLGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, + MTL_Q_RQDR_RXQSTS_LEN); + if ((prxq == 0) && (rxqsts == 0)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, rx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + +static void xlgmac_enable_rx(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int regval, i; + + /* Enable each Rx DMA channel */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 1); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); + } + + /* Enable each Rx queue */ + regval = 0; + for (i = 0; i < pdata->rx_q_count; i++) + regval |= (0x02 << (i << 1)); + writel(regval, pdata->mac_regs + MAC_RQC0R); + + /* Enable MAC Rx */ + regval = readl(pdata->mac_regs + MAC_RCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, + MAC_RCR_DCRCC_LEN, 1); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, + MAC_RCR_CST_LEN, 1); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, + MAC_RCR_ACS_LEN, 1); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, + MAC_RCR_RE_LEN, 1); + writel(regval, pdata->mac_regs + MAC_RCR); +} + +static void xlgmac_disable_rx(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + /* Disable MAC Rx */ + regval = readl(pdata->mac_regs + MAC_RCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_DCRCC_POS, + MAC_RCR_DCRCC_LEN, 0); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_CST_POS, + MAC_RCR_CST_LEN, 0); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_ACS_POS, + MAC_RCR_ACS_LEN, 0); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_RE_POS, + MAC_RCR_RE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_RCR); + + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < pdata->rx_q_count; i++) + xlgmac_prepare_rx_stop(pdata, i); + + /* Disable each Rx queue */ + writel(0, pdata->mac_regs + MAC_RQC0R); + + /* Disable each Rx DMA channel */ + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 0); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); + } +} + +static void xlgmac_tx_start_xmit(struct xlgmac_channel *channel, + struct xlgmac_ring *ring) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_desc_data *desc_data; + + /* Make sure everything is written before the register write */ + wmb(); + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor + */ + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); + writel(lower_32_bits(desc_data->dma_desc_addr), + XLGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); + + /* Start the Tx timer */ + if (pdata->tx_usecs && !channel->tx_timer_active) { + channel->tx_timer_active = 1; + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); + } + + ring->tx.xmit_more = 0; +} + +static void xlgmac_dev_xmit(struct 
xlgmac_channel *channel) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->tx_ring; + unsigned int tso_context, vlan_context; + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + struct xlgmac_pkt_info *pkt_info; + unsigned int csum, tso, vlan; + int start_index = ring->cur; + int cur_index = ring->cur; + unsigned int tx_set_ic; + int i; + + pkt_info = &ring->pkt_info; + csum = XLGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); + tso = XLGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + if (tso && (pkt_info->mss != ring->tx.cur_mss)) + tso_context = 1; + else + tso_context = 0; + + if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) + vlan_context = 1; + else + vlan_context = 0; + + /* Determine if an interrupt should be generated for this Tx: + * Interrupt: + * - Tx frame count exceeds the frame count setting + * - Addition of Tx frame count to the frame count since the + * last interrupt was set exceeds the frame count setting + * No interrupt: + * - No frame count setting specified (ethtool -C ethX tx-frames 0) + * - Addition of Tx frame count to the frame count since the + * last interrupt was set does not exceed the frame count setting + */ + ring->coalesce_count += pkt_info->tx_packets; + if (!pdata->tx_frames) + tx_set_ic = 0; + else if (pkt_info->tx_packets > pdata->tx_frames) + tx_set_ic = 1; + else if ((ring->coalesce_count % pdata->tx_frames) < + pkt_info->tx_packets) + tx_set_ic = 1; + else + tx_set_ic = 0; + + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Create a context descriptor if this is a TSO pkt_info */ + if (tso_context || vlan_context) { + if (tso_context) { + netif_dbg(pdata, tx_queued, pdata->netdev, + "TSO context descriptor, mss=%u\n", + pkt_info->mss); + + /* Set the MSS size */ + dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc2, + TX_CONTEXT_DESC2_MSS_POS, + TX_CONTEXT_DESC2_MSS_LEN, + pkt_info->mss); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, + 1); + + /* Indicate this descriptor contains the MSS */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_CONTEXT_DESC3_TCMSSV_POS, + TX_CONTEXT_DESC3_TCMSSV_LEN, + 1); + + ring->tx.cur_mss = pkt_info->mss; + } + + if (vlan_context) { + netif_dbg(pdata, tx_queued, pdata->netdev, + "VLAN context descriptor, ctag=%u\n", + pkt_info->vlan_ctag); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, + 1); + + /* Set the VLAN tag */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_CONTEXT_DESC3_VT_POS, + TX_CONTEXT_DESC3_VT_LEN, + pkt_info->vlan_ctag); + + /* Indicate this descriptor contains the VLAN tag */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_CONTEXT_DESC3_VLTV_POS, + TX_CONTEXT_DESC3_VLTV_LEN, + 1); + + ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; + } + + cur_index++; + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + } + + /* Update buffer address (for TSO this is the header) */ + dma_desc->desc0 = 
cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc2, + TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, + desc_data->skb_dma_len); + + /* VLAN tag insertion check */ + if (vlan) { + dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc2, + TX_NORMAL_DESC2_VTIR_POS, + TX_NORMAL_DESC2_VTIR_LEN, + TX_NORMAL_DESC2_VLAN_INSERT); + pdata->stats.tx_vlan_packets++; + } + + /* Timestamp enablement check */ + if (XLGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_PTP_POS, + TX_PACKET_ATTRIBUTES_PTP_LEN)) + dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc2, + TX_NORMAL_DESC2_TTSE_POS, + TX_NORMAL_DESC2_TTSE_LEN, + 1); + + /* Mark it as First Descriptor */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_FD_POS, + TX_NORMAL_DESC3_FD_LEN, + 1); + + /* Mark it as a NORMAL descriptor */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, + 0); + + /* Set OWN bit if not the first descriptor */ + if (cur_index != start_index) + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, + 1); + + if (tso) { + /* Enable TSO */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_TSE_POS, + TX_NORMAL_DESC3_TSE_LEN, 1); + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_TCPPL_POS, + TX_NORMAL_DESC3_TCPPL_LEN, + pkt_info->tcp_payload_len); + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_TCPHDRLEN_POS, + TX_NORMAL_DESC3_TCPHDRLEN_LEN, + pkt_info->tcp_header_len / 4); + + pdata->stats.tx_tso_packets++; + } else { + /* Enable CRC and Pad Insertion */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_CPC_POS, + TX_NORMAL_DESC3_CPC_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, + 0x3); + + /* Set the total length to be transmitted */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_FL_POS, + TX_NORMAL_DESC3_FL_LEN, + pkt_info->length); + } + + for (i = cur_index - start_index + 1; i < pkt_info->desc_count; i++) { + cur_index++; + desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Update buffer address */ + dma_desc->desc0 = + cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = + cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc2, + TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, + desc_data->skb_dma_len); + + /* Set OWN bit */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + /* Mark it as NORMAL descriptor */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, + 0x3); + } + + /* Set LAST bit for the last descriptor */ + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN, 1); + + /* Set IC 
bit based on Tx coalescing settings */
+	if (tx_set_ic)
+		dma_desc->desc2 = XLGMAC_SET_REG_BITS_LE(
+					dma_desc->desc2,
+					TX_NORMAL_DESC2_IC_POS,
+					TX_NORMAL_DESC2_IC_LEN, 1);
+
+	/* Save the Tx info to report back during cleanup */
+	desc_data->tx.packets = pkt_info->tx_packets;
+	desc_data->tx.bytes = pkt_info->tx_bytes;
+
+	/* In case the Tx DMA engine is running, make sure everything
+	 * is written to the descriptor(s) before setting the OWN bit
+	 * for the first descriptor
+	 */
+	dma_wmb();
+
+	/* Set OWN bit for the first descriptor */
+	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+	dma_desc = desc_data->dma_desc;
+	dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE(
+				dma_desc->desc3,
+				TX_NORMAL_DESC3_OWN_POS,
+				TX_NORMAL_DESC3_OWN_LEN, 1);
+
+	if (netif_msg_tx_queued(pdata))
+		xlgmac_dump_tx_desc(pdata, ring, start_index,
+				    pkt_info->desc_count, 1);
+
+	/* Make sure ownership is written to the descriptor */
+	smp_wmb();
+
+	ring->cur = cur_index + 1;
+	if (!pkt_info->skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+						   channel->queue_index)))
+		xlgmac_tx_start_xmit(channel, ring);
+	else
+		ring->tx.xmit_more = 1;
+
+	XLGMAC_PR("%s: descriptors %u to %u written\n",
+		  channel->name, start_index & (ring->dma_desc_count - 1),
+		  (ring->cur - 1) & (ring->dma_desc_count - 1));
+}
+
+static void xlgmac_get_rx_tstamp(struct xlgmac_pkt_info *pkt_info,
+				 struct xlgmac_dma_desc *dma_desc)
+{
+	u32 tsa, tsd;
+	u64 nsec;
+
+	tsa = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+				     RX_CONTEXT_DESC3_TSA_POS,
+				     RX_CONTEXT_DESC3_TSA_LEN);
+	tsd = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3,
+				     RX_CONTEXT_DESC3_TSD_POS,
+				     RX_CONTEXT_DESC3_TSD_LEN);
+	if (tsa && !tsd) {
+		nsec = le32_to_cpu(dma_desc->desc1);
+		nsec <<= 32;
+		nsec |= le32_to_cpu(dma_desc->desc0);
+		if (nsec != 0xffffffffffffffffULL) {
+			pkt_info->rx_tstamp = nsec;
+			pkt_info->attributes = XLGMAC_SET_REG_BITS(
+					pkt_info->attributes,
+					RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS,
+					RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN,
+					1);
+		}
+	}
+}
+
+static void xlgmac_tx_desc_reset(struct xlgmac_desc_data *desc_data)
+{
+	struct xlgmac_dma_desc *dma_desc = desc_data->dma_desc;
+
+	/* Reset the Tx descriptor
+	 * Set buffer 1 (lo) address to zero
+	 * Set buffer 1 (hi) address to zero
+	 * Reset all desc2 control bits (IC, TTSE, B2L & B1L)
+	 * Reset all desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+	 */
+	dma_desc->desc0 = 0;
+	dma_desc->desc1 = 0;
+	dma_desc->desc2 = 0;
+	dma_desc->desc3 = 0;
+
+	/* Make sure ownership is written to the descriptor */
+	dma_wmb();
+}
+
+static void xlgmac_tx_desc_init(struct xlgmac_channel *channel)
+{
+	struct xlgmac_ring *ring = channel->tx_ring;
+	struct xlgmac_desc_data *desc_data;
+	int start_index = ring->cur;
+	int i;
+
+	/* Initialize all descriptors */
+	for (i = 0; i < ring->dma_desc_count; i++) {
+		desc_data = XLGMAC_GET_DESC_DATA(ring, i);
+
+		/* Initialize Tx descriptor */
+		xlgmac_tx_desc_reset(desc_data);
+	}
+
+	/* Update the total number of Tx descriptors */
+	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
+
+	/* Update the starting address of descriptor ring */
+	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
+	writel(upper_32_bits(desc_data->dma_desc_addr),
+	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_HI));
+	writel(lower_32_bits(desc_data->dma_desc_addr),
+	       XLGMAC_DMA_REG(channel, DMA_CH_TDLR_LO));
+}
+
+static void xlgmac_rx_desc_reset(struct xlgmac_pdata *pdata,
+				 struct xlgmac_desc_data *desc_data,
+				 unsigned int index)
+{
+	struct xlgmac_dma_desc *dma_desc =
desc_data->dma_desc; + unsigned int rx_frames = pdata->rx_frames; + unsigned int rx_usecs = pdata->rx_usecs; + dma_addr_t hdr_dma, buf_dma; + unsigned int inte; + + if (!rx_usecs && !rx_frames) { + /* No coalescing, interrupt for every descriptor */ + inte = 1; + } else { + /* Set interrupt based on Rx frame coalescing setting */ + if (rx_frames && !((index + 1) % rx_frames)) + inte = 1; + else + inte = 0; + } + + /* Reset the Rx descriptor + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE + */ + hdr_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off; + buf_dma = desc_data->rx.buf.dma_base + desc_data->rx.buf.dma_off; + dma_desc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); + dma_desc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); + dma_desc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); + + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + RX_NORMAL_DESC3_INTE_POS, + RX_NORMAL_DESC3_INTE_LEN, + inte); + + /* Since the Rx DMA engine is likely running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the descriptor + */ + dma_wmb(); + + dma_desc->desc3 = XLGMAC_SET_REG_BITS_LE( + dma_desc->desc3, + RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN, + 1); + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void xlgmac_rx_desc_init(struct xlgmac_channel *channel) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->rx_ring; + unsigned int start_index = ring->cur; + struct xlgmac_desc_data *desc_data; + unsigned int i; + + /* Initialize all descriptors */ + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = XLGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Rx descriptor */ + xlgmac_rx_desc_reset(pdata, desc_data, i); + } + + /* Update the total number of Rx descriptors */ + writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR)); + + /* Update the starting address of descriptor ring */ + desc_data = XLGMAC_GET_DESC_DATA(ring, start_index); + writel(upper_32_bits(desc_data->dma_desc_addr), + XLGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); + writel(lower_32_bits(desc_data->dma_desc_addr), + XLGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); + + /* Update the Rx Descriptor Tail Pointer */ + desc_data = XLGMAC_GET_DESC_DATA(ring, start_index + + ring->dma_desc_count - 1); + writel(lower_32_bits(desc_data->dma_desc_addr), + XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); +} + +static int xlgmac_is_context_desc(struct xlgmac_dma_desc *dma_desc) +{ + /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ + return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN); +} + +static int xlgmac_is_last_desc(struct xlgmac_dma_desc *dma_desc) +{ + /* Rx and Tx share LD bit, so check TDES3.LD bit */ + return XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN); +} + +static int xlgmac_disable_tx_flow_control(struct xlgmac_pdata *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, regval; + unsigned int i; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, + 
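The paired dma_wmb() calls in xlgmac_rx_desc_reset() above are the producer half of the usual descriptor hand-off protocol: every field must be visible to the device before the OWN bit is, and the OWN write in turn must be ordered before any later tail-pointer doorbell. A minimal kernel-style sketch of that pattern, using a made-up four-word descriptor rather than the real XLGMAC layout:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/bits.h>
	#include <asm/barrier.h>
	#include <asm/byteorder.h>

	/* Hypothetical descriptor; bit 31 of 'status' plays the OWN role. */
	struct demo_rx_desc {
		__le32 buf_lo;
		__le32 buf_hi;
		__le32 resv;
		__le32 status;
	};

	static void demo_rx_desc_publish(struct demo_rx_desc *desc,
					 dma_addr_t buf)
	{
		desc->buf_lo = cpu_to_le32(lower_32_bits(buf));
		desc->buf_hi = cpu_to_le32(upper_32_bits(buf));
		desc->resv = 0;
		desc->status = 0;

		/* Fields first: the device must never see OWN with stale data */
		dma_wmb();

		desc->status = cpu_to_le32(BIT(31));

		/* Order the OWN write against a later tail-pointer MMIO write */
		dma_wmb();
	}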
MTL_Q_RQOMR_EHFC_LEN, 0); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + /* Clear MAC flow control */ + max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + regval = readl(pdata->mac_regs + reg); + regval = XLGMAC_SET_REG_BITS(regval, + MAC_Q0TFCR_TFE_POS, + MAC_Q0TFCR_TFE_LEN, + 0); + writel(regval, pdata->mac_regs + reg); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int xlgmac_enable_tx_flow_control(struct xlgmac_pdata *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, regval; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, + MTL_Q_RQOMR_EHFC_LEN, 1); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + /* Set MAC flow control */ + max_q_count = XLGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + regval = readl(pdata->mac_regs + reg); + + /* Enable transmit flow control */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, + MAC_Q0TFCR_TFE_LEN, 1); + /* Set pause time */ + regval = XLGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS, + MAC_Q0TFCR_PT_LEN, 0xffff); + + writel(regval, pdata->mac_regs + reg); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int xlgmac_disable_rx_flow_control(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_RFCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, + MAC_RFCR_RFE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_RFCR); + + return 0; +} + +static int xlgmac_enable_rx_flow_control(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MAC_RFCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, + MAC_RFCR_RFE_LEN, 1); + writel(regval, pdata->mac_regs + MAC_RFCR); + + return 0; +} + +static int xlgmac_config_tx_flow_control(struct xlgmac_pdata *pdata) +{ + if (pdata->tx_pause) + xlgmac_enable_tx_flow_control(pdata); + else + xlgmac_disable_tx_flow_control(pdata); + + return 0; +} + +static int xlgmac_config_rx_flow_control(struct xlgmac_pdata *pdata) +{ + if (pdata->rx_pause) + xlgmac_enable_rx_flow_control(pdata); + else + xlgmac_disable_rx_flow_control(pdata); + + return 0; +} + +static int xlgmac_config_rx_coalesce(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RIWT)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS, + DMA_CH_RIWT_RWT_LEN, + pdata->rx_riwt); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RIWT)); + } + + return 0; +} + +static void xlgmac_config_flow_control(struct xlgmac_pdata *pdata) +{ + xlgmac_config_tx_flow_control(pdata); + xlgmac_config_rx_flow_control(pdata); +} + +static void xlgmac_config_rx_fep_enable(struct xlgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS, + MTL_Q_RQOMR_FEP_LEN, 1); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } +} + +static void xlgmac_config_rx_fup_enable(struct 
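Almost every register access in this file funnels through XLGMAC_GET_REG_BITS/XLGMAC_SET_REG_BITS (and their _LE variants for little-endian descriptor words). Their definitions live in dwc-xlgmac.h and are not part of this hunk; the mask-and-shift semantics they implement are roughly the following (a reconstruction, not the macro text):

	#include <stdio.h>
	#include <stdint.h>

	/* Insert 'val' into the 'len'-bit field starting at bit 'pos'. */
	static uint32_t set_reg_bits(uint32_t var, unsigned int pos,
				     unsigned int len, uint32_t val)
	{
		uint32_t mask = (len < 32 ? (1U << len) - 1 : ~0U) << pos;

		return (var & ~mask) | ((val << pos) & mask);
	}

	/* Extract the 'len'-bit field starting at bit 'pos'. */
	static uint32_t get_reg_bits(uint32_t var, unsigned int pos,
				     unsigned int len)
	{
		return (var >> pos) & (len < 32 ? (1U << len) - 1 : ~0U);
	}

	int main(void)
	{
		uint32_t regval = 0;

		regval = set_reg_bits(regval, 4, 2, 3);	/* a 2-bit field at bit 4 */
		printf("0x%x %u\n", regval,
		       get_reg_bits(regval, 4, 2));	/* prints: 0x30 3 */
		return 0;
	}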
xlgmac_pdata *pdata)
+{
+	unsigned int i;
+	u32 regval;
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS,
+					     MTL_Q_RQOMR_FUP_LEN, 1);
+		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR));
+	}
+}
+
+static int xlgmac_config_tx_coalesce(struct xlgmac_pdata *pdata)
+{
+	return 0;
+}
+
+static void xlgmac_config_rx_buffer_size(struct xlgmac_pdata *pdata)
+{
+	struct xlgmac_channel *channel;
+	unsigned int i;
+	u32 regval;
+
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS,
+					     DMA_CH_RCR_RBSZ_LEN,
+					     pdata->rx_buf_size);
+		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR));
+	}
+}
+
+static void xlgmac_config_tso_mode(struct xlgmac_pdata *pdata)
+{
+	struct xlgmac_channel *channel;
+	unsigned int i;
+	u32 regval;
+
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->tx_ring)
+			break;
+
+		if (pdata->hw_feat.tso) {
+			regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+			regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS,
+						     DMA_CH_TCR_TSE_LEN, 1);
+			writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR));
+		}
+	}
+}
+
+static void xlgmac_config_sph_mode(struct xlgmac_pdata *pdata)
+{
+	struct xlgmac_channel *channel;
+	unsigned int i;
+	u32 regval;
+
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR));
+		regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+					     DMA_CH_CR_SPH_LEN, 1);
+		writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR));
+	}
+
+	regval = readl(pdata->mac_regs + MAC_RCR);
+	regval = XLGMAC_SET_REG_BITS(regval, MAC_RCR_HDSMS_POS,
+				     MAC_RCR_HDSMS_LEN,
+				     XLGMAC_SPH_HDSMS_SIZE);
+	writel(regval, pdata->mac_regs + MAC_RCR);
+}
+
+static unsigned int xlgmac_usec_to_riwt(struct xlgmac_pdata *pdata,
+					unsigned int usec)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	rate = pdata->sysclk_rate;
+
+	/* Convert the input usec value to the watchdog timer value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+	 * Calculate the required value as:
+	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
+	 */
+	ret = (usec * (rate / 1000000)) / 256;
+
+	return ret;
+}
+
+static unsigned int xlgmac_riwt_to_usec(struct xlgmac_pdata *pdata,
+					unsigned int riwt)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	rate = pdata->sysclk_rate;
+
+	/* Convert the input watchdog timer value to the usec value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as: + * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) + */ + ret = (riwt * 256) / (rate / 1000000); + + return ret; +} + +static int xlgmac_config_rx_threshold(struct xlgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS, + MTL_Q_RQOMR_RTC_LEN, val); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + return 0; +} + +static void xlgmac_config_mtl_mode(struct xlgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + /* Set Tx to weighted round robin scheduling algorithm */ + regval = readl(pdata->mac_regs + MTL_OMR); + regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS, + MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR); + writel(regval, pdata->mac_regs + MTL_OMR); + + /* Set Tx traffic classes to use WRR algorithm with equal weights */ + for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_ETSCR_TSA_POS, + MTL_TC_ETSCR_TSA_LEN, MTL_TSA_ETS); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_ETSCR)); + + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS, + MTL_TC_QWR_QW_LEN, 1); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); + } + + /* Set Rx to strict priority algorithm */ + regval = readl(pdata->mac_regs + MTL_OMR); + regval = XLGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS, + MTL_OMR_RAA_LEN, MTL_RAA_SP); + writel(regval, pdata->mac_regs + MTL_OMR); +} + +static void xlgmac_config_queue_mapping(struct xlgmac_pdata *pdata) +{ + unsigned int ppq, ppq_extra, prio, prio_queues; + unsigned int qptc, qptc_extra, queue; + unsigned int reg, regval; + unsigned int mask; + unsigned int i, j; + + /* Map the MTL Tx Queues to Traffic Classes + * Note: Tx Queues >= Traffic Classes + */ + qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; + qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; + + for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { + for (j = 0; j < qptc; j++) { + netif_dbg(pdata, drv, pdata->netdev, + "TXq%u mapped to TC%u\n", queue, i); + regval = readl(XLGMAC_MTL_REG(pdata, queue, + MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, + MTL_Q_TQOMR_Q2TCMAP_POS, + MTL_Q_TQOMR_Q2TCMAP_LEN, + i); + writel(regval, XLGMAC_MTL_REG(pdata, queue, + MTL_Q_TQOMR)); + queue++; + } + + if (i < qptc_extra) { + netif_dbg(pdata, drv, pdata->netdev, + "TXq%u mapped to TC%u\n", queue, i); + regval = readl(XLGMAC_MTL_REG(pdata, queue, + MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, + MTL_Q_TQOMR_Q2TCMAP_POS, + MTL_Q_TQOMR_Q2TCMAP_LEN, + i); + writel(regval, XLGMAC_MTL_REG(pdata, queue, + MTL_Q_TQOMR)); + queue++; + } + } + + /* Map the 8 VLAN priority values to available MTL Rx queues */ + prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, + pdata->rx_q_count); + ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; + ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; + + reg = MAC_RQC2R; + regval = 0; + for (i = 0, prio = 0; i < prio_queues;) { + mask = 0; + for (j = 0; j < ppq; j++) { + netif_dbg(pdata, drv, pdata->netdev, + "PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + prio++; + } + + if (i < ppq_extra) { + netif_dbg(pdata, drv, pdata->netdev, + "PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + prio++; + } + + regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); + + if ((i % 
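To make the xlgmac_usec_to_riwt()/xlgmac_riwt_to_usec() pair earlier in this hunk concrete: with a hypothetical 500 MHz sysclk_rate, 100 usec becomes 195 watchdog ticks, and converting back yields 99 usec, so a round trip can lose a microsecond to integer division. A standalone sketch:

	#include <stdio.h>

	#define SYSCLK_RATE 500000000UL	/* hypothetical 500 MHz DMA clock */

	static unsigned int usec_to_riwt(unsigned int usec)
	{
		/* one watchdog tick = 256 clock cycles */
		return (usec * (SYSCLK_RATE / 1000000)) / 256;
	}

	static unsigned int riwt_to_usec(unsigned int riwt)
	{
		return (riwt * 256) / (SYSCLK_RATE / 1000000);
	}

	int main(void)
	{
		unsigned int riwt = usec_to_riwt(100);

		/* 100 usec -> (100 * 500) / 256 = 195 ticks, which reads
		 * back as (195 * 256) / 500 = 99 usec.
		 */
		printf("%u %u\n", riwt, riwt_to_usec(riwt));	/* 195 99 */
		return 0;
	}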
MAC_RQC2_Q_PER_REG) && (i != prio_queues)) + continue; + + writel(regval, pdata->mac_regs + reg); + reg += MAC_RQC2_INC; + regval = 0; + } + + /* Configure one to one, MTL Rx queue to DMA Rx channel mapping + * ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11 + */ + reg = MTL_RQDCM0R; + regval = readl(pdata->mac_regs + reg); + regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH | + MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH); + writel(regval, pdata->mac_regs + reg); + + reg += MTL_RQDCM_INC; + regval = readl(pdata->mac_regs + reg); + regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH | + MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH); + writel(regval, pdata->mac_regs + reg); + + reg += MTL_RQDCM_INC; + regval = readl(pdata->mac_regs + reg); + regval |= (MTL_RQDCM2R_Q8MDMACH | MTL_RQDCM2R_Q9MDMACH | + MTL_RQDCM2R_Q10MDMACH | MTL_RQDCM2R_Q11MDMACH); + writel(regval, pdata->mac_regs + reg); +} + +static unsigned int xlgmac_calculate_per_queue_fifo( + unsigned int fifo_size, + unsigned int queue_count) +{ + unsigned int q_fifo_size; + unsigned int p_fifo; + + /* Calculate the configured fifo size */ + q_fifo_size = 1 << (fifo_size + 7); + + /* The configured value may not be the actual amount of fifo RAM */ + q_fifo_size = min_t(unsigned int, XLGMAC_MAX_FIFO, q_fifo_size); + + q_fifo_size = q_fifo_size / queue_count; + + /* Each increment in the queue fifo size represents 256 bytes of + * fifo, with 0 representing 256 bytes. Distribute the fifo equally + * between the queues. + */ + p_fifo = q_fifo_size / 256; + if (p_fifo) + p_fifo--; + + return p_fifo; +} + +static void xlgmac_config_tx_fifo_size(struct xlgmac_pdata *pdata) +{ + unsigned int fifo_size; + unsigned int i; + u32 regval; + + fifo_size = xlgmac_calculate_per_queue_fifo( + pdata->hw_feat.tx_fifo_size, + pdata->tx_q_count); + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS, + MTL_Q_TQOMR_TQS_LEN, fifo_size); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + netif_info(pdata, drv, pdata->netdev, + "%d Tx hardware queues, %d byte fifo per queue\n", + pdata->tx_q_count, ((fifo_size + 1) * 256)); +} + +static void xlgmac_config_rx_fifo_size(struct xlgmac_pdata *pdata) +{ + unsigned int fifo_size; + unsigned int i; + u32 regval; + + fifo_size = xlgmac_calculate_per_queue_fifo( + pdata->hw_feat.rx_fifo_size, + pdata->rx_q_count); + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS, + MTL_Q_RQOMR_RQS_LEN, fifo_size); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + netif_info(pdata, drv, pdata->netdev, + "%d Rx hardware queues, %d byte fifo per queue\n", + pdata->rx_q_count, ((fifo_size + 1) * 256)); +} + +static void xlgmac_config_flow_control_threshold(struct xlgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR)); + /* Activate flow control when less than 4k left in fifo */ + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFA_POS, + MTL_Q_RQFCR_RFA_LEN, 2); + /* De-activate flow control when more than 6k left in fifo */ + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQFCR_RFD_POS, + MTL_Q_RQFCR_RFD_LEN, 4); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQFCR)); + } +} + +static int xlgmac_config_tx_threshold(struct xlgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 
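A worked example for xlgmac_calculate_per_queue_fifo() above, with hypothetical inputs (hardware fifo-size encoding 9 and eight queues; the XLGMAC_MAX_FIFO clamp is omitted for brevity):

	#include <stdio.h>

	static unsigned int per_queue_fifo(unsigned int fifo_size,
					   unsigned int queue_count)
	{
		unsigned int q_fifo_size = 1U << (fifo_size + 7);	/* bytes */
		unsigned int p_fifo;

		q_fifo_size /= queue_count;

		/* Register units are 256-byte blocks, 0 meaning one block */
		p_fifo = q_fifo_size / 256;
		if (p_fifo)
			p_fifo--;

		return p_fifo;
	}

	int main(void)
	{
		unsigned int tqs = per_queue_fifo(9, 8);

		/* 1 << 16 = 64 KiB total, 8 KiB per queue -> TQS = 31,
		 * reported as (31 + 1) * 256 = 8192 bytes per queue.
		 */
		printf("%u %u\n", tqs, (tqs + 1) * 256);	/* 31 8192 */
		return 0;
	}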
regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS, + MTL_Q_TQOMR_TTC_LEN, val); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + return 0; +} + +static int xlgmac_config_rsf_mode(struct xlgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS, + MTL_Q_RQOMR_RSF_LEN, val); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + return 0; +} + +static int xlgmac_config_tsf_mode(struct xlgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS, + MTL_Q_TQOMR_TSF_LEN, val); + writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + return 0; +} + +static int xlgmac_config_osp_mode(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS, + DMA_CH_TCR_OSP_LEN, + pdata->tx_osp_mode); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + } + + return 0; +} + +static int xlgmac_config_pblx8(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_CR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS, + DMA_CH_CR_PBLX8_LEN, + pdata->pblx8); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_CR)); + } + + return 0; +} + +static int xlgmac_get_tx_pbl_val(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); + regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, + DMA_CH_TCR_PBL_LEN); + return regval; +} + +static int xlgmac_config_tx_pbl_val(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, + DMA_CH_TCR_PBL_LEN, + pdata->tx_pbl); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_TCR)); + } + + return 0; +} + +static int xlgmac_get_rx_pbl_val(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(XLGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); + regval = XLGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, + DMA_CH_RCR_PBL_LEN); + return regval; +} + +static int xlgmac_config_rx_pbl_val(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readl(XLGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = XLGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, + DMA_CH_RCR_PBL_LEN, + pdata->rx_pbl); + writel(regval, XLGMAC_DMA_REG(channel, DMA_CH_RCR)); + } + + return 0; +} + +static u64 xlgmac_mmc_read(struct xlgmac_pdata 
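The xlgmac_mmc_read() helper below reads a 64-bit counter as a low word followed by a high word. A standalone sketch of the composition; note that the MCF freeze bit set in xlgmac_read_mmc_stats() is what keeps the two halves coherent across the pair of reads:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for readl() on a hypothetical counter block. */
	static uint32_t fake_regs[2] = { 0x89abcdefu, 0x01234567u };

	static uint32_t mmio_read32(unsigned int off)
	{
		return fake_regs[off / 4];
	}

	static uint64_t read_counter64(unsigned int reg_lo)
	{
		uint64_t val = mmio_read32(reg_lo);

		/* The high word sits 4 bytes above the low word; without
		 * a freeze bit, a rollover between the two reads could
		 * tear the value.
		 */
		val |= (uint64_t)mmio_read32(reg_lo + 4) << 32;
		return val;
	}

	int main(void)
	{
		printf("%llx\n", (unsigned long long)read_counter64(0));
		/* prints: 123456789abcdef */
		return 0;
	}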
*pdata, unsigned int reg_lo) +{ + bool read_hi; + u64 val; + + switch (reg_lo) { + /* These registers are always 64 bit */ + case MMC_TXOCTETCOUNT_GB_LO: + case MMC_TXOCTETCOUNT_G_LO: + case MMC_RXOCTETCOUNT_GB_LO: + case MMC_RXOCTETCOUNT_G_LO: + read_hi = true; + break; + + default: + read_hi = false; + } + + val = (u64)readl(pdata->mac_regs + reg_lo); + + if (read_hi) + val |= ((u64)readl(pdata->mac_regs + reg_lo + 4) << 32); + + return val; +} + +static void xlgmac_tx_mmc_int(struct xlgmac_pdata *pdata) +{ + unsigned int mmc_isr = readl(pdata->mac_regs + MMC_TISR); + struct xlgmac_stats *stats = &pdata->stats; + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXOCTETCOUNT_GB_POS, + MMC_TISR_TXOCTETCOUNT_GB_LEN)) + stats->txoctetcount_gb += + xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXFRAMECOUNT_GB_POS, + MMC_TISR_TXFRAMECOUNT_GB_LEN)) + stats->txframecount_gb += + xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXBROADCASTFRAMES_G_POS, + MMC_TISR_TXBROADCASTFRAMES_G_LEN)) + stats->txbroadcastframes_g += + xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXMULTICASTFRAMES_G_POS, + MMC_TISR_TXMULTICASTFRAMES_G_LEN)) + stats->txmulticastframes_g += + xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TX64OCTETS_GB_POS, + MMC_TISR_TX64OCTETS_GB_LEN)) + stats->tx64octets_gb += + xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TX65TO127OCTETS_GB_POS, + MMC_TISR_TX65TO127OCTETS_GB_LEN)) + stats->tx65to127octets_gb += + xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TX128TO255OCTETS_GB_POS, + MMC_TISR_TX128TO255OCTETS_GB_LEN)) + stats->tx128to255octets_gb += + xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TX256TO511OCTETS_GB_POS, + MMC_TISR_TX256TO511OCTETS_GB_LEN)) + stats->tx256to511octets_gb += + xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TX512TO1023OCTETS_GB_POS, + MMC_TISR_TX512TO1023OCTETS_GB_LEN)) + stats->tx512to1023octets_gb += + xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TX1024TOMAXOCTETS_GB_POS, + MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) + stats->tx1024tomaxoctets_gb += + xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXUNICASTFRAMES_GB_POS, + MMC_TISR_TXUNICASTFRAMES_GB_LEN)) + stats->txunicastframes_gb += + xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXMULTICASTFRAMES_GB_POS, + MMC_TISR_TXMULTICASTFRAMES_GB_LEN)) + stats->txmulticastframes_gb += + xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXBROADCASTFRAMES_GB_POS, + MMC_TISR_TXBROADCASTFRAMES_GB_LEN)) + stats->txbroadcastframes_g += + xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXUNDERFLOWERROR_POS, + MMC_TISR_TXUNDERFLOWERROR_LEN)) + stats->txunderflowerror += + xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXOCTETCOUNT_G_POS, + MMC_TISR_TXOCTETCOUNT_G_LEN)) + stats->txoctetcount_g += + xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXFRAMECOUNT_G_POS, + 
MMC_TISR_TXFRAMECOUNT_G_LEN)) + stats->txframecount_g += + xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXPAUSEFRAMES_POS, + MMC_TISR_TXPAUSEFRAMES_LEN)) + stats->txpauseframes += + xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_TISR_TXVLANFRAMES_G_POS, + MMC_TISR_TXVLANFRAMES_G_LEN)) + stats->txvlanframes_g += + xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); +} + +static void xlgmac_rx_mmc_int(struct xlgmac_pdata *pdata) +{ + unsigned int mmc_isr = readl(pdata->mac_regs + MMC_RISR); + struct xlgmac_stats *stats = &pdata->stats; + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXFRAMECOUNT_GB_POS, + MMC_RISR_RXFRAMECOUNT_GB_LEN)) + stats->rxframecount_gb += + xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXOCTETCOUNT_GB_POS, + MMC_RISR_RXOCTETCOUNT_GB_LEN)) + stats->rxoctetcount_gb += + xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXOCTETCOUNT_G_POS, + MMC_RISR_RXOCTETCOUNT_G_LEN)) + stats->rxoctetcount_g += + xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXBROADCASTFRAMES_G_POS, + MMC_RISR_RXBROADCASTFRAMES_G_LEN)) + stats->rxbroadcastframes_g += + xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXMULTICASTFRAMES_G_POS, + MMC_RISR_RXMULTICASTFRAMES_G_LEN)) + stats->rxmulticastframes_g += + xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXCRCERROR_POS, + MMC_RISR_RXCRCERROR_LEN)) + stats->rxcrcerror += + xlgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXRUNTERROR_POS, + MMC_RISR_RXRUNTERROR_LEN)) + stats->rxrunterror += + xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXJABBERERROR_POS, + MMC_RISR_RXJABBERERROR_LEN)) + stats->rxjabbererror += + xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXUNDERSIZE_G_POS, + MMC_RISR_RXUNDERSIZE_G_LEN)) + stats->rxundersize_g += + xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXOVERSIZE_G_POS, + MMC_RISR_RXOVERSIZE_G_LEN)) + stats->rxoversize_g += + xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RX64OCTETS_GB_POS, + MMC_RISR_RX64OCTETS_GB_LEN)) + stats->rx64octets_gb += + xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RX65TO127OCTETS_GB_POS, + MMC_RISR_RX65TO127OCTETS_GB_LEN)) + stats->rx65to127octets_gb += + xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RX128TO255OCTETS_GB_POS, + MMC_RISR_RX128TO255OCTETS_GB_LEN)) + stats->rx128to255octets_gb += + xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RX256TO511OCTETS_GB_POS, + MMC_RISR_RX256TO511OCTETS_GB_LEN)) + stats->rx256to511octets_gb += + xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RX512TO1023OCTETS_GB_POS, + MMC_RISR_RX512TO1023OCTETS_GB_LEN)) + stats->rx512to1023octets_gb += + xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RX1024TOMAXOCTETS_GB_POS, + MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) + stats->rx1024tomaxoctets_gb += + xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + if 
(XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXUNICASTFRAMES_G_POS, + MMC_RISR_RXUNICASTFRAMES_G_LEN)) + stats->rxunicastframes_g += + xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXLENGTHERROR_POS, + MMC_RISR_RXLENGTHERROR_LEN)) + stats->rxlengtherror += + xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXOUTOFRANGETYPE_POS, + MMC_RISR_RXOUTOFRANGETYPE_LEN)) + stats->rxoutofrangetype += + xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXPAUSEFRAMES_POS, + MMC_RISR_RXPAUSEFRAMES_LEN)) + stats->rxpauseframes += + xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXFIFOOVERFLOW_POS, + MMC_RISR_RXFIFOOVERFLOW_LEN)) + stats->rxfifooverflow += + xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXVLANFRAMES_GB_POS, + MMC_RISR_RXVLANFRAMES_GB_LEN)) + stats->rxvlanframes_gb += + xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + if (XLGMAC_GET_REG_BITS(mmc_isr, + MMC_RISR_RXWATCHDOGERROR_POS, + MMC_RISR_RXWATCHDOGERROR_LEN)) + stats->rxwatchdogerror += + xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); +} + +static void xlgmac_read_mmc_stats(struct xlgmac_pdata *pdata) +{ + struct xlgmac_stats *stats = &pdata->stats; + u32 regval; + + /* Freeze counters */ + regval = readl(pdata->mac_regs + MMC_CR); + regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, + MMC_CR_MCF_LEN, 1); + writel(regval, pdata->mac_regs + MMC_CR); + + stats->txoctetcount_gb += + xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + stats->txframecount_gb += + xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + stats->txbroadcastframes_g += + xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + stats->txmulticastframes_g += + xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + stats->tx64octets_gb += + xlgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + stats->tx65to127octets_gb += + xlgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + stats->tx128to255octets_gb += + xlgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + stats->tx256to511octets_gb += + xlgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + stats->tx512to1023octets_gb += + xlgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + stats->tx1024tomaxoctets_gb += + xlgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + stats->txunicastframes_gb += + xlgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + stats->txmulticastframes_gb += + xlgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + stats->txbroadcastframes_g += + xlgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + stats->txunderflowerror += + xlgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + stats->txoctetcount_g += + xlgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + stats->txframecount_g += + xlgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + stats->txpauseframes += + xlgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + stats->txvlanframes_g += + xlgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + + stats->rxframecount_gb += + xlgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + stats->rxoctetcount_gb += + xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + stats->rxoctetcount_g += + xlgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + stats->rxbroadcastframes_g += + xlgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + stats->rxmulticastframes_g += + xlgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + stats->rxcrcerror += + xlgmac_mmc_read(pdata, 
MMC_RXCRCERROR_LO); + + stats->rxrunterror += + xlgmac_mmc_read(pdata, MMC_RXRUNTERROR); + + stats->rxjabbererror += + xlgmac_mmc_read(pdata, MMC_RXJABBERERROR); + + stats->rxundersize_g += + xlgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + stats->rxoversize_g += + xlgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); + + stats->rx64octets_gb += + xlgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + stats->rx65to127octets_gb += + xlgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + stats->rx128to255octets_gb += + xlgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + stats->rx256to511octets_gb += + xlgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + stats->rx512to1023octets_gb += + xlgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + stats->rx1024tomaxoctets_gb += + xlgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + stats->rxunicastframes_g += + xlgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + stats->rxlengtherror += + xlgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + stats->rxoutofrangetype += + xlgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + stats->rxpauseframes += + xlgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + stats->rxfifooverflow += + xlgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + stats->rxvlanframes_gb += + xlgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + stats->rxwatchdogerror += + xlgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); + + /* Un-freeze counters */ + regval = readl(pdata->mac_regs + MMC_CR); + regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, + MMC_CR_MCF_LEN, 0); + writel(regval, pdata->mac_regs + MMC_CR); +} + +static void xlgmac_config_mmc(struct xlgmac_pdata *pdata) +{ + u32 regval; + + regval = readl(pdata->mac_regs + MMC_CR); + /* Set counters to reset on read */ + regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, + MMC_CR_ROR_LEN, 1); + /* Reset the counters */ + regval = XLGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, + MMC_CR_CR_LEN, 1); + writel(regval, pdata->mac_regs + MMC_CR); +} + +static int xlgmac_write_rss_reg(struct xlgmac_pdata *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + unsigned int wait; + int ret = 0; + u32 regval; + + mutex_lock(&pdata->rss_mutex); + + regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), + MAC_RSSAR_OB_POS, MAC_RSSAR_OB_LEN); + if (regval) { + ret = -EBUSY; + goto unlock; + } + + writel(val, pdata->mac_regs + MAC_RSSDR); + + regval = readl(pdata->mac_regs + MAC_RSSAR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_RSSIA_POS, + MAC_RSSAR_RSSIA_LEN, index); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_ADDRT_POS, + MAC_RSSAR_ADDRT_LEN, type); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_CT_POS, + MAC_RSSAR_CT_LEN, 0); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSAR_OB_POS, + MAC_RSSAR_OB_LEN, 1); + writel(regval, pdata->mac_regs + MAC_RSSAR); + + wait = 1000; + while (wait--) { + regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_RSSAR), + MAC_RSSAR_OB_POS, + MAC_RSSAR_OB_LEN); + if (!regval) + goto unlock; + + usleep_range(1000, 1500); + } + + ret = -EBUSY; + +unlock: + mutex_unlock(&pdata->rss_mutex); + + return ret; +} + +static int xlgmac_write_rss_hash_key(struct xlgmac_pdata *pdata) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + unsigned int *key = (unsigned int *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = xlgmac_write_rss_reg(pdata, XLGMAC_RSS_HASH_KEY_TYPE, + key_regs, *key++); + if (ret) + return ret; + } + + return 0; +} + +static int xlgmac_write_rss_lookup_table(struct xlgmac_pdata *pdata) +{ + unsigned int i; 
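xlgmac_write_rss_reg() above is the usual indirect-access shape: stage the data word, kick a self-clearing OB (operation busy) bit, and poll it back down, all serialized by a mutex. The same shape written with the stock iopoll helper, using made-up register names; a sketch, not the driver's implementation:

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	#define DEMO_DATA	0x00	/* hypothetical staging register */
	#define DEMO_CTRL	0x04	/* hypothetical control register */
	#define DEMO_CTRL_BUSY	BIT(31)

	static int demo_indirect_write(void __iomem *base, u32 index, u32 val)
	{
		u32 ctrl;

		if (readl(base + DEMO_CTRL) & DEMO_CTRL_BUSY)
			return -EBUSY;	/* previous operation still in flight */

		writel(val, base + DEMO_DATA);
		writel(index | DEMO_CTRL_BUSY, base + DEMO_CTRL);

		/* Poll BUSY back to zero, sleeping between reads */
		return readl_poll_timeout(base + DEMO_CTRL, ctrl,
					  !(ctrl & DEMO_CTRL_BUSY),
					  1000, 1000000);
	}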
+ int ret; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + ret = xlgmac_write_rss_reg(pdata, + XLGMAC_RSS_LOOKUP_TABLE_TYPE, i, + pdata->rss_table[i]); + if (ret) + return ret; + } + + return 0; +} + +static int xlgmac_set_rss_hash_key(struct xlgmac_pdata *pdata, const u8 *key) +{ + memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return xlgmac_write_rss_hash_key(pdata); +} + +static int xlgmac_set_rss_lookup_table(struct xlgmac_pdata *pdata, + const u32 *table) +{ + unsigned int i; + u32 tval; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + tval = table[i]; + pdata->rss_table[i] = XLGMAC_SET_REG_BITS( + pdata->rss_table[i], + MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, + tval); + } + + return xlgmac_write_rss_lookup_table(pdata); +} + +static int xlgmac_enable_rss(struct xlgmac_pdata *pdata) +{ + u32 regval; + int ret; + + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + /* Program the hash key */ + ret = xlgmac_write_rss_hash_key(pdata); + if (ret) + return ret; + + /* Program the lookup table */ + ret = xlgmac_write_rss_lookup_table(pdata); + if (ret) + return ret; + + /* Set the RSS options */ + writel(pdata->rss_options, pdata->mac_regs + MAC_RSSCR); + + /* Enable RSS */ + regval = readl(pdata->mac_regs + MAC_RSSCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, + MAC_RSSCR_RSSE_LEN, 1); + writel(regval, pdata->mac_regs + MAC_RSSCR); + + return 0; +} + +static int xlgmac_disable_rss(struct xlgmac_pdata *pdata) +{ + u32 regval; + + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + regval = readl(pdata->mac_regs + MAC_RSSCR); + regval = XLGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, + MAC_RSSCR_RSSE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_RSSCR); + + return 0; +} + +static void xlgmac_config_rss(struct xlgmac_pdata *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return; + + if (pdata->netdev->features & NETIF_F_RXHASH) + ret = xlgmac_enable_rss(pdata); + else + ret = xlgmac_disable_rss(pdata); + + if (ret) + netdev_err(pdata->netdev, + "error configuring RSS, RSS disabled\n"); +} + +static void xlgmac_enable_dma_interrupts(struct xlgmac_pdata *pdata) +{ + unsigned int dma_ch_isr, dma_ch_ier; + struct xlgmac_channel *channel; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Clear all the interrupts which are set */ + dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); + writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_NIE_POS, + DMA_CH_IER_NIE_LEN, 1); + dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_AIE_POS, + DMA_CH_IER_AIE_LEN, 1); + dma_ch_ier = XLGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_FBEE_POS, + DMA_CH_IER_FBEE_LEN, 1); + + if (channel->tx_ring) { + /* Enable the following Tx interrupts + * TIE - Transmit Interrupt Enable (unless using + * per channel interrupts) + */ + if (!pdata->per_channel_irq) + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, + DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, + 1); + } + if (channel->rx_ring) { + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts) + */ + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, + DMA_CH_IER_RBUE_POS, + 
DMA_CH_IER_RBUE_LEN,
+					1);
+			if (!pdata->per_channel_irq)
+				dma_ch_ier = XLGMAC_SET_REG_BITS(
+						dma_ch_ier,
+						DMA_CH_IER_RIE_POS,
+						DMA_CH_IER_RIE_LEN,
+						1);
+		}
+
+		writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+	}
+}
+
+static void xlgmac_enable_mtl_interrupts(struct xlgmac_pdata *pdata)
+{
+	unsigned int q_count, i;
+	unsigned int mtl_q_isr;
+
+	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+	for (i = 0; i < q_count; i++) {
+		/* Clear all the interrupts which are set */
+		mtl_q_isr = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+		writel(mtl_q_isr, XLGMAC_MTL_REG(pdata, i, MTL_Q_ISR));
+
+		/* No MTL interrupts to be enabled */
+		writel(0, XLGMAC_MTL_REG(pdata, i, MTL_Q_IER));
+	}
+}
+
+static void xlgmac_enable_mac_interrupts(struct xlgmac_pdata *pdata)
+{
+	unsigned int mac_ier = 0;
+	u32 regval;
+
+	/* Enable Timestamp interrupt */
+	mac_ier = XLGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS,
+				      MAC_IER_TSIE_LEN, 1);
+
+	writel(mac_ier, pdata->mac_regs + MAC_IER);
+
+	/* Enable all counter interrupts */
+	regval = readl(pdata->mac_regs + MMC_RIER);
+	regval = XLGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS,
+				     MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+	writel(regval, pdata->mac_regs + MMC_RIER);
+	regval = readl(pdata->mac_regs + MMC_TIER);
+	regval = XLGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS,
+				     MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff);
+	writel(regval, pdata->mac_regs + MMC_TIER);
+}
+
+static int xlgmac_set_xlgmii_25000_speed(struct xlgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+				     MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+	if (regval == 0x1)
+		return 0;
+
+	regval = readl(pdata->mac_regs + MAC_TCR);
+	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+				     MAC_TCR_SS_LEN, 0x1);
+	writel(regval, pdata->mac_regs + MAC_TCR);
+
+	return 0;
+}
+
+static int xlgmac_set_xlgmii_40000_speed(struct xlgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+				     MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+	if (regval == 0)
+		return 0;
+
+	regval = readl(pdata->mac_regs + MAC_TCR);
+	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+				     MAC_TCR_SS_LEN, 0);
+	writel(regval, pdata->mac_regs + MAC_TCR);
+
+	return 0;
+}
+
+static int xlgmac_set_xlgmii_50000_speed(struct xlgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+				     MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+	if (regval == 0x2)
+		return 0;
+
+	regval = readl(pdata->mac_regs + MAC_TCR);
+	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+				     MAC_TCR_SS_LEN, 0x2);
+	writel(regval, pdata->mac_regs + MAC_TCR);
+
+	return 0;
+}
+
+static int xlgmac_set_xlgmii_100000_speed(struct xlgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + MAC_TCR),
+				     MAC_TCR_SS_POS, MAC_TCR_SS_LEN);
+	if (regval == 0x3)
+		return 0;
+
+	regval = readl(pdata->mac_regs + MAC_TCR);
+	regval = XLGMAC_SET_REG_BITS(regval, MAC_TCR_SS_POS,
+				     MAC_TCR_SS_LEN, 0x3);
+	writel(regval, pdata->mac_regs + MAC_TCR);
+
+	return 0;
+}
+
+static void xlgmac_config_mac_speed(struct xlgmac_pdata *pdata)
+{
+	switch (pdata->phy_speed) {
+	case SPEED_100000:
+		xlgmac_set_xlgmii_100000_speed(pdata);
+		break;
+
+	case SPEED_50000:
+		xlgmac_set_xlgmii_50000_speed(pdata);
+		break;
+
+	case SPEED_40000:
+		xlgmac_set_xlgmii_40000_speed(pdata);
+		break;
+
+	case SPEED_25000:
+		xlgmac_set_xlgmii_25000_speed(pdata);
+		break;
+	}
+}
+
+static int xlgmac_dev_read(struct xlgmac_channel
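The four speed helpers above differ only in the MAC_TCR SS field value they program. Collected into one hypothetical lookup for reference, with the encodings read straight off the code (0x0 for 40G, 0x1 for 25G, 0x2 for 50G, 0x3 for 100G):

	#include <stdio.h>

	/* MAC_TCR.SS encodings as used by the helpers above. */
	static int speed_to_ss(unsigned int speed_mbps)
	{
		switch (speed_mbps) {
		case 40000:
			return 0x0;
		case 25000:
			return 0x1;
		case 50000:
			return 0x2;
		case 100000:
			return 0x3;
		default:
			return -1;	/* unsupported speed */
		}
	}

	int main(void)
	{
		printf("%d\n", speed_to_ss(50000));	/* prints: 2 */
		return 0;
	}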
*channel) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->rx_ring; + struct net_device *netdev = pdata->netdev; + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + struct xlgmac_pkt_info *pkt_info; + unsigned int err, etlt, l34t; + + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); + dma_desc = desc_data->dma_desc; + pkt_info = &ring->pkt_info; + + /* Check for data availability */ + if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN)) + return 1; + + /* Make sure descriptor fields are read after reading the OWN bit */ + dma_rmb(); + + if (netif_msg_rx_status(pdata)) + xlgmac_dump_rx_desc(pdata, ring, ring->cur); + + if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_CTXT_POS, + RX_NORMAL_DESC3_CTXT_LEN)) { + /* Timestamp Context Descriptor */ + xlgmac_get_rx_tstamp(pkt_info, dma_desc); + + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, + 1); + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, + 0); + return 0; + } + + /* Normal Descriptor, be sure Context Descriptor bit is off */ + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, + 0); + + /* Indicate if a Context Descriptor is next */ + if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_CDA_POS, + RX_NORMAL_DESC3_CDA_LEN)) + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, + 1); + + /* Get the header length */ + if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_FD_POS, + RX_NORMAL_DESC3_FD_LEN)) { + desc_data->rx.hdr_len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc2, + RX_NORMAL_DESC2_HL_POS, + RX_NORMAL_DESC2_HL_LEN); + if (desc_data->rx.hdr_len) + pdata->stats.rx_split_header_packets++; + } + + /* Get the RSS hash */ + if (XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_RSV_POS, + RX_NORMAL_DESC3_RSV_LEN)) { + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_RSS_HASH_POS, + RX_PACKET_ATTRIBUTES_RSS_HASH_LEN, + 1); + + pkt_info->rss_hash = le32_to_cpu(dma_desc->desc1); + + l34t = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_L34T_POS, + RX_NORMAL_DESC3_L34T_LEN); + switch (l34t) { + case RX_DESC3_L34T_IPV4_TCP: + case RX_DESC3_L34T_IPV4_UDP: + case RX_DESC3_L34T_IPV6_TCP: + case RX_DESC3_L34T_IPV6_UDP: + pkt_info->rss_hash_type = PKT_HASH_TYPE_L4; + break; + default: + pkt_info->rss_hash_type = PKT_HASH_TYPE_L3; + } + } + + /* Get the pkt_info length */ + desc_data->rx.len = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_PL_POS, + RX_NORMAL_DESC3_PL_LEN); + + if (!XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_LD_POS, + RX_NORMAL_DESC3_LD_LEN)) { + /* Not all the data has been transferred for this pkt_info */ + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, + 1); + return 0; + } + + /* This is the last of the data for this pkt_info */ + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, + 0); + + /* Set checksum done indicator as appropriate */ + 
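xlgmac_dev_read() shows the consumer half of the ownership protocol: test OWN first, then dma_rmb() before trusting any other descriptor word. A matching consumer sketch for the made-up descriptor used in the earlier producer example (the length field placement is invented for illustration):

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <asm/barrier.h>
	#include <asm/byteorder.h>

	struct demo_rx_desc {
		__le32 buf_lo;
		__le32 buf_hi;
		__le32 resv;
		__le32 status;	/* bit 31 = OWN, low bits = length (made up) */
	};

	/* Returns packet length, or -1 while the device still owns the slot. */
	static int demo_rx_desc_poll(struct demo_rx_desc *desc)
	{
		if (le32_to_cpu(desc->status) & BIT(31))
			return -1;	/* device-owned: nothing to read yet */

		/* Don't read the other fields until after the OWN check */
		dma_rmb();

		return le32_to_cpu(desc->status) & 0x3fff;
	}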
if (netdev->features & NETIF_F_RXCSUM) + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, + 1); + + /* Check for errors (only valid in last descriptor) */ + err = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_ES_POS, + RX_NORMAL_DESC3_ES_LEN); + etlt = XLGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_ETLT_POS, + RX_NORMAL_DESC3_ETLT_LEN); + netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); + + if (!err || !etlt) { + /* No error if err is 0 or etlt is 0 */ + if ((etlt == 0x09) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, + 1); + pkt_info->vlan_ctag = + XLGMAC_GET_REG_BITS_LE(dma_desc->desc0, + RX_NORMAL_DESC0_OVT_POS, + RX_NORMAL_DESC0_OVT_LEN); + netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", + pkt_info->vlan_ctag); + } + } else { + if ((etlt == 0x05) || (etlt == 0x06)) + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, + 0); + else + pkt_info->errors = XLGMAC_SET_REG_BITS( + pkt_info->errors, + RX_PACKET_ERRORS_FRAME_POS, + RX_PACKET_ERRORS_FRAME_LEN, + 1); + } + + XLGMAC_PR("%s - descriptor=%u (cur=%d)\n", channel->name, + ring->cur & (ring->dma_desc_count - 1), ring->cur); + + return 0; +} + +static int xlgmac_enable_int(struct xlgmac_channel *channel, + enum xlgmac_int int_id) +{ + unsigned int dma_ch_ier; + + dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); + + switch (int_id) { + case XLGMAC_INT_DMA_CH_SR_TI: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_TPS: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TXSE_POS, + DMA_CH_IER_TXSE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_TBU: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TBUE_POS, + DMA_CH_IER_TBUE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_RI: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_RBU: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_RBUE_POS, + DMA_CH_IER_RBUE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_RPS: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_RSE_POS, + DMA_CH_IER_RSE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_TI_RI: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 1); + break; + case XLGMAC_INT_DMA_CH_SR_FBE: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_FBEE_POS, + DMA_CH_IER_FBEE_LEN, 1); + break; + case XLGMAC_INT_DMA_ALL: + dma_ch_ier |= channel->saved_ier; + break; + default: + return -1; + } + + writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER)); + + return 0; +} + +static int xlgmac_disable_int(struct xlgmac_channel *channel, + enum xlgmac_int int_id) +{ + unsigned int dma_ch_ier; + + dma_ch_ier = readl(XLGMAC_DMA_REG(channel, DMA_CH_IER)); + + switch (int_id) { + case XLGMAC_INT_DMA_CH_SR_TI: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 0); + break; + case XLGMAC_INT_DMA_CH_SR_TPS: + dma_ch_ier = XLGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TXSE_POS, + 
DMA_CH_IER_TXSE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_CH_SR_TBU:
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_TBUE_POS,
+				DMA_CH_IER_TBUE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_CH_SR_RI:
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_RIE_POS,
+				DMA_CH_IER_RIE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_CH_SR_RBU:
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_RBUE_POS,
+				DMA_CH_IER_RBUE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_CH_SR_RPS:
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_RSE_POS,
+				DMA_CH_IER_RSE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_CH_SR_TI_RI:
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_TIE_POS,
+				DMA_CH_IER_TIE_LEN, 0);
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_RIE_POS,
+				DMA_CH_IER_RIE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_CH_SR_FBE:
+		dma_ch_ier = XLGMAC_SET_REG_BITS(
+				dma_ch_ier, DMA_CH_IER_FBEE_POS,
+				DMA_CH_IER_FBEE_LEN, 0);
+		break;
+	case XLGMAC_INT_DMA_ALL:
+		channel->saved_ier = dma_ch_ier & XLGMAC_DMA_INTERRUPT_MASK;
+		dma_ch_ier &= ~XLGMAC_DMA_INTERRUPT_MASK;
+		break;
+	default:
+		return -1;
+	}
+
+	writel(dma_ch_ier, XLGMAC_DMA_REG(channel, DMA_CH_IER));
+
+	return 0;
+}
+
+static int xlgmac_flush_tx_queues(struct xlgmac_pdata *pdata)
+{
+	unsigned int i, count;
+	u32 regval;
+
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+		regval = XLGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS,
+					     MTL_Q_TQOMR_FTQ_LEN, 1);
+		writel(regval, XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+	}
+
+	/* Poll until the self-clearing flush bit goes back to zero */
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		count = 2000;
+		do {
+			regval = readl(XLGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR));
+			regval = XLGMAC_GET_REG_BITS(regval,
+						     MTL_Q_TQOMR_FTQ_POS,
+						     MTL_Q_TQOMR_FTQ_LEN);
+			if (!regval)
+				break;
+			usleep_range(500, 600);
+		} while (--count);
+
+		if (!count)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void xlgmac_config_dma_bus(struct xlgmac_pdata *pdata)
+{
+	u32 regval;
+
+	regval = readl(pdata->mac_regs + DMA_SBMR);
+	/* Set enhanced addressing mode */
+	regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS,
+				     DMA_SBMR_EAME_LEN, 1);
+	/* Set the System Bus mode */
+	regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_UNDEF_POS,
+				     DMA_SBMR_UNDEF_LEN, 1);
+	regval = XLGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_256_POS,
+				     DMA_SBMR_BLEN_256_LEN, 1);
+	writel(regval, pdata->mac_regs + DMA_SBMR);
+}
+
+static int xlgmac_hw_init(struct xlgmac_pdata *pdata)
+{
+	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
+	int ret;
+
+	/* Flush Tx queues */
+	ret = xlgmac_flush_tx_queues(pdata);
+	if (ret)
+		return ret;
+
+	/* Initialize DMA related features */
+	xlgmac_config_dma_bus(pdata);
+	xlgmac_config_osp_mode(pdata);
+	xlgmac_config_pblx8(pdata);
+	xlgmac_config_tx_pbl_val(pdata);
+	xlgmac_config_rx_pbl_val(pdata);
+	xlgmac_config_rx_coalesce(pdata);
+	xlgmac_config_tx_coalesce(pdata);
+	xlgmac_config_rx_buffer_size(pdata);
+	xlgmac_config_tso_mode(pdata);
+	xlgmac_config_sph_mode(pdata);
+	xlgmac_config_rss(pdata);
+	desc_ops->tx_desc_init(pdata);
+	desc_ops->rx_desc_init(pdata);
+	xlgmac_enable_dma_interrupts(pdata);
+
+	/* Initialize MTL related features */
+	xlgmac_config_mtl_mode(pdata);
+	xlgmac_config_queue_mapping(pdata);
+	xlgmac_config_tsf_mode(pdata, pdata->tx_sf_mode);
+	xlgmac_config_rsf_mode(pdata, pdata->rx_sf_mode);
+	xlgmac_config_tx_threshold(pdata, pdata->tx_threshold);
+	xlgmac_config_rx_threshold(pdata, pdata->rx_threshold);
+	xlgmac_config_tx_fifo_size(pdata);
xlgmac_config_rx_fifo_size(pdata); + xlgmac_config_flow_control_threshold(pdata); + xlgmac_config_rx_fep_enable(pdata); + xlgmac_config_rx_fup_enable(pdata); + xlgmac_enable_mtl_interrupts(pdata); + + /* Initialize MAC related features */ + xlgmac_config_mac_address(pdata); + xlgmac_config_rx_mode(pdata); + xlgmac_config_jumbo_enable(pdata); + xlgmac_config_flow_control(pdata); + xlgmac_config_mac_speed(pdata); + xlgmac_config_checksum_offload(pdata); + xlgmac_config_vlan_support(pdata); + xlgmac_config_mmc(pdata); + xlgmac_enable_mac_interrupts(pdata); + + return 0; +} + +static int xlgmac_hw_exit(struct xlgmac_pdata *pdata) +{ + unsigned int count = 2000; + u32 regval; + + /* Issue a software reset */ + regval = readl(pdata->mac_regs + DMA_MR); + regval = XLGMAC_SET_REG_BITS(regval, DMA_MR_SWR_POS, + DMA_MR_SWR_LEN, 1); + writel(regval, pdata->mac_regs + DMA_MR); + usleep_range(10, 15); + + /* Poll Until Poll Condition */ + while (--count && + XLGMAC_GET_REG_BITS(readl(pdata->mac_regs + DMA_MR), + DMA_MR_SWR_POS, DMA_MR_SWR_LEN)) + usleep_range(500, 600); + + if (!count) + return -EBUSY; + + return 0; +} + +void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops) +{ + hw_ops->init = xlgmac_hw_init; + hw_ops->exit = xlgmac_hw_exit; + + hw_ops->tx_complete = xlgmac_tx_complete; + + hw_ops->enable_tx = xlgmac_enable_tx; + hw_ops->disable_tx = xlgmac_disable_tx; + hw_ops->enable_rx = xlgmac_enable_rx; + hw_ops->disable_rx = xlgmac_disable_rx; + + hw_ops->dev_xmit = xlgmac_dev_xmit; + hw_ops->dev_read = xlgmac_dev_read; + hw_ops->enable_int = xlgmac_enable_int; + hw_ops->disable_int = xlgmac_disable_int; + + hw_ops->set_mac_address = xlgmac_set_mac_address; + hw_ops->config_rx_mode = xlgmac_config_rx_mode; + hw_ops->enable_rx_csum = xlgmac_enable_rx_csum; + hw_ops->disable_rx_csum = xlgmac_disable_rx_csum; + + /* For MII speed configuration */ + hw_ops->set_xlgmii_25000_speed = xlgmac_set_xlgmii_25000_speed; + hw_ops->set_xlgmii_40000_speed = xlgmac_set_xlgmii_40000_speed; + hw_ops->set_xlgmii_50000_speed = xlgmac_set_xlgmii_50000_speed; + hw_ops->set_xlgmii_100000_speed = xlgmac_set_xlgmii_100000_speed; + + /* For descriptor related operation */ + hw_ops->tx_desc_init = xlgmac_tx_desc_init; + hw_ops->rx_desc_init = xlgmac_rx_desc_init; + hw_ops->tx_desc_reset = xlgmac_tx_desc_reset; + hw_ops->rx_desc_reset = xlgmac_rx_desc_reset; + hw_ops->is_last_desc = xlgmac_is_last_desc; + hw_ops->is_context_desc = xlgmac_is_context_desc; + hw_ops->tx_start_xmit = xlgmac_tx_start_xmit; + + /* For Flow Control */ + hw_ops->config_tx_flow_control = xlgmac_config_tx_flow_control; + hw_ops->config_rx_flow_control = xlgmac_config_rx_flow_control; + + /* For Vlan related config */ + hw_ops->enable_rx_vlan_stripping = xlgmac_enable_rx_vlan_stripping; + hw_ops->disable_rx_vlan_stripping = xlgmac_disable_rx_vlan_stripping; + hw_ops->enable_rx_vlan_filtering = xlgmac_enable_rx_vlan_filtering; + hw_ops->disable_rx_vlan_filtering = xlgmac_disable_rx_vlan_filtering; + hw_ops->update_vlan_hash_table = xlgmac_update_vlan_hash_table; + + /* For RX coalescing */ + hw_ops->config_rx_coalesce = xlgmac_config_rx_coalesce; + hw_ops->config_tx_coalesce = xlgmac_config_tx_coalesce; + hw_ops->usec_to_riwt = xlgmac_usec_to_riwt; + hw_ops->riwt_to_usec = xlgmac_riwt_to_usec; + + /* For RX and TX threshold config */ + hw_ops->config_rx_threshold = xlgmac_config_rx_threshold; + hw_ops->config_tx_threshold = xlgmac_config_tx_threshold; + + /* For RX and TX Store and Forward Mode config */ + hw_ops->config_rsf_mode = 
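Everything registered in xlgmac_init_hw_ops() is meant to be reached through the ops table rather than called directly, keeping core code decoupled from this file. A hypothetical caller sketch; demo_bring_up() and its flow are illustrative, only the ops-table fields and pdata fields visible in this patch are taken from the source:

	/* Hypothetical caller; not part of the patch. */
	static int demo_bring_up(struct xlgmac_pdata *pdata)
	{
		struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
		int ret;

		xlgmac_init_hw_ops(hw_ops);	/* populate the table once */

		ret = hw_ops->init(pdata);	/* dispatches to xlgmac_hw_init() */
		if (ret)
			return ret;

		/* e.g. react to an ethtool pause-parameter change */
		pdata->tx_pause = 1;
		return hw_ops->config_tx_flow_control(pdata);
	}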
xlgmac_config_rsf_mode; + hw_ops->config_tsf_mode = xlgmac_config_tsf_mode; + + /* For TX DMA Operating on Second Frame config */ + hw_ops->config_osp_mode = xlgmac_config_osp_mode; + + /* For RX and TX PBL config */ + hw_ops->config_rx_pbl_val = xlgmac_config_rx_pbl_val; + hw_ops->get_rx_pbl_val = xlgmac_get_rx_pbl_val; + hw_ops->config_tx_pbl_val = xlgmac_config_tx_pbl_val; + hw_ops->get_tx_pbl_val = xlgmac_get_tx_pbl_val; + hw_ops->config_pblx8 = xlgmac_config_pblx8; + + /* For MMC statistics support */ + hw_ops->tx_mmc_int = xlgmac_tx_mmc_int; + hw_ops->rx_mmc_int = xlgmac_rx_mmc_int; + hw_ops->read_mmc_stats = xlgmac_read_mmc_stats; + + /* For Receive Side Scaling */ + hw_ops->enable_rss = xlgmac_enable_rss; + hw_ops->disable_rss = xlgmac_disable_rss; + hw_ops->set_rss_hash_key = xlgmac_set_rss_hash_key; + hw_ops->set_rss_lookup_table = xlgmac_set_rss_lookup_table; +} diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c new file mode 100644 index 0000000000000000000000000000000000000000..3b91257683bc71ace3a553c522c5cf873e6047fc --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -0,0 +1,1350 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. 
+ */
+
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_one_poll(struct napi_struct *, int);
+static int xlgmac_all_poll(struct napi_struct *, int);
+
+static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
+{
+	return (ring->dma_desc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
+{
+	return (ring->cur - ring->dirty);
+}
+
+static int xlgmac_maybe_stop_tx_queue(
+			struct xlgmac_channel *channel,
+			struct xlgmac_ring *ring,
+			unsigned int count)
+{
+	struct xlgmac_pdata *pdata = channel->pdata;
+
+	if (count > xlgmac_tx_avail_desc(ring)) {
+		netif_info(pdata, drv, pdata->netdev,
+			   "Tx queue stopped, not enough descriptors available\n");
+		netif_stop_subqueue(pdata->netdev, channel->queue_index);
+		ring->tx.queue_stopped = 1;
+
+		/* If we haven't notified the hardware because of xmit_more
+		 * support, tell it now
+		 */
+		if (ring->tx.xmit_more)
+			pdata->hw_ops.tx_start_xmit(channel, ring);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return 0;
+}
+
+static void xlgmac_prep_vlan(struct sk_buff *skb,
+			     struct xlgmac_pkt_info *pkt_info)
+{
+	if (skb_vlan_tag_present(skb))
+		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xlgmac_prep_tso(struct sk_buff *skb,
+			   struct xlgmac_pkt_info *pkt_info)
+{
+	int ret;
+
+	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
+				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
+		return 0;
+
+	ret = skb_cow_head(skb, 0);
+	if (ret)
+		return ret;
+
+	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	pkt_info->tcp_header_len = tcp_hdrlen(skb);
+	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
+	pkt_info->mss = skb_shinfo(skb)->gso_size;
+
+	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
+	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
+		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
+	XLGMAC_PR("mss=%u\n", pkt_info->mss);
+
+	/* Update the number of packets that will ultimately be transmitted
+	 * along with the extra bytes for each extra packet
+	 */
+	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
+	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
+
+	return 0;
+}
+
+static int xlgmac_is_tso(struct sk_buff *skb)
+{
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	return 1;
+}
+
+static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
+			       struct xlgmac_ring *ring,
+			       struct sk_buff *skb,
+			       struct xlgmac_pkt_info *pkt_info)
+{
+	struct skb_frag_struct *frag;
+	unsigned int context_desc;
+	unsigned int len;
+	unsigned int i;
+
+	pkt_info->skb = skb;
+
+	context_desc = 0;
+	pkt_info->desc_count = 0;
+
+	pkt_info->tx_packets = 1;
+	pkt_info->tx_bytes = skb->len;
+
+	if (xlgmac_is_tso(skb)) {
+		/* TSO requires an extra descriptor if mss is different */
+		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+			context_desc = 1;
+			pkt_info->desc_count++;
+		}
+
+		/* TSO requires an extra descriptor for TSO header */
+		pkt_info->desc_count++;
+
+		pkt_info->attributes = XLGMAC_SET_REG_BITS(
+					pkt_info->attributes,
+					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
+					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
+					1);
+		pkt_info->attributes = XLGMAC_SET_REG_BITS(
+					pkt_info->attributes,
+					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
+					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
+					1);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
+		pkt_info->attributes = XLGMAC_SET_REG_BITS(
+					pkt_info->attributes,
+
TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, + 1); + + if (skb_vlan_tag_present(skb)) { + /* VLAN requires an extra descriptor if tag is different */ + if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) + /* We can share with the TSO context descriptor */ + if (!context_desc) { + context_desc = 1; + pkt_info->desc_count++; + } + + pkt_info->attributes = XLGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, + 1); + } + + for (len = skb_headlen(skb); len;) { + pkt_info->desc_count++; + len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + for (len = skb_frag_size(frag); len; ) { + pkt_info->desc_count++; + len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE); + } + } +} + +static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) +{ + unsigned int rx_buf_size; + + if (mtu > XLGMAC_JUMBO_PACKET_MTU) { + netdev_alert(netdev, "MTU exceeds maximum supported value\n"); + return -EINVAL; + } + + rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE); + + rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) & + ~(XLGMAC_RX_BUF_ALIGN - 1); + + return rx_buf_size; +} + +static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct xlgmac_channel *channel; + enum xlgmac_int int_id; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = XLGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XLGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XLGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_ops->enable_int(channel, int_id); + } +} + +static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct xlgmac_channel *channel; + enum xlgmac_int int_id; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = XLGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XLGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XLGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_ops->disable_int(channel, int_id); + } +} + +static irqreturn_t xlgmac_isr(int irq, void *data) +{ + unsigned int dma_isr, dma_ch_isr, mac_isr; + struct xlgmac_pdata *pdata = data; + struct xlgmac_channel *channel; + struct xlgmac_hw_ops *hw_ops; + unsigned int i, ti, ri; + + hw_ops = &pdata->hw_ops; + + /* The DMA interrupt status register also reports MAC and MTL + * interrupts. So for polling mode, we just need to check for + * this register to be non-zero + */ + dma_isr = readl(pdata->mac_regs + DMA_ISR); + if (!dma_isr) + return IRQ_HANDLED; + + netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); + + for (i = 0; i < pdata->channel_count; i++) { + if (!(dma_isr & (1 << i))) + continue; + + channel = pdata->channel_head + i; + + dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR)); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", + i, dma_ch_isr); + + /* The TI or RI interrupt bits may still be set even if using + * per channel DMA interrupts. 
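+		 * (With per channel interrupts, xlgmac_dma_isr() handles
+		 * napi scheduling on the channel's own napi instance.)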
Check to be sure those are not + * enabled before using the private data napi structure. + */ + ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, + DMA_CH_SR_TI_LEN); + ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, + DMA_CH_SR_RI_LEN); + if (!pdata->per_channel_irq && (ti || ri)) { + if (napi_schedule_prep(&pdata->napi)) { + /* Disable Tx and Rx interrupts */ + xlgmac_disable_rx_tx_ints(pdata); + + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->napi); + } + } + + if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, + DMA_CH_SR_TPS_LEN)) + pdata->stats.tx_process_stopped++; + + if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS, + DMA_CH_SR_RPS_LEN)) + pdata->stats.rx_process_stopped++; + + if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS, + DMA_CH_SR_TBU_LEN)) + pdata->stats.tx_buffer_unavailable++; + + if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS, + DMA_CH_SR_RBU_LEN)) + pdata->stats.rx_buffer_unavailable++; + + /* Restart the device on a Fatal Bus Error */ + if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS, + DMA_CH_SR_FBE_LEN)) { + pdata->stats.fatal_bus_error++; + schedule_work(&pdata->restart_work); + } + + /* Clear all interrupt signals */ + writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR)); + } + + if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS, + DMA_ISR_MACIS_LEN)) { + mac_isr = readl(pdata->mac_regs + MAC_ISR); + + if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS, + MAC_ISR_MMCTXIS_LEN)) + hw_ops->tx_mmc_int(pdata); + + if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS, + MAC_ISR_MMCRXIS_LEN)) + hw_ops->rx_mmc_int(pdata); + } + + return IRQ_HANDLED; +} + +static irqreturn_t xlgmac_dma_isr(int irq, void *data) +{ + struct xlgmac_channel *channel = data; + + /* Per channel DMA interrupts are enabled, so we use the per + * channel napi structure and not the private data napi structure + */ + if (napi_schedule_prep(&channel->napi)) { + /* Disable Tx and Rx interrupts */ + disable_irq_nosync(channel->dma_irq); + + /* Turn on polling */ + __napi_schedule_irqoff(&channel->napi); + } + + return IRQ_HANDLED; +} + +static void xlgmac_tx_timer(unsigned long data) +{ + struct xlgmac_channel *channel = (struct xlgmac_channel *)data; + struct xlgmac_pdata *pdata = channel->pdata; + struct napi_struct *napi; + + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + + if (napi_schedule_prep(napi)) { + /* Disable Tx and Rx interrupts */ + if (pdata->per_channel_irq) + disable_irq_nosync(channel->dma_irq); + else + xlgmac_disable_rx_tx_ints(pdata); + + pdata->stats.napi_poll_txtimer++; + /* Turn on polling */ + __napi_schedule(napi); + } + + channel->tx_timer_active = 0; +} + +static void xlgmac_init_timers(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + setup_timer(&channel->tx_timer, xlgmac_tx_timer, + (unsigned long)channel); + } +} + +static void xlgmac_stop_timers(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + del_timer_sync(&channel->tx_timer); + } +} + +static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add) +{ + struct xlgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) + netif_napi_add(pdata->netdev, &channel->napi, + xlgmac_one_poll, + NAPI_POLL_WEIGHT); + + napi_enable(&channel->napi); + } + } else { + if (add) + netif_napi_add(pdata->netdev, &pdata->napi, + xlgmac_all_poll, NAPI_POLL_WEIGHT); + + napi_enable(&pdata->napi); + } +} + +static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del) +{ + struct xlgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->napi); + + if (del) + netif_napi_del(&channel->napi); + } + } else { + napi_disable(&pdata->napi); + + if (del) + netif_napi_del(&pdata->napi); + } +} + +static int xlgmac_request_irqs(struct xlgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct xlgmac_channel *channel; + unsigned int i; + int ret; + + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr, + IRQF_SHARED, netdev->name, pdata); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + pdata->dev_irq); + return ret; + } + + if (!pdata->per_channel_irq) + return 0; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->dma_irq_name, + sizeof(channel->dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xlgmac_dma_isr, 0, + channel->dma_irq_name, channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + + return 0; + +err_irq: + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + return ret; +} + +static void xlgmac_free_irqs(struct xlgmac_pdata *pdata) +{ + struct xlgmac_channel *channel; + unsigned int i; + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + if (!pdata->per_channel_irq) + return; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, channel); +} + +static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata) +{ + struct xlgmac_desc_ops 
*desc_ops = &pdata->desc_ops; + struct xlgmac_desc_data *desc_data; + struct xlgmac_channel *channel; + struct xlgmac_ring *ring; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = XLGMAC_GET_DESC_DATA(ring, j); + desc_ops->unmap_desc_data(pdata, desc_data); + } + } +} + +static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata) +{ + struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops; + struct xlgmac_desc_data *desc_data; + struct xlgmac_channel *channel; + struct xlgmac_ring *ring; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = XLGMAC_GET_DESC_DATA(ring, j); + desc_ops->unmap_desc_data(pdata, desc_data); + } + } +} + +static int xlgmac_start(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + int ret; + + hw_ops->init(pdata); + xlgmac_napi_enable(pdata, 1); + + ret = xlgmac_request_irqs(pdata); + if (ret) + goto err_napi; + + hw_ops->enable_tx(pdata); + hw_ops->enable_rx(pdata); + netif_tx_start_all_queues(netdev); + + return 0; + +err_napi: + xlgmac_napi_disable(pdata, 1); + hw_ops->exit(pdata); + + return ret; +} + +static void xlgmac_stop(struct xlgmac_pdata *pdata) +{ + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + struct xlgmac_channel *channel; + struct netdev_queue *txq; + unsigned int i; + + netif_tx_stop_all_queues(netdev); + xlgmac_stop_timers(pdata); + hw_ops->disable_tx(pdata); + hw_ops->disable_rx(pdata); + xlgmac_free_irqs(pdata); + xlgmac_napi_disable(pdata, 1); + hw_ops->exit(pdata); + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + continue; + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } +} + +static void xlgmac_restart_dev(struct xlgmac_pdata *pdata) +{ + /* If not running, "restart" will happen on open */ + if (!netif_running(pdata->netdev)) + return; + + xlgmac_stop(pdata); + + xlgmac_free_tx_data(pdata); + xlgmac_free_rx_data(pdata); + + xlgmac_start(pdata); +} + +static void xlgmac_restart(struct work_struct *work) +{ + struct xlgmac_pdata *pdata = container_of(work, + struct xlgmac_pdata, + restart_work); + + rtnl_lock(); + + xlgmac_restart_dev(pdata); + + rtnl_unlock(); +} + +static int xlgmac_open(struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_desc_ops *desc_ops; + int ret; + + desc_ops = &pdata->desc_ops; + + /* TODO: Initialize the phy */ + + /* Calculate the Rx buffer size before allocating rings */ + ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu); + if (ret < 0) + return ret; + pdata->rx_buf_size = ret; + + /* Allocate the channels and rings */ + ret = desc_ops->alloc_channles_and_rings(pdata); + if (ret) + return ret; + + INIT_WORK(&pdata->restart_work, xlgmac_restart); + xlgmac_init_timers(pdata); + + ret = xlgmac_start(pdata); + if (ret) + goto err_channels_and_rings; + + return 0; + +err_channels_and_rings: + desc_ops->free_channels_and_rings(pdata); + + return ret; +} + +static int xlgmac_close(struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_desc_ops *desc_ops; + + 
desc_ops = &pdata->desc_ops; + + /* Stop the device */ + xlgmac_stop(pdata); + + /* Free the channels and rings */ + desc_ops->free_channels_and_rings(pdata); + + return 0; +} + +static void xlgmac_tx_timeout(struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + + netdev_warn(netdev, "tx timeout, device restarting\n"); + schedule_work(&pdata->restart_work); +} + +static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_pkt_info *tx_pkt_info; + struct xlgmac_desc_ops *desc_ops; + struct xlgmac_channel *channel; + struct xlgmac_hw_ops *hw_ops; + struct netdev_queue *txq; + struct xlgmac_ring *ring; + int ret; + + desc_ops = &pdata->desc_ops; + hw_ops = &pdata->hw_ops; + + XLGMAC_PR("skb->len = %d\n", skb->len); + + channel = pdata->channel_head + skb->queue_mapping; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + ring = channel->tx_ring; + tx_pkt_info = &ring->pkt_info; + + if (skb->len == 0) { + netif_err(pdata, tx_err, netdev, + "empty skb received from stack\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Prepare preliminary packet info for TX */ + memset(tx_pkt_info, 0, sizeof(*tx_pkt_info)); + xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info); + + /* Check that there are enough descriptors available */ + ret = xlgmac_maybe_stop_tx_queue(channel, ring, + tx_pkt_info->desc_count); + if (ret) + return ret; + + ret = xlgmac_prep_tso(skb, tx_pkt_info); + if (ret) { + netif_err(pdata, tx_err, netdev, + "error processing TSO packet\n"); + dev_kfree_skb_any(skb); + return ret; + } + xlgmac_prep_vlan(skb, tx_pkt_info); + + if (!desc_ops->map_tx_skb(channel, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Report on the actual number of bytes (to be) sent */ + netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes); + + /* Configure required descriptor fields for transmission */ + hw_ops->dev_xmit(channel); + + if (netif_msg_pktdata(pdata)) + xlgmac_print_pkt(netdev, skb, true); + + /* Stop the queue in advance if there may not be enough descriptors */ + xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR); + + return NETDEV_TX_OK; +} + +static void xlgmac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_stats *pstats = &pdata->stats; + + pdata->hw_ops.read_mmc_stats(pdata); + + s->rx_packets = pstats->rxframecount_gb; + s->rx_bytes = pstats->rxoctetcount_gb; + s->rx_errors = pstats->rxframecount_gb - + pstats->rxbroadcastframes_g - + pstats->rxmulticastframes_g - + pstats->rxunicastframes_g; + s->multicast = pstats->rxmulticastframes_g; + s->rx_length_errors = pstats->rxlengtherror; + s->rx_crc_errors = pstats->rxcrcerror; + s->rx_fifo_errors = pstats->rxfifooverflow; + + s->tx_packets = pstats->txframecount_gb; + s->tx_bytes = pstats->txoctetcount_gb; + s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; + s->tx_dropped = netdev->stats.tx_dropped; +} + +static int xlgmac_set_mac_address(struct net_device *netdev, void *addr) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); + + hw_ops->set_mac_address(pdata, netdev->dev_addr); + + return 0; +} + +static int xlgmac_ioctl(struct net_device *netdev, + struct 
ifreq *ifreq, int cmd) +{ + if (!netif_running(netdev)) + return -ENODEV; + + return 0; +} + +static int xlgmac_change_mtu(struct net_device *netdev, int mtu) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + int ret; + + ret = xlgmac_calc_rx_buf_size(netdev, mtu); + if (ret < 0) + return ret; + + pdata->rx_buf_size = ret; + netdev->mtu = mtu; + + xlgmac_restart_dev(pdata); + + return 0; +} + +static int xlgmac_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, + u16 vid) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + + set_bit(vid, pdata->active_vlans); + hw_ops->update_vlan_hash_table(pdata); + + return 0; +} + +static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, + u16 vid) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + + clear_bit(vid, pdata->active_vlans); + hw_ops->update_vlan_hash_table(pdata); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void xlgmac_poll_controller(struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) + xlgmac_dma_isr(channel->dma_irq, channel); + } else { + disable_irq(pdata->dev_irq); + xlgmac_isr(pdata->dev_irq, pdata); + enable_irq(pdata->dev_irq); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int xlgmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + int ret = 0; + + rxhash = pdata->netdev_features & NETIF_F_RXHASH; + rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; + rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; + rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; + + if ((features & NETIF_F_RXHASH) && !rxhash) + ret = hw_ops->enable_rss(pdata); + else if (!(features & NETIF_F_RXHASH) && rxhash) + ret = hw_ops->disable_rss(pdata); + if (ret) + return ret; + + if ((features & NETIF_F_RXCSUM) && !rxcsum) + hw_ops->enable_rx_csum(pdata); + else if (!(features & NETIF_F_RXCSUM) && rxcsum) + hw_ops->disable_rx_csum(pdata); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) + hw_ops->enable_rx_vlan_stripping(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) + hw_ops->disable_rx_vlan_stripping(pdata); + + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) + hw_ops->enable_rx_vlan_filtering(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) + hw_ops->disable_rx_vlan_filtering(pdata); + + pdata->netdev_features = features; + + return 0; +} + +static void xlgmac_set_rx_mode(struct net_device *netdev) +{ + struct xlgmac_pdata *pdata = netdev_priv(netdev); + struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; + + hw_ops->config_rx_mode(pdata); +} + +static const struct net_device_ops xlgmac_netdev_ops = { + .ndo_open = xlgmac_open, + .ndo_stop = xlgmac_close, + .ndo_start_xmit = xlgmac_xmit, + .ndo_tx_timeout = xlgmac_tx_timeout, + .ndo_get_stats64 = xlgmac_get_stats64, + .ndo_change_mtu = xlgmac_change_mtu, + .ndo_set_mac_address = xlgmac_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = xlgmac_ioctl, + .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = 
xlgmac_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = xlgmac_poll_controller, +#endif + .ndo_set_features = xlgmac_set_features, + .ndo_set_rx_mode = xlgmac_set_rx_mode, +}; + +const struct net_device_ops *xlgmac_get_netdev_ops(void) +{ + return &xlgmac_netdev_ops; +} + +static void xlgmac_rx_refresh(struct xlgmac_channel *channel) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->rx_ring; + struct xlgmac_desc_data *desc_data; + struct xlgmac_desc_ops *desc_ops; + struct xlgmac_hw_ops *hw_ops; + + desc_ops = &pdata->desc_ops; + hw_ops = &pdata->hw_ops; + + while (ring->dirty != ring->cur) { + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty); + + /* Reset desc_data values */ + desc_ops->unmap_desc_data(pdata, desc_data); + + if (desc_ops->map_rx_buffer(pdata, ring, desc_data)) + break; + + hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty); + + ring->dirty++; + } + + /* Make sure everything is written before the register write */ + wmb(); + + /* Update the Rx Tail Pointer Register with address of + * the last cleaned entry + */ + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1); + writel(lower_32_bits(desc_data->dma_desc_addr), + XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); +} + +static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata, + struct napi_struct *napi, + struct xlgmac_desc_data *desc_data, + unsigned int len) +{ + unsigned int copy_len; + struct sk_buff *skb; + u8 *packet; + + skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len); + if (!skb) + return NULL; + + /* Start with the header buffer which may contain just the header + * or the header plus data + */ + dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base, + desc_data->rx.hdr.dma_off, + desc_data->rx.hdr.dma_len, + DMA_FROM_DEVICE); + + packet = page_address(desc_data->rx.hdr.pa.pages) + + desc_data->rx.hdr.pa.pages_offset; + copy_len = (desc_data->rx.hdr_len) ? 
desc_data->rx.hdr_len : len; + copy_len = min(desc_data->rx.hdr.dma_len, copy_len); + skb_copy_to_linear_data(skb, packet, copy_len); + skb_put(skb, copy_len); + + len -= copy_len; + if (len) { + /* Add the remaining data as a frag */ + dma_sync_single_range_for_cpu(pdata->dev, + desc_data->rx.buf.dma_base, + desc_data->rx.buf.dma_off, + desc_data->rx.buf.dma_len, + DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + desc_data->rx.buf.pa.pages, + desc_data->rx.buf.pa.pages_offset, + len, desc_data->rx.buf.dma_len); + desc_data->rx.buf.pa.pages = NULL; + } + + return skb; +} + +static int xlgmac_tx_poll(struct xlgmac_channel *channel) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->tx_ring; + struct net_device *netdev = pdata->netdev; + unsigned int tx_packets = 0, tx_bytes = 0; + struct xlgmac_desc_data *desc_data; + struct xlgmac_dma_desc *dma_desc; + struct xlgmac_desc_ops *desc_ops; + struct xlgmac_hw_ops *hw_ops; + struct netdev_queue *txq; + int processed = 0; + unsigned int cur; + + desc_ops = &pdata->desc_ops; + hw_ops = &pdata->hw_ops; + + /* Nothing to do if there isn't a Tx ring for this channel */ + if (!ring) + return 0; + + cur = ring->cur; + + /* Be sure we get ring->cur before accessing descriptor data */ + smp_rmb(); + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + + while ((processed < XLGMAC_TX_DESC_MAX_PROC) && + (ring->dirty != cur)) { + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty); + dma_desc = desc_data->dma_desc; + + if (!hw_ops->tx_complete(dma_desc)) + break; + + /* Make sure descriptor fields are read after reading + * the OWN bit + */ + dma_rmb(); + + if (netif_msg_tx_done(pdata)) + xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); + + if (hw_ops->is_last_desc(dma_desc)) { + tx_packets += desc_data->tx.packets; + tx_bytes += desc_data->tx.bytes; + } + + /* Free the SKB and reset the descriptor for re-use */ + desc_ops->unmap_desc_data(pdata, desc_data); + hw_ops->tx_desc_reset(desc_data); + + processed++; + ring->dirty++; + } + + if (!processed) + return 0; + + netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + + if ((ring->tx.queue_stopped == 1) && + (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) { + ring->tx.queue_stopped = 0; + netif_tx_wake_queue(txq); + } + + XLGMAC_PR("processed=%d\n", processed); + + return processed; +} + +static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget) +{ + struct xlgmac_pdata *pdata = channel->pdata; + struct xlgmac_ring *ring = channel->rx_ring; + struct net_device *netdev = pdata->netdev; + unsigned int len, dma_desc_len, max_len; + unsigned int context_next, context; + struct xlgmac_desc_data *desc_data; + struct xlgmac_pkt_info *pkt_info; + unsigned int incomplete, error; + struct xlgmac_hw_ops *hw_ops; + unsigned int received = 0; + struct napi_struct *napi; + struct sk_buff *skb; + int packet_count = 0; + + hw_ops = &pdata->hw_ops; + + /* Nothing to do if there isn't a Rx ring for this channel */ + if (!ring) + return 0; + + incomplete = 0; + context_next = 0; + + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); + pkt_info = &ring->pkt_info; + while (packet_count < budget) { + /* First time in loop see if we need to restore state */ + if (!received && desc_data->state_saved) { + skb = desc_data->state.skb; + error = desc_data->state.error; + len = desc_data->state.len; + } else { + memset(pkt_info, 0, sizeof(*pkt_info)); + skb = NULL; + error = 0; + len = 0; + } + +read_again: + desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur); + + if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY) + xlgmac_rx_refresh(channel); + + if (hw_ops->dev_read(channel)) + break; + + received++; + ring->cur++; + + incomplete = XLGMAC_GET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN); + context_next = XLGMAC_GET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN); + context = XLGMAC_GET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN); + + /* Earlier error, just drain the remaining data */ + if ((incomplete || context_next) && error) + goto read_again; + + if (error || pkt_info->errors) { + if (pkt_info->errors) + netif_err(pdata, rx_err, netdev, + "error in received packet\n"); + dev_kfree_skb(skb); + goto next_packet; + } + + if (!context) { + /* Length is cumulative, get this descriptor's length */ + dma_desc_len = desc_data->rx.len - len; + len += dma_desc_len; + + if (dma_desc_len && !skb) { + skb = xlgmac_create_skb(pdata, napi, desc_data, + dma_desc_len); + if (!skb) + error = 1; + } else if (dma_desc_len) { + dma_sync_single_range_for_cpu( + pdata->dev, + desc_data->rx.buf.dma_base, + desc_data->rx.buf.dma_off, + desc_data->rx.buf.dma_len, + DMA_FROM_DEVICE); + + skb_add_rx_frag( + skb, skb_shinfo(skb)->nr_frags, + desc_data->rx.buf.pa.pages, + desc_data->rx.buf.pa.pages_offset, + dma_desc_len, + desc_data->rx.buf.dma_len); + desc_data->rx.buf.pa.pages = NULL; + } + } + + if (incomplete || context_next) + goto read_again; + + if (!skb) + goto next_packet; + + /* Be sure we don't exceed the configured MTU */ + max_len = netdev->mtu + ETH_HLEN; + if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (skb->protocol == htons(ETH_P_8021Q))) + max_len += VLAN_HLEN; + + if (skb->len > max_len) { + netif_err(pdata, rx_err, netdev, + "packet length exceeds configured MTU\n"); + dev_kfree_skb(skb); + goto next_packet; + } + + if (netif_msg_pktdata(pdata)) + xlgmac_print_pkt(netdev, skb, false); + + skb_checksum_none_assert(skb); + if (XLGMAC_GET_REG_BITS(pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (XLGMAC_GET_REG_BITS(pkt_info->attributes, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + pkt_info->vlan_ctag); + pdata->stats.rx_vlan_packets++; + } + + if (XLGMAC_GET_REG_BITS(pkt_info->attributes, + RX_PACKET_ATTRIBUTES_RSS_HASH_POS, + RX_PACKET_ATTRIBUTES_RSS_HASH_LEN)) + skb_set_hash(skb, pkt_info->rss_hash, + pkt_info->rss_hash_type); + + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, channel->queue_index); + + napi_gro_receive(napi, skb); + +next_packet: + packet_count++; + } + + /* Check if we need to save state before leaving */ + if (received && (incomplete || context_next)) { + desc_data = XLGMAC_GET_DESC_DATA(ring, 
ring->cur); + desc_data->state_saved = 1; + desc_data->state.skb = skb; + desc_data->state.len = len; + desc_data->state.error = error; + } + + XLGMAC_PR("packet_count = %d\n", packet_count); + + return packet_count; +} + +static int xlgmac_one_poll(struct napi_struct *napi, int budget) +{ + struct xlgmac_channel *channel = container_of(napi, + struct xlgmac_channel, + napi); + int processed = 0; + + XLGMAC_PR("budget=%d\n", budget); + + /* Cleanup Tx ring first */ + xlgmac_tx_poll(channel); + + /* Process Rx ring next */ + processed = xlgmac_rx_poll(channel, budget); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete_done(napi, processed); + + /* Enable Tx and Rx interrupts */ + enable_irq(channel->dma_irq); + } + + XLGMAC_PR("received = %d\n", processed); + + return processed; +} + +static int xlgmac_all_poll(struct napi_struct *napi, int budget) +{ + struct xlgmac_pdata *pdata = container_of(napi, + struct xlgmac_pdata, + napi); + struct xlgmac_channel *channel; + int processed, last_processed; + int ring_budget; + unsigned int i; + + XLGMAC_PR("budget=%d\n", budget); + + processed = 0; + ring_budget = budget / pdata->rx_ring_count; + do { + last_processed = processed; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Cleanup Tx ring first */ + xlgmac_tx_poll(channel); + + /* Process Rx ring next */ + if (ring_budget > (budget - processed)) + ring_budget = budget - processed; + processed += xlgmac_rx_poll(channel, ring_budget); + } + } while ((processed < budget) && (processed != last_processed)); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete_done(napi, processed); + + /* Enable Tx and Rx interrupts */ + xlgmac_enable_rx_tx_ints(pdata); + } + + XLGMAC_PR("received = %d\n", processed); + + return processed; +} diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..386bafe74c3f6ffa3910e223101a5b52eacc8dfb --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c @@ -0,0 +1,78 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "dwc-xlgmac.h"
+#include "dwc-xlgmac-reg.h"
+
+static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+	struct device *dev = &pcidev->dev;
+	struct xlgmac_resources res;
+	int i, ret;
+
+	ret = pcim_enable_device(pcidev);
+	if (ret) {
+		dev_err(dev, "ERROR: failed to enable device\n");
+		return ret;
+	}
+
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+		if (pci_resource_len(pcidev, i) == 0)
+			continue;
+		ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
+		if (ret)
+			return ret;
+		break;
+	}
+
+	pci_set_master(pcidev);
+
+	memset(&res, 0, sizeof(res));
+	res.irq = pcidev->irq;
+	res.addr = pcim_iomap_table(pcidev)[i];
+
+	return xlgmac_drv_probe(&pcidev->dev, &res);
+}
+
+static void xlgmac_remove(struct pci_dev *pcidev)
+{
+	xlgmac_drv_remove(&pcidev->dev);
+}
+
+static const struct pci_device_id xlgmac_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0x7302) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, xlgmac_pci_tbl);
+
+static struct pci_driver xlgmac_pci_driver = {
+	.name		= XLGMAC_DRV_NAME,
+	.id_table	= xlgmac_pci_tbl,
+	.probe		= xlgmac_probe,
+	.remove		= xlgmac_remove,
+};
+
+module_pci_driver(xlgmac_pci_driver);
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
new file mode 100644
index 0000000000000000000000000000000000000000..3754f220567dbde2ff4f768659fe9b2b0f581f4f
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h
@@ -0,0 +1,744 @@
+/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
+ *
+ * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is dual-licensed; you may select either version 2 of
+ * the GNU General Public License ("GPL") or BSD license ("BSD").
+ *
+ * This Synopsys DWC XLGMAC software driver and associated documentation
+ * (hereinafter the "Software") is an unsupported proprietary work of
+ * Synopsys, Inc. unless otherwise expressly agreed to in writing between
+ * Synopsys and you. The Software IS NOT an item of Licensed Software or a
+ * Licensed Product under any End User Software License Agreement or
+ * Agreement for Licensed Products with Synopsys or any supplement thereto.
+ * Synopsys is a registered trademark of Synopsys, Inc. Other names included
+ * in the SOFTWARE may be the trademarks of their respective owners.
+ */ + +#ifndef __DWC_XLGMAC_REG_H__ +#define __DWC_XLGMAC_REG_H__ + +/* MAC register offsets */ +#define MAC_TCR 0x0000 +#define MAC_RCR 0x0004 +#define MAC_PFR 0x0008 +#define MAC_HTR0 0x0010 +#define MAC_VLANTR 0x0050 +#define MAC_VLANHTR 0x0058 +#define MAC_VLANIR 0x0060 +#define MAC_Q0TFCR 0x0070 +#define MAC_RFCR 0x0090 +#define MAC_RQC0R 0x00a0 +#define MAC_RQC1R 0x00a4 +#define MAC_RQC2R 0x00a8 +#define MAC_RQC3R 0x00ac +#define MAC_ISR 0x00b0 +#define MAC_IER 0x00b4 +#define MAC_VR 0x0110 +#define MAC_HWF0R 0x011c +#define MAC_HWF1R 0x0120 +#define MAC_HWF2R 0x0124 +#define MAC_MACA0HR 0x0300 +#define MAC_MACA0LR 0x0304 +#define MAC_MACA1HR 0x0308 +#define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c + +#define MAC_QTFCR_INC 4 +#define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + +/* MAC register entry bit positions and sizes */ +#define MAC_HWF0R_ADDMACADRSEL_POS 18 +#define MAC_HWF0R_ADDMACADRSEL_LEN 5 +#define MAC_HWF0R_ARPOFFSEL_POS 9 +#define MAC_HWF0R_ARPOFFSEL_LEN 1 +#define MAC_HWF0R_EEESEL_POS 13 +#define MAC_HWF0R_EEESEL_LEN 1 +#define MAC_HWF0R_PHYIFSEL_POS 1 +#define MAC_HWF0R_PHYIFSEL_LEN 2 +#define MAC_HWF0R_MGKSEL_POS 7 +#define MAC_HWF0R_MGKSEL_LEN 1 +#define MAC_HWF0R_MMCSEL_POS 8 +#define MAC_HWF0R_MMCSEL_LEN 1 +#define MAC_HWF0R_RWKSEL_POS 6 +#define MAC_HWF0R_RWKSEL_LEN 1 +#define MAC_HWF0R_RXCOESEL_POS 16 +#define MAC_HWF0R_RXCOESEL_LEN 1 +#define MAC_HWF0R_SAVLANINS_POS 27 +#define MAC_HWF0R_SAVLANINS_LEN 1 +#define MAC_HWF0R_SMASEL_POS 5 +#define MAC_HWF0R_SMASEL_LEN 1 +#define MAC_HWF0R_TSSEL_POS 12 +#define MAC_HWF0R_TSSEL_LEN 1 +#define MAC_HWF0R_TSSTSSEL_POS 25 +#define MAC_HWF0R_TSSTSSEL_LEN 2 +#define MAC_HWF0R_TXCOESEL_POS 14 +#define MAC_HWF0R_TXCOESEL_LEN 1 +#define MAC_HWF0R_VLHASH_POS 4 +#define MAC_HWF0R_VLHASH_LEN 1 +#define MAC_HWF1R_ADDR64_POS 14 +#define MAC_HWF1R_ADDR64_LEN 2 +#define MAC_HWF1R_ADVTHWORD_POS 13 +#define MAC_HWF1R_ADVTHWORD_LEN 1 +#define MAC_HWF1R_DBGMEMA_POS 19 +#define MAC_HWF1R_DBGMEMA_LEN 1 +#define MAC_HWF1R_DCBEN_POS 16 +#define MAC_HWF1R_DCBEN_LEN 1 +#define MAC_HWF1R_HASHTBLSZ_POS 24 +#define MAC_HWF1R_HASHTBLSZ_LEN 3 +#define MAC_HWF1R_L3L4FNUM_POS 27 +#define MAC_HWF1R_L3L4FNUM_LEN 4 +#define MAC_HWF1R_NUMTC_POS 21 +#define MAC_HWF1R_NUMTC_LEN 3 +#define MAC_HWF1R_RSSEN_POS 20 +#define MAC_HWF1R_RSSEN_LEN 1 +#define MAC_HWF1R_RXFIFOSIZE_POS 0 +#define MAC_HWF1R_RXFIFOSIZE_LEN 5 +#define MAC_HWF1R_SPHEN_POS 17 +#define MAC_HWF1R_SPHEN_LEN 1 +#define MAC_HWF1R_TSOEN_POS 18 +#define MAC_HWF1R_TSOEN_LEN 1 +#define MAC_HWF1R_TXFIFOSIZE_POS 6 +#define MAC_HWF1R_TXFIFOSIZE_LEN 5 +#define MAC_HWF2R_AUXSNAPNUM_POS 28 +#define MAC_HWF2R_AUXSNAPNUM_LEN 3 +#define MAC_HWF2R_PPSOUTNUM_POS 24 +#define MAC_HWF2R_PPSOUTNUM_LEN 3 +#define MAC_HWF2R_RXCHCNT_POS 12 +#define MAC_HWF2R_RXCHCNT_LEN 4 +#define MAC_HWF2R_RXQCNT_POS 0 +#define MAC_HWF2R_RXQCNT_LEN 4 +#define MAC_HWF2R_TXCHCNT_POS 18 +#define MAC_HWF2R_TXCHCNT_LEN 4 +#define MAC_HWF2R_TXQCNT_POS 6 +#define MAC_HWF2R_TXQCNT_LEN 4 +#define MAC_IER_TSIE_POS 12 +#define MAC_IER_TSIE_LEN 1 +#define MAC_ISR_MMCRXIS_POS 9 +#define MAC_ISR_MMCRXIS_LEN 1 +#define MAC_ISR_MMCTXIS_POS 10 +#define MAC_ISR_MMCTXIS_LEN 1 +#define MAC_ISR_PMTIS_POS 4 +#define MAC_ISR_PMTIS_LEN 1 +#define MAC_ISR_TSIS_POS 12 +#define MAC_ISR_TSIS_LEN 1 +#define MAC_MACA1HR_AE_POS 31 +#define MAC_MACA1HR_AE_LEN 1 +#define MAC_PFR_HMC_POS 2 +#define MAC_PFR_HMC_LEN 1 +#define MAC_PFR_HPF_POS 10 +#define 
MAC_PFR_HPF_LEN 1 +#define MAC_PFR_HUC_POS 1 +#define MAC_PFR_HUC_LEN 1 +#define MAC_PFR_PM_POS 4 +#define MAC_PFR_PM_LEN 1 +#define MAC_PFR_PR_POS 0 +#define MAC_PFR_PR_LEN 1 +#define MAC_PFR_VTFE_POS 16 +#define MAC_PFR_VTFE_LEN 1 +#define MAC_Q0TFCR_PT_POS 16 +#define MAC_Q0TFCR_PT_LEN 16 +#define MAC_Q0TFCR_TFE_POS 1 +#define MAC_Q0TFCR_TFE_LEN 1 +#define MAC_RCR_ACS_POS 1 +#define MAC_RCR_ACS_LEN 1 +#define MAC_RCR_CST_POS 2 +#define MAC_RCR_CST_LEN 1 +#define MAC_RCR_DCRCC_POS 3 +#define MAC_RCR_DCRCC_LEN 1 +#define MAC_RCR_HDSMS_POS 12 +#define MAC_RCR_HDSMS_LEN 3 +#define MAC_RCR_IPC_POS 9 +#define MAC_RCR_IPC_LEN 1 +#define MAC_RCR_JE_POS 8 +#define MAC_RCR_JE_LEN 1 +#define MAC_RCR_LM_POS 10 +#define MAC_RCR_LM_LEN 1 +#define MAC_RCR_RE_POS 0 +#define MAC_RCR_RE_LEN 1 +#define MAC_RFCR_PFCE_POS 8 +#define MAC_RFCR_PFCE_LEN 1 +#define MAC_RFCR_RFE_POS 0 +#define MAC_RFCR_RFE_LEN 1 +#define MAC_RFCR_UP_POS 1 +#define MAC_RFCR_UP_LEN 1 +#define MAC_RQC0R_RXQ0EN_POS 0 +#define MAC_RQC0R_RXQ0EN_LEN 2 +#define MAC_RSSAR_ADDRT_POS 2 +#define MAC_RSSAR_ADDRT_LEN 1 +#define MAC_RSSAR_CT_POS 1 +#define MAC_RSSAR_CT_LEN 1 +#define MAC_RSSAR_OB_POS 0 +#define MAC_RSSAR_OB_LEN 1 +#define MAC_RSSAR_RSSIA_POS 8 +#define MAC_RSSAR_RSSIA_LEN 8 +#define MAC_RSSCR_IP2TE_POS 1 +#define MAC_RSSCR_IP2TE_LEN 1 +#define MAC_RSSCR_RSSE_POS 0 +#define MAC_RSSCR_RSSE_LEN 1 +#define MAC_RSSCR_TCP4TE_POS 2 +#define MAC_RSSCR_TCP4TE_LEN 1 +#define MAC_RSSCR_UDP4TE_POS 3 +#define MAC_RSSCR_UDP4TE_LEN 1 +#define MAC_RSSDR_DMCH_POS 0 +#define MAC_RSSDR_DMCH_LEN 4 +#define MAC_TCR_SS_POS 28 +#define MAC_TCR_SS_LEN 3 +#define MAC_TCR_TE_POS 0 +#define MAC_TCR_TE_LEN 1 +#define MAC_VLANHTR_VLHT_POS 0 +#define MAC_VLANHTR_VLHT_LEN 16 +#define MAC_VLANIR_VLTI_POS 20 +#define MAC_VLANIR_VLTI_LEN 1 +#define MAC_VLANIR_CSVL_POS 19 +#define MAC_VLANIR_CSVL_LEN 1 +#define MAC_VLANTR_DOVLTC_POS 20 +#define MAC_VLANTR_DOVLTC_LEN 1 +#define MAC_VLANTR_ERSVLM_POS 19 +#define MAC_VLANTR_ERSVLM_LEN 1 +#define MAC_VLANTR_ESVL_POS 18 +#define MAC_VLANTR_ESVL_LEN 1 +#define MAC_VLANTR_ETV_POS 16 +#define MAC_VLANTR_ETV_LEN 1 +#define MAC_VLANTR_EVLS_POS 21 +#define MAC_VLANTR_EVLS_LEN 2 +#define MAC_VLANTR_EVLRXS_POS 24 +#define MAC_VLANTR_EVLRXS_LEN 1 +#define MAC_VLANTR_VL_POS 0 +#define MAC_VLANTR_VL_LEN 16 +#define MAC_VLANTR_VTHM_POS 25 +#define MAC_VLANTR_VTHM_LEN 1 +#define MAC_VLANTR_VTIM_POS 17 +#define MAC_VLANTR_VTIM_LEN 1 +#define MAC_VR_DEVID_POS 8 +#define MAC_VR_DEVID_LEN 8 +#define MAC_VR_SNPSVER_POS 0 +#define MAC_VR_SNPSVER_LEN 8 +#define MAC_VR_USERVER_POS 16 +#define MAC_VR_USERVER_LEN 8 + +/* MMC register offsets */ +#define MMC_CR 0x0800 +#define MMC_RISR 0x0804 +#define MMC_TISR 0x0808 +#define MMC_RIER 0x080c +#define MMC_TIER 0x0810 +#define MMC_TXOCTETCOUNT_GB_LO 0x0814 +#define MMC_TXFRAMECOUNT_GB_LO 0x081c +#define MMC_TXBROADCASTFRAMES_G_LO 0x0824 +#define MMC_TXMULTICASTFRAMES_G_LO 0x082c +#define MMC_TX64OCTETS_GB_LO 0x0834 +#define MMC_TX65TO127OCTETS_GB_LO 0x083c +#define MMC_TX128TO255OCTETS_GB_LO 0x0844 +#define MMC_TX256TO511OCTETS_GB_LO 0x084c +#define MMC_TX512TO1023OCTETS_GB_LO 0x0854 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c +#define MMC_TXUNICASTFRAMES_GB_LO 0x0864 +#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 +#define MMC_TXUNDERFLOWERROR_LO 0x087c +#define MMC_TXOCTETCOUNT_G_LO 0x0884 +#define MMC_TXFRAMECOUNT_G_LO 0x088c +#define MMC_TXPAUSEFRAMES_LO 0x0894 +#define MMC_TXVLANFRAMES_G_LO 0x089c +#define MMC_RXFRAMECOUNT_GB_LO 0x0900 +#define 
MMC_RXOCTETCOUNT_GB_LO 0x0908 +#define MMC_RXOCTETCOUNT_G_LO 0x0910 +#define MMC_RXBROADCASTFRAMES_G_LO 0x0918 +#define MMC_RXMULTICASTFRAMES_G_LO 0x0920 +#define MMC_RXCRCERROR_LO 0x0928 +#define MMC_RXRUNTERROR 0x0930 +#define MMC_RXJABBERERROR 0x0934 +#define MMC_RXUNDERSIZE_G 0x0938 +#define MMC_RXOVERSIZE_G 0x093c +#define MMC_RX64OCTETS_GB_LO 0x0940 +#define MMC_RX65TO127OCTETS_GB_LO 0x0948 +#define MMC_RX128TO255OCTETS_GB_LO 0x0950 +#define MMC_RX256TO511OCTETS_GB_LO 0x0958 +#define MMC_RX512TO1023OCTETS_GB_LO 0x0960 +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 +#define MMC_RXUNICASTFRAMES_G_LO 0x0970 +#define MMC_RXLENGTHERROR_LO 0x0978 +#define MMC_RXOUTOFRANGETYPE_LO 0x0980 +#define MMC_RXPAUSEFRAMES_LO 0x0988 +#define MMC_RXFIFOOVERFLOW_LO 0x0990 +#define MMC_RXVLANFRAMES_GB_LO 0x0998 +#define MMC_RXWATCHDOGERROR 0x09a0 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_POS 0 +#define MMC_CR_CR_LEN 1 +#define MMC_CR_CSR_POS 1 +#define MMC_CR_CSR_LEN 1 +#define MMC_CR_ROR_POS 2 +#define MMC_CR_ROR_LEN 1 +#define MMC_CR_MCF_POS 3 +#define MMC_CR_MCF_LEN 1 +#define MMC_CR_MCT_POS 4 +#define MMC_CR_MCT_LEN 2 +#define MMC_RIER_ALL_INTERRUPTS_POS 0 +#define MMC_RIER_ALL_INTERRUPTS_LEN 23 +#define MMC_RISR_RXFRAMECOUNT_GB_POS 0 +#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1 +#define MMC_RISR_RXOCTETCOUNT_GB_POS 1 +#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1 +#define MMC_RISR_RXOCTETCOUNT_G_POS 2 +#define MMC_RISR_RXOCTETCOUNT_G_LEN 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4 +#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1 +#define MMC_RISR_RXCRCERROR_POS 5 +#define MMC_RISR_RXCRCERROR_LEN 1 +#define MMC_RISR_RXRUNTERROR_POS 6 +#define MMC_RISR_RXRUNTERROR_LEN 1 +#define MMC_RISR_RXJABBERERROR_POS 7 +#define MMC_RISR_RXJABBERERROR_LEN 1 +#define MMC_RISR_RXUNDERSIZE_G_POS 8 +#define MMC_RISR_RXUNDERSIZE_G_LEN 1 +#define MMC_RISR_RXOVERSIZE_G_POS 9 +#define MMC_RISR_RXOVERSIZE_G_LEN 1 +#define MMC_RISR_RX64OCTETS_GB_POS 10 +#define MMC_RISR_RX64OCTETS_GB_LEN 1 +#define MMC_RISR_RX65TO127OCTETS_GB_POS 11 +#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1 +#define MMC_RISR_RX128TO255OCTETS_GB_POS 12 +#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1 +#define MMC_RISR_RX256TO511OCTETS_GB_POS 13 +#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_POS 14 +#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 15 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1 +#define MMC_RISR_RXUNICASTFRAMES_G_POS 16 +#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1 +#define MMC_RISR_RXLENGTHERROR_POS 17 +#define MMC_RISR_RXLENGTHERROR_LEN 1 +#define MMC_RISR_RXOUTOFRANGETYPE_POS 18 +#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1 +#define MMC_RISR_RXPAUSEFRAMES_POS 19 +#define MMC_RISR_RXPAUSEFRAMES_LEN 1 +#define MMC_RISR_RXFIFOOVERFLOW_POS 20 +#define MMC_RISR_RXFIFOOVERFLOW_LEN 1 +#define MMC_RISR_RXVLANFRAMES_GB_POS 21 +#define MMC_RISR_RXVLANFRAMES_GB_LEN 1 +#define MMC_RISR_RXWATCHDOGERROR_POS 22 +#define MMC_RISR_RXWATCHDOGERROR_LEN 1 +#define MMC_TIER_ALL_INTERRUPTS_POS 0 +#define MMC_TIER_ALL_INTERRUPTS_LEN 18 +#define MMC_TISR_TXOCTETCOUNT_GB_POS 0 +#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1 +#define MMC_TISR_TXFRAMECOUNT_GB_POS 1 +#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1 +#define MMC_TISR_TX64OCTETS_GB_POS 4 
+#define MMC_TISR_TX64OCTETS_GB_LEN 1 +#define MMC_TISR_TX65TO127OCTETS_GB_POS 5 +#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1 +#define MMC_TISR_TX128TO255OCTETS_GB_POS 6 +#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1 +#define MMC_TISR_TX256TO511OCTETS_GB_POS 7 +#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXUNDERFLOWERROR_POS 13 +#define MMC_TISR_TXUNDERFLOWERROR_LEN 1 +#define MMC_TISR_TXOCTETCOUNT_G_POS 14 +#define MMC_TISR_TXOCTETCOUNT_G_LEN 1 +#define MMC_TISR_TXFRAMECOUNT_G_POS 15 +#define MMC_TISR_TXFRAMECOUNT_G_LEN 1 +#define MMC_TISR_TXPAUSEFRAMES_POS 16 +#define MMC_TISR_TXPAUSEFRAMES_LEN 1 +#define MMC_TISR_TXVLANFRAMES_G_POS 17 +#define MMC_TISR_TXVLANFRAMES_G_LEN 1 + +/* MTL register offsets */ +#define MTL_OMR 0x1000 +#define MTL_FDDR 0x1010 +#define MTL_RQDCM0R 0x1030 + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_POS 5 +#define MTL_OMR_ETSALG_LEN 2 +#define MTL_OMR_RAA_POS 2 +#define MTL_OMR_RAA_LEN 1 + +/* MTL queue register offsets + * Multiple queues can be active. The first queue has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. + */ +#define MTL_Q_BASE 0x1100 +#define MTL_Q_INC 0x80 + +#define MTL_Q_TQOMR 0x00 +#define MTL_Q_RQOMR 0x40 +#define MTL_Q_RQDR 0x48 +#define MTL_Q_RQFCR 0x50 +#define MTL_Q_IER 0x70 +#define MTL_Q_ISR 0x74 + +/* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_POS 16 +#define MTL_Q_RQDR_PRXQ_LEN 14 +#define MTL_Q_RQDR_RXQSTS_POS 4 +#define MTL_Q_RQDR_RXQSTS_LEN 2 +#define MTL_Q_RQFCR_RFA_POS 1 +#define MTL_Q_RQFCR_RFA_LEN 6 +#define MTL_Q_RQFCR_RFD_POS 17 +#define MTL_Q_RQFCR_RFD_LEN 6 +#define MTL_Q_RQOMR_EHFC_POS 7 +#define MTL_Q_RQOMR_EHFC_LEN 1 +#define MTL_Q_RQOMR_RQS_POS 16 +#define MTL_Q_RQOMR_RQS_LEN 9 +#define MTL_Q_RQOMR_RSF_POS 5 +#define MTL_Q_RQOMR_RSF_LEN 1 +#define MTL_Q_RQOMR_FEP_POS 4 +#define MTL_Q_RQOMR_FEP_LEN 1 +#define MTL_Q_RQOMR_FUP_POS 3 +#define MTL_Q_RQOMR_FUP_LEN 1 +#define MTL_Q_RQOMR_RTC_POS 0 +#define MTL_Q_RQOMR_RTC_LEN 2 +#define MTL_Q_TQOMR_FTQ_POS 0 +#define MTL_Q_TQOMR_FTQ_LEN 1 +#define MTL_Q_TQOMR_Q2TCMAP_POS 8 +#define MTL_Q_TQOMR_Q2TCMAP_LEN 3 +#define MTL_Q_TQOMR_TQS_POS 16 +#define MTL_Q_TQOMR_TQS_LEN 10 +#define MTL_Q_TQOMR_TSF_POS 1 +#define MTL_Q_TQOMR_TSF_LEN 1 +#define MTL_Q_TQOMR_TTC_POS 4 +#define MTL_Q_TQOMR_TTC_LEN 3 +#define MTL_Q_TQOMR_TXQEN_POS 2 +#define MTL_Q_TQOMR_TXQEN_LEN 2 + +/* MTL queue register value */ +#define MTL_RSF_DISABLE 0x00 +#define MTL_RSF_ENABLE 0x01 +#define MTL_TSF_DISABLE 0x00 +#define MTL_TSF_ENABLE 0x01 + +#define MTL_RX_THRESHOLD_64 0x00 +#define MTL_RX_THRESHOLD_96 0x02 +#define MTL_RX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_64 0x00 +#define MTL_TX_THRESHOLD_96 0x02 +#define MTL_TX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_192 0x04 +#define MTL_TX_THRESHOLD_256 0x05 +#define MTL_TX_THRESHOLD_384 0x06 +#define MTL_TX_THRESHOLD_512 0x07 + +#define MTL_ETSALG_WRR 0x00 +#define MTL_ETSALG_WFQ 0x01 
+#define MTL_ETSALG_DWRR			0x02
+#define MTL_RAA_SP			0x00
+#define MTL_RAA_WSP			0x01
+
+#define MTL_Q_DISABLED			0x00
+#define MTL_Q_ENABLED			0x02
+
+#define MTL_RQDCM0R_Q0MDMACH		0x0
+#define MTL_RQDCM0R_Q1MDMACH		0x00000100
+#define MTL_RQDCM0R_Q2MDMACH		0x00020000
+#define MTL_RQDCM0R_Q3MDMACH		0x03000000
+#define MTL_RQDCM1R_Q4MDMACH		0x00000004
+#define MTL_RQDCM1R_Q5MDMACH		0x00000500
+#define MTL_RQDCM1R_Q6MDMACH		0x00060000
+#define MTL_RQDCM1R_Q7MDMACH		0x07000000
+#define MTL_RQDCM2R_Q8MDMACH		0x00000008
+#define MTL_RQDCM2R_Q9MDMACH		0x00000900
+#define MTL_RQDCM2R_Q10MDMACH		0x000A0000
+#define MTL_RQDCM2R_Q11MDMACH		0x0B000000
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE			MTL_Q_BASE
+#define MTL_TC_INC			MTL_Q_INC
+
+#define MTL_TC_ETSCR			0x10
+#define MTL_TC_ETSSR			0x14
+#define MTL_TC_QWR			0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_POS		0
+#define MTL_TC_ETSCR_TSA_LEN		2
+#define MTL_TC_QWR_QW_POS		0
+#define MTL_TC_QWR_QW_LEN		21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP			0x00
+#define MTL_TSA_ETS			0x02
+
+/* DMA register offsets */
+#define DMA_MR				0x3000
+#define DMA_SBMR			0x3004
+#define DMA_ISR				0x3008
+#define DMA_DSR0			0x3020
+#define DMA_DSR1			0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_ISR_MACIS_POS		17
+#define DMA_ISR_MACIS_LEN		1
+#define DMA_ISR_MTLIS_POS		16
+#define DMA_ISR_MTLIS_LEN		1
+#define DMA_MR_SWR_POS			0
+#define DMA_MR_SWR_LEN			1
+#define DMA_SBMR_EAME_POS		11
+#define DMA_SBMR_EAME_LEN		1
+#define DMA_SBMR_BLEN_64_POS		5
+#define DMA_SBMR_BLEN_64_LEN		1
+#define DMA_SBMR_BLEN_128_POS		6
+#define DMA_SBMR_BLEN_128_LEN		1
+#define DMA_SBMR_BLEN_256_POS		7
+#define DMA_SBMR_BLEN_256_LEN		1
+#define DMA_SBMR_UNDEF_POS		0
+#define DMA_SBMR_UNDEF_LEN		1
+
+/* DMA register values */
+#define DMA_DSR_RPS_LEN			4
+#define DMA_DSR_TPS_LEN			4
+#define DMA_DSR_Q_LEN			(DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN)
+#define DMA_DSR0_TPS_START		12
+#define DMA_DSRX_FIRST_QUEUE		3
+#define DMA_DSRX_INC			4
+#define DMA_DSRX_QPR			4
+#define DMA_DSRX_TPS_START		4
+#define DMA_TPS_STOPPED			0x00
+#define DMA_TPS_SUSPENDED		0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
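+ * For example, with DMA_CH_BASE 0x3100 and DMA_CH_INC 0x80, channel 2's
+ * DMA_CH_SR register (offset 0x60) is at 0x3100 + 2 * 0x80 + 0x60 = 0x3260.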
+ */ +#define DMA_CH_BASE 0x3100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x24 +#define DMA_CH_RDTR_LO 0x2c +#define DMA_CH_TDRLR 0x30 +#define DMA_CH_RDRLR 0x34 +#define DMA_CH_IER 0x38 +#define DMA_CH_RIWT 0x3c +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_POS 16 +#define DMA_CH_CR_PBLX8_LEN 1 +#define DMA_CH_CR_SPH_POS 24 +#define DMA_CH_CR_SPH_LEN 1 +#define DMA_CH_IER_AIE_POS 15 +#define DMA_CH_IER_AIE_LEN 1 +#define DMA_CH_IER_FBEE_POS 12 +#define DMA_CH_IER_FBEE_LEN 1 +#define DMA_CH_IER_NIE_POS 16 +#define DMA_CH_IER_NIE_LEN 1 +#define DMA_CH_IER_RBUE_POS 7 +#define DMA_CH_IER_RBUE_LEN 1 +#define DMA_CH_IER_RIE_POS 6 +#define DMA_CH_IER_RIE_LEN 1 +#define DMA_CH_IER_RSE_POS 8 +#define DMA_CH_IER_RSE_LEN 1 +#define DMA_CH_IER_TBUE_POS 2 +#define DMA_CH_IER_TBUE_LEN 1 +#define DMA_CH_IER_TIE_POS 0 +#define DMA_CH_IER_TIE_LEN 1 +#define DMA_CH_IER_TXSE_POS 1 +#define DMA_CH_IER_TXSE_LEN 1 +#define DMA_CH_RCR_PBL_POS 16 +#define DMA_CH_RCR_PBL_LEN 6 +#define DMA_CH_RCR_RBSZ_POS 1 +#define DMA_CH_RCR_RBSZ_LEN 14 +#define DMA_CH_RCR_SR_POS 0 +#define DMA_CH_RCR_SR_LEN 1 +#define DMA_CH_RIWT_RWT_POS 0 +#define DMA_CH_RIWT_RWT_LEN 8 +#define DMA_CH_SR_FBE_POS 12 +#define DMA_CH_SR_FBE_LEN 1 +#define DMA_CH_SR_RBU_POS 7 +#define DMA_CH_SR_RBU_LEN 1 +#define DMA_CH_SR_RI_POS 6 +#define DMA_CH_SR_RI_LEN 1 +#define DMA_CH_SR_RPS_POS 8 +#define DMA_CH_SR_RPS_LEN 1 +#define DMA_CH_SR_TBU_POS 2 +#define DMA_CH_SR_TBU_LEN 1 +#define DMA_CH_SR_TI_POS 0 +#define DMA_CH_SR_TI_LEN 1 +#define DMA_CH_SR_TPS_POS 1 +#define DMA_CH_SR_TPS_LEN 1 +#define DMA_CH_TCR_OSP_POS 4 +#define DMA_CH_TCR_OSP_LEN 1 +#define DMA_CH_TCR_PBL_POS 16 +#define DMA_CH_TCR_PBL_LEN 6 +#define DMA_CH_TCR_ST_POS 0 +#define DMA_CH_TCR_ST_LEN 1 +#define DMA_CH_TCR_TSE_POS 12 +#define DMA_CH_TCR_TSE_LEN 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 +#define DMA_PBL_128 128 +#define DMA_PBL_256 256 +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_POS 2 +#define RX_PACKET_ERRORS_CRC_LEN 1 +#define RX_PACKET_ERRORS_FRAME_POS 3 +#define RX_PACKET_ERRORS_FRAME_LEN 1 +#define RX_PACKET_ERRORS_LENGTH_POS 0 +#define RX_PACKET_ERRORS_LENGTH_LEN 1 +#define RX_PACKET_ERRORS_OVERRUN_POS 1 +#define RX_PACKET_ERRORS_OVERRUN_LEN 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1 + +#define RX_NORMAL_DESC0_OVT_POS 0 +#define RX_NORMAL_DESC0_OVT_LEN 16 +#define RX_NORMAL_DESC2_HL_POS 0 +#define 
RX_NORMAL_DESC2_HL_LEN 10 +#define RX_NORMAL_DESC3_CDA_POS 27 +#define RX_NORMAL_DESC3_CDA_LEN 1 +#define RX_NORMAL_DESC3_CTXT_POS 30 +#define RX_NORMAL_DESC3_CTXT_LEN 1 +#define RX_NORMAL_DESC3_ES_POS 15 +#define RX_NORMAL_DESC3_ES_LEN 1 +#define RX_NORMAL_DESC3_ETLT_POS 16 +#define RX_NORMAL_DESC3_ETLT_LEN 4 +#define RX_NORMAL_DESC3_FD_POS 29 +#define RX_NORMAL_DESC3_FD_LEN 1 +#define RX_NORMAL_DESC3_INTE_POS 30 +#define RX_NORMAL_DESC3_INTE_LEN 1 +#define RX_NORMAL_DESC3_L34T_POS 20 +#define RX_NORMAL_DESC3_L34T_LEN 4 +#define RX_NORMAL_DESC3_LD_POS 28 +#define RX_NORMAL_DESC3_LD_LEN 1 +#define RX_NORMAL_DESC3_OWN_POS 31 +#define RX_NORMAL_DESC3_OWN_LEN 1 +#define RX_NORMAL_DESC3_PL_POS 0 +#define RX_NORMAL_DESC3_PL_LEN 14 +#define RX_NORMAL_DESC3_RSV_POS 26 +#define RX_NORMAL_DESC3_RSV_LEN 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_CONTEXT_DESC3_TSA_POS 4 +#define RX_CONTEXT_DESC3_TSA_LEN 1 +#define RX_CONTEXT_DESC3_TSD_POS 6 +#define RX_CONTEXT_DESC3_TSD_LEN 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1 +#define TX_PACKET_ATTRIBUTES_PTP_POS 3 +#define TX_PACKET_ATTRIBUTES_PTP_LEN 1 + +#define TX_CONTEXT_DESC2_MSS_POS 0 +#define TX_CONTEXT_DESC2_MSS_LEN 15 +#define TX_CONTEXT_DESC3_CTXT_POS 30 +#define TX_CONTEXT_DESC3_CTXT_LEN 1 +#define TX_CONTEXT_DESC3_TCMSSV_POS 26 +#define TX_CONTEXT_DESC3_TCMSSV_LEN 1 +#define TX_CONTEXT_DESC3_VLTV_POS 16 +#define TX_CONTEXT_DESC3_VLTV_LEN 1 +#define TX_CONTEXT_DESC3_VT_POS 0 +#define TX_CONTEXT_DESC3_VT_LEN 16 + +#define TX_NORMAL_DESC2_HL_B1L_POS 0 +#define TX_NORMAL_DESC2_HL_B1L_LEN 14 +#define TX_NORMAL_DESC2_IC_POS 31 +#define TX_NORMAL_DESC2_IC_LEN 1 +#define TX_NORMAL_DESC2_TTSE_POS 30 +#define TX_NORMAL_DESC2_TTSE_LEN 1 +#define TX_NORMAL_DESC2_VTIR_POS 14 +#define TX_NORMAL_DESC2_VTIR_LEN 2 +#define TX_NORMAL_DESC3_CIC_POS 16 +#define TX_NORMAL_DESC3_CIC_LEN 2 +#define TX_NORMAL_DESC3_CPC_POS 26 +#define TX_NORMAL_DESC3_CPC_LEN 2 +#define TX_NORMAL_DESC3_CTXT_POS 30 +#define TX_NORMAL_DESC3_CTXT_LEN 1 +#define TX_NORMAL_DESC3_FD_POS 29 +#define TX_NORMAL_DESC3_FD_LEN 1 +#define TX_NORMAL_DESC3_FL_POS 0 +#define TX_NORMAL_DESC3_FL_LEN 15 +#define TX_NORMAL_DESC3_LD_POS 28 +#define TX_NORMAL_DESC3_LD_LEN 1 +#define TX_NORMAL_DESC3_OWN_POS 31 +#define TX_NORMAL_DESC3_OWN_LEN 1 +#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4 +#define TX_NORMAL_DESC3_TCPPL_POS 0 +#define TX_NORMAL_DESC3_TCPPL_LEN 18 +#define TX_NORMAL_DESC3_TSE_POS 18 +#define TX_NORMAL_DESC3_TSE_LEN 1 + +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +#define XLGMAC_MTL_REG(pdata, n, reg) \ + ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg)) + +#define XLGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg)) + +#endif /* __DWC_XLGMAC_REG_H__ */ diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h new file mode 100644 index 0000000000000000000000000000000000000000..cab3e40a86b93c70d56c24313263ea8342c9394a --- /dev/null +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h @@ -0,0 +1,660 @@ +/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver + * + * Copyright (c) 
2017 Synopsys, Inc. (www.synopsys.com) + * + * This program is dual-licensed; you may select either version 2 of + * the GNU General Public License ("GPL") or BSD license ("BSD"). + * + * This Synopsys DWC XLGMAC software driver and associated documentation + * (hereinafter the "Software") is an unsupported proprietary work of + * Synopsys, Inc. unless otherwise expressly agreed to in writing between + * Synopsys and you. The Software IS NOT an item of Licensed Software or a + * Licensed Product under any End User Software License Agreement or + * Agreement for Licensed Products with Synopsys or any supplement thereto. + * Synopsys is a registered trademark of Synopsys, Inc. Other names included + * in the SOFTWARE may be the trademarks of their respective owners. + */ + +#ifndef __DWC_XLGMAC_H__ +#define __DWC_XLGMAC_H__ + +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/bitops.h> +#include <linux/timecounter.h> + +#define XLGMAC_DRV_NAME "dwc-xlgmac" +#define XLGMAC_DRV_VERSION "1.0.0" +#define XLGMAC_DRV_DESC "Synopsys DWC XLGMAC Driver" + +/* Descriptor related parameters */ +#define XLGMAC_TX_DESC_CNT 1024 +#define XLGMAC_TX_DESC_MIN_FREE (XLGMAC_TX_DESC_CNT >> 3) +#define XLGMAC_TX_DESC_MAX_PROC (XLGMAC_TX_DESC_CNT >> 1) +#define XLGMAC_RX_DESC_CNT 1024 +#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3) + +/* Descriptors required for maximum contiguous TSO/GSO packet */ +#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1) + +/* Maximum possible descriptors needed for a SKB */ +#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2) + +#define XLGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +#define XLGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) +#define XLGMAC_RX_BUF_ALIGN 64 + +/* Maximum Size for Splitting the Header Data + * Keep in sync with SKB_ALLOC_SIZE + * 3'b000: 64 bytes, 3'b001: 128 bytes + * 3'b010: 256 bytes, 3'b011: 512 bytes + * 3'b100: 1023 bytes, 3'b101-3'b111: Reserved + */ +#define XLGMAC_SPH_HDSMS_SIZE 3 +#define XLGMAC_SKB_ALLOC_SIZE 512 + +#define XLGMAC_MAX_FIFO 81920 + +#define XLGMAC_MAX_DMA_CHANNELS 16 +#define XLGMAC_DMA_STOP_TIMEOUT 5 +#define XLGMAC_DMA_INTERRUPT_MASK 0x31c7 + +/* Default coalescing parameters */ +#define XLGMAC_INIT_DMA_TX_USECS 1000 +#define XLGMAC_INIT_DMA_TX_FRAMES 25 +#define XLGMAC_INIT_DMA_RX_USECS 30 +#define XLGMAC_INIT_DMA_RX_FRAMES 25 +#define XLGMAC_MAX_DMA_RIWT 0xff +#define XLGMAC_MIN_DMA_RIWT 0x01 + +/* Flow control queue count */ +#define XLGMAC_MAX_FLOW_CONTROL_QUEUES 8 + +/* System clock is 125 MHz */ +#define XLGMAC_SYSCLOCK 125000000 + +/* Maximum MAC address hash table size (256 bits = 8 32-bit words) */ +#define XLGMAC_MAC_HASH_TABLE_SIZE 8 + +/* Receive Side Scaling */ +#define XLGMAC_RSS_HASH_KEY_SIZE 40 +#define XLGMAC_RSS_MAX_TABLE_SIZE 256 +#define XLGMAC_RSS_LOOKUP_TABLE_TYPE 0 +#define XLGMAC_RSS_HASH_KEY_TYPE 1 + +#define XLGMAC_STD_PACKET_MTU 1500 +#define XLGMAC_JUMBO_PACKET_MTU 9000 + +/* Helper macro for descriptor handling + * Always use XLGMAC_GET_DESC_DATA to access the descriptor data + */ +#define XLGMAC_GET_DESC_DATA(ring, idx) ({ \ + typeof(ring) _ring = (ring); \ + ((_ring)->desc_data_head + \ + ((idx) & ((_ring)->dma_desc_count - 1))); \ +}) + +#define XLGMAC_GET_REG_BITS(var, pos, len) ({ \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \ +}) + +#define XLGMAC_GET_REG_BITS_LE(var, pos, len) ({ \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + typeof(var) _var = le32_to_cpu((var)); \
+ ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \ +}) + +#define XLGMAC_SET_REG_BITS(var, pos, len, val) ({ \ + typeof(var) _var = (var); \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + typeof(val) _val = (val); \ + _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \ + _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \ +}) + +#define XLGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \ + typeof(var) _var = (var); \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + typeof(val) _val = (val); \ + _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \ + _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \ + cpu_to_le32(_var); \ +}) + +struct xlgmac_pdata; + +enum xlgmac_int { + XLGMAC_INT_DMA_CH_SR_TI, + XLGMAC_INT_DMA_CH_SR_TPS, + XLGMAC_INT_DMA_CH_SR_TBU, + XLGMAC_INT_DMA_CH_SR_RI, + XLGMAC_INT_DMA_CH_SR_RBU, + XLGMAC_INT_DMA_CH_SR_RPS, + XLGMAC_INT_DMA_CH_SR_TI_RI, + XLGMAC_INT_DMA_CH_SR_FBE, + XLGMAC_INT_DMA_ALL, +}; + +struct xlgmac_stats { + /* MMC TX counters */ + u64 txoctetcount_gb; + u64 txframecount_gb; + u64 txbroadcastframes_g; + u64 txmulticastframes_g; + u64 tx64octets_gb; + u64 tx65to127octets_gb; + u64 tx128to255octets_gb; + u64 tx256to511octets_gb; + u64 tx512to1023octets_gb; + u64 tx1024tomaxoctets_gb; + u64 txunicastframes_gb; + u64 txmulticastframes_gb; + u64 txbroadcastframes_gb; + u64 txunderflowerror; + u64 txoctetcount_g; + u64 txframecount_g; + u64 txpauseframes; + u64 txvlanframes_g; + + /* MMC RX counters */ + u64 rxframecount_gb; + u64 rxoctetcount_gb; + u64 rxoctetcount_g; + u64 rxbroadcastframes_g; + u64 rxmulticastframes_g; + u64 rxcrcerror; + u64 rxrunterror; + u64 rxjabbererror; + u64 rxundersize_g; + u64 rxoversize_g; + u64 rx64octets_gb; + u64 rx65to127octets_gb; + u64 rx128to255octets_gb; + u64 rx256to511octets_gb; + u64 rx512to1023octets_gb; + u64 rx1024tomaxoctets_gb; + u64 rxunicastframes_g; + u64 rxlengtherror; + u64 rxoutofrangetype; + u64 rxpauseframes; + u64 rxfifooverflow; + u64 rxvlanframes_gb; + u64 rxwatchdogerror; + + /* Extra counters */ + u64 tx_tso_packets; + u64 rx_split_header_packets; + u64 tx_process_stopped; + u64 rx_process_stopped; + u64 tx_buffer_unavailable; + u64 rx_buffer_unavailable; + u64 fatal_bus_error; + u64 tx_vlan_packets; + u64 rx_vlan_packets; + u64 napi_poll_isr; + u64 napi_poll_txtimer; +}; + +struct xlgmac_ring_buf { + struct sk_buff *skb; + dma_addr_t skb_dma; + unsigned int skb_len; +}; + +/* Common Tx and Rx DMA hardware descriptor */ +struct xlgmac_dma_desc { + __le32 desc0; + __le32 desc1; + __le32 desc2; + __le32 desc3; +}; + +/* Page allocation related values */ +struct xlgmac_page_alloc { + struct page *pages; + unsigned int pages_len; + unsigned int pages_offset; + + dma_addr_t pages_dma; +}; + +/* Ring entry buffer data */ +struct xlgmac_buffer_data { + struct xlgmac_page_alloc pa; + struct xlgmac_page_alloc pa_unmap; + + dma_addr_t dma_base; + unsigned long dma_off; + unsigned int dma_len; +}; + +/* Tx-related desc data */ +struct xlgmac_tx_desc_data { + unsigned int packets; /* BQL packet count */ + unsigned int bytes; /* BQL byte count */ +}; + +/* Rx-related desc data */ +struct xlgmac_rx_desc_data { + struct xlgmac_buffer_data hdr; /* Header locations */ + struct xlgmac_buffer_data buf; /* Payload locations */ + + unsigned short hdr_len; /* Length of received header */ + unsigned short len; /* Length of received packet */ +}; + +struct xlgmac_pkt_info { + struct sk_buff *skb; + + unsigned int attributes; + + unsigned int errors; + + /* descriptors needed 
for this packet */ + unsigned int desc_count; + unsigned int length; + + unsigned int tx_packets; + unsigned int tx_bytes; + + unsigned int header_len; + unsigned int tcp_header_len; + unsigned int tcp_payload_len; + unsigned short mss; + + unsigned short vlan_ctag; + + u64 rx_tstamp; + + u32 rss_hash; + enum pkt_hash_types rss_hash_type; +}; + +struct xlgmac_desc_data { + /* dma_desc: Virtual address of descriptor + * dma_desc_addr: DMA address of descriptor + */ + struct xlgmac_dma_desc *dma_desc; + dma_addr_t dma_desc_addr; + + /* skb: Virtual address of SKB + * skb_dma: DMA address of SKB data + * skb_dma_len: Length of SKB DMA area + */ + struct sk_buff *skb; + dma_addr_t skb_dma; + unsigned int skb_dma_len; + + /* Tx/Rx -related data */ + struct xlgmac_tx_desc_data tx; + struct xlgmac_rx_desc_data rx; + + unsigned int mapped_as_page; + + /* Incomplete receive save location. If the budget is exhausted + * or the last descriptor (last normal descriptor or a following + * context descriptor) has not been DMA'd yet the current state + * of the receive processing needs to be saved. + */ + unsigned int state_saved; + struct { + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; +}; + +struct xlgmac_ring { + /* Per packet related information */ + struct xlgmac_pkt_info pkt_info; + + /* Virtual/DMA addresses of DMA descriptor list and the total count */ + struct xlgmac_dma_desc *dma_desc_head; + dma_addr_t dma_desc_head_addr; + unsigned int dma_desc_count; + + /* Array of descriptor data corresponding the DMA descriptor + * (always use the XLGMAC_GET_DESC_DATA macro to access this data) + */ + struct xlgmac_desc_data *desc_data_head; + + /* Page allocation for RX buffers */ + struct xlgmac_page_alloc rx_hdr_pa; + struct xlgmac_page_alloc rx_buf_pa; + + /* Ring index values + * cur - Tx: index of descriptor to be used for current transfer + * Rx: index of descriptor to check for packet availability + * dirty - Tx: index of descriptor to check for transfer complete + * Rx: index of descriptor to check for buffer reallocation + */ + unsigned int cur; + unsigned int dirty; + + /* Coalesce frame count used for interrupt bit setting */ + unsigned int coalesce_count; + + union { + struct { + unsigned int xmit_more; + unsigned int queue_stopped; + unsigned short cur_mss; + unsigned short cur_vlan_ctag; + } tx; + }; +} ____cacheline_aligned; + +struct xlgmac_channel { + char name[16]; + + /* Address of private data area for device */ + struct xlgmac_pdata *pdata; + + /* Queue index and base address of queue's DMA registers */ + unsigned int queue_index; + void __iomem *dma_regs; + + /* Per channel interrupt irq number */ + int dma_irq; + char dma_irq_name[IFNAMSIZ + 32]; + + /* Netdev related settings */ + struct napi_struct napi; + + unsigned int saved_ier; + + unsigned int tx_timer_active; + struct timer_list tx_timer; + + struct xlgmac_ring *tx_ring; + struct xlgmac_ring *rx_ring; +} ____cacheline_aligned; + +struct xlgmac_desc_ops { + int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata); + void (*free_channels_and_rings)(struct xlgmac_pdata *pdata); + int (*map_tx_skb)(struct xlgmac_channel *channel, + struct sk_buff *skb); + int (*map_rx_buffer)(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + struct xlgmac_desc_data *desc_data); + void (*unmap_desc_data)(struct xlgmac_pdata *pdata, + struct xlgmac_desc_data *desc_data); + void (*tx_desc_init)(struct xlgmac_pdata *pdata); + void (*rx_desc_init)(struct xlgmac_pdata *pdata); +}; + +struct xlgmac_hw_ops { + 
int (*init)(struct xlgmac_pdata *pdata); + int (*exit)(struct xlgmac_pdata *pdata); + + int (*tx_complete)(struct xlgmac_dma_desc *dma_desc); + + void (*enable_tx)(struct xlgmac_pdata *pdata); + void (*disable_tx)(struct xlgmac_pdata *pdata); + void (*enable_rx)(struct xlgmac_pdata *pdata); + void (*disable_rx)(struct xlgmac_pdata *pdata); + + int (*enable_int)(struct xlgmac_channel *channel, + enum xlgmac_int int_id); + int (*disable_int)(struct xlgmac_channel *channel, + enum xlgmac_int int_id); + void (*dev_xmit)(struct xlgmac_channel *channel); + int (*dev_read)(struct xlgmac_channel *channel); + + int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr); + int (*config_rx_mode)(struct xlgmac_pdata *pdata); + int (*enable_rx_csum)(struct xlgmac_pdata *pdata); + int (*disable_rx_csum)(struct xlgmac_pdata *pdata); + + /* For MII speed configuration */ + int (*set_xlgmii_25000_speed)(struct xlgmac_pdata *pdata); + int (*set_xlgmii_40000_speed)(struct xlgmac_pdata *pdata); + int (*set_xlgmii_50000_speed)(struct xlgmac_pdata *pdata); + int (*set_xlgmii_100000_speed)(struct xlgmac_pdata *pdata); + + /* For descriptor related operation */ + void (*tx_desc_init)(struct xlgmac_channel *channel); + void (*rx_desc_init)(struct xlgmac_channel *channel); + void (*tx_desc_reset)(struct xlgmac_desc_data *desc_data); + void (*rx_desc_reset)(struct xlgmac_pdata *pdata, + struct xlgmac_desc_data *desc_data, + unsigned int index); + int (*is_last_desc)(struct xlgmac_dma_desc *dma_desc); + int (*is_context_desc)(struct xlgmac_dma_desc *dma_desc); + void (*tx_start_xmit)(struct xlgmac_channel *channel, + struct xlgmac_ring *ring); + + /* For Flow Control */ + int (*config_tx_flow_control)(struct xlgmac_pdata *pdata); + int (*config_rx_flow_control)(struct xlgmac_pdata *pdata); + + /* For Vlan related config */ + int (*enable_rx_vlan_stripping)(struct xlgmac_pdata *pdata); + int (*disable_rx_vlan_stripping)(struct xlgmac_pdata *pdata); + int (*enable_rx_vlan_filtering)(struct xlgmac_pdata *pdata); + int (*disable_rx_vlan_filtering)(struct xlgmac_pdata *pdata); + int (*update_vlan_hash_table)(struct xlgmac_pdata *pdata); + + /* For RX coalescing */ + int (*config_rx_coalesce)(struct xlgmac_pdata *pdata); + int (*config_tx_coalesce)(struct xlgmac_pdata *pdata); + unsigned int (*usec_to_riwt)(struct xlgmac_pdata *pdata, + unsigned int usec); + unsigned int (*riwt_to_usec)(struct xlgmac_pdata *pdata, + unsigned int riwt); + + /* For RX and TX threshold config */ + int (*config_rx_threshold)(struct xlgmac_pdata *pdata, + unsigned int val); + int (*config_tx_threshold)(struct xlgmac_pdata *pdata, + unsigned int val); + + /* For RX and TX Store and Forward Mode config */ + int (*config_rsf_mode)(struct xlgmac_pdata *pdata, + unsigned int val); + int (*config_tsf_mode)(struct xlgmac_pdata *pdata, + unsigned int val); + + /* For TX DMA Operate on Second Frame config */ + int (*config_osp_mode)(struct xlgmac_pdata *pdata); + + /* For RX and TX PBL config */ + int (*config_rx_pbl_val)(struct xlgmac_pdata *pdata); + int (*get_rx_pbl_val)(struct xlgmac_pdata *pdata); + int (*config_tx_pbl_val)(struct xlgmac_pdata *pdata); + int (*get_tx_pbl_val)(struct xlgmac_pdata *pdata); + int (*config_pblx8)(struct xlgmac_pdata *pdata); + + /* For MMC statistics */ + void (*rx_mmc_int)(struct xlgmac_pdata *pdata); + void (*tx_mmc_int)(struct xlgmac_pdata *pdata); + void (*read_mmc_stats)(struct xlgmac_pdata *pdata); + + /* For Receive Side Scaling */ + int (*enable_rss)(struct xlgmac_pdata *pdata); + int (*disable_rss)(struct 
xlgmac_pdata *pdata); + int (*set_rss_hash_key)(struct xlgmac_pdata *pdata, + const u8 *key); + int (*set_rss_lookup_table)(struct xlgmac_pdata *pdata, + const u32 *table); +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. + */ +struct xlgmac_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int phyifsel; /* PHY interface support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ +}; + +struct xlgmac_resources { + void __iomem *addr; + int irq; +}; + +struct xlgmac_pdata { + struct net_device *netdev; + struct device *dev; + + struct xlgmac_hw_ops hw_ops; + struct xlgmac_desc_ops desc_ops; + + /* Device statistics */ + struct xlgmac_stats stats; + + u32 msg_enable; + + /* MAC registers base */ + void __iomem *mac_regs; + + /* Hardware features of the device */ + struct xlgmac_hw_features hw_feat; + + struct work_struct restart_work; + + /* Rings for Tx/Rx on a DMA channel */ + struct xlgmac_channel *channel_head; + unsigned int channel_count; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + unsigned int tx_desc_count; + unsigned int rx_desc_count; + unsigned int tx_q_count; + unsigned int rx_q_count; + + /* Tx/Rx common settings */ + unsigned int pblx8; + + /* Tx settings */ + unsigned int tx_sf_mode; + unsigned int tx_threshold; + unsigned int tx_pbl; + unsigned int tx_osp_mode; + + /* Rx settings */ + unsigned int rx_sf_mode; + unsigned int rx_threshold; + unsigned int rx_pbl; + + /* Tx coalescing settings */ + unsigned int tx_usecs; + unsigned int tx_frames; + + /* Rx coalescing settings */ + unsigned int rx_riwt; + unsigned int rx_usecs; + unsigned int rx_frames; + + /* Current Rx buffer size */ + unsigned int rx_buf_size; + + /* Flow control settings */ + unsigned int tx_pause; + unsigned int rx_pause; + + /* Device interrupt 
number */ + int dev_irq; + unsigned int per_channel_irq; + int channel_irq[XLGMAC_MAX_DMA_CHANNELS]; + + /* Netdev related settings */ + unsigned char mac_addr[ETH_ALEN]; + netdev_features_t netdev_features; + struct napi_struct napi; + + /* Filtering support */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + /* Device clocks */ + unsigned long sysclk_rate; + + /* RSS addressing mutex */ + struct mutex rss_mutex; + + /* Receive Side Scaling settings */ + u8 rss_key[XLGMAC_RSS_HASH_KEY_SIZE]; + u32 rss_table[XLGMAC_RSS_MAX_TABLE_SIZE]; + u32 rss_options; + + int phy_speed; + + char drv_name[32]; + char drv_ver[32]; +}; + +void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops); +void xlgmac_init_hw_ops(struct xlgmac_hw_ops *hw_ops); +const struct net_device_ops *xlgmac_get_netdev_ops(void); +const struct ethtool_ops *xlgmac_get_ethtool_ops(void); +void xlgmac_dump_tx_desc(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + unsigned int idx, + unsigned int count, + unsigned int flag); +void xlgmac_dump_rx_desc(struct xlgmac_pdata *pdata, + struct xlgmac_ring *ring, + unsigned int idx); +void xlgmac_print_pkt(struct net_device *netdev, + struct sk_buff *skb, bool tx_rx); +void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata); +void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata); +int xlgmac_drv_probe(struct device *dev, + struct xlgmac_resources *res); +int xlgmac_drv_remove(struct device *dev); + +/* For debug prints */ +#ifdef XLGMAC_DEBUG +#define XLGMAC_PR(fmt, args...) \ + pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args) +#else +#define XLGMAC_PR(x...) do { } while (0) +#endif + +#endif /* __DWC_XLGMAC_H__ */ diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index f864fd0663dbf830bd94b1db4daa7a7b5ad3ce1d..711fbbbc4b1f724fcebdaec80eda2eec1eef8551 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2124,33 +2124,26 @@ static const char }; /* - * bdx_get_settings - get device-specific settings + * bdx_get_link_ksettings - get device-specific settings * @netdev * @ecmd */ -static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - u32 rdintcm; - u32 tdintcm; - struct bdx_priv *priv = netdev_priv(netdev); - - rdintcm = priv->rdintcm; - tdintcm = priv->tdintcm; - - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - ethtool_cmd_speed_set(ecmd, SPEED_10000); - ecmd->duplex = DUPLEX_FULL; - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? 
*/ - ecmd->autoneg = AUTONEG_DISABLE; - - /* PCK_TH measures in multiples of FIFO bytes - We translate to packets */ - ecmd->maxtxpkt = - ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); - ecmd->maxrxpkt = - ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); +static int bdx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) +{ + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE); + + ecmd->base.speed = SPEED_10000; + ecmd->base.duplex = DUPLEX_FULL; + ecmd->base.port = PORT_FIBRE; + ecmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -2384,7 +2377,6 @@ static void bdx_get_ethtool_stats(struct net_device *netdev, static void bdx_set_ethtool_ops(struct net_device *netdev) { static const struct ethtool_ops bdx_ethtool_ops = { - .get_settings = bdx_get_settings, .get_drvinfo = bdx_get_drvinfo, .get_link = ethtool_op_get_link, .get_coalesce = bdx_get_coalesce, @@ -2394,6 +2386,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev) .get_strings = bdx_get_strings, .get_sset_count = bdx_get_sset_count, .get_ethtool_stats = bdx_get_ethtool_stats, + .get_link_ksettings = bdx_get_link_ksettings, }; netdev->ethtool_ops = &bdx_ethtool_ops; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 7c7ae0890e90c450c2228e44cf618cdfdedcceec..729a7da90b5bed279d58d72cfc53cfa6fc2e9c80 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1134,7 +1134,6 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) u32 buf_len = skb_frag_size(frag); dma_addr_t desc_dma; u32 desc_dma_32; - u32 pkt_info; dma_addr = dma_map_page(dev, page, page_offset, buf_len, DMA_TO_DEVICE); @@ -1151,9 +1150,6 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) } desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc); - pkt_info = - (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << - KNAV_DMA_DESC_RETQ_SHIFT; set_pkt_info(dma_addr, buf_len, 0, ndesc); desc_dma_32 = (u32)desc_dma; set_words(&desc_dma_32, 1, &pdesc->next_desc); @@ -1882,6 +1878,7 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *tc) { + u8 num_tc; int i; /* setup tc must be called under rtnl lock */ @@ -1890,15 +1887,18 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; + tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = tc->mqprio->num_tc; + /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || - (dev->real_num_tx_queues < tc->tc)) + (dev->real_num_tx_queues < num_tc)) return -EINVAL; /* Configure traffic class to queue mappings */ - if (tc->tc) { - netdev_set_num_tc(dev, tc->tc); - for (i = 0; i < tc->tc; i++) + if (num_tc) { + netdev_set_num_tc(dev, num_tc); + for (i = 0; i < num_tc; i++) netdev_set_tc_queue(dev, i, 1, i); } else { netdev_reset_tc(dev); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 
eece3e2eec14199809b1b55219835662bad1d012..897176fc5043bf0da12fe5dcb7a659c269fdf615 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3048,8 +3048,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, for_each_child_of_node(node, port) { slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); if (!slave) { - dev_err(dev, - "memomry alloc failed for secondary port(%s), skipping...\n", + dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n", port->name); continue; } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 72013314bba81fbbbb0f7d37da6143a9b0754b0c..fa6a06571187ed0d4f30a7001983b7f5fcbea018 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1206,61 +1206,68 @@ void gelic_net_get_drvinfo(struct net_device *netdev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } -static int gelic_ether_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int gelic_ether_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct gelic_card *card = netdev_card(netdev); + u32 supported, advertising; gelic_card_get_ether_port_status(card, 0); if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX) - cmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) { case GELIC_LV1_ETHER_SPEED_10: - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; break; case GELIC_LV1_ETHER_SPEED_100: - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; break; case GELIC_LV1_ETHER_SPEED_1000: - ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->base.speed = SPEED_1000; break; default: pr_info("%s: speed unknown\n", __func__); - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; break; } - cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | + supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full; - cmd->advertising = cmd->supported; + advertising = supported; if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) { - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; } else { - cmd->autoneg = AUTONEG_DISABLE; - cmd->advertising &= ~ADVERTISED_Autoneg; + cmd->base.autoneg = AUTONEG_DISABLE; + advertising &= ~ADVERTISED_Autoneg; } - cmd->port = PORT_TP; + cmd->base.port = PORT_TP; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int gelic_ether_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int +gelic_ether_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct gelic_card *card = netdev_card(netdev); u64 mode; int ret; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { mode = GELIC_LV1_ETHER_AUTO_NEG; } else { - switch (cmd->speed) { + switch (cmd->base.speed) { case SPEED_10: mode = GELIC_LV1_ETHER_SPEED_10; break; @@ -1273,9 +1280,9 @@ static int gelic_ether_set_settings(struct net_device *netdev, default: return -EINVAL; } - if (cmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) { mode |= 
GELIC_LV1_ETHER_FULL_DUPLEX; - else if (cmd->speed == SPEED_1000) { + } else if (cmd->base.speed == SPEED_1000) { pr_info("1000 half duplex is not supported.\n"); return -EINVAL; } @@ -1370,11 +1377,11 @@ static int gelic_net_set_wol(struct net_device *netdev, static const struct ethtool_ops gelic_ether_ethtool_ops = { .get_drvinfo = gelic_net_get_drvinfo, - .get_settings = gelic_ether_get_settings, - .set_settings = gelic_ether_set_settings, .get_link = ethtool_op_get_link, .get_wol = gelic_net_get_wol, .set_wol = gelic_net_set_wol, + .get_link_ksettings = gelic_ether_get_link_ksettings, + .set_link_ksettings = gelic_ether_set_link_ksettings, }; /** diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c index ffe519382e111a04dd37c904a6080cee561e2d47..16bd036d06820e9139d04f05355a5a4e5127509c 100644 --- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c +++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c @@ -47,19 +47,23 @@ static struct { }; static int -spider_net_ethtool_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +spider_net_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct spider_net_card *card; card = netdev_priv(netdev); - cmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE); - cmd->port = PORT_FIBRE; - ethtool_cmd_speed_set(cmd, card->phy.speed); - cmd->duplex = DUPLEX_FULL; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + cmd->base.port = PORT_FIBRE; + cmd->base.speed = card->phy.speed; + cmd->base.duplex = DUPLEX_FULL; return 0; } @@ -166,7 +170,6 @@ static void spider_net_get_strings(struct net_device *netdev, u32 stringset, } const struct ethtool_ops spider_net_ethtool_ops = { - .get_settings = spider_net_ethtool_get_settings, .get_drvinfo = spider_net_ethtool_get_drvinfo, .get_wol = spider_net_ethtool_get_wol, .get_msglevel = spider_net_ethtool_get_msglevel, @@ -177,5 +180,6 @@ const struct ethtool_ops spider_net_ethtool_ops = { .get_strings = spider_net_get_strings, .get_sset_count = spider_net_get_sset_count, .get_ethtool_stats = spider_net_get_ethtool_stats, + .get_link_ksettings = spider_net_ethtool_get_link_ksettings, }; diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index c5583991da4aa462652b5236992c7731529422db..5ac6eaa9e78510a2c28cf2506dd791ece82b3009 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1499,27 +1499,29 @@ static void tsi108_init_mac(struct net_device *dev) TSI_WRITE(TSI108_EC_INTMASK, ~0); } -static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int tsi108_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct tsi108_prv_data *data = netdev_priv(dev); unsigned long flags; int rc; spin_lock_irqsave(&data->txlock, flags); - rc = mii_ethtool_gset(&data->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&data->mii_if, cmd); spin_unlock_irqrestore(&data->txlock, flags); return rc; } -static int tsi108_set_settings(struct net_device 
*dev, struct ethtool_cmd *cmd) +static int tsi108_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct tsi108_prv_data *data = netdev_priv(dev); unsigned long flags; int rc; spin_lock_irqsave(&data->txlock, flags); - rc = mii_ethtool_sset(&data->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd); spin_unlock_irqrestore(&data->txlock, flags); return rc; @@ -1535,8 +1537,8 @@ static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static const struct ethtool_ops tsi108_ethtool_ops = { .get_link = ethtool_op_get_link, - .get_settings = tsi108_get_settings, - .set_settings = tsi108_set_settings, + .get_link_ksettings = tsi108_get_link_ksettings, + .set_link_ksettings = tsi108_set_link_ksettings, }; static const struct net_device_ops tsi108_netdev_ops = { diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index c068c58428f7611ddcd526010cb07f8ada61b760..4cf41f779d0ef4ef8d6cf2fa688abd97b3e8264d 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2303,25 +2303,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct rhine_private *rp = netdev_priv(dev); int rc; mutex_lock(&rp->task_lock); - rc = mii_ethtool_gset(&rp->mii_if, cmd); + rc = mii_ethtool_get_link_ksettings(&rp->mii_if, cmd); mutex_unlock(&rp->task_lock); return rc; } -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct rhine_private *rp = netdev_priv(dev); int rc; mutex_lock(&rp->task_lock); - rc = mii_ethtool_sset(&rp->mii_if, cmd); + rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd); rhine_set_carrier(&rp->mii_if); mutex_unlock(&rp->task_lock); @@ -2391,14 +2393,14 @@ static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .nway_reset = netdev_nway_reset, .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, .get_wol = rhine_get_wol, .set_wol = rhine_set_wol, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index d088788b27a751286f7556b7f478b210c0ab68a5..ef9538ee53d0db7f43eae4298dd39258b4c39122 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -3291,15 +3291,17 @@ static void velocity_ethtool_down(struct net_device *dev) velocity_set_power_state(vptr, PCI_D3hot); } -static int velocity_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int velocity_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct velocity_info *vptr = netdev_priv(dev); struct mac_regs __iomem *regs = vptr->mac_regs; u32 status; + u32 supported, advertising; + status = check_connection_type(vptr->mac_regs); 
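+ /* Build the legacy u32 supported/advertising masks first; they are + * converted into link-mode bitmaps via + * ethtool_convert_legacy_u32_to_link_mode() at the end of this + * function. + */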
- cmd->supported = SUPPORTED_TP | + supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | @@ -3308,9 +3310,9 @@ static int velocity_get_settings(struct net_device *dev, SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; - cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; + advertising = ADVERTISED_TP | ADVERTISED_Autoneg; if (vptr->options.spd_dpx == SPD_DPX_AUTO) { - cmd->advertising |= + advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | @@ -3320,19 +3322,19 @@ static int velocity_get_settings(struct net_device *dev, } else { switch (vptr->options.spd_dpx) { case SPD_DPX_1000_FULL: - cmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; break; case SPD_DPX_100_HALF: - cmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; break; case SPD_DPX_100_FULL: - cmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; break; case SPD_DPX_10_HALF: - cmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; break; case SPD_DPX_10_FULL: - cmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; break; default: break; @@ -3340,30 +3342,35 @@ static int velocity_get_settings(struct net_device *dev, } if (status & VELOCITY_SPEED_1000) - ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->base.speed = SPEED_1000; else if (status & VELOCITY_SPEED_100) - ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->base.speed = SPEED_100; else - ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->base.speed = SPEED_10; - cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; - cmd->port = PORT_TP; - cmd->transceiver = XCVR_INTERNAL; - cmd->phy_address = readb(&regs->MIIADR) & 0x1F; + cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->base.port = PORT_TP; + cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F; if (status & VELOCITY_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } -static int velocity_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int velocity_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct velocity_info *vptr = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(cmd); + u32 speed = cmd->base.speed; u32 curr_status; u32 new_status = 0; int ret = 0; @@ -3371,11 +3378,12 @@ static int velocity_set_settings(struct net_device *dev, curr_status = check_connection_type(vptr->mac_regs); curr_status &= (~VELOCITY_LINK_FAIL); - new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); + new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); - new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); + new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
+ VELOCITY_DUPLEX_FULL : 0); if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) { @@ -3644,8 +3652,6 @@ static void velocity_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops velocity_ethtool_ops = { - .get_settings = velocity_get_settings, - .set_settings = velocity_set_settings, .get_drvinfo = velocity_get_drvinfo, .get_wol = velocity_ethtool_get_wol, .set_wol = velocity_ethtool_set_wol, @@ -3658,7 +3664,9 @@ static const struct ethtool_ops velocity_ethtool_ops = { .get_coalesce = velocity_get_coalesce, .set_coalesce = velocity_set_coalesce, .begin = velocity_ethtool_up, - .complete = velocity_ethtool_down + .complete = velocity_ethtool_down, + .get_link_ksettings = velocity_get_link_ksettings, + .set_link_ksettings = velocity_set_link_ksettings, }; #if defined(CONFIG_PM) && defined(CONFIG_INET) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index f90267f0519feebb7c9d944535cb81f53c6bb9fe..2bdfb39215e9ce749ecc5fac8f4a22226c081bc4 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1152,7 +1152,8 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, if (err < 0) goto err_register; - priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0); + priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, + netdev_name(ndev)); if (!priv->xfer_wq) { err = -ENOMEM; goto err_wq; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b96e96919e31d2da791da73aeeaad6e85fd8c581..33c595f4691d93e3f2b73f85f9e92dba9dbead7f 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -301,7 +301,7 @@ static void axienet_set_mac_address(struct net_device *ndev, if (address) memcpy(ndev->dev_addr, address, ETH_ALEN); if (!is_valid_ether_addr(ndev->dev_addr)) - eth_random_addr(ndev->dev_addr); + eth_hw_addr_random(ndev); /* Set up unicast MAC address filter set its mac address */ axienet_iow(lp, XAE_UAW0_OFFSET, diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c index 6575f880f1be52fb9daa91ea7f88eb3199b614f5..7d101714c2ef4cc38201971d18b4b8f8bac1b8cd 100644 --- a/drivers/net/fjes/fjes_ethtool.c +++ b/drivers/net/fjes/fjes_ethtool.c @@ -175,16 +175,15 @@ static void fjes_get_drvinfo(struct net_device *netdev, "platform:%s", plat_dev->name); } -static int fjes_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int fjes_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { - ecmd->supported = 0; - ecmd->advertising = 0; - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->transceiver = XCVR_DUMMY1; - ecmd->port = PORT_NONE; - ethtool_cmd_speed_set(ecmd, 20000); /* 20Gb/s */ + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + ecmd->base.duplex = DUPLEX_FULL; + ecmd->base.autoneg = AUTONEG_DISABLE; + ecmd->base.port = PORT_NONE; + ecmd->base.speed = 20000; /* 20Gb/s */ return 0; } @@ -296,7 +295,6 @@ static int fjes_get_dump_data(struct net_device *netdev, } static const struct ethtool_ops fjes_ethtool_ops = { - .get_settings = fjes_get_settings, .get_drvinfo = fjes_get_drvinfo, .get_ethtool_stats = fjes_get_ethtool_stats, .get_strings = fjes_get_strings, @@ -306,6 +304,7 @@ static const struct ethtool_ops fjes_ethtool_ops = { .set_dump = 
fjes_set_dump, .get_dump_flag = fjes_get_dump_flag, .get_dump_data = fjes_get_dump_data, + .get_link_ksettings = fjes_get_link_ksettings, }; void fjes_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 7074b40ebd7f8e8cb0bcf0def7085fd91b8b45d6..dec5d563ab19091cdadd422eed9af8268ce5805e 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1244,7 +1244,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, metadata = true; if (data[IFLA_GENEVE_UDP_CSUM] && - !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) + nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) info.key.tun_flags |= TUNNEL_CSUM; if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 89698741682f41e2ea70868715ffcb4f4784e258..4fea1b3dfbb4457247c4bc98dc9378591c36ecd3 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -56,7 +56,10 @@ struct pdp_ctx { u16 af; struct in_addr ms_addr_ip4; - struct in_addr sgsn_addr_ip4; + struct in_addr peer_addr_ip4; + + struct sock *sk; + struct net_device *dev; atomic_t tx_seq; struct rcu_head rcu_head; @@ -66,11 +69,12 @@ struct pdp_ctx { struct gtp_dev { struct list_head list; - struct socket *sock0; - struct socket *sock1u; + struct sock *sk0; + struct sock *sk1u; struct net_device *dev; + unsigned int role; unsigned int hash_size; struct hlist_head *tid_hash; struct hlist_head *addr_hash; @@ -84,6 +88,8 @@ struct gtp_net { static u32 gtp_h_initval; +static void pdp_context_delete(struct pdp_ctx *pctx); + static inline u32 gtp0_hashfn(u64 tid) { u32 *tid32 = (u32 *) &tid; @@ -149,8 +155,8 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr) return NULL; } -static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, - unsigned int hdrlen) +static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, + unsigned int hdrlen, unsigned int role) { struct iphdr *iph; @@ -159,25 +165,62 @@ static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, iph = (struct iphdr *)(skb->data + hdrlen); - return iph->saddr == pctx->ms_addr_ip4.s_addr; + if (role == GTP_ROLE_SGSN) + return iph->daddr == pctx->ms_addr_ip4.s_addr; + else + return iph->saddr == pctx->ms_addr_ip4.s_addr; } -/* Check if the inner IP source address in this packet is assigned to any +/* Check if the inner IP address in this packet is assigned to any * existing mobile subscriber. */ -static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx, - unsigned int hdrlen) +static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, + unsigned int hdrlen, unsigned int role) { switch (ntohs(skb->protocol)) { case ETH_P_IP: - return gtp_check_src_ms_ipv4(skb, pctx, hdrlen); + return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); } return false; } +static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, + unsigned int hdrlen, unsigned int role) +{ + struct pcpu_sw_netstats *stats; + + if (!gtp_check_ms(skb, pctx, hdrlen, role)) { + netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); + return 1; + } + + /* Get rid of the GTP + UDP headers. */ + if (iptunnel_pull_header(skb, hdrlen, skb->protocol, + !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) + return -1; + + netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); + + /* Now that the UDP and the GTP header have been removed, set up the + * new network header. This is required by the upper layer to + * calculate the transport header. 
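+ * skb_reset_network_header() below points the network header at the + * inner IP header that iptunnel_pull_header() has just exposed.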
+ */ + skb_reset_network_header(skb); + + skb->dev = pctx->dev; + + stats = this_cpu_ptr(pctx->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + netif_rx(skb); + return 0; +} + /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ -static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, - bool xnet) +static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp0_header); @@ -201,17 +244,10 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, return 1; } - if (!gtp_check_src_ms(skb, pctx, hdrlen)) { - netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - return 1; - } - - /* Get rid of the GTP + UDP headers. */ - return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); + return gtp_rx(pctx, skb, hdrlen, gtp->role); } -static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, - bool xnet) +static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp1_header); @@ -250,37 +286,33 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb, return 1; } - if (!gtp_check_src_ms(skb, pctx, hdrlen)) { - netdev_dbg(gtp->dev, "No PDP ctx for this MS\n"); - return 1; - } - - /* Get rid of the GTP + UDP headers. */ - return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet); + return gtp_rx(pctx, skb, hdrlen, gtp->role); } -static void gtp_encap_disable(struct gtp_dev *gtp) +static void gtp_encap_destroy(struct sock *sk) { - if (gtp->sock0 && gtp->sock0->sk) { - udp_sk(gtp->sock0->sk)->encap_type = 0; - rcu_assign_sk_user_data(gtp->sock0->sk, NULL); - } - if (gtp->sock1u && gtp->sock1u->sk) { - udp_sk(gtp->sock1u->sk)->encap_type = 0; - rcu_assign_sk_user_data(gtp->sock1u->sk, NULL); - } + struct gtp_dev *gtp; - gtp->sock0 = NULL; - gtp->sock1u = NULL; + gtp = rcu_dereference_sk_user_data(sk); + if (gtp) { + udp_sk(sk)->encap_type = 0; + rcu_assign_sk_user_data(sk, NULL); + sock_put(sk); + } } -static void gtp_encap_destroy(struct sock *sk) +static void gtp_encap_disable_sock(struct sock *sk) { - struct gtp_dev *gtp; + if (!sk) + return; - gtp = rcu_dereference_sk_user_data(sk); - if (gtp) - gtp_encap_disable(gtp); + gtp_encap_destroy(sk); +} + +static void gtp_encap_disable(struct gtp_dev *gtp) +{ + gtp_encap_disable_sock(gtp->sk0); + gtp_encap_disable_sock(gtp->sk1u); } /* UDP encapsulation receive handler. See net/ipv4/udp.c. @@ -288,10 +320,8 @@ static void gtp_encap_destroy(struct sock *sk) */ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) { - struct pcpu_sw_netstats *stats; struct gtp_dev *gtp; - bool xnet; - int ret; + int ret = 0; gtp = rcu_dereference_sk_user_data(sk); if (!gtp) @@ -299,16 +329,14 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); - xnet = !net_eq(sock_net(sk), dev_net(gtp->dev)); - switch (udp_sk(sk)->encap_type) { case UDP_ENCAP_GTP0: netdev_dbg(gtp->dev, "received GTP0 packet\n"); - ret = gtp0_udp_encap_recv(gtp, skb, xnet); + ret = gtp0_udp_encap_recv(gtp, skb); break; case UDP_ENCAP_GTP1U: netdev_dbg(gtp->dev, "received GTP1U packet\n"); - ret = gtp1u_udp_encap_recv(gtp, skb, xnet); + ret = gtp1u_udp_encap_recv(gtp, skb); break; default: ret = -1; /* Shouldn't happen. 
*/ @@ -317,33 +345,17 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) switch (ret) { case 1: netdev_dbg(gtp->dev, "pass up to the process\n"); - return 1; + break; case 0: - netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n"); break; case -1: netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); kfree_skb(skb); - return 0; + ret = 0; + break; } - /* Now that the UDP and the GTP header have been removed, set up the - * new network header. This is required by the upper layer to - * calculate the transport header. - */ - skb_reset_network_header(skb); - - skb->dev = gtp->dev; - - stats = this_cpu_ptr(gtp->dev->tstats); - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - - netif_rx(skb); - - return 0; + return ret; } static int gtp_dev_init(struct net_device *dev) @@ -367,8 +379,9 @@ static void gtp_dev_uninit(struct net_device *dev) free_percpu(dev->tstats); } -static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4, - const struct sock *sk, __be32 daddr) +static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, + const struct sock *sk, + __be32 daddr) { memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_oif = sk->sk_bound_dev_if; @@ -377,7 +390,7 @@ static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4, fl4->flowi4_tos = RT_CONN_FLAGS(sk); fl4->flowi4_proto = sk->sk_protocol; - return ip_route_output_key(net, fl4); + return ip_route_output_key(sock_net(sk), fl4); } static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) @@ -466,7 +479,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, struct rtable *rt; struct flowi4 fl4; struct iphdr *iph; - struct sock *sk; __be16 df; int mtu; @@ -474,7 +486,11 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, * Prepend PDP header with TEI/TID from PDP ctx. 
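+ * A GGSN-role device tunnels traffic towards the MS and keys the + * lookup on the inner destination address; in SGSN role the MS is the + * sender, so the inner source address is used instead.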
*/ iph = ip_hdr(skb); - pctx = ipv4_pdp_find(gtp, iph->daddr); + if (gtp->role == GTP_ROLE_SGSN) + pctx = ipv4_pdp_find(gtp, iph->saddr); + else + pctx = ipv4_pdp_find(gtp, iph->daddr); + if (!pctx) { netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", &iph->daddr); @@ -482,40 +498,17 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, } netdev_dbg(dev, "found PDP context %p\n", pctx); - switch (pctx->gtp_version) { - case GTP_V0: - if (gtp->sock0) - sk = gtp->sock0->sk; - else - sk = NULL; - break; - case GTP_V1: - if (gtp->sock1u) - sk = gtp->sock1u->sk; - else - sk = NULL; - break; - default: - return -ENOENT; - } - - if (!sk) { - netdev_dbg(dev, "no userspace socket is available, skip\n"); - return -ENOENT; - } - - rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk, - pctx->sgsn_addr_ip4.s_addr); + rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to SSGN %pI4\n", - &pctx->sgsn_addr_ip4.s_addr); + &pctx->peer_addr_ip4.s_addr); dev->stats.tx_carrier_errors++; goto err; } if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to SSGN %pI4\n", - &pctx->sgsn_addr_ip4.s_addr); + &pctx->peer_addr_ip4.s_addr); dev->stats.collisions++; goto err_rt; } @@ -550,7 +543,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, goto err_rt; } - gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev); + gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); gtp_push_header(skb, pktinfo); return 0; @@ -640,27 +633,23 @@ static void gtp_link_setup(struct net_device *dev) static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); static void gtp_hashtable_free(struct gtp_dev *gtp); -static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, - int fd_gtp0, int fd_gtp1); +static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { - int hashsize, err, fd0, fd1; struct gtp_dev *gtp; struct gtp_net *gn; + int hashsize, err; - if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1]) + if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) return -EINVAL; gtp = netdev_priv(dev); - fd0 = nla_get_u32(data[IFLA_GTP_FD0]); - fd1 = nla_get_u32(data[IFLA_GTP_FD1]); - - err = gtp_encap_enable(dev, gtp, fd0, fd1); + err = gtp_encap_enable(gtp, data); if (err < 0) - goto out_err; + return err; if (!data[IFLA_GTP_PDP_HASHSIZE]) hashsize = 1024; @@ -688,7 +677,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gtp_hashtable_free(gtp); out_encap: gtp_encap_disable(gtp); -out_err: return err; } @@ -706,6 +694,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_FD0] = { .type = NLA_U32 }, [IFLA_GTP_FD1] = { .type = NLA_U32 }, [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, + [IFLA_GTP_ROLE] = { .type = NLA_U32 }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -747,21 +736,6 @@ static struct rtnl_link_ops gtp_link_ops __read_mostly = { .fill_info = gtp_fill_info, }; -static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[]) -{ - struct net *net; - - /* Examine the link attributes and figure out which network namespace - * we are talking about. 
- */ - if (tb[GTPA_NET_NS_FD]) - net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD])); - else - net = get_net(src_net); - - return net; -} - static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) { int i; @@ -791,93 +765,127 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) struct pdp_ctx *pctx; int i; - for (i = 0; i < gtp->hash_size; i++) { - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) { - hlist_del_rcu(&pctx->hlist_tid); - hlist_del_rcu(&pctx->hlist_addr); - kfree_rcu(pctx, rcu_head); - } - } + for (i = 0; i < gtp->hash_size; i++) + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); + synchronize_rcu(); kfree(gtp->addr_hash); kfree(gtp->tid_hash); } -static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, - int fd_gtp0, int fd_gtp1) +static struct sock *gtp_encap_enable_socket(int fd, int type, + struct gtp_dev *gtp) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; - struct socket *sock0, *sock1u; + struct socket *sock; + struct sock *sk; int err; - netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1); - - sock0 = sockfd_lookup(fd_gtp0, &err); - if (sock0 == NULL) { - netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0); - return -ENOENT; - } + pr_debug("enable gtp on %d, %d\n", fd, type); - if (sock0->sk->sk_protocol != IPPROTO_UDP) { - netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0); - err = -EINVAL; - goto err1; + sock = sockfd_lookup(fd, &err); + if (!sock) { + pr_debug("gtp socket fd=%d not found\n", fd); + return NULL; } - sock1u = sockfd_lookup(fd_gtp1, &err); - if (sock1u == NULL) { - netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1); - err = -ENOENT; - goto err1; + if (sock->sk->sk_protocol != IPPROTO_UDP) { + pr_debug("socket fd=%d not UDP\n", fd); + sk = ERR_PTR(-EINVAL); + goto out_sock; } - if (sock1u->sk->sk_protocol != IPPROTO_UDP) { - netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1); - err = -EINVAL; - goto err2; + if (rcu_dereference_sk_user_data(sock->sk)) { + sk = ERR_PTR(-EBUSY); + goto out_sock; } - netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u); - - gtp->sock0 = sock0; - gtp->sock1u = sock1u; + sk = sock->sk; + sock_hold(sk); tuncfg.sk_user_data = gtp; + tuncfg.encap_type = type; tuncfg.encap_rcv = gtp_encap_recv; tuncfg.encap_destroy = gtp_encap_destroy; - tuncfg.encap_type = UDP_ENCAP_GTP0; - setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg); - - tuncfg.encap_type = UDP_ENCAP_GTP1U; - setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg); + setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); - err = 0; -err2: - sockfd_put(sock1u); -err1: - sockfd_put(sock0); - return err; +out_sock: + sockfd_put(sock); + return sk; } -static struct net_device *gtp_find_dev(struct net *net, int ifindex) +static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) { - struct gtp_net *gn = net_generic(net, gtp_net_id); - struct gtp_dev *gtp; + struct sock *sk1u = NULL; + struct sock *sk0 = NULL; + unsigned int role = GTP_ROLE_GGSN; - list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { - if (ifindex == gtp->dev->ifindex) - return gtp->dev; + if (data[IFLA_GTP_FD0]) { + u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); + + sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp); + if (IS_ERR(sk0)) + return PTR_ERR(sk0); } - return NULL; + + if (data[IFLA_GTP_FD1]) { + u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); + + sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp); + if (IS_ERR(sk1u)) { + if (sk0) +
gtp_encap_disable_sock(sk0); + return PTR_ERR(sk1u); + } + } + + if (data[IFLA_GTP_ROLE]) { + role = nla_get_u32(data[IFLA_GTP_ROLE]); + if (role > GTP_ROLE_SGSN) + return -EINVAL; + } + + gtp->sk0 = sk0; + gtp->sk1u = sk1u; + gtp->role = role; + + return 0; +} + +static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) +{ + struct gtp_dev *gtp = NULL; + struct net_device *dev; + struct net *net; + + /* Examine the link attributes and figure out which network namespace + * we are talking about. + */ + if (nla[GTPA_NET_NS_FD]) + net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD])); + else + net = get_net(src_net); + + if (IS_ERR(net)) + return NULL; + + /* Check if there's an existing gtpX device to configure */ + dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); + if (dev->netdev_ops == &gtp_netdev_ops) + gtp = netdev_priv(dev); + + put_net(net); + return gtp; } static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); pctx->af = AF_INET; - pctx->sgsn_addr_ip4.s_addr = - nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]); + pctx->peer_addr_ip4.s_addr = + nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); pctx->ms_addr_ip4.s_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); @@ -899,9 +907,10 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) } } -static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info) +static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { - struct gtp_dev *gtp = netdev_priv(dev); + struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; struct pdp_ctx *pctx; bool found = false; @@ -940,6 +949,9 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info) if (pctx == NULL) return -ENOMEM; + sock_hold(sk); + pctx->sk = sk; + pctx->dev = gtp->dev; ipv4_pdp_fill(pctx, info); atomic_set(&pctx->tx_seq, 0); @@ -963,31 +975,50 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info) switch (pctx->gtp_version) { case GTP_V0: netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", - pctx->u.v0.tid, &pctx->sgsn_addr_ip4, + pctx->u.v0.tid, &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); break; case GTP_V1: netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, - &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx); + &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); break; } return 0; } +static void pdp_context_free(struct rcu_head *head) +{ + struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head); + + sock_put(pctx->sk); + kfree(pctx); +} + +static void pdp_context_delete(struct pdp_ctx *pctx) +{ + hlist_del_rcu(&pctx->hlist_tid); + hlist_del_rcu(&pctx->hlist_addr); + call_rcu(&pctx->rcu_head, pdp_context_free); +} + static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) { - struct net_device *dev; - struct net *net; + unsigned int version; + struct gtp_dev *gtp; + struct sock *sk; + int err; if (!info->attrs[GTPA_VERSION] || !info->attrs[GTPA_LINK] || - !info->attrs[GTPA_SGSN_ADDRESS] || + !info->attrs[GTPA_PEER_ADDRESS] || !info->attrs[GTPA_MS_ADDRESS]) return -EINVAL; - switch (nla_get_u32(info->attrs[GTPA_VERSION])) { + version = nla_get_u32(info->attrs[GTPA_VERSION]); + + switch (version) { case GTP_V0: if (!info->attrs[GTPA_TID] || !info->attrs[GTPA_FLOW]) @@ -1003,77 +1034,101 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) return
-EINVAL; } - net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); - if (IS_ERR(net)) - return PTR_ERR(net); + rcu_read_lock(); - /* Check if there's an existing gtpX device to configure */ - dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); - if (dev == NULL) { - put_net(net); - return -ENODEV; + gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); + if (!gtp) { + err = -ENODEV; + goto out_unlock; } - put_net(net); - return ipv4_pdp_add(dev, info); + if (version == GTP_V0) + sk = gtp->sk0; + else if (version == GTP_V1) + sk = gtp->sk1u; + else + sk = NULL; + + if (!sk) { + err = -ENODEV; + goto out_unlock; + } + + err = ipv4_pdp_add(gtp, sk, info); + +out_unlock: + rcu_read_unlock(); + return err; } -static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) +static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net, + struct nlattr *nla[]) { - struct net_device *dev; - struct pdp_ctx *pctx; struct gtp_dev *gtp; - struct net *net; - if (!info->attrs[GTPA_VERSION] || - !info->attrs[GTPA_LINK]) - return -EINVAL; + gtp = gtp_find_dev(net, nla); + if (!gtp) + return ERR_PTR(-ENODEV); - net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); - if (IS_ERR(net)) - return PTR_ERR(net); + if (nla[GTPA_MS_ADDRESS]) { + __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]); - /* Check if there's an existing gtpX device to configure */ - dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); - if (dev == NULL) { - put_net(net); - return -ENODEV; + return ipv4_pdp_find(gtp, ip); + } else if (nla[GTPA_VERSION]) { + u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]); + + if (gtp_version == GTP_V0 && nla[GTPA_TID]) + return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID])); + else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) + return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI])); } - put_net(net); - gtp = netdev_priv(dev); + return ERR_PTR(-EINVAL); +} - switch (nla_get_u32(info->attrs[GTPA_VERSION])) { - case GTP_V0: - if (!info->attrs[GTPA_TID]) - return -EINVAL; - pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID])); - break; - case GTP_V1: - if (!info->attrs[GTPA_I_TEI]) - return -EINVAL; - pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI])); - break; +static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[]) +{ + struct pdp_ctx *pctx; - default: + if (nla[GTPA_LINK]) + pctx = gtp_find_pdp_by_link(net, nla); + else + pctx = ERR_PTR(-EINVAL); + + if (!pctx) + pctx = ERR_PTR(-ENOENT); + + return pctx; +} + +static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) +{ + struct pdp_ctx *pctx; + int err = 0; + + if (!info->attrs[GTPA_VERSION]) return -EINVAL; - } - if (pctx == NULL) - return -ENOENT; + rcu_read_lock(); + + pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); + if (IS_ERR(pctx)) { + err = PTR_ERR(pctx); + goto out_unlock; + } if (pctx->gtp_version == GTP_V0) - netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", + netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", pctx->u.v0.tid, pctx); else if (pctx->gtp_version == GTP_V1) - netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", + netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); - hlist_del_rcu(&pctx->hlist_tid); - hlist_del_rcu(&pctx->hlist_addr); - kfree_rcu(pctx, rcu_head); + pdp_context_delete(pctx); - return 0; +out_unlock: + rcu_read_unlock(); + return err; } static struct genl_family gtp_genl_family; @@ -1089,7 +1144,7 @@ static int 
gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || - nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) || + nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; @@ -1117,59 +1172,17 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx = NULL; - struct net_device *dev; struct sk_buff *skb2; - struct gtp_dev *gtp; - u32 gtp_version; - struct net *net; int err; - if (!info->attrs[GTPA_VERSION] || - !info->attrs[GTPA_LINK]) - return -EINVAL; - - gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); - switch (gtp_version) { - case GTP_V0: - case GTP_V1: - break; - default: + if (!info->attrs[GTPA_VERSION]) return -EINVAL; - } - - net = gtp_genl_get_net(sock_net(skb->sk), info->attrs); - if (IS_ERR(net)) - return PTR_ERR(net); - - /* Check if there's an existing gtpX device to configure */ - dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK])); - if (dev == NULL) { - put_net(net); - return -ENODEV; - } - put_net(net); - - gtp = netdev_priv(dev); rcu_read_lock(); - if (gtp_version == GTP_V0 && - info->attrs[GTPA_TID]) { - u64 tid = nla_get_u64(info->attrs[GTPA_TID]); - - pctx = gtp0_pdp_find(gtp, tid); - } else if (gtp_version == GTP_V1 && - info->attrs[GTPA_I_TEI]) { - u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]); - - pctx = gtp1_pdp_find(gtp, tid); - } else if (info->attrs[GTPA_MS_ADDRESS]) { - __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); - - pctx = ipv4_pdp_find(gtp, ip); - } - if (pctx == NULL) { - err = -ENOENT; + pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); + if (IS_ERR(pctx)) { + err = PTR_ERR(pctx); goto err_unlock; } @@ -1242,7 +1255,7 @@ static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, - [GTPA_SGSN_ADDRESS] = { .type = NLA_U32, }, + [GTPA_PEER_ADDRESS] = { .type = NLA_U32, }, [GTPA_MS_ADDRESS] = { .type = NLA_U32, }, [GTPA_FLOW] = { .type = NLA_U16, }, [GTPA_NET_NS_FD] = { .type = NLA_U32, }, diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index db23cb36ae5cb9ec50493b00329f276ecdd22ae9..262b2ea576a38e4bb7d1c442f2dfcbc2c40fa302 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -196,6 +196,7 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_tcp_ip_checksum_info *csum_info, const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); +int netvsc_poll(struct napi_struct *napi, int budget); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, @@ -632,7 +633,7 @@ struct nvsp_message { #define NETVSC_PACKET_SIZE 4096 -#define VRSS_SEND_TAB_SIZE 16 +#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 #define VRSS_CHANNEL_DEFAULT 8 @@ -685,7 +686,7 @@ struct net_device_context { /* point back to our device context */ struct hv_device *device_ctx; /* netvsc_device */ - struct netvsc_device *nvdev; + struct netvsc_device __rcu *nvdev; /* reconfigure work */ struct delayed_work dwork; /* last reconfig time */ @@ -707,9 +708,6 @@ struct net_device_context { u32 speed; struct 
netvsc_ethtool_stats eth_stats; - /* the device is going away */ - bool start_remove; - /* State to manage the associated VF interface. */ struct net_device __rcu *vf_netdev; @@ -722,6 +720,8 @@ struct net_device_context { /* Per channel data */ struct netvsc_channel { struct vmbus_channel *channel; + const struct vmpacket_descriptor *desc; + struct napi_struct napi; struct multi_send_data msd; struct multi_recv_comp mrc; atomic_t queue_sends; @@ -760,8 +760,8 @@ struct netvsc_device { u32 max_chn; u32 num_chn; - spinlock_t sc_lock; /* Protects num_sc_offered variable */ - u32 num_sc_offered; + + refcount_t sc_offered; /* Holds rndis device info */ void *extension; @@ -776,6 +776,8 @@ struct netvsc_device { atomic_t open_cnt; struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; + + struct rcu_head rcu; }; static inline struct netvsc_device * @@ -1424,9 +1426,6 @@ struct rndis_message { ((void *) rndis_msg) -#define __struct_bcount(x) - - #define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \ sizeof(union rndis_message_container)) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 15ef713d96c0887ec7929ff8d4be3ec3a6cac291..15749d359e600062efd659c04966d316d586a110 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -80,8 +80,10 @@ static struct netvsc_device *alloc_net_device(void) return net_device; } -static void free_netvsc_device(struct netvsc_device *nvdev) +static void free_netvsc_device(struct rcu_head *head) { + struct netvsc_device *nvdev + = container_of(head, struct netvsc_device, rcu); int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) @@ -90,14 +92,9 @@ static void free_netvsc_device(struct netvsc_device *nvdev) kfree(nvdev); } - -static inline bool netvsc_channel_idle(const struct netvsc_device *net_device, - u16 q_idx) +static void free_netvsc_device_rcu(struct netvsc_device *nvdev) { - const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; - - return atomic_read(&net_device->num_outstanding_recvs) == 0 && - atomic_read(&nvchan->queue_sends) == 0; + call_rcu(&nvdev->rcu, free_netvsc_device); } static struct netvsc_device *get_outbound_net_device(struct hv_device *device) @@ -138,6 +135,13 @@ static void netvsc_destroy_buf(struct hv_device *device) sizeof(struct nvsp_message), (unsigned long)revoke_packet, VM_PKT_DATA_INBAND, 0); + /* If the failure is because the channel is rescinded, + * ignore the failure since we cannot send on a rescinded + * channel. This would allow us to properly clean up + * even when the channel is rescinded. + */ + if (device->channel->rescind) + ret = 0; /* * If we failed here, we might as well return and * have a leak rather than continue and a bugchk @@ -198,6 +202,15 @@ static void netvsc_destroy_buf(struct hv_device *device) sizeof(struct nvsp_message), (unsigned long)revoke_packet, VM_PKT_DATA_INBAND, 0); + + /* If the failure is because the channel is rescinded, + * ignore the failure since we cannot send on a rescinded + * channel. This would allow us to properly clean up + * even when the channel is rescinded.
+ */ + if (device->channel->rescind) + ret = 0; + /* If we failed here, we might as well return and * have a leak rather than continue and a bugchk */ @@ -555,10 +568,11 @@ void netvsc_device_remove(struct hv_device *device) struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); struct netvsc_device *net_device = net_device_ctx->nvdev; + int i; netvsc_disconnect_vsp(device); - net_device_ctx->nvdev = NULL; + RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); /* * At this point, no one should be accessing net_device @@ -569,8 +583,12 @@ void netvsc_device_remove(struct hv_device *device) /* Now, we can close the channel safely */ vmbus_close(device->channel); + /* And disassociate NAPI context from device */ + for (i = 0; i < net_device->num_chn; i++) + netif_napi_del(&net_device->chan_table[i].napi); + /* Release all resources */ - free_netvsc_device(net_device); + free_netvsc_device_rcu(net_device); } #define RING_AVAIL_PERCENT_HIWATER 20 @@ -599,11 +617,11 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device, static void netvsc_send_tx_complete(struct netvsc_device *net_device, struct vmbus_channel *incoming_channel, struct hv_device *device, - struct vmpacket_descriptor *packet) + const struct vmpacket_descriptor *desc, + int budget) { - struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id; + struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id; struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); struct vmbus_channel *channel = device->channel; u16 q_idx = 0; int queue_sends; @@ -627,7 +645,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, tx_stats->bytes += packet->total_bytes; u64_stats_update_end(&tx_stats->syncp); - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); } queue_sends = @@ -637,7 +655,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, wake_up(&net_device->wait_drain); if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - !net_device_ctx->start_remove && (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); @@ -646,14 +663,12 @@ static void netvsc_send_completion(struct netvsc_device *net_device, struct vmbus_channel *incoming_channel, struct hv_device *device, - struct vmpacket_descriptor *packet) + const struct vmpacket_descriptor *desc, + int budget) { - struct nvsp_message *nvsp_packet; + struct nvsp_message *nvsp_packet = hv_pkt_data(desc); struct net_device *ndev = hv_get_drvdata(device); - nvsp_packet = (struct nvsp_message *)((unsigned long)packet + - (packet->offset8 << 3)); - switch (nvsp_packet->hdr.msg_type) { case NVSP_MSG_TYPE_INIT_COMPLETE: case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: @@ -667,7 +682,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device, case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: netvsc_send_tx_complete(net_device, incoming_channel, - device, packet); + device, desc, budget); break; default: @@ -709,8 +724,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, packet->page_buf_cnt; /* Add padding */ - if (skb && skb->xmit_more && remain && - !packet->cp_partial) { + if (skb->xmit_more && remain && !packet->cp_partial) { padding = net_device->pkt_align - remain; rndis_msg->msg_len += padding;
packet->total_data_buflen += padding; @@ -868,9 +882,7 @@ int netvsc_send(struct hv_device *device, if (msdp->pkt) msd_len = msdp->pkt->total_data_buflen; - try_batch = (skb != NULL) && msd_len > 0 && msdp->count < - net_device->max_pkt; - + try_batch = msd_len > 0 && msdp->count < net_device->max_pkt; if (try_batch && msd_len + pktlen + net_device->pkt_align < net_device->send_section_size) { section_index = msdp->pkt->send_buf_index; @@ -880,7 +892,7 @@ int netvsc_send(struct hv_device *device, section_index = msdp->pkt->send_buf_index; packet->cp_partial = true; - } else if ((skb != NULL) && pktlen + net_device->pkt_align < + } else if (pktlen + net_device->pkt_align < net_device->send_section_size) { section_index = netvsc_get_next_send_section(net_device); if (section_index != NETVSC_INVALID_INDEX) { @@ -1065,28 +1077,29 @@ static inline struct recv_comp_data *get_recv_comp_slot( return rcd; } -static void netvsc_receive(struct net_device *ndev, +static int netvsc_receive(struct net_device *ndev, struct netvsc_device *net_device, struct net_device_context *net_device_ctx, struct hv_device *device, struct vmbus_channel *channel, - struct vmtransfer_page_packet_header *vmxferpage_packet, + const struct vmpacket_descriptor *desc, struct nvsp_message *nvsp) { + const struct vmtransfer_page_packet_header *vmxferpage_packet + = container_of(desc, const struct vmtransfer_page_packet_header, d); + u16 q_idx = channel->offermsg.offer.sub_channel_index; char *recv_buf = net_device->recv_buf; u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; int ret; - struct recv_comp_data *rcd; - u16 q_idx = channel->offermsg.offer.sub_channel_index; /* Make sure this is a valid nvsp packet */ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { netif_err(net_device_ctx, rx_err, ndev, "Unknown nvsp packet type received %u\n", nvsp->hdr.msg_type); - return; + return 0; } if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { @@ -1094,7 +1107,7 @@ static void netvsc_receive(struct net_device *ndev, "Invalid xfer page set id - expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID, vmxferpage_packet->xfer_pageset_id); - return; + return 0; } count = vmxferpage_packet->range_cnt; @@ -1110,26 +1123,26 @@ static void netvsc_receive(struct net_device *ndev, channel, data, buflen); } - if (!net_device->chan_table[q_idx].mrc.buf) { + if (net_device->chan_table[q_idx].mrc.buf) { + struct recv_comp_data *rcd; + + rcd = get_recv_comp_slot(net_device, channel, q_idx); + if (rcd) { + rcd->tid = vmxferpage_packet->d.trans_id; + rcd->status = status; + } else { + netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", + q_idx, vmxferpage_packet->d.trans_id); + } + } else { ret = netvsc_send_recv_completion(channel, vmxferpage_packet->d.trans_id, status); if (ret) netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", q_idx, vmxferpage_packet->d.trans_id, ret); - return; } - - rcd = get_recv_comp_slot(net_device, channel, q_idx); - - if (!rcd) { - netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", - q_idx, vmxferpage_packet->d.trans_id); - return; - } - - rcd->tid = vmxferpage_packet->d.trans_id; - rcd->status = status; + return count; } static void netvsc_send_table(struct hv_device *hdev, @@ -1175,28 +1188,25 @@ static inline void netvsc_receive_inband(struct hv_device *hdev, } } -static void netvsc_process_raw_pkt(struct hv_device *device, - struct vmbus_channel *channel, - struct netvsc_device *net_device, - struct net_device *ndev, - u64 request_id, - struct 
vmpacket_descriptor *desc) +static int netvsc_process_raw_pkt(struct hv_device *device, + struct vmbus_channel *channel, + struct netvsc_device *net_device, + struct net_device *ndev, + const struct vmpacket_descriptor *desc, + int budget) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct nvsp_message *nvmsg - = (struct nvsp_message *)((unsigned long)desc - + (desc->offset8 << 3)); + struct nvsp_message *nvmsg = hv_pkt_data(desc); switch (desc->type) { case VM_PKT_COMP: - netvsc_send_completion(net_device, channel, device, desc); + netvsc_send_completion(net_device, channel, device, + desc, budget); break; case VM_PKT_DATA_USING_XFER_PAGES: - netvsc_receive(ndev, net_device, net_device_ctx, - device, channel, - (struct vmtransfer_page_packet_header *)desc, - nvmsg); + return netvsc_receive(ndev, net_device, net_device_ctx, + device, channel, desc, nvmsg); break; case VM_PKT_DATA_INBAND: @@ -1205,53 +1215,74 @@ static void netvsc_process_raw_pkt(struct hv_device *device, default: netdev_err(ndev, "unhandled packet type %d, tid %llx\n", - desc->type, request_id); + desc->type, desc->trans_id); break; } + + return 0; } -void netvsc_channel_cb(void *context) +static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel) { - struct vmbus_channel *channel = context; - u16 q_idx = channel->offermsg.offer.sub_channel_index; - struct hv_device *device; - struct netvsc_device *net_device; - struct vmpacket_descriptor *desc; - struct net_device *ndev; - bool need_to_commit = false; + struct vmbus_channel *primary = channel->primary_channel; - if (channel->primary_channel != NULL) - device = channel->primary_channel->device_obj; - else - device = channel->device_obj; + return primary ? primary->device_obj : channel->device_obj; +} - ndev = hv_get_drvdata(device); - if (unlikely(!ndev)) - return; +/* Network processing softirq + * Process data in incoming ring buffer from host + * Stops when ring is empty or budget is met or exceeded. + */ +int netvsc_poll(struct napi_struct *napi, int budget) +{ + struct netvsc_channel *nvchan + = container_of(napi, struct netvsc_channel, napi); + struct vmbus_channel *channel = nvchan->channel; + struct hv_device *device = netvsc_channel_to_device(channel); + u16 q_idx = channel->offermsg.offer.sub_channel_index; + struct net_device *ndev = hv_get_drvdata(device); + struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); + int work_done = 0; - net_device = net_device_to_netvsc_device(ndev); - if (unlikely(!net_device)) - return; + /* If starting a new interval */ + if (!nvchan->desc) + nvchan->desc = hv_pkt_iter_first(channel); - if (unlikely(net_device->destroy && - netvsc_channel_idle(net_device, q_idx))) - return; + while (nvchan->desc && work_done < budget) { + work_done += netvsc_process_raw_pkt(device, channel, net_device, + ndev, nvchan->desc, budget); + nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); + } - /* commit_rd_index() -> hv_signal_on_read() needs this. */ - init_cached_read_index(channel); + /* If receive ring was exhausted + * and not doing busy poll + * then re-enable host interrupts + * and reschedule if ring is not empty. 
+ */ + if (work_done < budget && + napi_complete_done(napi, work_done) && + hv_end_read(&channel->inbound) != 0) + napi_reschedule(napi); - while ((desc = get_next_pkt_raw(channel)) != NULL) { - netvsc_process_raw_pkt(device, channel, net_device, - ndev, desc->trans_id, desc); + netvsc_chk_recv_comp(net_device, channel, q_idx); - put_pkt_raw(channel, desc); - need_to_commit = true; - } + /* Driver may overshoot since multiple packets per descriptor */ + return min(work_done, budget); +} - if (need_to_commit) - commit_rd_index(channel); +/* Call back when data is available in host ring buffer. + * Processing is deferred until network softirq (NAPI) + */ +void netvsc_channel_cb(void *context) +{ + struct netvsc_channel *nvchan = context; - netvsc_chk_recv_comp(net_device, channel, q_idx); + if (napi_schedule_prep(&nvchan->napi)) { + /* disable interrupts from host */ + hv_begin_read(&nvchan->channel->inbound); + + __napi_schedule(&nvchan->napi); + } } /* @@ -1273,10 +1304,29 @@ int netvsc_device_add(struct hv_device *device, net_device->ring_size = ring_size; + /* Because the device uses NAPI, all the interrupt batching and + * control is done via Net softirq, not the channel handling + */ + set_channel_read_mode(device->channel, HV_CALL_ISR); + + /* If we're reopening the device we may have multiple queues, fill the + * chn_table with the default channel to use it before subchannels are + * opened. + * Initialize the channel state before we open; + * we can be interrupted as soon as we open the channel. + */ + + for (i = 0; i < VRSS_CHANNEL_MAX; i++) { + struct netvsc_channel *nvchan = &net_device->chan_table[i]; + + nvchan->channel = device->channel; + } + /* Open the channel */ ret = vmbus_open(device->channel, ring_size * PAGE_SIZE, ring_size * PAGE_SIZE, NULL, 0, - netvsc_channel_cb, device->channel); + netvsc_channel_cb, + net_device->chan_table); if (ret != 0) { netdev_err(ndev, "unable to open channel: %d\n", ret); @@ -1286,19 +1336,15 @@ int netvsc_device_add(struct hv_device *device, /* Channel is opened */ netdev_dbg(ndev, "hv_netvsc channel opened successfully\n"); - /* If we're reopening the device we may have multiple queues, fill the - * chn_table with the default channel to use it before subchannels are - * opened. - */ - for (i = 0; i < VRSS_CHANNEL_MAX; i++) - net_device->chan_table[i].channel = device->channel; + /* Enable NAPI handler for init callbacks */ + netif_napi_add(ndev, &net_device->chan_table[0].napi, + netvsc_poll, NAPI_POLL_WEIGHT); + napi_enable(&net_device->chan_table[0].napi); /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is * populated.
*/ - wmb(); - - net_device_ctx->nvdev = net_device; + rcu_assign_pointer(net_device_ctx->nvdev, net_device); /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device); @@ -1311,11 +1357,13 @@ int netvsc_device_add(struct hv_device *device, return ret; close: + netif_napi_del(&net_device->chan_table[0].napi); + /* Now, we can close the channel safely */ vmbus_close(device->channel); cleanup: - free_netvsc_device(net_device); + free_netvsc_device(&net_device->rcu); return ret; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5ede87f30463e8211ef2828a8f74d4951c4166a6..4421a6d0037579bbff80ee909d42510947f77690 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -62,7 +62,7 @@ static void do_set_multicast(struct work_struct *w) container_of(w, struct net_device_context, work); struct hv_device *device_obj = ndevctx->device_ctx; struct net_device *ndev = hv_get_drvdata(device_obj); - struct netvsc_device *nvdev = ndevctx->nvdev; + struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev); struct rndis_device *rdev; if (!nvdev) @@ -116,7 +116,7 @@ static int netvsc_open(struct net_device *net) static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; struct vmbus_channel *chn; @@ -191,6 +191,54 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } +/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute + * hash for non-TCP traffic with only IP numbers. + */ +static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk) +{ + struct flow_keys flow; + u32 hash; + static u32 hashrnd __read_mostly; + + net_get_random_once(&hashrnd, sizeof(hashrnd)); + + if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) + return 0; + + if (flow.basic.ip_proto == IPPROTO_TCP) { + return skb_get_hash(skb); + } else { + if (flow.basic.n_proto == htons(ETH_P_IP)) + hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd); + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); + else + hash = 0; + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); + } + + return hash; +} + +static inline int netvsc_get_tx_queue(struct net_device *ndev, + struct sk_buff *skb, int old_idx) +{ + const struct net_device_context *ndc = netdev_priv(ndev); + struct sock *sk = skb->sk; + int q_idx; + + q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) & + (VRSS_SEND_TAB_SIZE - 1)]; + + /* If queue index changed record the new value */ + if (q_idx != old_idx && + sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) + sk_tx_queue_set(sk, q_idx); + + return q_idx; +} + /* * Select queue for transmit. 
* @@ -205,24 +253,22 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { - struct net_device_context *net_device_ctx = netdev_priv(ndev); unsigned int num_tx_queues = ndev->real_num_tx_queues; - struct sock *sk = skb->sk; - int q_idx = sk_tx_queue_get(sk); - - if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) { - u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE); - int new_idx; - - new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues; + int q_idx = sk_tx_queue_get(skb->sk); - if (q_idx != new_idx && sk && - sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) - sk_tx_queue_set(sk, new_idx); - - q_idx = new_idx; + if (q_idx < 0 || skb->ooo_okay) { + /* If forwarding a packet, we use the recorded queue when + * available for better cache locality. + */ + if (skb_rx_queue_recorded(skb)) + q_idx = skb_get_rx_queue(skb); + else + q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); } + while (unlikely(q_idx >= num_tx_queues)) + q_idx -= num_tx_queues; + return q_idx; } @@ -584,13 +630,14 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, } static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, + struct napi_struct *napi, const struct ndis_tcp_ip_checksum_info *csum_info, const struct ndis_pkt_8021q_info *vlan, void *data, u32 buflen) { struct sk_buff *skb; - skb = netdev_alloc_skb_ip_align(net, buflen); + skb = napi_alloc_skb(napi, buflen); if (!skb) return skb; @@ -636,12 +683,12 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device; + u16 q_idx = channel->offermsg.offer.sub_channel_index; + struct netvsc_channel *nvchan; struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats *rx_stats; - u16 q_idx = channel->offermsg.offer.sub_channel_index; - if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; @@ -654,13 +701,20 @@ int netvsc_recv_callback(struct net_device *net, * interface in the guest. */ rcu_read_lock(); + net_device = rcu_dereference(net_device_ctx->nvdev); + if (unlikely(!net_device)) + goto drop; + + nvchan = &net_device->chan_table[q_idx]; vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); if (vf_netdev && (vf_netdev->flags & IFF_UP)) net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? */ - skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len); + skb = netvsc_alloc_recv_skb(net, &nvchan->napi, + csum_info, vlan, data, len); if (unlikely(!skb)) { +drop: ++net->stats.rx_dropped; rcu_read_unlock(); return NVSP_STAT_FAIL; @@ -674,7 +728,7 @@ int netvsc_recv_callback(struct net_device *net, * on the synthetic device because modifying the VF device * statistics will not work correctly. */ - rx_stats = &net_device->chan_table[q_idx].rx_stats; + rx_stats = &nvchan->rx_stats; u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += len; @@ -685,12 +739,7 @@ int netvsc_recv_callback(struct net_device *net, ++rx_stats->multicast; u64_stats_update_end(&rx_stats->syncp); - /* - * Pass the skb back up. Network stack will deallocate the skb when it - * is done. - * TODO - use NAPI? 
- */ - netif_receive_skb(skb); + napi_gro_receive(&nvchan->napi, skb); rcu_read_unlock(); return 0; @@ -707,7 +756,7 @@ static void netvsc_get_channels(struct net_device *net, struct ethtool_channels *channel) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); if (nvdev) { channel->max_combined = nvdev->max_chn; @@ -744,8 +793,9 @@ static int netvsc_set_channels(struct net_device *net, { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int count = channels->combined_count; + bool was_running; int ret; /* We do not support separate count for rx, tx, or other */ @@ -756,7 +806,7 @@ static int netvsc_set_channels(struct net_device *net, if (count > net->num_tx_queues || count > net->num_rx_queues) return -EINVAL; - if (net_device_ctx->start_remove || !nvdev || nvdev->destroy) + if (!nvdev || nvdev->destroy) return -ENODEV; if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) @@ -765,11 +815,13 @@ static int netvsc_set_channels(struct net_device *net, if (count > nvdev->max_chn) return -EINVAL; - ret = netvsc_close(net); - if (ret) - return ret; + was_running = netif_running(net); + if (was_running) { + ret = netvsc_close(net); + if (ret) + return ret; + } - net_device_ctx->start_remove = true; rndis_filter_device_remove(dev, nvdev); ret = netvsc_set_queues(net, dev, count); @@ -778,8 +830,8 @@ static int netvsc_set_channels(struct net_device *net, else netvsc_set_queues(net, dev, nvdev->num_chn); - netvsc_open(net); - net_device_ctx->start_remove = false; + if (was_running) + ret = netvsc_open(net); /* We may have missed link change notifications */ schedule_delayed_work(&net_device_ctx->dwork, 0); @@ -787,18 +839,19 @@ static int netvsc_set_channels(struct net_device *net, return ret; } -static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd) +static bool +netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd) { - struct ethtool_cmd diff1 = *cmd; - struct ethtool_cmd diff2 = {}; + struct ethtool_link_ksettings diff1 = *cmd; + struct ethtool_link_ksettings diff2 = {}; - ethtool_cmd_speed_set(&diff1, 0); - diff1.duplex = 0; + diff1.base.speed = 0; + diff1.base.duplex = 0; /* advertising and cmd are usually set */ - diff1.advertising = 0; - diff1.cmd = 0; + ethtool_link_ksettings_zero_link_mode(&diff1, advertising); + diff1.base.cmd = 0; /* We set port to PORT_OTHER */ - diff2.port = PORT_OTHER; + diff2.base.port = PORT_OTHER; return !memcmp(&diff1, &diff2, sizeof(diff1)); } @@ -808,33 +861,35 @@ static void netvsc_init_settings(struct net_device *dev) struct net_device_context *ndc = netdev_priv(dev); ndc->speed = SPEED_UNKNOWN; - ndc->duplex = DUPLEX_UNKNOWN; + ndc->duplex = DUPLEX_FULL; } -static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int netvsc_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct net_device_context *ndc = netdev_priv(dev); - ethtool_cmd_speed_set(cmd, ndc->speed); - cmd->duplex = ndc->duplex; - cmd->port = PORT_OTHER; + cmd->base.speed = ndc->speed; + cmd->base.duplex = ndc->duplex; + cmd->base.port = PORT_OTHER; return 0; } -static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int 
netvsc_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct net_device_context *ndc = netdev_priv(dev); u32 speed; - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; if (!ethtool_validate_speed(speed) || - !ethtool_validate_duplex(cmd->duplex) || + !ethtool_validate_duplex(cmd->base.duplex) || !netvsc_validate_ethtool_ss_cmd(cmd)) return -EINVAL; ndc->speed = speed; - ndc->duplex = cmd->duplex; + ndc->duplex = cmd->base.duplex; return 0; } @@ -842,24 +897,27 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); - struct netvsc_device *nvdev = ndevctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; - int ret; + bool was_running; + int ret = 0; - if (ndevctx->start_remove || !nvdev || nvdev->destroy) + if (!nvdev || nvdev->destroy) return -ENODEV; - ret = netvsc_close(ndev); - if (ret) - goto out; + was_running = netif_running(ndev); + if (was_running) { + ret = netvsc_close(ndev); + if (ret) + return ret; + } memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; device_info.max_num_vrss_chns = nvdev->num_chn; - ndevctx->start_remove = true; rndis_filter_device_remove(hdev, nvdev); /* 'nvdev' has been freed in rndis_filter_device_remove() -> @@ -872,9 +930,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) rndis_filter_device_add(hdev, &device_info); -out: - netvsc_open(ndev); - ndevctx->start_remove = false; + if (was_running) + ret = netvsc_open(ndev); /* We may have missed link change notifications */ schedule_delayed_work(&ndevctx->dwork, 0); @@ -886,7 +943,7 @@ static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = ndev_ctx->nvdev; + struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); int i; if (!nvdev) @@ -971,7 +1028,10 @@ static const struct { static int netvsc_get_sset_count(struct net_device *dev, int string_set) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = ndc->nvdev; + struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + + if (!nvdev) + return -ENODEV; switch (string_set) { case ETH_SS_STATS: @@ -985,13 +1045,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = ndc->nvdev; + struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; unsigned int start; u64 packets, bytes; int i, j; + if (!nvdev) + return; + for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); @@ -1020,10 +1083,13 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = ndc->nvdev; + struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); u8 *p = data; int i; + if (!nvdev) + return; + switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) @@ -1075,7 +1141,10 @@ netvsc_get_rxnfc(struct 
net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = ndc->nvdev; + struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + + if (!nvdev) + return -ENODEV; switch (info->cmd) { case ETHTOOL_GRXRINGS: @@ -1111,13 +1180,17 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *ndev = ndc->nvdev; - struct rndis_device *rndis_dev = ndev->extension; + struct netvsc_device *ndev = rcu_dereference(ndc->nvdev); + struct rndis_device *rndis_dev; int i; + if (!ndev) + return -ENODEV; + if (hfunc) *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ + rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) indir[i] = rndis_dev->ind_table[i]; @@ -1133,13 +1206,17 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *ndev = ndc->nvdev; - struct rndis_device *rndis_dev = ndev->extension; + struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); + struct rndis_device *rndis_dev; int i; + if (!ndev) + return -ENODEV; + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; + rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) if (indir[i] >= dev->num_rx_queues) @@ -1168,13 +1245,13 @@ static const struct ethtool_ops ethtool_ops = { .get_channels = netvsc_get_channels, .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, - .get_settings = netvsc_get_settings, - .set_settings = netvsc_set_settings, .get_rxnfc = netvsc_get_rxnfc, .get_rxfh_key_size = netvsc_get_rxfh_key_size, .get_rxfh_indir_size = netvsc_rss_indir_size, .get_rxfh = netvsc_get_rxfh, .set_rxfh = netvsc_set_rxfh, + .get_link_ksettings = netvsc_get_link_ksettings, + .set_link_ksettings = netvsc_set_link_ksettings, }; static const struct net_device_ops device_ops = { @@ -1210,10 +1287,10 @@ static void netvsc_link_change(struct work_struct *w) unsigned long flags, next_reconfig, delay; rtnl_lock(); - if (ndev_ctx->start_remove) + net_device = rtnl_dereference(ndev_ctx->nvdev); + if (!net_device) goto out_unlock; - net_device = ndev_ctx->nvdev; rdev = net_device->extension; next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT; @@ -1354,7 +1431,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = net_device_ctx->nvdev; + netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) return NOTIFY_DONE; @@ -1380,7 +1457,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = net_device_ctx->nvdev; + netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); netdev_info(ndev, "VF up: %s\n", vf_netdev->name); @@ -1414,7 +1491,7 @@ static int netvsc_vf_down(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = net_device_ctx->nvdev; + netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); netdev_info(ndev, "VF down: %s\n", vf_netdev->name); netvsc_switch_datapath(ndev, false); @@ -1474,8 +1551,6 @@ static int netvsc_probe(struct hv_device *dev, hv_set_drvdata(dev, net); - net_device_ctx->start_remove = false; - INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 
INIT_WORK(&net_device_ctx->work, do_set_multicast); @@ -1492,8 +1567,7 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT, - num_online_cpus()); + device_info.num_chn = VRSS_CHANNEL_DEFAULT; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); @@ -1509,6 +1583,7 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; + /* RCU not necessary here, device not registered */ nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); @@ -1544,26 +1619,20 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); - /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels() - * removing the device. - */ - rtnl_lock(); - ndev_ctx->start_remove = true; - rtnl_unlock(); + netif_device_detach(net); cancel_delayed_work_sync(&ndev_ctx->dwork); cancel_work_sync(&ndev_ctx->work); - /* Stop outbound asap */ - netif_tx_disable(net); - - unregister_netdev(net); - /* * Call to the vsc driver to let it know that the device is being - * removed + * removed. Also blocks mtu and channel changes. */ + rtnl_lock(); rndis_filter_device_remove(dev, ndev_ctx->nvdev); + rtnl_unlock(); + + unregister_netdev(net); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 19356f56b7b144f40184c5766a8f9be33b4a3079..ab92c3c9595178c6e8ca47c332d153f79bd38672 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -819,16 +819,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) { struct rndis_request *request; struct rndis_set_request *set; - struct rndis_set_complete *set_complete; int ret; request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32)); - if (!request) { - ret = -ENOMEM; - goto cleanup; - } + if (!request) + return -ENOMEM; + /* Setup the rndis set */ set = &request->request_msg.msg.set_req; @@ -840,15 +838,11 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) &new_filter, sizeof(u32)); ret = rndis_filter_send_request(dev, request); - if (ret != 0) - goto cleanup; + if (ret == 0) + wait_for_completion(&request->wait_event); - wait_for_completion(&request->wait_event); + put_rndis_request(dev, request); - set_complete = &request->response_msg.msg.set_complete; -cleanup: - if (request) - put_rndis_request(dev, request); return ret; } @@ -926,8 +920,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev) struct rndis_halt_request *halt; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); struct netvsc_device *nvdev = net_device_ctx->nvdev; - struct hv_device *hdev = net_device_ctx->device_ctx; - ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -945,9 +937,10 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: - spin_lock_irqsave(&hdev->channel->inbound_lock, flags); nvdev->destroy = true; - spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Force flag to be ordered before waiting */ + wmb(); /* Wait for all send 
completions */ wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev)); @@ -996,26 +989,38 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) hv_get_drvdata(new_sc->primary_channel->device_obj); struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); u16 chn_index = new_sc->offermsg.offer.sub_channel_index; + struct netvsc_channel *nvchan; int ret; - unsigned long flags; if (chn_index >= nvscdev->num_chn) return; - nvscdev->chan_table[chn_index].mrc.buf + nvchan = nvscdev->chan_table + chn_index; + nvchan->mrc.buf = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); + if (!nvchan->mrc.buf) + return; + + /* Because the device uses NAPI, all the interrupt batching and + * control is done via Net softirq, not the channel handling + */ + set_channel_read_mode(new_sc, HV_CALL_ISR); + + /* Set the channel before opening.*/ + nvchan->channel = new_sc; + netif_napi_add(ndev, &nvchan->napi, + netvsc_poll, NAPI_POLL_WEIGHT); + ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, - netvsc_channel_cb, new_sc); - + netvsc_channel_cb, nvchan); if (ret == 0) - nvscdev->chan_table[chn_index].channel = new_sc; + napi_enable(&nvchan->napi); + else + netdev_err(ndev, "sub channel open failed (%d)\n", ret); - spin_lock_irqsave(&nvscdev->sc_lock, flags); - nvscdev->num_sc_offered--; - spin_unlock_irqrestore(&nvscdev->sc_lock, flags); - if (nvscdev->num_sc_offered == 0) + if (refcount_dec_and_test(&nvscdev->sc_offered)) complete(&nvscdev->channel_init_wait); } @@ -1032,12 +1037,9 @@ int rndis_filter_device_add(struct hv_device *dev, struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); unsigned int gso_max_size = GSO_MAX_SIZE; - u32 mtu, size; - u32 num_rss_qs; - u32 sc_delta; + u32 mtu, size, num_rss_qs; const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; - unsigned long flags; int i, ret; rndis_device = get_rndis_device(); @@ -1060,7 +1062,7 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->max_chn = 1; net_device->num_chn = 1; - spin_lock_init(&net_device->sc_lock); + refcount_set(&net_device->sc_offered, 0); net_device->extension = rndis_device; rndis_device->ndev = net; @@ -1174,34 +1176,30 @@ int rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; - net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que); - - num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn); - /* * We will limit the VRSS channels to the number CPUs in the NUMA node * the primary channel is currently bound to. + * + * This also guarantees that num_possible_rss_qs <= num_online_cpus */ node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); - num_possible_rss_qs = cpumask_weight(node_cpu_mask); + num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask), + rsscap.num_recv_que); - /* We will use the given number of channels if available. */ - if (device_info->num_chn && device_info->num_chn < net_device->max_chn) - net_device->num_chn = device_info->num_chn; - else - net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs); - num_rss_qs = net_device->num_chn - 1; + /* We will use the given number of channels if available. 
*/ + net_device->num_chn = min(net_device->max_chn, device_info->num_chn); for (i = 0; i < ITAB_NUM; i++) rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, net_device->num_chn); - net_device->num_sc_offered = num_rss_qs; - - if (net_device->num_chn == 1) - goto out; + num_rss_qs = net_device->num_chn - 1; + if (num_rss_qs == 0) + return 0; + refcount_set(&net_device->sc_offered, num_rss_qs); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); init_packet = &net_device->channel_init_pkt; @@ -1217,32 +1215,23 @@ int rndis_filter_device_add(struct hv_device *dev, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) goto out; - wait_for_completion(&net_device->channel_init_wait); - if (init_packet->msg.v5_msg.subchn_comp.status != - NVSP_STAT_SUCCESS) { + if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { ret = -ENODEV; goto out; } + wait_for_completion(&net_device->channel_init_wait); + net_device->num_chn = 1 + init_packet->msg.v5_msg.subchn_comp.num_subchannels; - ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, - net_device->num_chn); - - /* - * Set the number of sub-channels to be received. - */ - spin_lock_irqsave(&net_device->sc_lock, flags); - sc_delta = num_rss_qs - (net_device->num_chn - 1); - net_device->num_sc_offered -= sc_delta; - spin_unlock_irqrestore(&net_device->sc_lock, flags); - + /* ignore failures from setting rss parameters, still have channels */ + rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, + net_device->num_chn); out: if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; - net_device->num_sc_offered = 0; } return 0; /* return 0 because primary channel can be used alone */ @@ -1257,12 +1246,6 @@ void rndis_filter_device_remove(struct hv_device *dev, { struct rndis_device *rndis_dev = net_dev->extension; - /* If not all subchannel offers are complete, wait for them until - * completion to avoid race. - */ - if (net_dev->num_sc_offered > 0) - wait_for_completion(&net_dev->channel_init_wait); - /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 3057a8df4ce93fe09f43bffc59b11ea298df1097..303ba413392003047e208db65f7df85d382dd785 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -82,3 +82,25 @@ config IEEE802154_ADF7242 This driver can also be built as a module. To do so, say M here. the module will be called 'adf7242'. + +config IEEE802154_CA8210 + tristate "Cascoda CA8210 transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on COMMON_CLK + depends on SPI + ---help--- + Say Y here to enable the CA8210 SPI 802.15.4 wireless + controller. + + This driver can also be built as a module. To do so, say M here. + the module will be called 'ca8210'. + +config IEEE802154_CA8210_DEBUGFS + bool "CA8210 debugfs interface" + depends on IEEE802154_CA8210 + depends on DEBUG_FS + ---help--- + This option compiles debugfs code for the ca8210 driver. This + exposes a debugfs node for each CA8210 instance which allows + direct use of the Cascoda API, exposing the 802.15.4 MAC + management entities.
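The rndis_filter.c hunks above replace the spinlock-protected num_sc_offered counter with a refcount_t: the counter is armed once with the number of expected sub-channel offers, each netvsc_sc_open() drops one reference, and the final drop completes channel_init_wait. A minimal self-contained sketch of that arm/drop/wait pattern follows; the struct and function names are illustrative, not the driver's:

#include <linux/completion.h>
#include <linux/refcount.h>

struct subchan_waiter {
	refcount_t outstanding;		/* offers still expected */
	struct completion all_done;	/* completed by the final drop */
};

static void subchan_waiter_arm(struct subchan_waiter *w, unsigned int n)
{
	init_completion(&w->all_done);
	refcount_set(&w->outstanding, n);	/* n must be at least 1 */
}

/* Called once per sub-channel offer, from any context. */
static void subchan_waiter_drop(struct subchan_waiter *w)
{
	if (refcount_dec_and_test(&w->outstanding))
		complete(&w->all_done);
}

/* Caller blocks until every expected offer has been processed. */
static void subchan_waiter_wait(struct subchan_waiter *w)
{
	wait_for_completion(&w->all_done);
}

Because refcount_dec_and_test() is atomic, no lock is needed around the counter and the final wakeup cannot be missed, which is what lets the patch delete the sc_lock spinlock and the num_sc_offered bookkeeping.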
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index 3a923d339497eb19b94ddec89cc7d7c24dd68e5b..8374bb44a145be7014dabf7470645cb20183733a 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o +obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c new file mode 100644 index 0000000000000000000000000000000000000000..25fd3b04b3c0cdd07a536cc32c1f270599dafcb5 --- /dev/null +++ b/drivers/net/ieee802154/ca8210.c @@ -0,0 +1,3242 @@ +/* + * http://www.cascoda.com/products/ca-821x/ + * Copyright (c) 2016, Cascoda, Ltd. + * All rights reserved. + * + * This code is dual-licensed under both GPLv2 and 3-clause BSD. What follows is + * the license notice for both respectively. + * + ******************************************************************************* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + ******************************************************************************* + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRIVER_NAME "ca8210" + +/* external clock frequencies */ +#define ONE_MHZ 1000000 +#define TWO_MHZ (2 * ONE_MHZ) +#define FOUR_MHZ (4 * ONE_MHZ) +#define EIGHT_MHZ (8 * ONE_MHZ) +#define SIXTEEN_MHZ (16 * ONE_MHZ) + +/* spi constants */ +#define CA8210_SPI_BUF_SIZE 256 +#define CA8210_SYNC_TIMEOUT 1000 /* Timeout for synchronous commands [ms] */ + +/* test interface constants */ +#define CA8210_TEST_INT_FILE_NAME "ca8210_test" +#define CA8210_TEST_INT_FIFO_SIZE 256 + +/* MAC status enumerations */ +#define MAC_SUCCESS (0x00) +#define MAC_ERROR (0x01) +#define MAC_CANCELLED (0x02) +#define MAC_READY_FOR_POLL (0x03) +#define MAC_COUNTER_ERROR (0xDB) +#define MAC_IMPROPER_KEY_TYPE (0xDC) +#define MAC_IMPROPER_SECURITY_LEVEL (0xDD) +#define MAC_UNSUPPORTED_LEGACY (0xDE) +#define MAC_UNSUPPORTED_SECURITY (0xDF) +#define MAC_BEACON_LOST (0xE0) +#define MAC_CHANNEL_ACCESS_FAILURE (0xE1) +#define MAC_DENIED (0xE2) +#define MAC_DISABLE_TRX_FAILURE (0xE3) +#define MAC_SECURITY_ERROR (0xE4) +#define MAC_FRAME_TOO_LONG (0xE5) +#define MAC_INVALID_GTS (0xE6) +#define MAC_INVALID_HANDLE (0xE7) +#define MAC_INVALID_PARAMETER (0xE8) +#define MAC_NO_ACK (0xE9) +#define MAC_NO_BEACON (0xEA) +#define MAC_NO_DATA (0xEB) +#define MAC_NO_SHORT_ADDRESS (0xEC) +#define MAC_OUT_OF_CAP (0xED) +#define MAC_PAN_ID_CONFLICT (0xEE) +#define MAC_REALIGNMENT (0xEF) +#define MAC_TRANSACTION_EXPIRED (0xF0) +#define MAC_TRANSACTION_OVERFLOW (0xF1) +#define MAC_TX_ACTIVE (0xF2) +#define MAC_UNAVAILABLE_KEY (0xF3) +#define MAC_UNSUPPORTED_ATTRIBUTE (0xF4) +#define MAC_INVALID_ADDRESS (0xF5) +#define MAC_ON_TIME_TOO_LONG (0xF6) +#define MAC_PAST_TIME (0xF7) +#define MAC_TRACKING_OFF (0xF8) +#define MAC_INVALID_INDEX (0xF9) +#define MAC_LIMIT_REACHED (0xFA) +#define MAC_READ_ONLY (0xFB) +#define MAC_SCAN_IN_PROGRESS (0xFC) +#define MAC_SUPERFRAME_OVERLAP (0xFD) +#define MAC_SYSTEM_ERROR (0xFF) + +/* HWME attribute IDs */ +#define HWME_EDTHRESHOLD (0x04) +#define HWME_EDVALUE (0x06) +#define HWME_SYSCLKOUT (0x0F) +#define HWME_LQILIMIT (0x11) + +/* TDME attribute IDs */ +#define TDME_CHANNEL (0x00) +#define TDME_ATM_CONFIG (0x06) + +#define MAX_HWME_ATTRIBUTE_SIZE 16 +#define MAX_TDME_ATTRIBUTE_SIZE 2 + +/* PHY/MAC PIB Attribute Enumerations */ +#define PHY_CURRENT_CHANNEL (0x00) +#define PHY_TRANSMIT_POWER (0x02) +#define PHY_CCA_MODE (0x03) +#define MAC_ASSOCIATION_PERMIT (0x41) +#define MAC_AUTO_REQUEST (0x42) +#define MAC_BATT_LIFE_EXT (0x43) +#define MAC_BATT_LIFE_EXT_PERIODS (0x44) +#define MAC_BEACON_PAYLOAD (0x45) +#define MAC_BEACON_PAYLOAD_LENGTH (0x46) +#define MAC_BEACON_ORDER (0x47) +#define MAC_GTS_PERMIT (0x4d) +#define MAC_MAX_CSMA_BACKOFFS (0x4e) +#define MAC_MIN_BE (0x4f) +#define MAC_PAN_ID (0x50) +#define MAC_PROMISCUOUS_MODE (0x51) +#define MAC_RX_ON_WHEN_IDLE (0x52) +#define MAC_SHORT_ADDRESS (0x53) +#define MAC_SUPERFRAME_ORDER (0x54) +#define MAC_ASSOCIATED_PAN_COORD (0x56) +#define MAC_MAX_BE (0x57) +#define MAC_MAX_FRAME_RETRIES (0x59) +#define MAC_RESPONSE_WAIT_TIME (0x5A) +#define MAC_SECURITY_ENABLED (0x5D) + +#define MAC_AUTO_REQUEST_SECURITY_LEVEL (0x78) +#define MAC_AUTO_REQUEST_KEY_ID_MODE (0x79) + +#define NS_IEEE_ADDRESS (0xFF) /* Non-standard IEEE address */ + +/* MAC Address Mode Definitions */ +#define MAC_MODE_NO_ADDR (0x00) +#define MAC_MODE_SHORT_ADDR (0x02) 
+#define MAC_MODE_LONG_ADDR (0x03) + +/* MAC constants */ +#define MAX_BEACON_OVERHEAD (75) +#define MAX_BEACON_PAYLOAD_LENGTH (IEEE802154_MTU - MAX_BEACON_OVERHEAD) + +#define MAX_ATTRIBUTE_SIZE (122) +#define MAX_DATA_SIZE (114) + +#define CA8210_VALID_CHANNELS (0x07FFF800) + +/* MAC workarounds for V1.1 and MPW silicon (V0.x) */ +#define CA8210_MAC_WORKAROUNDS (0) +#define CA8210_MAC_MPW (0) + +/* memory manipulation macros */ +#define LS_BYTE(x) ((u8)((x) & 0xFF)) +#define MS_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) + +/* message ID codes in SPI commands */ +/* downstream */ +#define MCPS_DATA_REQUEST (0x00) +#define MLME_ASSOCIATE_REQUEST (0x02) +#define MLME_ASSOCIATE_RESPONSE (0x03) +#define MLME_DISASSOCIATE_REQUEST (0x04) +#define MLME_GET_REQUEST (0x05) +#define MLME_ORPHAN_RESPONSE (0x06) +#define MLME_RESET_REQUEST (0x07) +#define MLME_RX_ENABLE_REQUEST (0x08) +#define MLME_SCAN_REQUEST (0x09) +#define MLME_SET_REQUEST (0x0A) +#define MLME_START_REQUEST (0x0B) +#define MLME_POLL_REQUEST (0x0D) +#define HWME_SET_REQUEST (0x0E) +#define HWME_GET_REQUEST (0x0F) +#define TDME_SETSFR_REQUEST (0x11) +#define TDME_GETSFR_REQUEST (0x12) +#define TDME_SET_REQUEST (0x14) +/* upstream */ +#define MCPS_DATA_INDICATION (0x00) +#define MCPS_DATA_CONFIRM (0x01) +#define MLME_RESET_CONFIRM (0x0A) +#define MLME_SET_CONFIRM (0x0E) +#define MLME_START_CONFIRM (0x0F) +#define HWME_SET_CONFIRM (0x12) +#define HWME_GET_CONFIRM (0x13) +#define HWME_WAKEUP_INDICATION (0x15) +#define TDME_SETSFR_CONFIRM (0x17) + +/* SPI command IDs */ +/* bit indicating a confirm or indication from slave to master */ +#define SPI_S2M (0x20) +/* bit indicating a synchronous message */ +#define SPI_SYN (0x40) + +/* SPI command definitions */ +#define SPI_IDLE (0xFF) +#define SPI_NACK (0xF0) + +#define SPI_MCPS_DATA_REQUEST (MCPS_DATA_REQUEST) +#define SPI_MCPS_DATA_INDICATION (MCPS_DATA_INDICATION + SPI_S2M) +#define SPI_MCPS_DATA_CONFIRM (MCPS_DATA_CONFIRM + SPI_S2M) + +#define SPI_MLME_ASSOCIATE_REQUEST (MLME_ASSOCIATE_REQUEST) +#define SPI_MLME_RESET_REQUEST (MLME_RESET_REQUEST + SPI_SYN) +#define SPI_MLME_SET_REQUEST (MLME_SET_REQUEST + SPI_SYN) +#define SPI_MLME_START_REQUEST (MLME_START_REQUEST + SPI_SYN) +#define SPI_MLME_RESET_CONFIRM (MLME_RESET_CONFIRM + SPI_S2M + SPI_SYN) +#define SPI_MLME_SET_CONFIRM (MLME_SET_CONFIRM + SPI_S2M + SPI_SYN) +#define SPI_MLME_START_CONFIRM (MLME_START_CONFIRM + SPI_S2M + SPI_SYN) + +#define SPI_HWME_SET_REQUEST (HWME_SET_REQUEST + SPI_SYN) +#define SPI_HWME_GET_REQUEST (HWME_GET_REQUEST + SPI_SYN) +#define SPI_HWME_SET_CONFIRM (HWME_SET_CONFIRM + SPI_S2M + SPI_SYN) +#define SPI_HWME_GET_CONFIRM (HWME_GET_CONFIRM + SPI_S2M + SPI_SYN) +#define SPI_HWME_WAKEUP_INDICATION (HWME_WAKEUP_INDICATION + SPI_S2M) + +#define SPI_TDME_SETSFR_REQUEST (TDME_SETSFR_REQUEST + SPI_SYN) +#define SPI_TDME_SET_REQUEST (TDME_SET_REQUEST + SPI_SYN) +#define SPI_TDME_SETSFR_CONFIRM (TDME_SETSFR_CONFIRM + SPI_S2M + SPI_SYN) + +/* TDME SFR addresses */ +/* Page 0 */ +#define CA8210_SFR_PACFG (0xB1) +#define CA8210_SFR_MACCON (0xD8) +#define CA8210_SFR_PACFGIB (0xFE) +/* Page 1 */ +#define CA8210_SFR_LOTXCAL (0xBF) +#define CA8210_SFR_PTHRH (0xD1) +#define CA8210_SFR_PRECFG (0xD3) +#define CA8210_SFR_LNAGX40 (0xE1) +#define CA8210_SFR_LNAGX41 (0xE2) +#define CA8210_SFR_LNAGX42 (0xE3) +#define CA8210_SFR_LNAGX43 (0xE4) +#define CA8210_SFR_LNAGX44 (0xE5) +#define CA8210_SFR_LNAGX45 (0xE6) +#define CA8210_SFR_LNAGX46 (0xE7) +#define CA8210_SFR_LNAGX47 (0xE9) + +#define PACFGIB_DEFAULT_CURRENT (0x3F) +#define 
PTHRH_DEFAULT_THRESHOLD (0x5A) +#define LNAGX40_DEFAULT_GAIN (0x29) /* 10dB */ +#define LNAGX41_DEFAULT_GAIN (0x54) /* 21dB */ +#define LNAGX42_DEFAULT_GAIN (0x6C) /* 27dB */ +#define LNAGX43_DEFAULT_GAIN (0x7A) /* 30dB */ +#define LNAGX44_DEFAULT_GAIN (0x84) /* 33dB */ +#define LNAGX45_DEFAULT_GAIN (0x8B) /* 34dB */ +#define LNAGX46_DEFAULT_GAIN (0x92) /* 36dB */ +#define LNAGX47_DEFAULT_GAIN (0x96) /* 37dB */ + +#define CA8210_IOCTL_HARD_RESET (0x00) + +/* Structs/Enums */ + +/** + * struct cas_control - spi transfer structure + * @msg: spi_message for each exchange + * @transfer: spi_transfer for each exchange + * @tx_buf: source array for transmission + * @tx_in_buf: array storing bytes received during transmission + * @priv: pointer to private data + * + * This structure stores all the necessary data passed around during a single + * spi exchange. + */ +struct cas_control { + struct spi_message msg; + struct spi_transfer transfer; + + u8 tx_buf[CA8210_SPI_BUF_SIZE]; + u8 tx_in_buf[CA8210_SPI_BUF_SIZE]; + + struct ca8210_priv *priv; +}; + +/** + * struct ca8210_test - ca8210 test interface structure + * @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device + * @up_fifo: fifo for upstream messages + * + * This structure stores all the data pertaining to the debug interface + */ +struct ca8210_test { + struct dentry *ca8210_dfs_spi_int; + struct kfifo up_fifo; + wait_queue_head_t readq; +}; + +/** + * struct ca8210_priv - ca8210 private data structure + * @spi: pointer to the ca8210 spi device object + * @hw: pointer to the ca8210 ieee802154_hw object + * @hw_registered: true if hw has been registered with ieee802154 + * @lock: spinlock protecting the private data area + * @mlme_workqueue: workqueue for triggering MLME Reset + * @irq_workqueue: workqueue for irq processing + * @tx_skb: current socket buffer to transmit + * @nextmsduhandle: msdu handle to pass to the 15.4 MAC layer for the + * next transmission + * @clk: external clock provided by the ca8210 + * @last_dsn: sequence number of last data packet received, for + * resend detection + * @test: test interface data section for this instance + * @async_tx_pending: true if an asynchronous transmission was started and + * is not complete + * @sync_command_response: pointer to buffer to fill with sync response + * @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms + * @sync_down: counts number of downstream synchronous commands + * @sync_up: counts number of upstream synchronous commands + * @spi_transfer_complete completion object for a single spi_transfer + * @sync_exchange_complete completion object for a complete synchronous API + * exchange + * @promiscuous whether the ca8210 is in promiscuous mode or not + * @retries: records how many times the current pending spi + * transfer has been retried + */ +struct ca8210_priv { + struct spi_device *spi; + struct ieee802154_hw *hw; + bool hw_registered; + spinlock_t lock; + struct workqueue_struct *mlme_workqueue; + struct workqueue_struct *irq_workqueue; + struct sk_buff *tx_skb; + u8 nextmsduhandle; + struct clk *clk; + int last_dsn; + struct ca8210_test test; + bool async_tx_pending; + u8 *sync_command_response; + struct completion ca8210_is_awake; + int sync_down, sync_up; + struct completion spi_transfer_complete, sync_exchange_complete; + bool promiscuous; + int retries; +}; + +/** + * struct work_priv_container - link between a work object and the relevant + * device's private data + * @work: work object being executed + * @priv: device's 
private data section + * + */ +struct work_priv_container { + struct work_struct work; + struct ca8210_priv *priv; +}; + +/** + * struct ca8210_platform_data - ca8210 platform data structure + * @extclockenable: true if the external clock is to be enabled + * @extclockfreq: frequency of the external clock + * @extclockgpio: ca8210 output gpio of the external clock + * @gpio_reset: gpio number of ca8210 reset line + * @gpio_irq: gpio number of ca8210 interrupt line + * @irq_id: identifier for the ca8210 irq + * + */ +struct ca8210_platform_data { + bool extclockenable; + unsigned int extclockfreq; + unsigned int extclockgpio; + int gpio_reset; + int gpio_irq; + int irq_id; +}; + +/** + * struct fulladdr - full MAC addressing information structure + * @mode: address mode (none, short, extended) + * @pan_id: 16-bit LE pan id + * @address: LE address, variable length as specified by mode + * + */ +struct fulladdr { + u8 mode; + u8 pan_id[2]; + u8 address[8]; +}; + +/** + * union macaddr: generic MAC address container + * @short_addr: 16-bit short address + * @ieee_address: 64-bit extended address as LE byte array + * + */ +union macaddr { + u16 short_address; + u8 ieee_address[8]; +}; + +/** + * struct secspec: security specification for SAP commands + * @security_level: 0-7, controls level of authentication & encryption + * @key_id_mode: 0-3, specifies how to obtain key + * @key_source: extended key retrieval data + * @key_index: single-byte key identifier + * + */ +struct secspec { + u8 security_level; + u8 key_id_mode; + u8 key_source[8]; + u8 key_index; +}; + +/* downlink functions parameter set definitions */ +struct mcps_data_request_pset { + u8 src_addr_mode; + struct fulladdr dst; + u8 msdu_length; + u8 msdu_handle; + u8 tx_options; + u8 msdu[MAX_DATA_SIZE]; +}; + +struct mlme_set_request_pset { + u8 pib_attribute; + u8 pib_attribute_index; + u8 pib_attribute_length; + u8 pib_attribute_value[MAX_ATTRIBUTE_SIZE]; +}; + +struct hwme_set_request_pset { + u8 hw_attribute; + u8 hw_attribute_length; + u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE]; +}; + +struct hwme_get_request_pset { + u8 hw_attribute; +}; + +struct tdme_setsfr_request_pset { + u8 sfr_page; + u8 sfr_address; + u8 sfr_value; +}; + +/* uplink functions parameter set definitions */ +struct hwme_set_confirm_pset { + u8 status; + u8 hw_attribute; +}; + +struct hwme_get_confirm_pset { + u8 status; + u8 hw_attribute; + u8 hw_attribute_length; + u8 hw_attribute_value[MAX_HWME_ATTRIBUTE_SIZE]; +}; + +struct tdme_setsfr_confirm_pset { + u8 status; + u8 sfr_page; + u8 sfr_address; +}; + +struct mac_message { + u8 command_id; + u8 length; + union { + struct mcps_data_request_pset data_req; + struct mlme_set_request_pset set_req; + struct hwme_set_request_pset hwme_set_req; + struct hwme_get_request_pset hwme_get_req; + struct tdme_setsfr_request_pset tdme_set_sfr_req; + struct hwme_set_confirm_pset hwme_set_cnf; + struct hwme_get_confirm_pset hwme_get_cnf; + struct tdme_setsfr_confirm_pset tdme_set_sfr_cnf; + u8 u8param; + u8 status; + u8 payload[148]; + } pdata; +}; + +union pa_cfg_sfr { + struct { + u8 bias_current_trim : 3; + u8 /* reserved */ : 1; + u8 buffer_capacitor_trim : 3; + u8 boost : 1; + }; + u8 paib; +}; + +struct preamble_cfg_sfr { + u8 timeout_symbols : 3; + u8 acquisition_symbols : 3; + u8 search_symbols : 2; +}; + +static int (*cascoda_api_upstream)( + const u8 *buf, + size_t len, + void *device_ref +); + +/** + * link_to_linux_err() - Translates an 802.15.4 return code into the closest + * linux error + * 
@link_status: 802.15.4 status code + * + * Return: 0 or Linux error code + */ +static int link_to_linux_err(int link_status) +{ + if (link_status < 0) { + /* status is already a Linux code */ + return link_status; + } + switch (link_status) { + case MAC_SUCCESS: + case MAC_REALIGNMENT: + return 0; + case MAC_IMPROPER_KEY_TYPE: + return -EKEYREJECTED; + case MAC_IMPROPER_SECURITY_LEVEL: + case MAC_UNSUPPORTED_LEGACY: + case MAC_DENIED: + return -EACCES; + case MAC_BEACON_LOST: + case MAC_NO_ACK: + case MAC_NO_BEACON: + return -ENETUNREACH; + case MAC_CHANNEL_ACCESS_FAILURE: + case MAC_TX_ACTIVE: + case MAC_SCAN_IN_PROGRESS: + return -EBUSY; + case MAC_DISABLE_TRX_FAILURE: + case MAC_OUT_OF_CAP: + return -EAGAIN; + case MAC_FRAME_TOO_LONG: + return -EMSGSIZE; + case MAC_INVALID_GTS: + case MAC_PAST_TIME: + return -EBADSLT; + case MAC_INVALID_HANDLE: + return -EBADMSG; + case MAC_INVALID_PARAMETER: + case MAC_UNSUPPORTED_ATTRIBUTE: + case MAC_ON_TIME_TOO_LONG: + case MAC_INVALID_INDEX: + return -EINVAL; + case MAC_NO_DATA: + return -ENODATA; + case MAC_NO_SHORT_ADDRESS: + return -EFAULT; + case MAC_PAN_ID_CONFLICT: + return -EADDRINUSE; + case MAC_TRANSACTION_EXPIRED: + return -ETIME; + case MAC_TRANSACTION_OVERFLOW: + return -ENOBUFS; + case MAC_UNAVAILABLE_KEY: + return -ENOKEY; + case MAC_INVALID_ADDRESS: + return -ENXIO; + case MAC_TRACKING_OFF: + case MAC_SUPERFRAME_OVERLAP: + return -EREMOTEIO; + case MAC_LIMIT_REACHED: + return -EDQUOT; + case MAC_READ_ONLY: + return -EROFS; + default: + return -EPROTO; + } +} + +/** + * ca8210_test_int_driver_write() - Writes a message to the test interface to be + * read by the userspace + * @buf: Buffer containing upstream message + * @len: length of message to write + * @spi: SPI device of message originator + * + * Return: 0 or linux error code + */ +static int ca8210_test_int_driver_write( + const u8 *buf, + size_t len, + void *spi +) +{ + struct ca8210_priv *priv = spi_get_drvdata(spi); + struct ca8210_test *test = &priv->test; + char *fifo_buffer; + int i; + + dev_dbg( + &priv->spi->dev, + "test_interface: Buffering upstream message:\n" + ); + for (i = 0; i < len; i++) + dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); + + fifo_buffer = kmalloc(len, GFP_KERNEL); + if (!fifo_buffer) + return -ENOMEM; + memcpy(fifo_buffer, buf, len); + kfifo_in(&test->up_fifo, &fifo_buffer, 4); + wake_up_interruptible(&priv->test.readq); + + return 0; +} + +/* SPI Operation */ + +static int ca8210_net_rx( + struct ieee802154_hw *hw, + u8 *command, + size_t len +); +static u8 mlme_reset_request_sync( + u8 set_default_pib, + void *device_ref +); +static int ca8210_spi_transfer( + struct spi_device *spi, + const u8 *buf, + size_t len +); + +/** + * ca8210_reset_send() - Hard resets the ca8210 for a given time + * @spi: Pointer to target ca8210 spi device + * @ms: Milliseconds to hold the reset line low for + */ +static void ca8210_reset_send(struct spi_device *spi, unsigned int ms) +{ + struct ca8210_platform_data *pdata = spi->dev.platform_data; + struct ca8210_priv *priv = spi_get_drvdata(spi); + long status; + + gpio_set_value(pdata->gpio_reset, 0); + reinit_completion(&priv->ca8210_is_awake); + msleep(ms); + gpio_set_value(pdata->gpio_reset, 1); + priv->promiscuous = false; + + /* Wait until wakeup indication seen */ + status = wait_for_completion_interruptible_timeout( + &priv->ca8210_is_awake, + msecs_to_jiffies(CA8210_SYNC_TIMEOUT) + ); + if (status == 0) { + dev_crit( + &spi->dev, + "Fatal: No wakeup from ca8210 after reset!\n" + ); + } + + dev_dbg(&spi->dev, 
"Reset the device\n"); +} + +/** + * ca8210_mlme_reset_worker() - Resets the MLME, Called when the MAC OVERFLOW + * condition happens. + * @work: Pointer to work being executed + */ +static void ca8210_mlme_reset_worker(struct work_struct *work) +{ + struct work_priv_container *wpc = container_of( + work, + struct work_priv_container, + work + ); + struct ca8210_priv *priv = wpc->priv; + + mlme_reset_request_sync(0, priv->spi); + kfree(wpc); +} + +/** + * ca8210_rx_done() - Calls various message dispatches responding to a received + * command + * @arg: Pointer to the cas_control object for the relevant spi transfer + * + * Presents a received SAP command from the ca8210 to the Cascoda EVBME, test + * interface and network driver. + */ +static void ca8210_rx_done(struct cas_control *cas_ctl) +{ + u8 *buf; + u8 len; + struct work_priv_container *mlme_reset_wpc; + struct ca8210_priv *priv = cas_ctl->priv; + + buf = cas_ctl->tx_in_buf; + len = buf[1] + 2; + if (len > CA8210_SPI_BUF_SIZE) { + dev_crit( + &priv->spi->dev, + "Received packet len (%d) erroneously long\n", + len + ); + goto finish; + } + + if (buf[0] & SPI_SYN) { + if (priv->sync_command_response) { + memcpy(priv->sync_command_response, buf, len); + complete(&priv->sync_exchange_complete); + } else { + if (cascoda_api_upstream) + cascoda_api_upstream(buf, len, priv->spi); + priv->sync_up++; + } + } else { + if (cascoda_api_upstream) + cascoda_api_upstream(buf, len, priv->spi); + } + + ca8210_net_rx(priv->hw, buf, len); + if (buf[0] == SPI_MCPS_DATA_CONFIRM) { + if (buf[3] == MAC_TRANSACTION_OVERFLOW) { + dev_info( + &priv->spi->dev, + "Waiting for transaction overflow to stabilise...\n"); + msleep(2000); + dev_info( + &priv->spi->dev, + "Resetting MAC...\n"); + + mlme_reset_wpc = kmalloc(sizeof(*mlme_reset_wpc), + GFP_KERNEL); + if (!mlme_reset_wpc) + goto finish; + INIT_WORK( + &mlme_reset_wpc->work, + ca8210_mlme_reset_worker + ); + mlme_reset_wpc->priv = priv; + queue_work(priv->mlme_workqueue, &mlme_reset_wpc->work); + } + } else if (buf[0] == SPI_HWME_WAKEUP_INDICATION) { + dev_notice( + &priv->spi->dev, + "Wakeup indication received, reason:\n" + ); + switch (buf[2]) { + case 0: + dev_notice( + &priv->spi->dev, + "Transceiver woken up from Power Up / System Reset\n" + ); + break; + case 1: + dev_notice( + &priv->spi->dev, + "Watchdog Timer Time-Out\n" + ); + break; + case 2: + dev_notice( + &priv->spi->dev, + "Transceiver woken up from Power-Off by Sleep Timer Time-Out\n"); + break; + case 3: + dev_notice( + &priv->spi->dev, + "Transceiver woken up from Power-Off by GPIO Activity\n" + ); + break; + case 4: + dev_notice( + &priv->spi->dev, + "Transceiver woken up from Standby by Sleep Timer Time-Out\n" + ); + break; + case 5: + dev_notice( + &priv->spi->dev, + "Transceiver woken up from Standby by GPIO Activity\n" + ); + break; + case 6: + dev_notice( + &priv->spi->dev, + "Sleep-Timer Time-Out in Active Mode\n" + ); + break; + default: + dev_warn(&priv->spi->dev, "Wakeup reason unknown\n"); + break; + } + complete(&priv->ca8210_is_awake); + } + +finish:; +} + +static int ca8210_remove(struct spi_device *spi_device); + +/** + * ca8210_spi_transfer_complete() - Called when a single spi transfer has + * completed + * @context: Pointer to the cas_control object for the finished transfer + */ +static void ca8210_spi_transfer_complete(void *context) +{ + struct cas_control *cas_ctl = context; + struct ca8210_priv *priv = cas_ctl->priv; + bool duplex_rx = false; + int i; + u8 retry_buffer[CA8210_SPI_BUF_SIZE]; + + if ( + 
cas_ctl->tx_in_buf[0] == SPI_NACK || + (cas_ctl->tx_in_buf[0] == SPI_IDLE && + cas_ctl->tx_in_buf[1] == SPI_NACK) + ) { + /* ca8210 is busy */ + dev_info(&priv->spi->dev, "ca8210 was busy during attempted write\n"); + if (cas_ctl->tx_buf[0] == SPI_IDLE) { + dev_warn( + &priv->spi->dev, + "IRQ servicing NACKd, dropping transfer\n" + ); + kfree(cas_ctl); + return; + } + if (priv->retries > 3) { + dev_err(&priv->spi->dev, "too many retries!\n"); + kfree(cas_ctl); + ca8210_remove(priv->spi); + return; + } + memcpy(retry_buffer, cas_ctl->tx_buf, CA8210_SPI_BUF_SIZE); + kfree(cas_ctl); + ca8210_spi_transfer( + priv->spi, + retry_buffer, + CA8210_SPI_BUF_SIZE + ); + priv->retries++; + dev_info(&priv->spi->dev, "retried spi write\n"); + return; + } else if ( + cas_ctl->tx_in_buf[0] != SPI_IDLE && + cas_ctl->tx_in_buf[0] != SPI_NACK + ) { + duplex_rx = true; + } + + if (duplex_rx) { + dev_dbg(&priv->spi->dev, "READ CMD DURING TX\n"); + for (i = 0; i < cas_ctl->tx_in_buf[1] + 2; i++) + dev_dbg( + &priv->spi->dev, + "%#03x\n", + cas_ctl->tx_in_buf[i] + ); + ca8210_rx_done(cas_ctl); + } + complete(&priv->spi_transfer_complete); + kfree(cas_ctl); + priv->retries = 0; +} + +/** + * ca8210_spi_transfer() - Initiate duplex spi transfer with ca8210 + * @spi: Pointer to spi device for transfer + * @buf: Octet array to send + * @len: length of the buffer being sent + * + * Return: 0 or linux error code + */ +static int ca8210_spi_transfer( + struct spi_device *spi, + const u8 *buf, + size_t len +) +{ + int i, status = 0; + struct ca8210_priv *priv = spi_get_drvdata(spi); + struct cas_control *cas_ctl; + + if (!spi) { + dev_crit( + &spi->dev, + "NULL spi device passed to ca8210_spi_transfer\n" + ); + return -ENODEV; + } + + reinit_completion(&priv->spi_transfer_complete); + + dev_dbg(&spi->dev, "ca8210_spi_transfer called\n"); + + cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC); + if (!cas_ctl) + return -ENOMEM; + + cas_ctl->priv = priv; + memset(cas_ctl->tx_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE); + memset(cas_ctl->tx_in_buf, SPI_IDLE, CA8210_SPI_BUF_SIZE); + memcpy(cas_ctl->tx_buf, buf, len); + + for (i = 0; i < len; i++) + dev_dbg(&spi->dev, "%#03x\n", cas_ctl->tx_buf[i]); + + spi_message_init(&cas_ctl->msg); + + cas_ctl->transfer.tx_nbits = 1; /* 1 MOSI line */ + cas_ctl->transfer.rx_nbits = 1; /* 1 MISO line */ + cas_ctl->transfer.speed_hz = 0; /* Use device setting */ + cas_ctl->transfer.bits_per_word = 0; /* Use device setting */ + cas_ctl->transfer.tx_buf = cas_ctl->tx_buf; + cas_ctl->transfer.rx_buf = cas_ctl->tx_in_buf; + cas_ctl->transfer.delay_usecs = 0; + cas_ctl->transfer.cs_change = 0; + cas_ctl->transfer.len = sizeof(struct mac_message); + cas_ctl->msg.complete = ca8210_spi_transfer_complete; + cas_ctl->msg.context = cas_ctl; + + spi_message_add_tail( + &cas_ctl->transfer, + &cas_ctl->msg + ); + + status = spi_async(spi, &cas_ctl->msg); + if (status < 0) { + dev_crit( + &spi->dev, + "status %d from spi_sync in write\n", + status + ); + } + + return status; +} + +/** + * ca8210_spi_exchange() - Exchange API/SAP commands with the radio + * @buf: Octet array of command being sent downstream + * @len: length of buf + * @response: buffer for storing synchronous response + * @device_ref: spi_device pointer for ca8210 + * + * Effectively calls ca8210_spi_transfer to write buf[] to the spi, then for + * synchronous commands waits for the corresponding response to be read from + * the spi before returning. The response is written to the response parameter. 
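ca8210_spi_transfer() above relies on SPI being full duplex: the same clocking that shifts a command out on MOSI simultaneously captures the radio's output on MISO into tx_in_buf, which is how a NACK, or a whole unsolicited upstream packet, can be spotted during a write. A condensed sketch of that one-transfer, completion-callback shape; the demo_* names are hypothetical, not the driver's, and ctx is assumed zero-initialized (e.g. from kzalloc):

#include <linux/printk.h>
#include <linux/spi/spi.h>

struct demo_ctx {
	struct spi_message msg;
	struct spi_transfer xfer;
	u8 tx[256];			/* command bytes going out */
	u8 rx[256];			/* bytes clocked back in parallel */
};

static void demo_complete(void *context)	/* runs in controller context */
{
	struct demo_ctx *ctx = context;

	if (ctx->rx[0] == 0xF0)		/* SPI_NACK: radio was busy */
		pr_info("demo: radio NACKed the write\n");
}

static int demo_duplex_xfer(struct spi_device *spi, struct demo_ctx *ctx)
{
	spi_message_init(&ctx->msg);
	ctx->xfer.tx_buf = ctx->tx;
	ctx->xfer.rx_buf = ctx->rx;	/* both buffers set => full duplex */
	ctx->xfer.len = sizeof(ctx->tx);
	spi_message_add_tail(&ctx->xfer, &ctx->msg);
	ctx->msg.complete = demo_complete;
	ctx->msg.context = ctx;
	return spi_async(spi, &ctx->msg);	/* returns before I/O ends */
}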
+ * + * Return: 0 or linux error code + */ +static int ca8210_spi_exchange( + const u8 *buf, + size_t len, + u8 *response, + void *device_ref +) +{ + int status = 0; + struct spi_device *spi = device_ref; + struct ca8210_priv *priv = spi->dev.driver_data; + long wait_remaining; + + if ((buf[0] & SPI_SYN) && response) { /* if sync wait for confirm */ + reinit_completion(&priv->sync_exchange_complete); + priv->sync_command_response = response; + } + + do { + reinit_completion(&priv->spi_transfer_complete); + status = ca8210_spi_transfer(priv->spi, buf, len); + if (status) { + dev_warn( + &spi->dev, + "spi write failed, returned %d\n", + status + ); + if (status == -EBUSY) + continue; + if (((buf[0] & SPI_SYN) && response)) + complete(&priv->sync_exchange_complete); + goto cleanup; + } + + wait_remaining = wait_for_completion_interruptible_timeout( + &priv->spi_transfer_complete, + msecs_to_jiffies(1000) + ); + if (wait_remaining == -ERESTARTSYS) { + status = -ERESTARTSYS; + } else if (wait_remaining == 0) { + dev_err( + &spi->dev, + "SPI downstream transfer timed out!\n" + ); + status = -ETIME; + goto cleanup; + } + } while (status < 0); + + if (!((buf[0] & SPI_SYN) && response)) + goto cleanup; + + wait_remaining = wait_for_completion_interruptible_timeout( + &priv->sync_exchange_complete, + msecs_to_jiffies(CA8210_SYNC_TIMEOUT) + ); + if (wait_remaining == -ERESTARTSYS) { + status = -ERESTARTSYS; + } else if (wait_remaining == 0) { + dev_err( + &spi->dev, + "Synchronous confirm timeout\n" + ); + status = -ETIME; + } + +cleanup: + priv->sync_command_response = NULL; + return status; +} + +/** + * ca8210_interrupt_handler() - Called when an irq is received from the ca8210 + * @irq: Id of the irq being handled + * @dev_id: Pointer passed by the system, pointing to the ca8210's private data + * + * This function is called when the irq line from the ca8210 is asserted, + * signifying that the ca8210 has a message to send upstream to us. Starts the + * asynchronous spi read. 
+ * + * Return: irq return code + */ +static irqreturn_t ca8210_interrupt_handler(int irq, void *dev_id) +{ + struct ca8210_priv *priv = dev_id; + int status; + + dev_dbg(&priv->spi->dev, "irq: Interrupt occurred\n"); + do { + status = ca8210_spi_transfer(priv->spi, NULL, 0); + if (status && (status != -EBUSY)) { + dev_warn( + &priv->spi->dev, + "spi read failed, returned %d\n", + status + ); + } + } while (status == -EBUSY); + return IRQ_HANDLED; +} + +static int (*cascoda_api_downstream)( + const u8 *buf, + size_t len, + u8 *response, + void *device_ref +) = ca8210_spi_exchange; + +/* Cascoda API / 15.4 SAP Primitives */ + +/** + * tdme_setsfr_request_sync() - TDME_SETSFR_request/confirm according to API + * @sfr_page: SFR Page + * @sfr_address: SFR Address + * @sfr_value: SFR Value + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of TDME-SETSFR.confirm + */ +static u8 tdme_setsfr_request_sync( + u8 sfr_page, + u8 sfr_address, + u8 sfr_value, + void *device_ref +) +{ + int ret; + struct mac_message command, response; + struct spi_device *spi = device_ref; + + command.command_id = SPI_TDME_SETSFR_REQUEST; + command.length = 3; + command.pdata.tdme_set_sfr_req.sfr_page = sfr_page; + command.pdata.tdme_set_sfr_req.sfr_address = sfr_address; + command.pdata.tdme_set_sfr_req.sfr_value = sfr_value; + response.command_id = SPI_IDLE; + ret = cascoda_api_downstream( + &command.command_id, + command.length + 2, + &response.command_id, + device_ref + ); + if (ret) { + dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret); + return MAC_SYSTEM_ERROR; + } + + if (response.command_id != SPI_TDME_SETSFR_CONFIRM) { + dev_crit( + &spi->dev, + "sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n", + response.command_id + ); + return MAC_SYSTEM_ERROR; + } + + return response.pdata.tdme_set_sfr_cnf.status; +} + +/** + * tdme_chipinit() - TDME Chip Register Default Initialisation Macro + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of API calls + */ +static u8 tdme_chipinit(void *device_ref) +{ + u8 status = MAC_SUCCESS; + u8 sfr_address; + struct spi_device *spi = device_ref; + struct preamble_cfg_sfr pre_cfg_value = { + .timeout_symbols = 3, + .acquisition_symbols = 3, + .search_symbols = 1, + }; + /* LNA Gain Settings */ + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX40), + LNAGX40_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX41), + LNAGX41_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX42), + LNAGX42_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX43), + LNAGX43_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX44), + LNAGX44_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX45), + LNAGX45_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX46), + LNAGX46_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_LNAGX47), + LNAGX47_DEFAULT_GAIN, device_ref); + if (status) + goto finish; + /* Preamble Timing Config 
*/ + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_PRECFG), + *((u8 *)&pre_cfg_value), device_ref); + if (status) + goto finish; + /* Preamble Threshold High */ + status = tdme_setsfr_request_sync( + 1, (sfr_address = CA8210_SFR_PTHRH), + PTHRH_DEFAULT_THRESHOLD, device_ref); + if (status) + goto finish; + /* Tx Output Power 8 dBm */ + status = tdme_setsfr_request_sync( + 0, (sfr_address = CA8210_SFR_PACFGIB), + PACFGIB_DEFAULT_CURRENT, device_ref); + if (status) + goto finish; + +finish: + if (status != MAC_SUCCESS) { + dev_err( + &spi->dev, + "failed to set sfr at %#03x, status = %#03x\n", + sfr_address, + status + ); + } + return status; +} + +/** + * tdme_channelinit() - TDME Channel Register Default Initialisation Macro (Tx) + * @channel: 802.15.4 channel to initialise chip for + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of API calls + */ +static u8 tdme_channelinit(u8 channel, void *device_ref) +{ + /* Transceiver front-end local oscillator tx two-point calibration + * value. Tuned for the hardware. + */ + u8 txcalval; + + if (channel >= 25) + txcalval = 0xA7; + else if (channel >= 23) + txcalval = 0xA8; + else if (channel >= 22) + txcalval = 0xA9; + else if (channel >= 20) + txcalval = 0xAA; + else if (channel >= 17) + txcalval = 0xAB; + else if (channel >= 16) + txcalval = 0xAC; + else if (channel >= 14) + txcalval = 0xAD; + else if (channel >= 12) + txcalval = 0xAE; + else + txcalval = 0xAF; + + return tdme_setsfr_request_sync( + 1, + CA8210_SFR_LOTXCAL, + txcalval, + device_ref + ); /* LO Tx Cal */ +} + +/** + * tdme_checkpibattribute() - Checks Attribute Values that are not checked in + * MAC + * @pib_attribute: Attribute Number + * @pib_attribute_length: Attribute length + * @pib_attribute_value: Pointer to Attribute Value + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of checks + */ +static u8 tdme_checkpibattribute( + u8 pib_attribute, + u8 pib_attribute_length, + const void *pib_attribute_value +) +{ + u8 status = MAC_SUCCESS; + u8 value; + + value = *((u8 *)pib_attribute_value); + + switch (pib_attribute) { + /* PHY */ + case PHY_TRANSMIT_POWER: + if (value > 0x3F) + status = MAC_INVALID_PARAMETER; + break; + case PHY_CCA_MODE: + if (value > 0x03) + status = MAC_INVALID_PARAMETER; + break; + /* MAC */ + case MAC_BATT_LIFE_EXT_PERIODS: + if ((value < 6) || (value > 41)) + status = MAC_INVALID_PARAMETER; + break; + case MAC_BEACON_PAYLOAD: + if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH) + status = MAC_INVALID_PARAMETER; + break; + case MAC_BEACON_PAYLOAD_LENGTH: + if (value > MAX_BEACON_PAYLOAD_LENGTH) + status = MAC_INVALID_PARAMETER; + break; + case MAC_BEACON_ORDER: + if (value > 15) + status = MAC_INVALID_PARAMETER; + break; + case MAC_MAX_BE: + if ((value < 3) || (value > 8)) + status = MAC_INVALID_PARAMETER; + break; + case MAC_MAX_CSMA_BACKOFFS: + if (value > 5) + status = MAC_INVALID_PARAMETER; + break; + case MAC_MAX_FRAME_RETRIES: + if (value > 7) + status = MAC_INVALID_PARAMETER; + break; + case MAC_MIN_BE: + if (value > 8) + status = MAC_INVALID_PARAMETER; + break; + case MAC_RESPONSE_WAIT_TIME: + if ((value < 2) || (value > 64)) + status = MAC_INVALID_PARAMETER; + break; + case MAC_SUPERFRAME_ORDER: + if (value > 15) + status = MAC_INVALID_PARAMETER; + break; + /* boolean */ + case MAC_ASSOCIATED_PAN_COORD: + case MAC_ASSOCIATION_PERMIT: + case MAC_AUTO_REQUEST: + case MAC_BATT_LIFE_EXT: + case MAC_GTS_PERMIT: + case MAC_PROMISCUOUS_MODE: + case 
MAC_RX_ON_WHEN_IDLE: + case MAC_SECURITY_ENABLED: + if (value > 1) + status = MAC_INVALID_PARAMETER; + break; + /* MAC SEC */ + case MAC_AUTO_REQUEST_SECURITY_LEVEL: + if (value > 7) + status = MAC_INVALID_PARAMETER; + break; + case MAC_AUTO_REQUEST_KEY_ID_MODE: + if (value > 3) + status = MAC_INVALID_PARAMETER; + break; + default: + break; + } + + return status; +} + +/** + * tdme_settxpower() - Sets the tx power for MLME_SET phyTransmitPower + * @txp: Transmit Power + * @device_ref: Nondescript pointer to target device + * + * Normalised to 802.15.4 Definition (6-bit, signed): + * Bit 7-6: not used + * Bit 5-0: tx power (-32 - +31 dB) + * + * Return: 802.15.4 status code of api calls + */ +static u8 tdme_settxpower(u8 txp, void *device_ref) +{ + u8 status; + s8 txp_val; + u8 txp_ext; + union pa_cfg_sfr pa_cfg_val; + + /* extend from 6 to 8 bit */ + txp_ext = 0x3F & txp; + if (txp_ext & 0x20) + txp_ext += 0xC0; + txp_val = (s8)txp_ext; + + if (CA8210_MAC_MPW) { + if (txp_val > 0) { + /* 8 dBm: ptrim = 5, itrim = +3 => +4 dBm */ + pa_cfg_val.bias_current_trim = 3; + pa_cfg_val.buffer_capacitor_trim = 5; + pa_cfg_val.boost = 1; + } else { + /* 0 dBm: ptrim = 7, itrim = +3 => -6 dBm */ + pa_cfg_val.bias_current_trim = 3; + pa_cfg_val.buffer_capacitor_trim = 7; + pa_cfg_val.boost = 0; + } + /* write PACFG */ + status = tdme_setsfr_request_sync( + 0, + CA8210_SFR_PACFG, + pa_cfg_val.paib, + device_ref + ); + } else { + /* Look-Up Table for Setting Current and Frequency Trim values + * for desired Output Power + */ + if (txp_val > 8) { + pa_cfg_val.paib = 0x3F; + } else if (txp_val == 8) { + pa_cfg_val.paib = 0x32; + } else if (txp_val == 7) { + pa_cfg_val.paib = 0x22; + } else if (txp_val == 6) { + pa_cfg_val.paib = 0x18; + } else if (txp_val == 5) { + pa_cfg_val.paib = 0x10; + } else if (txp_val == 4) { + pa_cfg_val.paib = 0x0C; + } else if (txp_val == 3) { + pa_cfg_val.paib = 0x08; + } else if (txp_val == 2) { + pa_cfg_val.paib = 0x05; + } else if (txp_val == 1) { + pa_cfg_val.paib = 0x03; + } else if (txp_val == 0) { + pa_cfg_val.paib = 0x01; + } else { /* < 0 */ + pa_cfg_val.paib = 0x00; + } + /* write PACFGIB */ + status = tdme_setsfr_request_sync( + 0, + CA8210_SFR_PACFGIB, + pa_cfg_val.paib, + device_ref + ); + } + + return status; +} + +/** + * mcps_data_request() - mcps_data_request (Send Data) according to API Spec + * @src_addr_mode: Source Addressing Mode + * @dst_address_mode: Destination Addressing Mode + * @dst_pan_id: Destination PAN ID + * @dst_addr: Pointer to Destination Address + * @msdu_length: length of Data + * @msdu: Pointer to Data + * @msdu_handle: Handle of Data + * @tx_options: Tx Options Bit Field + * @security: Pointer to Security Structure or NULL + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of action + */ +static u8 mcps_data_request( + u8 src_addr_mode, + u8 dst_address_mode, + u16 dst_pan_id, + union macaddr *dst_addr, + u8 msdu_length, + u8 *msdu, + u8 msdu_handle, + u8 tx_options, + struct secspec *security, + void *device_ref +) +{ + struct secspec *psec; + struct mac_message command; + + command.command_id = SPI_MCPS_DATA_REQUEST; + command.pdata.data_req.src_addr_mode = src_addr_mode; + command.pdata.data_req.dst.mode = dst_address_mode; + if (dst_address_mode != MAC_MODE_NO_ADDR) { + command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); + command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); + if (dst_address_mode == MAC_MODE_SHORT_ADDR) { + command.pdata.data_req.dst.address[0] = LS_BYTE( + 
dst_addr->short_address + ); + command.pdata.data_req.dst.address[1] = MS_BYTE( + dst_addr->short_address + ); + } else { /* MAC_MODE_LONG_ADDR*/ + memcpy( + command.pdata.data_req.dst.address, + dst_addr->ieee_address, + 8 + ); + } + } + command.pdata.data_req.msdu_length = msdu_length; + command.pdata.data_req.msdu_handle = msdu_handle; + command.pdata.data_req.tx_options = tx_options; + memcpy(command.pdata.data_req.msdu, msdu, msdu_length); + psec = (struct secspec *)(command.pdata.data_req.msdu + msdu_length); + command.length = sizeof(struct mcps_data_request_pset) - + MAX_DATA_SIZE + msdu_length; + if (!security || (security->security_level == 0)) { + psec->security_level = 0; + command.length += 1; + } else { + *psec = *security; + command.length += sizeof(struct secspec); + } + + if (ca8210_spi_transfer(device_ref, &command.command_id, + command.length + 2)) + return MAC_SYSTEM_ERROR; + + return MAC_SUCCESS; +} + +/** + * mlme_reset_request_sync() - MLME_RESET_request/confirm according to API Spec + * @set_default_pib: Set defaults in PIB + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of MLME-RESET.confirm + */ +static u8 mlme_reset_request_sync( + u8 set_default_pib, + void *device_ref +) +{ + u8 status; + struct mac_message command, response; + struct spi_device *spi = device_ref; + + command.command_id = SPI_MLME_RESET_REQUEST; + command.length = 1; + command.pdata.u8param = set_default_pib; + + if (cascoda_api_downstream( + &command.command_id, + command.length + 2, + &response.command_id, + device_ref)) { + dev_err(&spi->dev, "cascoda_api_downstream failed\n"); + return MAC_SYSTEM_ERROR; + } + + if (response.command_id != SPI_MLME_RESET_CONFIRM) + return MAC_SYSTEM_ERROR; + + status = response.pdata.status; + + /* reset COORD Bit for Channel Filtering as Coordinator */ + if (CA8210_MAC_WORKAROUNDS && set_default_pib && (!status)) { + status = tdme_setsfr_request_sync( + 0, + CA8210_SFR_MACCON, + 0, + device_ref + ); + } + + return status; +} + +/** + * mlme_set_request_sync() - MLME_SET_request/confirm according to API Spec + * @pib_attribute: Attribute Number + * @pib_attribute_index: Index within Attribute if an Array + * @pib_attribute_length: Attribute length + * @pib_attribute_value: Pointer to Attribute Value + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of MLME-SET.confirm + */ +static u8 mlme_set_request_sync( + u8 pib_attribute, + u8 pib_attribute_index, + u8 pib_attribute_length, + const void *pib_attribute_value, + void *device_ref +) +{ + u8 status; + struct mac_message command, response; + + /* pre-check the validity of pib_attribute values that are not checked + * in MAC + */ + if (tdme_checkpibattribute( + pib_attribute, pib_attribute_length, pib_attribute_value)) { + return MAC_INVALID_PARAMETER; + } + + if (pib_attribute == PHY_CURRENT_CHANNEL) { + status = tdme_channelinit( + *((u8 *)pib_attribute_value), + device_ref + ); + if (status) + return status; + } + + if (pib_attribute == PHY_TRANSMIT_POWER) { + return tdme_settxpower( + *((u8 *)pib_attribute_value), + device_ref + ); + } + + command.command_id = SPI_MLME_SET_REQUEST; + command.length = sizeof(struct mlme_set_request_pset) - + MAX_ATTRIBUTE_SIZE + pib_attribute_length; + command.pdata.set_req.pib_attribute = pib_attribute; + command.pdata.set_req.pib_attribute_index = pib_attribute_index; + command.pdata.set_req.pib_attribute_length = pib_attribute_length; + memcpy( + command.pdata.set_req.pib_attribute_value, 
+ pib_attribute_value, + pib_attribute_length + ); + + if (cascoda_api_downstream( + &command.command_id, + command.length + 2, + &response.command_id, + device_ref)) { + return MAC_SYSTEM_ERROR; + } + + if (response.command_id != SPI_MLME_SET_CONFIRM) + return MAC_SYSTEM_ERROR; + + return response.pdata.status; +} + +/** + * hwme_set_request_sync() - HWME_SET_request/confirm according to API Spec + * @hw_attribute: Attribute Number + * @hw_attribute_length: Attribute length + * @hw_attribute_value: Pointer to Attribute Value + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of HWME-SET.confirm + */ +static u8 hwme_set_request_sync( + u8 hw_attribute, + u8 hw_attribute_length, + u8 *hw_attribute_value, + void *device_ref +) +{ + struct mac_message command, response; + + command.command_id = SPI_HWME_SET_REQUEST; + command.length = 2 + hw_attribute_length; + command.pdata.hwme_set_req.hw_attribute = hw_attribute; + command.pdata.hwme_set_req.hw_attribute_length = hw_attribute_length; + memcpy( + command.pdata.hwme_set_req.hw_attribute_value, + hw_attribute_value, + hw_attribute_length + ); + + if (cascoda_api_downstream( + &command.command_id, + command.length + 2, + &response.command_id, + device_ref)) { + return MAC_SYSTEM_ERROR; + } + + if (response.command_id != SPI_HWME_SET_CONFIRM) + return MAC_SYSTEM_ERROR; + + return response.pdata.hwme_set_cnf.status; +} + +/** + * hwme_get_request_sync() - HWME_GET_request/confirm according to API Spec + * @hw_attribute: Attribute Number + * @hw_attribute_length: Attribute length + * @hw_attribute_value: Pointer to Attribute Value + * @device_ref: Nondescript pointer to target device + * + * Return: 802.15.4 status code of HWME-GET.confirm + */ +static u8 hwme_get_request_sync( + u8 hw_attribute, + u8 *hw_attribute_length, + u8 *hw_attribute_value, + void *device_ref +) +{ + struct mac_message command, response; + + command.command_id = SPI_HWME_GET_REQUEST; + command.length = 1; + command.pdata.hwme_get_req.hw_attribute = hw_attribute; + + if (cascoda_api_downstream( + &command.command_id, + command.length + 2, + &response.command_id, + device_ref)) { + return MAC_SYSTEM_ERROR; + } + + if (response.command_id != SPI_HWME_GET_CONFIRM) + return MAC_SYSTEM_ERROR; + + if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) { + *hw_attribute_length = + response.pdata.hwme_get_cnf.hw_attribute_length; + memcpy( + hw_attribute_value, + response.pdata.hwme_get_cnf.hw_attribute_value, + *hw_attribute_length + ); + } + + return response.pdata.hwme_get_cnf.status; +} + +/* Network driver operation */ + +/** + * ca8210_async_xmit_complete() - Called to announce that an asynchronous + * transmission has finished + * @hw: ieee802154_hw of ca8210 that has finished exchange + * @msduhandle: Identifier of transmission that has completed + * @status: Returned 802.15.4 status code of the transmission + * + * Return: 0 or linux error code + */ +static int ca8210_async_xmit_complete( + struct ieee802154_hw *hw, + u8 msduhandle, + u8 status) +{ + struct ca8210_priv *priv = hw->priv; + + if (priv->nextmsduhandle != msduhandle) { + dev_err( + &priv->spi->dev, + "Unexpected msdu_handle on data confirm, Expected %d, got %d\n", + priv->nextmsduhandle, + msduhandle + ); + return -EIO; + } + + priv->async_tx_pending = false; + priv->nextmsduhandle++; + + if (status) { + dev_err( + &priv->spi->dev, + "Link transmission unsuccessful, status = %d\n", + status + ); + if (status != MAC_TRANSACTION_OVERFLOW) { + 
ieee802154_wake_queue(priv->hw); + return 0; + } + } + ieee802154_xmit_complete(priv->hw, priv->tx_skb, true); + + return 0; +} + +/** + * ca8210_skb_rx() - Constructs a properly framed socket buffer from a received + * MCPS_DATA_indication + * @hw: ieee802154_hw that MCPS_DATA_indication was received by + * @len: length of MCPS_DATA_indication + * @data_ind: Octet array of MCPS_DATA_indication + * + * Called when an MCPS_DATA_indication is received from the ca8210; rebuilds + * the IEEE 802.15.4 MAC header around the indication's MSDU and delivers the + * resulting frame to the mac802154 layer. + * + * Return: 0 or linux error code + */ +static int ca8210_skb_rx( + struct ieee802154_hw *hw, + size_t len, + u8 *data_ind +) +{ + struct ieee802154_hdr hdr; + int msdulen; + int hlen; + u8 mpdulinkquality = data_ind[23]; + struct sk_buff *skb; + struct ca8210_priv *priv = hw->priv; + + /* Allocate mtu size buffer for every rx packet */ + skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr)); + if (!skb) { + dev_crit(&priv->spi->dev, "dev_alloc_skb failed\n"); + return -ENOMEM; + } + skb_reserve(skb, sizeof(hdr)); + + msdulen = data_ind[22]; /* msdu_length */ + if (msdulen > IEEE802154_MTU) { + dev_err( + &priv->spi->dev, + "received erroneously large msdu length!\n" + ); + kfree_skb(skb); + return -EMSGSIZE; + } + dev_dbg(&priv->spi->dev, "skb buffer length = %d\n", msdulen); + + if (priv->promiscuous) + goto copy_payload; + + /* Populate hdr */ + hdr.sec.level = data_ind[29 + msdulen]; + dev_dbg(&priv->spi->dev, "security level: %#03x\n", hdr.sec.level); + if (hdr.sec.level > 0) { + hdr.sec.key_id_mode = data_ind[30 + msdulen]; + memcpy(&hdr.sec.extended_src, &data_ind[31 + msdulen], 8); + hdr.sec.key_id = data_ind[39 + msdulen]; + } + hdr.source.mode = data_ind[0]; + dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); + hdr.source.pan_id = *(u16 *)&data_ind[1]; + dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); + memcpy(&hdr.source.extended_addr, &data_ind[3], 8); + hdr.dest.mode = data_ind[11]; + dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); + hdr.dest.pan_id = *(u16 *)&data_ind[12]; + dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); + memcpy(&hdr.dest.extended_addr, &data_ind[14], 8); + + /* Fill in FC implicitly */ + hdr.fc.type = 1; /* Data frame */ + if (hdr.sec.level) + hdr.fc.security_enabled = 1; + else + hdr.fc.security_enabled = 0; + if (data_ind[1] != data_ind[12] || data_ind[2] != data_ind[13]) + hdr.fc.intra_pan = 1; + else + hdr.fc.intra_pan = 0; + hdr.fc.dest_addr_mode = hdr.dest.mode; + hdr.fc.source_addr_mode = hdr.source.mode; + + /* Add hdr to front of buffer */ + hlen = ieee802154_hdr_push(skb, &hdr); + + if (hlen < 0) { + dev_crit(&priv->spi->dev, "failed to push mac hdr onto skb!\n"); + kfree_skb(skb); + return hlen; + } + + skb_reset_mac_header(skb); + skb->mac_len = hlen; + +copy_payload: + /* Add bytes of space to the back of the buffer */ + /* Copy msdu to skb */ + memcpy(skb_put(skb, msdulen), &data_ind[29], msdulen); + + ieee802154_rx_irqsafe(hw, skb, mpdulinkquality); + return 0; +} + +/** + * ca8210_net_rx() - Acts upon received SAP commands relevant to the network + * driver + * @hw: ieee802154_hw that command was received by + * @command: Octet array of received command + * @len: length of the received command + * + * Called by the spi driver whenever a SAP command is received, this function + * will ascertain whether the command is of interest to the network driver and + * take necessary action.
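ca8210_skb_rx() above is the standard ieee802154 receive handoff: allocate an skb with headroom, rebuild the MAC header with ieee802154_hdr_push() (skipped in promiscuous mode), append the MSDU, and pass the frame up with its link quality byte. Reduced to the payload-only branch, the shape is roughly the following hypothetical sketch (assumes the caller has already validated msdulen against IEEE802154_MTU):

#include <linux/ieee802154.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

static int demo_deliver_msdu(struct ieee802154_hw *hw,
			     const u8 *msdu, u8 msdulen, u8 lqi)
{
	struct sk_buff *skb = dev_alloc_skb(IEEE802154_MTU);

	if (!skb)
		return -ENOMEM;
	memcpy(skb_put(skb, msdulen), msdu, msdulen);	/* append payload */
	ieee802154_rx_irqsafe(hw, skb, lqi);	/* queue frame for mac802154 */
	return 0;
}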
+ * + * Return: 0 or linux error code + */ +static int ca8210_net_rx(struct ieee802154_hw *hw, u8 *command, size_t len) +{ + struct ca8210_priv *priv = hw->priv; + unsigned long flags; + u8 status; + + dev_dbg(&priv->spi->dev, "ca8210_net_rx(), CmdID = %d\n", command[0]); + + if (command[0] == SPI_MCPS_DATA_INDICATION) { + /* Received data */ + spin_lock_irqsave(&priv->lock, flags); + if (command[26] == priv->last_dsn) { + dev_dbg( + &priv->spi->dev, + "DSN %d resend received, ignoring...\n", + command[26] + ); + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + priv->last_dsn = command[26]; + spin_unlock_irqrestore(&priv->lock, flags); + return ca8210_skb_rx(hw, len - 2, command + 2); + } else if (command[0] == SPI_MCPS_DATA_CONFIRM) { + status = command[3]; + if (priv->async_tx_pending) { + return ca8210_async_xmit_complete( + hw, + command[2], + status + ); + } + } + + return 0; +} + +/** + * ca8210_skb_tx() - Transmits a given socket buffer using the ca8210 + * @skb: Socket buffer to transmit + * @msduhandle: Data identifier to pass to the 802.15.4 MAC + * @priv: Pointer to private data section of target ca8210 + * + * Return: 0 or linux error code + */ +static int ca8210_skb_tx( + struct sk_buff *skb, + u8 msduhandle, + struct ca8210_priv *priv +) +{ + int status; + struct ieee802154_hdr header = { 0 }; + struct secspec secspec; + unsigned int mac_len; + + dev_dbg(&priv->spi->dev, "ca8210_skb_tx() called\n"); + + /* Get addressing info from skb - ieee802154 layer creates a full + * packet + */ + mac_len = ieee802154_hdr_peek_addrs(skb, &header); + + secspec.security_level = header.sec.level; + secspec.key_id_mode = header.sec.key_id_mode; + if (secspec.key_id_mode == 2) + memcpy(secspec.key_source, &header.sec.short_src, 4); + else if (secspec.key_id_mode == 3) + memcpy(secspec.key_source, &header.sec.extended_src, 8); + secspec.key_index = header.sec.key_id; + + /* Pass to Cascoda API */ + status = mcps_data_request( + header.source.mode, + header.dest.mode, + header.dest.pan_id, + (union macaddr *)&header.dest.extended_addr, + skb->len - mac_len, + &skb->data[mac_len], + msduhandle, + header.fc.ack_request, + &secspec, + priv->spi + ); + return link_to_linux_err(status); +} + +/** + * ca8210_start() - Starts the network driver + * @hw: ieee802154_hw of ca8210 being started + * + * Return: 0 or linux error code + */ +static int ca8210_start(struct ieee802154_hw *hw) +{ + int status; + u8 rx_on_when_idle; + u8 lqi_threshold = 0; + struct ca8210_priv *priv = hw->priv; + + priv->last_dsn = -1; + /* Turn receiver on when idle for now just to test rx */ + rx_on_when_idle = 1; + status = mlme_set_request_sync( + MAC_RX_ON_WHEN_IDLE, + 0, + 1, + &rx_on_when_idle, + priv->spi + ); + if (status) { + dev_crit( + &priv->spi->dev, + "Setting rx_on_when_idle failed, status = %d\n", + status + ); + return link_to_linux_err(status); + } + status = hwme_set_request_sync( + HWME_LQILIMIT, + 1, + &lqi_threshold, + priv->spi + ); + if (status) { + dev_crit( + &priv->spi->dev, + "Setting lqilimit failed, status = %d\n", + status + ); + return link_to_linux_err(status); + } + + return 0; +} + +/** + * ca8210_stop() - Stops the network driver + * @hw: ieee802154_hw of ca8210 being stopped + * + * Return: 0 or linux error code + */ +static void ca8210_stop(struct ieee802154_hw *hw) +{ +} + +/** + * ca8210_xmit_async() - Asynchronously transmits a given socket buffer using + * the ca8210 + * @hw: ieee802154_hw of ca8210 to transmit from + * @skb: Socket buffer to transmit + * + * Return: 0 or 
linux error code + */ +static int ca8210_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct ca8210_priv *priv = hw->priv; + int status; + + dev_dbg(&priv->spi->dev, "calling ca8210_xmit_async()\n"); + + priv->tx_skb = skb; + priv->async_tx_pending = true; + status = ca8210_skb_tx(skb, priv->nextmsduhandle, priv); + return status; +} + +/** + * ca8210_get_ed() - Returns the measured energy on the current channel at this + * instant in time + * @hw: ieee802154_hw of target ca8210 + * @level: Measured Energy Detect level + * + * Return: 0 or linux error code + */ +static int ca8210_get_ed(struct ieee802154_hw *hw, u8 *level) +{ + u8 lenvar; + struct ca8210_priv *priv = hw->priv; + + return link_to_linux_err( + hwme_get_request_sync(HWME_EDVALUE, &lenvar, level, priv->spi) + ); +} + +/** + * ca8210_set_channel() - Sets the current operating 802.15.4 channel of the + * ca8210 + * @hw: ieee802154_hw of target ca8210 + * @page: Channel page to set + * @channel: Channel number to set + * + * Return: 0 or linux error code + */ +static int ca8210_set_channel( + struct ieee802154_hw *hw, + u8 page, + u8 channel +) +{ + u8 status; + struct ca8210_priv *priv = hw->priv; + + status = mlme_set_request_sync( + PHY_CURRENT_CHANNEL, + 0, + 1, + &channel, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting channel, MLME-SET.confirm status = %d\n", + status + ); + } + return link_to_linux_err(status); +} + +/** + * ca8210_set_hw_addr_filt() - Sets the address filtering parameters of the + * ca8210 + * @hw: ieee802154_hw of target ca8210 + * @filt: Filtering parameters + * @changed: Bitmap representing which parameters to change + * + * Effectively just sets the actual addressing information identifying this node + * as all filtering is performed by the ca8210 as detailed in the IEEE 802.15.4 + * 2006 specification. + * + * Return: 0 or linux error code + */ +static int ca8210_set_hw_addr_filt( + struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed +) +{ + u8 status = 0; + struct ca8210_priv *priv = hw->priv; + + if (changed & IEEE802154_AFILT_PANID_CHANGED) { + status = mlme_set_request_sync( + MAC_PAN_ID, + 0, + 2, + &filt->pan_id, priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting pan id, MLME-SET.confirm status = %d", + status + ); + return link_to_linux_err(status); + } + } + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { + status = mlme_set_request_sync( + MAC_SHORT_ADDRESS, + 0, + 2, + &filt->short_addr, priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting short address, MLME-SET.confirm status = %d", + status + ); + return link_to_linux_err(status); + } + } + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { + status = mlme_set_request_sync( + NS_IEEE_ADDRESS, + 0, + 8, + &filt->ieee_addr, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting ieee address, MLME-SET.confirm status = %d", + status + ); + return link_to_linux_err(status); + } + } + /* TODO: Should use MLME_START to set coord bit? 
*/ + return 0; +} + +/** + * ca8210_set_tx_power() - Sets the transmit power of the ca8210 + * @hw: ieee802154_hw of target ca8210 + * @mbm: Transmit power in mBm (dBm*100) + * + * Return: 0 or linux error code + */ +static int ca8210_set_tx_power(struct ieee802154_hw *hw, s32 mbm) +{ + struct ca8210_priv *priv = hw->priv; + + mbm /= 100; + return link_to_linux_err( + mlme_set_request_sync(PHY_TRANSMIT_POWER, 0, 1, &mbm, priv->spi) + ); +} + +/** + * ca8210_set_cca_mode() - Sets the clear channel assessment mode of the ca8210 + * @hw: ieee802154_hw of target ca8210 + * @cca: CCA mode to set + * + * Return: 0 or linux error code + */ +static int ca8210_set_cca_mode( + struct ieee802154_hw *hw, + const struct wpan_phy_cca *cca +) +{ + u8 status; + u8 cca_mode; + struct ca8210_priv *priv = hw->priv; + + cca_mode = cca->mode & 3; + if (cca_mode == 3 && cca->opt == NL802154_CCA_OPT_ENERGY_CARRIER_OR) { + /* cca_mode 0 == CS OR ED, 3 == CS AND ED */ + cca_mode = 0; + } + status = mlme_set_request_sync( + PHY_CCA_MODE, + 0, + 1, + &cca_mode, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting cca mode, MLME-SET.confirm status = %d", + status + ); + } + return link_to_linux_err(status); +} + +/** + * ca8210_set_cca_ed_level() - Sets the CCA ED level of the ca8210 + * @hw: ieee802154_hw of target ca8210 + * @level: ED level to set (in mbm) + * + * Sets the minimum threshold of measured energy above which the ca8210 will + * back off and retry a transmission. + * + * Return: 0 or linux error code + */ +static int ca8210_set_cca_ed_level(struct ieee802154_hw *hw, s32 level) +{ + u8 status; + u8 ed_threshold = (level / 100) * 2 + 256; + struct ca8210_priv *priv = hw->priv; + + status = hwme_set_request_sync( + HWME_EDTHRESHOLD, + 1, + &ed_threshold, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting ed threshold, HWME-SET.confirm status = %d", + status + ); + } + return link_to_linux_err(status); +} + +/** + * ca8210_set_csma_params() - Sets the CSMA parameters of the ca8210 + * @hw: ieee802154_hw of target ca8210 + * @min_be: Minimum backoff exponent when backing off a transmission + * @max_be: Maximum backoff exponent when backing off a transmission + * @retries: Number of times to retry after backing off + * + * Return: 0 or linux error code + */ +static int ca8210_set_csma_params( + struct ieee802154_hw *hw, + u8 min_be, + u8 max_be, + u8 retries +) +{ + u8 status; + struct ca8210_priv *priv = hw->priv; + + status = mlme_set_request_sync(MAC_MIN_BE, 0, 1, &min_be, priv->spi); + if (status) { + dev_err( + &priv->spi->dev, + "error setting min be, MLME-SET.confirm status = %d", + status + ); + return link_to_linux_err(status); + } + status = mlme_set_request_sync(MAC_MAX_BE, 0, 1, &max_be, priv->spi); + if (status) { + dev_err( + &priv->spi->dev, + "error setting max be, MLME-SET.confirm status = %d", + status + ); + return link_to_linux_err(status); + } + status = mlme_set_request_sync( + MAC_MAX_CSMA_BACKOFFS, + 0, + 1, + &retries, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting max csma backoffs, MLME-SET.confirm status = %d", + status + ); + } + return link_to_linux_err(status); +} + +/** + * ca8210_set_frame_retries() - Sets the maximum frame retries of the ca8210 + * @hw: ieee802154_hw of target ca8210 + * @retries: Number of retries + * + * Sets the number of times to retry a transmission if no acknowledgment + * was received from the other end when one was requested.
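Both power-related setters above work in the mBm units (dBm * 100) used by the wpan-phy core: the transmit power is simply divided by 100 before being written, while the ED threshold is rescaled to the half-dB register steps consumed by HWME_EDTHRESHOLD. A standalone sketch of that second conversion, with worked values; the helper name is invented, only the arithmetic mirrors the driver:

#include <stdio.h>

/* Mirrors the arithmetic in ca8210_set_cca_ed_level() above: mBm / 100
 * gives dBm, and the chip register counts in half-dB steps with a +256
 * offset that is folded away by the truncation to an 8-bit value.
 */
static unsigned char ed_threshold_from_mbm(int level_mbm)
{
	return (unsigned char)((level_mbm / 100) * 2 + 256);
}

int main(void)
{
	printf("%u\n", ed_threshold_from_mbm(-9800));	/* -98 dBm -> 60 */
	printf("%u %u\n", ed_threshold_from_mbm(-10300),
	       ed_threshold_from_mbm(-9300));		/* prints 50 70 */
	return 0;
}

So the supported table of ED levels from -10300 to -9300 mBm maps onto the contiguous register range 50..70.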
+ * + * Return: 0 or linux error code + */ +static int ca8210_set_frame_retries(struct ieee802154_hw *hw, s8 retries) +{ + u8 status; + struct ca8210_priv *priv = hw->priv; + + status = mlme_set_request_sync( + MAC_MAX_FRAME_RETRIES, + 0, + 1, + &retries, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting frame retries, MLME-SET.confirm status = %d", + status + ); + } + return link_to_linux_err(status); +} + +static int ca8210_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) +{ + u8 status; + struct ca8210_priv *priv = hw->priv; + + status = mlme_set_request_sync( + MAC_PROMISCUOUS_MODE, + 0, + 1, + (const void *)&on, + priv->spi + ); + if (status) { + dev_err( + &priv->spi->dev, + "error setting promiscuous mode, MLME-SET.confirm status = %d", + status + ); + } else { + priv->promiscuous = on; + } + return link_to_linux_err(status); +} + +static const struct ieee802154_ops ca8210_phy_ops = { + .start = ca8210_start, + .stop = ca8210_stop, + .xmit_async = ca8210_xmit_async, + .ed = ca8210_get_ed, + .set_channel = ca8210_set_channel, + .set_hw_addr_filt = ca8210_set_hw_addr_filt, + .set_txpower = ca8210_set_tx_power, + .set_cca_mode = ca8210_set_cca_mode, + .set_cca_ed_level = ca8210_set_cca_ed_level, + .set_csma_params = ca8210_set_csma_params, + .set_frame_retries = ca8210_set_frame_retries, + .set_promiscuous_mode = ca8210_set_promiscuous_mode +}; + +/* Test/EVBME Interface */ + +/** + * ca8210_test_int_open() - Opens the test interface to the userspace + * @inodp: inode representation of file interface + * @filp: file interface + * + * Return: 0 or linux error code + */ +static int ca8210_test_int_open(struct inode *inodp, struct file *filp) +{ + struct ca8210_priv *priv = inodp->i_private; + + filp->private_data = priv; + return 0; +} + +/** + * ca8210_test_check_upstream() - Checks a command received from the upstream + * testing interface for required action + * @buf: Buffer containing command to check + * @device_ref: Nondescript pointer to target device + * + * Return: 0 or linux error code + */ +static int ca8210_test_check_upstream(u8 *buf, void *device_ref) +{ + int ret; + u8 response[CA8210_SPI_BUF_SIZE]; + + if (buf[0] == SPI_MLME_SET_REQUEST) { + ret = tdme_checkpibattribute(buf[2], buf[4], buf + 5); + if (ret) { + response[0] = SPI_MLME_SET_CONFIRM; + response[1] = 3; + response[2] = MAC_INVALID_PARAMETER; + response[3] = buf[2]; + response[4] = buf[3]; + if (cascoda_api_upstream) + cascoda_api_upstream(response, 5, device_ref); + return ret; + } + } + if (buf[0] == SPI_MLME_ASSOCIATE_REQUEST) { + return tdme_channelinit(buf[2], device_ref); + } else if (buf[0] == SPI_MLME_START_REQUEST) { + return tdme_channelinit(buf[4], device_ref); + } else if ( + (buf[0] == SPI_MLME_SET_REQUEST) && + (buf[2] == PHY_CURRENT_CHANNEL) + ) { + return tdme_channelinit(buf[5], device_ref); + } else if ( + (buf[0] == SPI_TDME_SET_REQUEST) && + (buf[2] == TDME_CHANNEL) + ) { + return tdme_channelinit(buf[4], device_ref); + } else if ( + (CA8210_MAC_WORKAROUNDS) && + (buf[0] == SPI_MLME_RESET_REQUEST) && + (buf[2] == 1) + ) { + /* reset COORD Bit for Channel Filtering as Coordinator */ + return tdme_setsfr_request_sync( + 0, + CA8210_SFR_MACCON, + 0, + device_ref + ); + } + return 0; +} /* End of ca8210_test_check_upstream() */ + +/** + * ca8210_test_int_user_write() - Called by a process in userspace to send a + * message to the ca8210 driver + * @filp: file interface + * @in_buf: Buffer containing message to write + * @len: length of message + * @off:
file offset + * + * Return: 0 or linux error code + */ +static ssize_t ca8210_test_int_user_write( + struct file *filp, + const char __user *in_buf, + size_t len, + loff_t *off +) +{ + int ret; + struct ca8210_priv *priv = filp->private_data; + u8 command[CA8210_SPI_BUF_SIZE]; + + if (len > CA8210_SPI_BUF_SIZE) { + dev_warn( + &priv->spi->dev, + "userspace requested erroneously long write (%zu)\n", + len + ); + return -EMSGSIZE; + } + + ret = copy_from_user(command, in_buf, len); + if (ret) { + dev_err( + &priv->spi->dev, + "%d bytes could not be copied from userspace\n", + ret + ); + return -EIO; + } + + ret = ca8210_test_check_upstream(command, priv->spi); + if (ret == 0) { + ret = ca8210_spi_exchange( + command, + command[1] + 2, + NULL, + priv->spi + ); + if (ret < 0) { + /* effectively 0 bytes were written successfully */ + dev_err( + &priv->spi->dev, + "spi exchange failed\n" + ); + return ret; + } + if (command[0] & SPI_SYN) + priv->sync_down++; + } + + return len; +} + +/** + * ca8210_test_int_user_read() - Called by a process in userspace to read a + * message from the ca8210 drivers + * @filp: file interface + * @buf: Buffer to write message to + * @len: length of message to read (ignored) + * @offp: file offset + * + * If the O_NONBLOCK flag was set when opening the file then this function will + * not block, i.e. it will return if the fifo is empty. Otherwise the function + * will block, i.e. wait until new data arrives. + * + * Return: number of bytes read + */ +static ssize_t ca8210_test_int_user_read( + struct file *filp, + char __user *buf, + size_t len, + loff_t *offp +) +{ + int i, cmdlen; + struct ca8210_priv *priv = filp->private_data; + unsigned char *fifo_buffer; + unsigned long bytes_not_copied; + + if (filp->f_flags & O_NONBLOCK) { + /* Non-blocking mode */ + if (kfifo_is_empty(&priv->test.up_fifo)) + return 0; + } else { + /* Blocking mode */ + wait_event_interruptible( + priv->test.readq, + !kfifo_is_empty(&priv->test.up_fifo) + ); + } + + if (kfifo_out(&priv->test.up_fifo, &fifo_buffer, 4) != 4) { + dev_err( + &priv->spi->dev, + "test_interface: Wrong number of elements popped from upstream fifo\n" + ); + return 0; + } + cmdlen = fifo_buffer[1]; + bytes_not_copied = cmdlen + 2; + + bytes_not_copied = copy_to_user(buf, fifo_buffer, bytes_not_copied); + if (bytes_not_copied > 0) { + dev_err( + &priv->spi->dev, + "%lu bytes could not be copied to user space!\n", + bytes_not_copied + ); + } + + dev_dbg(&priv->spi->dev, "test_interface: Cmd len = %d\n", cmdlen); + + dev_dbg(&priv->spi->dev, "test_interface: Read\n"); + for (i = 0; i < cmdlen + 2; i++) + dev_dbg(&priv->spi->dev, "%#03x\n", fifo_buffer[i]); + + kfree(fifo_buffer); + + return cmdlen + 2; +} + +/** + * ca8210_test_int_ioctl() - Called by a process in userspace to enact an + * arbitrary action + * @filp: file interface + * @ioctl_num: which action to enact + * @ioctl_param: arbitrary parameter for the action + * + * Return: status + */ +static long ca8210_test_int_ioctl( + struct file *filp, + unsigned int ioctl_num, + unsigned long ioctl_param +) +{ + struct ca8210_priv *priv = filp->private_data; + + switch (ioctl_num) { + case CA8210_IOCTL_HARD_RESET: + ca8210_reset_send(priv->spi, ioctl_param); + break; + default: + break; + } + return 0; +} + +/** + * ca8210_test_int_poll() - Called by a process in userspace to determine which + * actions are currently possible for the file + * @filp: file interface + * @ptable: poll table + * + * Return: set of poll return flags + */ +static unsigned int 
ca8210_test_int_poll( + struct file *filp, + struct poll_table_struct *ptable +) +{ + unsigned int return_flags = 0; + struct ca8210_priv *priv = filp->private_data; + + poll_wait(filp, &priv->test.readq, ptable); + if (!kfifo_is_empty(&priv->test.up_fifo)) + return_flags |= (POLLIN | POLLRDNORM); + if (wait_event_interruptible( + priv->test.readq, + !kfifo_is_empty(&priv->test.up_fifo))) { + return POLLERR; + } + return return_flags; +} + +static const struct file_operations test_int_fops = { + .read = ca8210_test_int_user_read, + .write = ca8210_test_int_user_write, + .open = ca8210_test_int_open, + .release = NULL, + .unlocked_ioctl = ca8210_test_int_ioctl, + .poll = ca8210_test_int_poll +}; + +/* Init/Deinit */ + +/** + * ca8210_get_platform_data() - Populate a ca8210_platform_data object + * @spi_device: Pointer to ca8210 spi device object to get data for + * @pdata: Pointer to ca8210_platform_data object to populate + * + * Return: 0 or linux error code + */ +static int ca8210_get_platform_data( + struct spi_device *spi_device, + struct ca8210_platform_data *pdata +) +{ + int ret = 0; + + if (!spi_device->dev.of_node) + return -EINVAL; + + pdata->extclockenable = of_property_read_bool( + spi_device->dev.of_node, + "extclock-enable" + ); + if (pdata->extclockenable) { + ret = of_property_read_u32( + spi_device->dev.of_node, + "extclock-freq", + &pdata->extclockfreq + ); + if (ret < 0) + return ret; + + ret = of_property_read_u32( + spi_device->dev.of_node, + "extclock-gpio", + &pdata->extclockgpio + ); + } + + return ret; +} + +/** + * ca8210_config_extern_clk() - Configure the external clock provided by the + * ca8210 + * @pdata: Pointer to ca8210_platform_data containing clock parameters + * @spi: Pointer to target ca8210 spi device + * @on: True to turn the clock on, false to turn off + * + * The external clock is configured with a frequency and output pin taken from + * the platform data. 
+ * + * Return: 0 or linux error code + */ +static int ca8210_config_extern_clk( + struct ca8210_platform_data *pdata, + struct spi_device *spi, + bool on +) +{ + u8 clkparam[2]; + + if (on) { + dev_info(&spi->dev, "Switching external clock on\n"); + switch (pdata->extclockfreq) { + case SIXTEEN_MHZ: + clkparam[0] = 1; + break; + case EIGHT_MHZ: + clkparam[0] = 2; + break; + case FOUR_MHZ: + clkparam[0] = 3; + break; + case TWO_MHZ: + clkparam[0] = 4; + break; + case ONE_MHZ: + clkparam[0] = 5; + break; + default: + dev_crit(&spi->dev, "Invalid extclock-freq\n"); + return -EINVAL; + } + clkparam[1] = pdata->extclockgpio; + } else { + dev_info(&spi->dev, "Switching external clock off\n"); + clkparam[0] = 0; /* off */ + clkparam[1] = 0; + } + return link_to_linux_err( + hwme_set_request_sync(HWME_SYSCLKOUT, 2, clkparam, spi) + ); +} + +/** + * ca8210_register_ext_clock() - Register ca8210's external clock with the kernel + * @spi: Pointer to target ca8210 spi device + * + * Return: 0 or linux error code + */ +static int ca8210_register_ext_clock(struct spi_device *spi) +{ + struct device_node *np = spi->dev.of_node; + struct ca8210_priv *priv = spi_get_drvdata(spi); + struct ca8210_platform_data *pdata = spi->dev.platform_data; + int ret = 0; + + if (!np) + return -EFAULT; + + priv->clk = clk_register_fixed_rate( + &spi->dev, + np->name, + NULL, + 0, + pdata->extclockfreq + ); + + if (IS_ERR(priv->clk)) { + dev_crit(&spi->dev, "Failed to register external clk\n"); + return PTR_ERR(priv->clk); + } + ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); + if (ret) { + clk_unregister(priv->clk); + dev_crit( + &spi->dev, + "Failed to register external clock as clock provider\n" + ); + } else { + dev_info(&spi->dev, "External clock set as clock provider\n"); + } + + return ret; +} + +/** + * ca8210_unregister_ext_clock() - Unregister ca8210's external clock with the + * kernel + * @spi: Pointer to target ca8210 spi device + */ +static void ca8210_unregister_ext_clock(struct spi_device *spi) +{ + struct ca8210_priv *priv = spi_get_drvdata(spi); + + if (!priv->clk) + return; + + of_clk_del_provider(spi->dev.of_node); + clk_unregister(priv->clk); + dev_info(&spi->dev, "External clock unregistered\n"); +} + +/** + * ca8210_reset_init() - Initialise the reset input to the ca8210 + * @spi: Pointer to target ca8210 spi device + * + * Return: 0 or linux error code + */ +static int ca8210_reset_init(struct spi_device *spi) +{ + int ret; + struct ca8210_platform_data *pdata = spi->dev.platform_data; + + pdata->gpio_reset = of_get_named_gpio( + spi->dev.of_node, + "reset-gpio", + 0 + ); + + ret = gpio_direction_output(pdata->gpio_reset, 1); + if (ret < 0) { + dev_crit( + &spi->dev, + "Reset GPIO %d could not be set to output mode\n", + pdata->gpio_reset + ); + } + + return ret; +} + +/** + * ca8210_interrupt_init() - Initialise the irq output from the ca8210 + * @spi: Pointer to target ca8210 spi device + * + * Return: 0 or linux error code + */ +static int ca8210_interrupt_init(struct spi_device *spi) +{ + int ret; + struct ca8210_platform_data *pdata = spi->dev.platform_data; + + pdata->gpio_irq = of_get_named_gpio( + spi->dev.of_node, + "irq-gpio", + 0 + ); + + pdata->irq_id = gpio_to_irq(pdata->gpio_irq); + if (pdata->irq_id < 0) { + dev_crit( + &spi->dev, + "Could not get irq for gpio pin %d\n", + pdata->gpio_irq + ); + gpio_free(pdata->gpio_irq); + return pdata->irq_id; + } + + ret = request_irq( + pdata->irq_id, + ca8210_interrupt_handler, + IRQF_TRIGGER_FALLING, + "ca8210-irq", + spi_get_drvdata(spi) +
); + if (ret) { + dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id); + gpio_unexport(pdata->gpio_irq); + gpio_free(pdata->gpio_irq); + } + + return ret; +} + +/** + * ca8210_dev_com_init() - Initialise the spi communication component + * @priv: Pointer to private data structure + * + * Return: 0 or linux error code + */ +static int ca8210_dev_com_init(struct ca8210_priv *priv) +{ + priv->mlme_workqueue = alloc_ordered_workqueue( + "MLME work queue", + WQ_UNBOUND + ); + if (!priv->mlme_workqueue) { + dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n"); + return -ENOMEM; + } + + priv->irq_workqueue = alloc_ordered_workqueue( + "ca8210 irq worker", + WQ_UNBOUND + ); + if (!priv->irq_workqueue) { + dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * ca8210_dev_com_clear() - Deinitialise the spi communication component + * @priv: Pointer to private data structure + */ +static void ca8210_dev_com_clear(struct ca8210_priv *priv) +{ + flush_workqueue(priv->mlme_workqueue); + destroy_workqueue(priv->mlme_workqueue); + flush_workqueue(priv->irq_workqueue); + destroy_workqueue(priv->irq_workqueue); +} + +#define CA8210_MAX_TX_POWERS (9) +static const s32 ca8210_tx_powers[CA8210_MAX_TX_POWERS] = { + 800, 700, 600, 500, 400, 300, 200, 100, 0 +}; + +#define CA8210_MAX_ED_LEVELS (21) +static const s32 ca8210_ed_levels[CA8210_MAX_ED_LEVELS] = { + -10300, -10250, -10200, -10150, -10100, -10050, -10000, -9950, -9900, + -9850, -9800, -9750, -9700, -9650, -9600, -9550, -9500, -9450, -9400, + -9350, -9300 +}; + +/** + * ca8210_hw_setup() - Populate the ieee802154_hw phy attributes with the + * ca8210's defaults + * @ca8210_hw: Pointer to ieee802154_hw to populate + */ +static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) +{ + /* Support channels 11-26 */ + ca8210_hw->phy->supported.channels[0] = CA8210_VALID_CHANNELS; + ca8210_hw->phy->supported.tx_powers_size = CA8210_MAX_TX_POWERS; + ca8210_hw->phy->supported.tx_powers = ca8210_tx_powers; + ca8210_hw->phy->supported.cca_ed_levels_size = CA8210_MAX_ED_LEVELS; + ca8210_hw->phy->supported.cca_ed_levels = ca8210_ed_levels; + ca8210_hw->phy->current_channel = 18; + ca8210_hw->phy->current_page = 0; + ca8210_hw->phy->transmit_power = 800; + ca8210_hw->phy->cca.mode = NL802154_CCA_ENERGY_CARRIER; + ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; + ca8210_hw->phy->cca_ed_level = -9800; + ca8210_hw->phy->symbol_duration = 16; + ca8210_hw->phy->lifs_period = 40; + ca8210_hw->phy->sifs_period = 12; + ca8210_hw->flags = + IEEE802154_HW_AFILT | + IEEE802154_HW_OMIT_CKSUM | + IEEE802154_HW_FRAME_RETRIES | + IEEE802154_HW_PROMISCUOUS | + IEEE802154_HW_CSMA_PARAMS; + ca8210_hw->phy->flags = + WPAN_PHY_FLAG_TXPOWER | + WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; +} + +/** + * ca8210_test_interface_init() - Initialise the test file interface + * @priv: Pointer to private data structure + * + * Provided as an alternative to the standard linux network interface, the test + * interface exposes a file in the filesystem (ca8210_test) that allows + * 802.15.4 SAP Commands and Cascoda EVBME commands to be sent directly to + * the stack. 
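To make the test interface concrete, here is a minimal userspace sketch that drives the debugfs node registered by the init code below. The path assumes debugfs is mounted at /sys/kernel/debug and uses the "ca8210" symlink; the command bytes are placeholders following the frame shape the driver expects (command id, payload length, payload), not a known-good SAP request:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* placeholder frame: command id, payload length, payload bytes */
	unsigned char cmd[] = { 0x45, 0x02, 0x42, 0x00 };
	unsigned char rsp[256];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/sys/kernel/debug/ca8210", O_RDWR);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	if (write(pfd.fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
		goto out;
	/* wait up to one second for the confirm pushed upstream */
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, rsp, sizeof(rsp));
		if (n > 0)
			printf("read %zd bytes, cmdid 0x%02x\n", n, rsp[0]);
	}
out:
	close(pfd.fd);
	return 0;
}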
+ * + * Return: 0 or linux error code + */ +static int ca8210_test_interface_init(struct ca8210_priv *priv) +{ + struct ca8210_test *test = &priv->test; + char node_name[32]; + + snprintf( + node_name, + sizeof(node_name), + "ca8210@%d_%d", + priv->spi->master->bus_num, + priv->spi->chip_select + ); + + test->ca8210_dfs_spi_int = debugfs_create_file( + node_name, + 0600, /* S_IRUSR | S_IWUSR */ + NULL, + priv, + &test_int_fops + ); + if (IS_ERR(test->ca8210_dfs_spi_int)) { + dev_err( + &priv->spi->dev, + "Error %ld when creating debugfs node\n", + PTR_ERR(test->ca8210_dfs_spi_int) + ); + return PTR_ERR(test->ca8210_dfs_spi_int); + } + debugfs_create_symlink("ca8210", NULL, node_name); + init_waitqueue_head(&test->readq); + return kfifo_alloc( + &test->up_fifo, + CA8210_TEST_INT_FIFO_SIZE, + GFP_KERNEL + ); +} + +/** + * ca8210_test_interface_clear() - Deinitialise the test file interface + * @priv: Pointer to private data structure + */ +static void ca8210_test_interface_clear(struct ca8210_priv *priv) +{ + struct ca8210_test *test = &priv->test; + + if (!IS_ERR(test->ca8210_dfs_spi_int)) + debugfs_remove(test->ca8210_dfs_spi_int); + kfifo_free(&test->up_fifo); + dev_info(&priv->spi->dev, "Test interface removed\n"); +} + +/** + * ca8210_remove() - Shut down a ca8210 upon being disconnected + * @spi_device: Pointer to target ca8210 spi device + * + * Return: 0 or linux error code + */ +static int ca8210_remove(struct spi_device *spi_device) +{ + struct ca8210_priv *priv; + struct ca8210_platform_data *pdata; + + dev_info(&spi_device->dev, "Removing ca8210\n"); + + pdata = spi_device->dev.platform_data; + if (pdata) { + if (pdata->extclockenable) { + ca8210_unregister_ext_clock(spi_device); + ca8210_config_extern_clk(pdata, spi_device, 0); + } + free_irq(pdata->irq_id, spi_device->dev.driver_data); + kfree(pdata); + spi_device->dev.platform_data = NULL; + } + /* get spi_device private data */ + priv = spi_get_drvdata(spi_device); + if (priv) { + dev_info( + &spi_device->dev, + "sync_down = %d, sync_up = %d\n", + priv->sync_down, + priv->sync_up + ); + ca8210_dev_com_clear(spi_device->dev.driver_data); + if (priv->hw) { + if (priv->hw_registered) + ieee802154_unregister_hw(priv->hw); + ieee802154_free_hw(priv->hw); + priv->hw = NULL; + dev_info( + &spi_device->dev, + "Unregistered & freed ieee802154_hw.\n" + ); + } + if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) + ca8210_test_interface_clear(priv); + } + + return 0; +} + +/** + * ca8210_probe() - Set up a connected ca8210 upon being detected by the system + * @spi_device: Pointer to target ca8210 spi device + * + * Return: 0 or linux error code + */ +static int ca8210_probe(struct spi_device *spi_device) +{ + struct ca8210_priv *priv; + struct ieee802154_hw *hw; + struct ca8210_platform_data *pdata; + int ret; + + dev_info(&spi_device->dev, "Inserting ca8210\n"); + + /* allocate ieee802154_hw and private data */ + hw = ieee802154_alloc_hw(sizeof(struct ca8210_priv), &ca8210_phy_ops); + if (!hw) { + dev_crit(&spi_device->dev, "ieee802154_alloc_hw failed\n"); + ret = -ENOMEM; + goto error; + } + + priv = hw->priv; + priv->hw = hw; + priv->spi = spi_device; + hw->parent = &spi_device->dev; + spin_lock_init(&priv->lock); + priv->async_tx_pending = false; + priv->hw_registered = false; + priv->sync_up = 0; + priv->sync_down = 0; + priv->promiscuous = false; + priv->retries = 0; + init_completion(&priv->ca8210_is_awake); + init_completion(&priv->spi_transfer_complete); + init_completion(&priv->sync_exchange_complete); + spi_set_drvdata(priv->spi, priv); + if
(IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) { + cascoda_api_upstream = ca8210_test_int_driver_write; + ca8210_test_interface_init(priv); + } else { + cascoda_api_upstream = NULL; + } + ca8210_hw_setup(hw); + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + + pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_crit( + &spi_device->dev, + "Could not allocate platform data\n" + ); + ret = -ENOMEM; + goto error; + } + + ret = ca8210_get_platform_data(priv->spi, pdata); + if (ret) { + dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n"); + goto error; + } + priv->spi->dev.platform_data = pdata; + + ret = ca8210_dev_com_init(priv); + if (ret) { + dev_crit(&spi_device->dev, "ca8210_dev_com_init failed\n"); + goto error; + } + ret = ca8210_reset_init(priv->spi); + if (ret) { + dev_crit(&spi_device->dev, "ca8210_reset_init failed\n"); + goto error; + } + + ret = ca8210_interrupt_init(priv->spi); + if (ret) { + dev_crit(&spi_device->dev, "ca8210_interrupt_init failed\n"); + goto error; + } + + msleep(100); + + ca8210_reset_send(priv->spi, 1); + + ret = tdme_chipinit(priv->spi); + if (ret) { + dev_crit(&spi_device->dev, "tdme_chipinit failed\n"); + goto error; + } + + if (pdata->extclockenable) { + ret = ca8210_config_extern_clk(pdata, priv->spi, 1); + if (ret) { + dev_crit( + &spi_device->dev, + "ca8210_config_extern_clk failed\n" + ); + goto error; + } + ret = ca8210_register_ext_clock(priv->spi); + if (ret) { + dev_crit( + &spi_device->dev, + "ca8210_register_ext_clock failed\n" + ); + goto error; + } + } + + ret = ieee802154_register_hw(hw); + if (ret) { + dev_crit(&spi_device->dev, "ieee802154_register_hw failed\n"); + goto error; + } + priv->hw_registered = true; + + return 0; +error: + msleep(100); /* wait for pending spi transfers to complete */ + ca8210_remove(spi_device); + return link_to_linux_err(ret); +} + +static const struct of_device_id ca8210_of_ids[] = { + {.compatible = "cascoda,ca8210", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ca8210_of_ids); + +static struct spi_driver ca8210_spi_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ca8210_of_ids), + }, + .probe = ca8210_probe, + .remove = ca8210_remove +}; + +module_spi_driver(ca8210_spi_driver); + +MODULE_AUTHOR("Harry Morris "); +MODULE_DESCRIPTION("CA-8210 SoftMAC driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION("1.0"); diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 7b131f8e40937000a273cfabbbf04a03a4af2f17..bd63289c55e8fcb68e9e371712ad1fdfc3521a43 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 800a46c8d26c25f74244ee30872097982ebc775e..7919369c0a728b1285cb20e2df9337eb0208a451 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -91,6 +92,7 @@ struct ipvl_addr { struct ipvl_port { struct net_device *dev; + possible_net_t pnet; struct hlist_head hlhead[IPVLAN_HASH_SIZE]; struct list_head ipvlans; u16 mode; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index aa8575ccbce392426ab1ceb65ecadee5175dcb1b..618ed88fad0fc1d4e227f0e84fde74462b2bc496 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -9,7 +9,11 @@ #include 
"ipvlan.h" -static u32 ipvl_nf_hook_refcnt = 0; +static unsigned int ipvlan_netid __read_mostly; + +struct ipvlan_netns { + unsigned int ipvl_nf_hook_refcnt; +}; static struct nf_hook_ops ipvl_nfops[] __read_mostly = { { @@ -35,28 +39,34 @@ static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) ipvlan->dev->mtu = dev->mtu; } -static int ipvlan_register_nf_hook(void) +static int ipvlan_register_nf_hook(struct net *net) { + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); int err = 0; - if (!ipvl_nf_hook_refcnt) { - err = _nf_register_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops)); + if (!vnet->ipvl_nf_hook_refcnt) { + err = nf_register_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); if (!err) - ipvl_nf_hook_refcnt = 1; + vnet->ipvl_nf_hook_refcnt = 1; } else { - ipvl_nf_hook_refcnt++; + vnet->ipvl_nf_hook_refcnt++; } return err; } -static void ipvlan_unregister_nf_hook(void) +static void ipvlan_unregister_nf_hook(struct net *net) { - WARN_ON(!ipvl_nf_hook_refcnt); + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + + if (WARN_ON(!vnet->ipvl_nf_hook_refcnt)) + return; - ipvl_nf_hook_refcnt--; - if (!ipvl_nf_hook_refcnt) - _nf_unregister_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops)); + vnet->ipvl_nf_hook_refcnt--; + if (!vnet->ipvl_nf_hook_refcnt) + nf_unregister_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); } static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) @@ -69,7 +79,7 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) if (port->mode != nval) { if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ - err = ipvlan_register_nf_hook(); + err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); if (!err) { mdev->l3mdev_ops = &ipvl_l3mdev_ops; mdev->priv_flags |= IFF_L3MDEV_MASTER; @@ -78,7 +88,7 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ mdev->priv_flags &= ~IFF_L3MDEV_MASTER; - ipvlan_unregister_nf_hook(); + ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } list_for_each_entry(ipvlan, &port->ipvlans, pnode) { @@ -111,6 +121,7 @@ static int ipvlan_port_create(struct net_device *dev) if (!port) return -ENOMEM; + write_pnet(&port->pnet, dev_net(dev)); port->dev = dev; port->mode = IPVLAN_MODE_L3; INIT_LIST_HEAD(&port->ipvlans); @@ -142,7 +153,7 @@ static void ipvlan_port_destroy(struct net_device *dev) dev->priv_flags &= ~IFF_IPVLAN_MASTER; if (port->mode == IPVLAN_MODE_L3S) { dev->priv_flags &= ~IFF_L3MDEV_MASTER; - ipvlan_unregister_nf_hook(); + ipvlan_unregister_nf_hook(dev_net(dev)); dev->l3mdev_ops = NULL; } netdev_rx_handler_unregister(dev); @@ -673,6 +684,24 @@ static int ipvlan_device_event(struct notifier_block *unused, ipvlan->dev); break; + case NETDEV_REGISTER: { + struct net *oldnet, *newnet = dev_net(dev); + struct ipvlan_netns *old_vnet; + + oldnet = read_pnet(&port->pnet); + if (net_eq(newnet, oldnet)) + break; + + write_pnet(&port->pnet, newnet); + + old_vnet = net_generic(oldnet, ipvlan_netid); + if (!old_vnet->ipvl_nf_hook_refcnt) + break; + + ipvlan_register_nf_hook(newnet); + ipvlan_unregister_nf_hook(oldnet); + break; + } case NETDEV_UNREGISTER: if (dev->reg_state != NETREG_UNREGISTERING) break; @@ -854,6 +883,23 @@ static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = { .notifier_call = ipvlan_addr6_event, }; +static void ipvlan_ns_exit(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + + if 
(WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) { + vnet->ipvl_nf_hook_refcnt = 0; + nf_unregister_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); + } +} + +static struct pernet_operations ipvlan_net_ops = { + .id = &ipvlan_netid, + .size = sizeof(struct ipvlan_netns), + .exit = ipvlan_ns_exit, +}; + static int __init ipvlan_init_module(void) { int err; @@ -863,10 +909,16 @@ static int __init ipvlan_init_module(void) register_inet6addr_notifier(&ipvlan_addr6_notifier_block); register_inetaddr_notifier(&ipvlan_addr4_notifier_block); - err = ipvlan_link_register(&ipvlan_link_ops); + err = register_pernet_subsys(&ipvlan_net_ops); if (err < 0) goto error; + err = ipvlan_link_register(&ipvlan_link_ops); + if (err < 0) { + unregister_pernet_subsys(&ipvlan_net_ops); + goto error; + } + return 0; error: unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); @@ -878,6 +930,7 @@ static int __init ipvlan_init_module(void) static void __exit ipvlan_cleanup_module(void) { rtnl_link_unregister(&ipvlan_link_ops); + unregister_pernet_subsys(&ipvlan_net_ops); unregister_netdevice_notifier(&ipvlan_notifier_block); unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index b23b71981fd55689daa817d41191063300bdaaf5..224f65cb576bbf106a4779ef5f60b75f34903b1c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -13,7 +13,7 @@ * * Alan Cox : Fixed oddments for NET3.014 * Alan Cox : Rejig for NET3.029 snap #3 - * Alan Cox : Fixed NET3.029 bugs and sped up + * Alan Cox : Fixed NET3.029 bugs and sped up * Larry McVoy : Tiny tweak to double performance * Alan Cox : Backed out LMV's tweak - the linux mm * can't take it... @@ -41,7 +41,7 @@ #include #include -#include +#include #include #include @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -64,8 +65,7 @@ struct pcpu_lstats { struct u64_stats_sync syncp; }; -/* - * The higher levels take care of making this non-reentrant (it's +/* The higher levels take care of making this non-reentrant (it's * called with bh's disabled). */ static netdev_tx_t loopback_xmit(struct sk_buff *skb, @@ -74,6 +74,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, struct pcpu_lstats *lb_stats; int len; + skb_tx_timestamp(skb); skb_orphan(skb); /* Before queueing this packet to netif_rx(), @@ -129,8 +130,21 @@ static u32 always_on(struct net_device *dev) return 1; } +static int loopback_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *ts_info) +{ + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + ts_info->phc_index = -1; + + return 0; +} + static const struct ethtool_ops loopback_ethtool_ops = { .get_link = always_on, + .get_ts_info = loopback_get_ts_info, }; static int loopback_dev_init(struct net_device *dev) @@ -149,14 +163,13 @@ static void loopback_dev_free(struct net_device *dev) } static const struct net_device_ops loopback_ops = { - .ndo_init = loopback_dev_init, - .ndo_start_xmit= loopback_xmit, + .ndo_init = loopback_dev_init, + .ndo_start_xmit = loopback_xmit, .ndo_get_stats64 = loopback_get_stats64, .ndo_set_mac_address = eth_mac_addr, }; -/* - * The loopback device is special. There is only one instance +/* The loopback device is special. There is only one instance * per network namespace.
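The ipvlan changes above trade a single global hook refcount for state held in each network namespace, and the same pernet machinery is what gives loopback the one-instance-per-namespace behaviour noted here. A minimal sketch of the pattern, with invented names; only the net_generic()/pernet_operations usage mirrors the code above:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int example_netid;

struct example_netns {
	unsigned int refcnt;	/* per-namespace user count */
};

static void example_ns_exit(struct net *net)
{
	struct example_netns *enet = net_generic(net, example_netid);

	/* by the time the namespace dies, every user should be gone */
	WARN_ON_ONCE(enet->refcnt);
}

static struct pernet_operations example_net_ops = {
	.id   = &example_netid,
	.size = sizeof(struct example_netns), /* core allocates per netns */
	.exit = example_ns_exit,
};

/* at module init: err = register_pernet_subsys(&example_net_ops); */

Because .size is non-zero, the core allocates and zeroes one struct example_netns per namespace, so a freshly created namespace always starts with a zero refcount.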
*/ static void loopback_setup(struct net_device *dev) @@ -170,7 +183,7 @@ static void loopback_setup(struct net_device *dev) dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; netif_keep_dst(dev); dev->hw_features = NETIF_F_GSO_SOFTWARE; - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | NETIF_F_HW_CSUM | NETIF_F_RXCSUM @@ -206,7 +219,6 @@ static __net_init int loopback_net_init(struct net *net) net->loopback_dev = dev; return 0; - out_free_netdev: free_netdev(dev); out: @@ -217,5 +229,5 @@ static __net_init int loopback_net_init(struct net *net) /* Registered in net/core/dev.c */ struct pernet_operations __net_initdata loopback_net_ops = { - .init = loopback_net_init, + .init = loopback_net_init, }; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 49ce4e9f4a0f387dac1792252ecd5044b6373cd4..cdc347be68f23196d6ba820d21475260bc5bae3b 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1605,8 +1605,9 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) if (!attrs[MACSEC_ATTR_SA_CONFIG]) return -EINVAL; - if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], - macsec_genl_sa_policy)) + if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, + attrs[MACSEC_ATTR_SA_CONFIG], + macsec_genl_sa_policy, NULL)) return -EINVAL; return 0; @@ -1617,8 +1618,9 @@ static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) return -EINVAL; - if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], - macsec_genl_rxsc_policy)) + if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, + attrs[MACSEC_ATTR_RXSC_CONFIG], + macsec_genl_rxsc_policy, NULL)) return -EINVAL; return 0; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 36877ba6551646bf1308066026b2f9f9dd12a5d3..4daf3d0926a82cfb52fd6cf774f0a467230c3246 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -372,18 +372,19 @@ static void ntb_get_drvinfo(struct net_device *ndev, strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info)); } -static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int ntb_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = SUPPORTED_Backplane; - cmd->advertising = ADVERTISED_Backplane; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_OTHER; - cmd->phy_address = 0; - cmd->transceiver = XCVR_DUMMY1; - cmd->autoneg = AUTONEG_ENABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane); + + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_OTHER; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_ENABLE; return 0; } @@ -391,7 +392,7 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static const struct ethtool_ops ntb_ethtool_ops = { .get_drvinfo = ntb_get_drvinfo, .get_link = ethtool_op_get_link, - .get_settings = ntb_get_settings, + .get_link_ksettings = ntb_get_link_ksettings, }; static const struct ntb_queue_handlers ntb_netdev_handlers = { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 
8dbd59baa34d5ed9eda97396f38b5ab38e96416a..60ffc9da6a286272d84e8dc3f4326eb908e7961a 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -2,33 +2,12 @@ # PHY Layer Configuration # -menuconfig PHYLIB - tristate "PHY Device support and infrastructure" - depends on NETDEVICES +menuconfig MDIO_DEVICE + tristate "MDIO bus device drivers" help - Ethernet controllers are usually attached to PHY - devices. This option provides infrastructure for - managing PHY devices. - -if PHYLIB + MDIO devices and driver infrastructure code. -config SWPHY - bool - -config LED_TRIGGER_PHY - bool "Support LED triggers for tracking link state" - depends on LEDS_TRIGGERS - ---help--- - Adds support for a set of LED trigger events per-PHY. Link - state change will trigger the events, for consumption by an - LED class driver. There are triggers for each link speed currently - supported by the phy, and are of the form: - :: - - Where speed is in the form: - Mbps or Gbps - -comment "MDIO bus device drivers" +if MDIO_DEVICE config MDIO_BCM_IPROC tristate "Broadcom iProc MDIO bus controller" @@ -40,7 +19,7 @@ config MDIO_BCM_IPROC config MDIO_BCM_UNIMAC tristate "Broadcom UniMAC MDIO bus controller" - depends on HAS_IOMEM + depends on HAS_IOMEM && OF_MDIO help This module provides a driver for the Broadcom UniMAC MDIO busses. This hardware can be found in the Broadcom GENET Ethernet MAC @@ -49,6 +28,7 @@ config MDIO_BCM_UNIMAC config MDIO_BITBANG tristate "Bitbanged MDIO buses" + depends on !(MDIO_DEVICE=y && PHYLIB=m) help This module implements the MDIO bus protocol in software, for use by low level drivers that export the ability to @@ -160,6 +140,36 @@ config MDIO_XGENE This module provides a driver for the MDIO busses found in the APM X-Gene SoC's. +endif + +menuconfig PHYLIB + tristate "PHY Device support and infrastructure" + depends on NETDEVICES + select MDIO_DEVICE + help + Ethernet controllers are usually attached to PHY + devices. This option provides infrastructure for + managing PHY devices. + +if PHYLIB + +config SWPHY + bool + +config LED_TRIGGER_PHY + bool "Support LED triggers for tracking link state" + depends on LEDS_TRIGGERS + ---help--- + Adds support for a set of LED trigger events per-PHY. Link + state change will trigger the events, for consumption by an + LED class driver. 
There are triggers for each link speed currently + supported by the phy, and are of the form: + :: + + Where speed is in the form: + Mbps or Gbps + + comment "MII PHY device drivers" config AMD_PHY diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 407b0b601ea8264b0ac8bf32a609271d43be995a..e36db9a2ba3814330277ea21980689c2d88c4018 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,7 +1,20 @@ # Makefile for Linux PHY drivers and MDIO bus drivers -libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o \ - mdio-boardinfo.o +libphy-y := phy.o phy-core.o phy_device.o +mdio-bus-y += mdio_bus.o mdio_device.o + +ifdef CONFIG_MDIO_DEVICE +obj-y += mdio-boardinfo.o +endif + +# PHYLIB implies MDIO_DEVICE, in that case, we have a bunch of circular +# dependencies that make it impossible to split mdio-bus objects into a +# dedicated loadable module, so we bundle them all together into libphy.ko +ifdef CONFIG_PHYLIB +libphy-y += $(mdio-bus-y) +else +obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o +endif libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index ab9ad689617c78d19b21ad8f35b5f1887c483b1c..171010eb4d9c5c36da0be9888fb75cc54e136768 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Broadcom Corporation + * Copyright (C) 2015-2017 Broadcom * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -201,8 +201,7 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable) int val; /* Enable EEE at PHY level */ - val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, - MDIO_MMD_AN); + val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL); if (val < 0) return val; @@ -211,22 +210,19 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable) else val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X); - phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, - MDIO_MMD_AN, (u32)val); + phy_write_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL, (u32)val); /* Advertise EEE */ - val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, - MDIO_MMD_AN); + val = phy_read_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV); if (val < 0) return val; if (enable) - val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); + val |= (MDIO_EEE_100TX | MDIO_EEE_1000T); else - val &= ~(MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); + val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T); - phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV, - MDIO_MMD_AN, (u32)val); + phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val); return 0; } diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index d1c2614dad3a60860f33c0ff1851f18e0d1a4253..caa9f6e17f34cb54277a941a6ca81854ac0cf1ad 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -1,7 +1,7 @@ /* * Broadcom BCM7xxx internal transceivers support.
* - * Copyright (C) 2014, Broadcom Corporation + * Copyright (C) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -19,7 +19,7 @@ /* Broadcom BCM7xxx internal PHY registers */ -/* 40nm only register definitions */ +/* EPHY only register definitions */ #define MII_BCM7XXX_100TX_AUX_CTL 0x10 #define MII_BCM7XXX_100TX_FALSE_CAR 0x13 #define MII_BCM7XXX_100TX_DISC 0x14 @@ -27,6 +27,19 @@ #define MII_BCM7XXX_64CLK_MDIO BIT(12) #define MII_BCM7XXX_TEST 0x1f #define MII_BCM7XXX_SHD_MODE_2 BIT(2) +#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe +#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf +#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a +#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3 +#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6 +#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400 +#define MII_BCM7XXX_SHD_3_AN_STAT 0xb +#define MII_BCM7XXX_AN_NULL_MSG_EN BIT(0) +#define MII_BCM7XXX_AN_EEE_EN BIT(1) +#define MII_BCM7XXX_SHD_3_EEE_THRESH 0xe +#define MII_BCM7XXX_EEE_THRESH_DEF 0x50 +#define MII_BCM7XXX_SHD_3_TL4 0x23 +#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1)) /* 28nm only register definitions */ #define MISC_ADDR(base, channel) base, channel @@ -286,6 +299,181 @@ static int phy_set_clr_bits(struct phy_device *dev, int location, return v; } +static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev) +{ + int ret; + + /* set shadow mode 2 */ + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, + MII_BCM7XXX_SHD_MODE_2, 0); + if (ret < 0) + return ret; + + /* Set current trim values INT_trim = -1, Ext_trim =0 */ + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_BIAS_TRIM, 0x3BE0); + if (ret < 0) + goto reset_shadow_mode; + + /* Cal reset */ + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, + MII_BCM7XXX_SHD_3_TL4); + if (ret < 0) + goto reset_shadow_mode; + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, + MII_BCM7XXX_TL4_RST_MSK, 0); + if (ret < 0) + goto reset_shadow_mode; + + /* Cal reset disable */ + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, + MII_BCM7XXX_SHD_3_TL4); + if (ret < 0) + goto reset_shadow_mode; + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, + 0, MII_BCM7XXX_TL4_RST_MSK); + if (ret < 0) + goto reset_shadow_mode; + +reset_shadow_mode: + /* reset shadow mode 2 */ + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, + MII_BCM7XXX_SHD_MODE_2); + if (ret < 0) + return ret; + + return 0; +} + +/* The 28nm EPHY does not support Clause 45 (MMD) used by bcm-phy-lib */ +static int bcm7xxx_28nm_ephy_apd_enable(struct phy_device *phydev) +{ + int ret; + + /* set shadow mode 1 */ + ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST, + MII_BRCM_FET_BT_SRE, 0); + if (ret < 0) + return ret; + + /* Enable auto-power down */ + ret = phy_set_clr_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, + MII_BRCM_FET_SHDW_AS2_APDE, 0); + if (ret < 0) + return ret; + + /* reset shadow mode 1 */ + ret = phy_set_clr_bits(phydev, MII_BRCM_FET_BRCMTEST, 0, + MII_BRCM_FET_BT_SRE); + if (ret < 0) + return ret; + + return 0; +} + +static int bcm7xxx_28nm_ephy_eee_enable(struct phy_device *phydev) +{ + int ret; + + /* set shadow mode 2 */ + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, + MII_BCM7XXX_SHD_MODE_2, 0); + if (ret < 0) + return ret; + + /* Advertise supported modes */ + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, + MII_BCM7XXX_SHD_3_AN_EEE_ADV); + if (ret < 0) + goto reset_shadow_mode; + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, + MDIO_EEE_100TX); + if (ret < 0) + goto 
reset_shadow_mode; + + /* Restore Defaults */ + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, + MII_BCM7XXX_SHD_3_PCS_CTRL_2); + if (ret < 0) + goto reset_shadow_mode; + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, + MII_BCM7XXX_PCS_CTRL_2_DEF); + if (ret < 0) + goto reset_shadow_mode; + + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, + MII_BCM7XXX_SHD_3_EEE_THRESH); + if (ret < 0) + goto reset_shadow_mode; + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, + MII_BCM7XXX_EEE_THRESH_DEF); + if (ret < 0) + goto reset_shadow_mode; + + /* Enable EEE autonegotiation */ + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, + MII_BCM7XXX_SHD_3_AN_STAT); + if (ret < 0) + goto reset_shadow_mode; + ret = phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, + (MII_BCM7XXX_AN_NULL_MSG_EN | MII_BCM7XXX_AN_EEE_EN)); + if (ret < 0) + goto reset_shadow_mode; + +reset_shadow_mode: + /* reset shadow mode 2 */ + ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, + MII_BCM7XXX_SHD_MODE_2); + if (ret < 0) + return ret; + + /* Restart autoneg */ + phy_write(phydev, MII_BMCR, + (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_ANRESTART)); + + return 0; +} + +static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev) +{ + u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask; + int ret = 0; + + pr_info_once("%s: %s PHY revision: 0x%02x\n", + phydev_name(phydev), phydev->drv->name, rev); + + /* Dummy read to a register to work around a possible issue upon reset + * where the internal inverter may not allow the first MDIO transaction + * to pass the MDIO management controller and make us return 0xffff for + * such reads. + */ + phy_read(phydev, MII_BMSR); + + /* Apply AFE software work-around if necessary */ + if (rev == 0x01) { + ret = bcm7xxx_28nm_ephy_01_afe_config_init(phydev); + if (ret) + return ret; + } + + ret = bcm7xxx_28nm_ephy_eee_enable(phydev); + if (ret) + return ret; + + return bcm7xxx_28nm_ephy_apd_enable(phydev); +} + +static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev) +{ + int ret; + + /* Re-apply workarounds coming out of suspend/resume */ + ret = bcm7xxx_28nm_ephy_config_init(phydev); + if (ret) + return ret; + + return genphy_config_aneg(phydev); +} + static int bcm7xxx_config_init(struct phy_device *phydev) { int ret; @@ -434,6 +622,23 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .probe = bcm7xxx_28nm_probe, \ } +#define BCM7XXX_28NM_EPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_BASIC_FEATURES, \ + .flags = PHY_IS_INTERNAL, \ + .config_init = bcm7xxx_28nm_ephy_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .resume = bcm7xxx_28nm_ephy_resume, \ + .get_sset_count = bcm_phy_get_sset_count, \ + .get_strings = bcm_phy_get_strings, \ + .get_stats = bcm7xxx_28nm_get_phy_stats, \ + .probe = bcm7xxx_28nm_probe, \ +} + #define BCM7XXX_40NM_EPHY(_oui, _name) \ { \ .phy_id = (_oui), \ @@ -450,6 +655,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"), + BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"), + BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), @@ -466,6 +674,9 @@ static struct phy_driver bcm7xxx_driver[]
= { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7260, 0xfffffff0, }, + { PHY_ID_BCM7268, 0xfffffff0, }, + { PHY_ID_BCM7271, 0xfffffff0, }, { PHY_ID_BCM7278, 0xfffffff0, }, { PHY_ID_BCM7364, 0xfffffff0, }, { PHY_ID_BCM7366, 0xfffffff0, }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 9cd8b27d12923c78b6f68bb1bf7e2afe993b30a6..a32dc5d11e8956ebd66646420ed6915985b45491 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -74,27 +74,40 @@ static int bcm54612e_config_init(struct phy_device *phydev) return 0; } -static int bcm54810_config(struct phy_device *phydev) +static int bcm5481x_config(struct phy_device *phydev) { int rc, val; - val = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL); - val &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN; - rc = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL, - val); - if (rc < 0) - return rc; - + /* handling PHY's internal RX clock delay */ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); - val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; val |= MII_BCM54XX_AUXCTL_MISC_WREN; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + /* Disable RGMII RXC-RXD skew */ + val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + } + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + /* Enable RGMII RXC-RXD skew */ + val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN; + } rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val); if (rc < 0) return rc; + /* handling PHY's internal TX clock delay */ val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL); - val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + /* Disable internal TX clock delay */ + val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN; + } + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + /* Enable internal TX clock delay */ + val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN; + } rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val); if (rc < 0) return rc; @@ -244,7 +257,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev) static int bcm54xx_config_init(struct phy_device *phydev) { - int reg, err; + int reg, err, val; reg = phy_read(phydev, MII_BCM54XX_ECR); if (reg < 0) @@ -283,8 +296,14 @@ static int bcm54xx_config_init(struct phy_device *phydev) if (err) return err; } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) { - err = bcm54810_config(phydev); - if (err) + /* For BCM54810, we need to disable BroadR-Reach function */ + val = bcm_phy_read_exp(phydev, + BCM54810_EXP_BROADREACH_LRE_MISC_CTL); + val &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN; + err = bcm_phy_write_exp(phydev, + BCM54810_EXP_BROADREACH_LRE_MISC_CTL, + val); + if (err < 0) return err; } @@ -392,29 +411,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev) ret = genphy_config_aneg(phydev); /* Then we can set up the delay. */ - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - u16 reg; - - /* - * There is no BCM5481 specification available, so down - * here is everything we know about "register 0x18". This - * at least helps BCM5481 to successfully receive packets - * on MPC8360E-RDK board. 
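The bcm5481x_config() helper introduced above derives both internal clock-delay settings from the phy-mode alone. The RGMII convention it implements can be stated compactly; the helper below is an invented illustration of that convention, not driver code:

#include <linux/phy.h>

/* "rgmii" leaves both delays to the MAC or board traces, "rgmii-id"
 * enables both delays inside the PHY, and the -rxid/-txid variants
 * enable exactly one of them.
 */
static void rgmii_internal_delays(phy_interface_t mode,
				  bool *rx_delay, bool *tx_delay)
{
	*rx_delay = mode == PHY_INTERFACE_MODE_RGMII_ID ||
		    mode == PHY_INTERFACE_MODE_RGMII_RXID;
	*tx_delay = mode == PHY_INTERFACE_MODE_RGMII_ID ||
		    mode == PHY_INTERFACE_MODE_RGMII_TXID;
}

This is why the function both clears and sets MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN and BCM54810_SHD_CLK_CTL_GTXCLK_EN depending on the mode, rather than unconditionally disabling them as the old bcm54810_config() did.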
Peter Barada - * says: "This sets delay between the RXD and RXC signals - * instead of using trace lengths to achieve timing". - */ - - /* Set RDX clk delay. */ - reg = 0x7 | (0x7 << 12); - phy_write(phydev, 0x18, reg); - - reg = phy_read(phydev, 0x18); - /* Set RDX-RXC skew. */ - reg |= (1 << 8); - /* Write bits 14:0. */ - reg |= (1 << 15); - phy_write(phydev, 0x18, reg); - } + bcm5481x_config(phydev); if (of_property_read_bool(np, "enet-phy-lane-swap")) { /* Lane Swap - Undocumented register...magic! */ diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 19865530e0b13c24e5d74def6cbd55e77b0ad378..b57f20e552ba83d0a0e39ef0593229b7ccd874d5 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -133,14 +133,14 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev) (struct dp83867_private *)phydev->priv; u16 val; - val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR); + val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN) val |= DP83867_CFG4_PORT_MIRROR_EN; else val &= ~DP83867_CFG4_PORT_MIRROR_EN; - phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val); + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); return 0; } @@ -231,8 +231,7 @@ static int dp83867_config_init(struct phy_device *phydev) * register's bit 11 (marked as RESERVED). */ - bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1, - DP83867_DEVADDR); + bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS1); if (bs & DP83867_STRAP_STS1_RESERVED) val &= ~DP83867_PHYCR_RESERVED_MASK; @@ -243,8 +242,7 @@ static int dp83867_config_init(struct phy_device *phydev) if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { - val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, - DP83867_DEVADDR); + val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN); @@ -255,25 +253,24 @@ static int dp83867_config_init(struct phy_device *phydev) if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) val |= DP83867_RGMII_RX_CLK_DELAY_EN; - phy_write_mmd_indirect(phydev, DP83867_RGMIICTL, - DP83867_DEVADDR, val); + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val); delay = (dp83867->rx_id_delay | (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT)); - phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL, - DP83867_DEVADDR, delay); + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL, + delay); if (dp83867->io_impedance >= 0) { - val = phy_read_mmd_indirect(phydev, DP83867_IO_MUX_CFG, - DP83867_DEVADDR); + val = phy_read_mmd(phydev, DP83867_DEVADDR, + DP83867_IO_MUX_CFG); val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL; val |= dp83867->io_impedance & DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL; - phy_write_mmd_indirect(phydev, DP83867_IO_MUX_CFG, - DP83867_DEVADDR, val); + phy_write_mmd(phydev, DP83867_DEVADDR, + DP83867_IO_MUX_CFG, val); } } diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index b1fd7bb0e4dbebe2da6f7f1b13857b1b64fa76f9..55f8c52dd2f1b719b98950125c901eadc8383777 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -166,13 +166,13 @@ static int xway_gphy_config_init(struct phy_device *phydev) /* Clear all pending interrupts */ phy_read(phydev, XWAY_MDIO_ISTAT); - phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2, - 
XWAY_MMD_LEDCH_NACS_NONE | - XWAY_MMD_LEDCH_SBF_F02HZ | - XWAY_MMD_LEDCH_FBF_F16HZ); - phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2, - XWAY_MMD_LEDCH_CBLINK_NONE | - XWAY_MMD_LEDCH_SCAN_NONE); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH, + XWAY_MMD_LEDCH_NACS_NONE | + XWAY_MMD_LEDCH_SBF_F02HZ | + XWAY_MMD_LEDCH_FBF_F16HZ); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCL, + XWAY_MMD_LEDCH_CBLINK_NONE | + XWAY_MMD_LEDCH_SCAN_NONE); /** * In most cases only one LED is connected to this phy, so @@ -183,12 +183,12 @@ static int xway_gphy_config_init(struct phy_device *phydev) ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX; ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT | XWAY_MMD_LEDxL_BLINKS_NONE; - phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh); - phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl); - phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh); - phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl); - phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh); - phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED0H, ledxh); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED0L, ledxl); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED1H, ledxh); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED1L, ledxl); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh); + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl); return 0; } diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 8c73b2e771ddd7c865900ceed1d9abdcefff8a21..34395230ce709bd68ba93ca46eff089bb576668f 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -1,7 +1,7 @@ /* * Broadcom UniMAC MDIO bus controller driver * - * Copyright (C) 2014, Broadcom Corporation + * Copyright (C) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -228,6 +228,7 @@ static int unimac_mdio_remove(struct platform_device *pdev) } static const struct of_device_id unimac_mdio_ids[] = { + { .compatible = "brcm,genet-mdio-v5", }, { .compatible = "brcm,genet-mdio-v4", }, { .compatible = "brcm,genet-mdio-v3", }, { .compatible = "brcm,genet-mdio-v2", }, diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 61941e29daae85abe7cc8a5726d669eae3d29137..1861f387820d61d527ed552e5ddb24c0009234b3 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -24,10 +24,12 @@ static DEFINE_MUTEX(mdio_board_lock); * @mdiodev: MDIO device pointer * Context: can sleep */ -void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus) +void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, + int (*cb) + (struct mii_bus *bus, + struct mdio_board_info *bi)) { struct mdio_board_entry *be; - struct mdio_device *mdiodev; struct mdio_board_info *bi; int ret; @@ -38,23 +40,14 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus) if (strcmp(bus->id, bi->bus_id)) continue; - mdiodev = mdio_device_create(bus, bi->mdio_addr); - if (IS_ERR(mdiodev)) + ret = cb(bus, bi); + if (ret) continue; - strncpy(mdiodev->modalias, bi->modalias, - sizeof(mdiodev->modalias)); - mdiodev->bus_match = mdio_device_bus_match; - mdiodev->dev.platform_data = (void *)bi->platform_data; - - ret 
= mdio_device_register(mdiodev); - if (ret) { - mdio_device_free(mdiodev); - continue; - } } mutex_unlock(&mdio_board_lock); } +EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info); /** * mdio_register_board_info - register MDIO devices for a given board diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h index 00f98163e90eff985e24bf95e8fd7293d08b0763..3a7f143904e8c5948f94b1f570b9310368855ac8 100644 --- a/drivers/net/phy/mdio-boardinfo.h +++ b/drivers/net/phy/mdio-boardinfo.h @@ -14,6 +14,9 @@ struct mdio_board_entry { struct mdio_board_info board_info; }; -void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus); +void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, + int (*cb) + (struct mii_bus *bus, + struct mdio_board_info *bi)); #endif /* __MDIO_BOARD_INFO_H */ diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index f095051beb549133db52cf3acd41ad9e3dbed29c..3e2ac07b6e372322c53125014cbac8b40ea1434f 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -229,7 +229,7 @@ static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id, val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) | SET_VAL(HSTMIIMWRDAT, data); - xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data); + xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE); xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index fa7d51f14869efa8b94ce161e5bd4cb96b4951d3..a898e5c4ef1b465768c3216fc1985382c23eba89 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -22,8 +22,11 @@ #include #include #include +#include +#include #include #include +#include #include #include #include @@ -289,6 +292,36 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio, } #endif +/** + * mdiobus_create_device - create a full MDIO device given + * a mdio_board_info structure + * @bus: MDIO bus to create the devices on + * @bi: mdio_board_info structure describing the devices + * + * Returns 0 on success or < 0 on error.
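+ *
+ * A minimal sketch of the producer side of this path (bus and device
+ * names are hypothetical): a board file pre-declares the device, and
+ * this helper instantiates it once the matching bus is registered:
+ *
+ *	static const struct mdio_board_info bi = {
+ *		.bus_id = "unimac-mdio-0",
+ *		.modalias = "example-phy",
+ *		.mdio_addr = 3,
+ *	};
+ *
+ *	mdio_register_board_info(&bi, 1);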
+ */ +static int mdiobus_create_device(struct mii_bus *bus, + struct mdio_board_info *bi) +{ + struct mdio_device *mdiodev; + int ret = 0; + + mdiodev = mdio_device_create(bus, bi->mdio_addr); + if (IS_ERR(mdiodev)) + return -ENODEV; + + strncpy(mdiodev->modalias, bi->modalias, + sizeof(mdiodev->modalias)); + mdiodev->bus_match = mdio_device_bus_match; + mdiodev->dev.platform_data = (void *)bi->platform_data; + + ret = mdio_device_register(mdiodev); + if (ret) + mdio_device_free(mdiodev); + + return ret; +} + /** * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus @@ -307,6 +340,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) { struct mdio_device *mdiodev; int i, err; + struct gpio_desc *gpiod; if (NULL == bus || NULL == bus->name || NULL == bus->read || NULL == bus->write) @@ -333,6 +367,35 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) if (bus->reset) bus->reset(bus); + /* de-assert bus level PHY GPIO resets */ + if (bus->num_reset_gpios > 0) { + bus->reset_gpiod = devm_kcalloc(&bus->dev, + bus->num_reset_gpios, + sizeof(struct gpio_desc *), + GFP_KERNEL); + if (!bus->reset_gpiod) + return -ENOMEM; + } + + for (i = 0; i < bus->num_reset_gpios; i++) { + gpiod = devm_gpiod_get_index(&bus->dev, "reset", i, + GPIOD_OUT_LOW); + if (IS_ERR(gpiod)) { + err = PTR_ERR(gpiod); + if (err != -ENOENT) { + dev_err(&bus->dev, + "mii_bus %s couldn't get reset GPIO\n", + bus->id); + return err; + } + } else { + bus->reset_gpiod[i] = gpiod; + gpiod_set_value_cansleep(gpiod, 1); + udelay(bus->reset_delay_us); + gpiod_set_value_cansleep(gpiod, 0); + } + } + for (i = 0; i < PHY_MAX_ADDR; i++) { if ((bus->phy_mask & (1 << i)) == 0) { struct phy_device *phydev; @@ -345,7 +408,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) } } - mdiobus_setup_mdiodev_from_board_info(bus); + mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); bus->state = MDIOBUS_REGISTERED; pr_info("%s: probed\n", bus->name); @@ -360,6 +423,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } + + /* Put PHYs in RESET to save power */ + for (i = 0; i < bus->num_reset_gpios; i++) { + if (bus->reset_gpiod[i]) + gpiod_set_value_cansleep(bus->reset_gpiod[i], 1); + } + device_del(&bus->dev); return err; } @@ -381,6 +451,13 @@ void mdiobus_unregister(struct mii_bus *bus) mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } + + /* Put PHYs in RESET to save power */ + for (i = 0; i < bus->num_reset_gpios; i++) { + if (bus->reset_gpiod[i]) + gpiod_set_value_cansleep(bus->reset_gpiod[i], 1); + } + device_del(&bus->dev); } EXPORT_SYMBOL(mdiobus_unregister); @@ -648,9 +725,18 @@ int __init mdio_bus_init(void) return ret; } +EXPORT_SYMBOL_GPL(mdio_bus_init); +#if IS_ENABLED(CONFIG_PHYLIB) void mdio_bus_exit(void) { class_unregister(&mdio_bus_class); bus_unregister(&mdio_bus_type); } +EXPORT_SYMBOL_GPL(mdio_bus_exit); +#else +module_init(mdio_bus_init); +/* no module_exit, intentional */ +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MDIO bus/device layer"); +#endif diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index da5b392683703b9ece67a24ebcb59aadeba7cc8e..6a5fd18f062c4ea400bc8036d787a94fe5108a34 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -626,8 +626,7 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev) * MMD extended PHY registers. 
*/ static int -ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, - int regnum) +ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum) { return -1; } @@ -635,10 +634,10 @@ ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, /* This routine does nothing since the Micrel ksz9021 does not support * standard IEEE MMD extended PHY registers. */ -static void -ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, - int regnum, u32 val) +static int +ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val) { + return -1; } static int kszphy_get_sset_count(struct phy_device *phydev) @@ -946,8 +945,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, - .read_mmd_indirect = ksz9021_rd_mmd_phyreg, - .write_mmd_indirect = ksz9021_wr_mmd_phyreg, + .read_mmd = ksz9021_rd_mmd_phyreg, + .write_mmd = ksz9021_wr_mmd_phyreg, }, { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = MICREL_PHY_ID_MASK, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 324fbf6ad8ff8fcc51e92cd2f4d26399b00dfc41..2b2f543cf9f030dbea5b2c478f6b8930f5fa520e 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -78,9 +78,8 @@ static int lan88xx_probe(struct phy_device *phydev) priv->wolopts = 0; /* these values can be used to identify internal PHY */ - priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID, 3); - priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV, - 3); + priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID); + priv->chip_rev = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV); phydev->priv = priv; diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c new file mode 100644 index 0000000000000000000000000000000000000000..6739b738bbaf3277153015a7a91707c75a92f085 --- /dev/null +++ b/drivers/net/phy/phy-core.c @@ -0,0 +1,101 @@ +/* + * Core PHY library, taken from phy.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include + +static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, + u16 regnum) +{ + /* Write the desired MMD Devad */ + bus->write(bus, phy_addr, MII_MMD_CTRL, devad); + + /* Write the desired MMD register address */ + bus->write(bus, phy_addr, MII_MMD_DATA, regnum); + + /* Select the Function : DATA with no post increment */ + bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR); +} + +/** + * phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. 
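+ *
+ * The access method is chosen at run time: the driver's read_mmd hook
+ * when one is provided, a direct Clause 45 bus address when the PHY is
+ * a C45 device, and the Clause 22 register 13/14 (MII_MMD_CTRL /
+ * MII_MMD_DATA) indirection otherwise.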
+ * @phydev: The phy_device struct + * @devad: The MMD to read from (0..31) + * @regnum: The register on the MMD to read (0..65535) + * + * Same rules as for phy_read(); + */ +int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) +{ + int val; + + if (regnum > (u16)~0 || devad > 32) + return -EINVAL; + + if (phydev->drv->read_mmd) { + val = phydev->drv->read_mmd(phydev, devad, regnum); + } else if (phydev->is_c45) { + u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); + + val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr); + } else { + struct mii_bus *bus = phydev->mdio.bus; + int phy_addr = phydev->mdio.addr; + + mutex_lock(&bus->mdio_lock); + mmd_phy_indirect(bus, phy_addr, devad, regnum); + + /* Read the content of the MMD's selected register */ + val = bus->read(bus, phy_addr, MII_MMD_DATA); + mutex_unlock(&bus->mdio_lock); + } + return val; +} +EXPORT_SYMBOL(phy_read_mmd); + +/** + * phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to write to + * @regnum: The register on the MMD to write + * @val: value to write to @regnum + * + * Same rules as for phy_write(); + */ +int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) +{ + int ret; + + if (regnum > (u16)~0 || devad > 32) + return -EINVAL; + + if (phydev->drv->write_mmd) { + ret = phydev->drv->write_mmd(phydev, devad, regnum, val); + } else if (phydev->is_c45) { + u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); + + ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, + addr, val); + } else { + struct mii_bus *bus = phydev->mdio.bus; + int phy_addr = phydev->mdio.addr; + + mutex_lock(&bus->mdio_lock); + mmd_phy_indirect(bus, phy_addr, devad, regnum); + + /* Write the data into MMD's selected register */ + bus->write(bus, phy_addr, MII_MMD_DATA, val); + mutex_unlock(&bus->mdio_lock); + + ret = 0; + } + return ret; +} +EXPORT_SYMBOL(phy_write_mmd); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 97ff1278167bc455af890e9b6c16d7b9d659ea57..82ab8fb82587553fefc1d05bbff06d4a76bc9679 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -50,8 +50,22 @@ static const char *phy_speed_to_str(int speed) return "1Gbps"; case SPEED_2500: return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; case SPEED_10000: return "10Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; + case SPEED_100000: + return "100Gbps"; case SPEED_UNKNOWN: return "Unknown"; default: @@ -162,7 +176,9 @@ struct phy_setting { u32 setting; }; -/* A mapping of all SUPPORTED settings to speed/duplex */ +/* A mapping of all SUPPORTED settings to speed/duplex. This table + * must be grouped by speed and sorted in descending match priority + * - iow, descending speed. */ static const struct phy_setting settings[] = { { .speed = SPEED_10000, @@ -221,45 +237,70 @@ static const struct phy_setting settings[] = { }, }; -#define MAX_NUM_SETTINGS ARRAY_SIZE(settings) - /** - * phy_find_setting - find a PHY settings array entry that matches speed & duplex + * phy_lookup_setting - lookup a PHY setting * @speed: speed to match * @duplex: duplex to match + * @features: allowed link modes + * @exact: an exact match is required + * + * Search the settings array for a setting that matches the speed and + * duplex, and which is supported.
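+ *
+ * For example, with @exact unset, a lookup of 1000/full against a
+ * feature mask that only allows the 100baseT modes falls back to the
+ * 100/full entry; with @exact set, the same lookup returns %NULL.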
* - * Description: Searches the settings array for the setting which - * matches the desired speed and duplex, and returns the index - * of that setting. Returns the index of the last setting if - * none of the others match. + * If @exact is set, either an exact match or %NULL for no match will + * be returned. + * + * If @exact is unset, an exact match, the fastest supported setting at + * or below the specified speed, the slowest supported setting, or if + * they all fail, %NULL will be returned. */ -static inline unsigned int phy_find_setting(int speed, int duplex) +static const struct phy_setting * +phy_lookup_setting(int speed, int duplex, u32 features, bool exact) { - unsigned int idx = 0; + const struct phy_setting *p, *match = NULL, *last = NULL; + int i; + + for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { + if (p->setting & features) { + last = p; + if (p->speed == speed && p->duplex == duplex) { + /* Exact match for speed and duplex */ + match = p; + break; + } else if (!exact) { + if (!match && p->speed <= speed) + /* Candidate */ + match = p; + + if (p->speed < speed) + break; + } + } + } - while (idx < ARRAY_SIZE(settings) && - (settings[idx].speed != speed || settings[idx].duplex != duplex)) - idx++; + if (!match && !exact) + match = last; - return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1; + return match; } /** - * phy_find_valid - find a PHY setting that matches the requested features mask - * @idx: The first index in settings[] to search - * @features: A mask of the valid settings + * phy_find_valid - find a PHY setting that matches the requested parameters + * @speed: desired speed + * @duplex: desired duplex + * @supported: mask of supported link modes * - * Description: Returns the index of the first valid setting less - * than or equal to the one pointed to by idx, as determined by - * the mask in features. Returns the index of the last setting - * if nothing else matches. + * Locate a supported phy setting that is, in priority order: + * - an exact match for the specified speed and duplex mode + * - a match for the specified speed, or slower speed + * - the slowest supported speed + * Returns the matched phy_setting entry, or %NULL if no supported phy + * settings were found. */ -static inline unsigned int phy_find_valid(unsigned int idx, u32 features) +static const struct phy_setting * +phy_find_valid(int speed, int duplex, u32 supported) { - while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) - idx++; - - return idx < MAX_NUM_SETTINGS ?
idx : MAX_NUM_SETTINGS - 1; + return phy_lookup_setting(speed, duplex, supported, false); } /** @@ -279,20 +320,11 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int count = 0; unsigned int idx = 0; - while (idx < MAX_NUM_SETTINGS && count < size) { - idx = phy_find_valid(idx, phy->supported); - - if (!(settings[idx].setting & phy->supported)) - break; - + for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++) /* Assumes settings are grouped by speed */ - if ((count == 0) || - (speeds[count - 1] != settings[idx].speed)) { - speeds[count] = settings[idx].speed; - count++; - } - idx++; - } + if ((settings[idx].setting & phy->supported) && + (count == 0 || speeds[count - 1] != settings[idx].speed)) + speeds[count++] = settings[idx].speed; return count; } @@ -308,12 +340,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy, */ static inline bool phy_check_valid(int speed, int duplex, u32 features) { - unsigned int idx; - - idx = phy_find_valid(phy_find_setting(speed, duplex), features); - - return settings[idx].speed == speed && settings[idx].duplex == duplex && - (settings[idx].setting & features); + return !!phy_lookup_setting(speed, duplex, features, true); } /** @@ -326,18 +353,22 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features) */ static void phy_sanitize_settings(struct phy_device *phydev) { + const struct phy_setting *setting; u32 features = phydev->supported; - unsigned int idx; /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) phydev->autoneg = AUTONEG_DISABLE; - idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex), - features); - - phydev->speed = settings[idx].speed; - phydev->duplex = settings[idx].duplex; + setting = phy_find_valid(phydev->speed, phydev->duplex, features); + if (setting) { + phydev->speed = setting->speed; + phydev->duplex = setting->duplex; + } else { + /* We failed to find anything (no supported speeds?) */ + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + } } /** @@ -1224,91 +1255,6 @@ void phy_mac_interrupt(struct phy_device *phydev, int new_link) } EXPORT_SYMBOL(phy_mac_interrupt); -static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, - int addr) -{ - /* Write the desired MMD Devad */ - bus->write(bus, addr, MII_MMD_CTRL, devad); - - /* Write the desired MMD register address */ - bus->write(bus, addr, MII_MMD_DATA, prtad); - - /* Select the Function : DATA with no post increment */ - bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); -} - -/** - * phy_read_mmd_indirect - reads data from the MMD registers - * @phydev: The PHY device bus - * @prtad: MMD Address - * @devad: MMD DEVAD - * - * Description: it reads data from the MMD registers (clause 22 to access to - * clause 45) of the specified phy address. 
- To read these register we have: - 1) Write reg 13 // DEVAD - 2) Write reg 14 // MMD Address - 3) Write reg 13 // MMD Data Command for MMD DEVAD - 3) Read reg 14 // Read MMD data - */ -int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad) -{ - struct phy_driver *phydrv = phydev->drv; - int addr = phydev->mdio.addr; - int value = -1; - - if (!phydrv->read_mmd_indirect) { - struct mii_bus *bus = phydev->mdio.bus; - - mutex_lock(&bus->mdio_lock); - mmd_phy_indirect(bus, prtad, devad, addr); - - /* Read the content of the MMD's selected register */ - value = bus->read(bus, addr, MII_MMD_DATA); - mutex_unlock(&bus->mdio_lock); - } else { - value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr); - } - return value; -} -EXPORT_SYMBOL(phy_read_mmd_indirect); - -/** - * phy_write_mmd_indirect - writes data to the MMD registers - * @phydev: The PHY device - * @prtad: MMD Address - * @devad: MMD DEVAD - * @data: data to write in the MMD register - * - * Description: Write data from the MMD registers of the specified - * phy address. - * To write these register we have: - * 1) Write reg 13 // DEVAD - * 2) Write reg 14 // MMD Address - * 3) Write reg 13 // MMD Data Command for MMD DEVAD - * 3) Write reg 14 // Write MMD data - */ -void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, u32 data) -{ - struct phy_driver *phydrv = phydev->drv; - int addr = phydev->mdio.addr; - - if (!phydrv->write_mmd_indirect) { - struct mii_bus *bus = phydev->mdio.bus; - - mutex_lock(&bus->mdio_lock); - mmd_phy_indirect(bus, prtad, devad, addr); - - /* Write the data into MMD's selected register */ - bus->write(bus, addr, MII_MMD_DATA, data); - mutex_unlock(&bus->mdio_lock); - } else { - phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data); - } -} -EXPORT_SYMBOL(phy_write_mmd_indirect); - /** * phy_init_eee - init and check the EEE feature * @phydev: target phy_device struct @@ -1325,15 +1271,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) return -EIO; /* According to 802.3az, the EEE is supported only in full duplex-mode. - * Also EEE feature is active when core is operating with MII, GMII - * or RGMII (all kinds). Internal PHYs are also allowed to proceed and - * should return an error if they do not support EEE. */ - if ((phydev->duplex == DUPLEX_FULL) && - ((phydev->interface == PHY_INTERFACE_MODE_MII) || - (phydev->interface == PHY_INTERFACE_MODE_GMII) || - phy_interface_is_rgmii(phydev) || - phy_is_internal(phydev))) { + if (phydev->duplex == DUPLEX_FULL) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; int status; @@ -1344,8 +1283,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) return status; /* First check if the EEE ability is supported */ - eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, - MDIO_MMD_PCS); + eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); if (eee_cap <= 0) goto eee_exit_err; @@ -1356,13 +1294,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* Check which link settings negotiated and verify it in * the EEE advertising registers.
*/ - eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, - MDIO_MMD_AN); + eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); if (eee_lp <= 0) goto eee_exit_err; - eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, - MDIO_MMD_AN); + eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); if (eee_adv <= 0) goto eee_exit_err; @@ -1375,14 +1311,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* Configure the PHY to stop receiving xMII * clock while it is signaling LPI. */ - int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1, - MDIO_MMD_PCS); + int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); if (val < 0) return val; val |= MDIO_PCS_CTRL1_CLKSTOP_EN; - phy_write_mmd_indirect(phydev, MDIO_CTRL1, - MDIO_MMD_PCS, val); + phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val); } return 0; /* EEE supported */ @@ -1404,7 +1338,7 @@ int phy_get_eee_err(struct phy_device *phydev) if (!phydev->drv) return -EIO; - return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS); + return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR); } EXPORT_SYMBOL(phy_get_eee_err); @@ -1424,19 +1358,19 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) return -EIO; /* Get Supported EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS); + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); if (val < 0) return val; data->supported = mmd_eee_cap_to_ethtool_sup_t(val); /* Get advertisement EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN); + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); if (val < 0) return val; data->advertised = mmd_eee_adv_to_ethtool_adv_t(val); /* Get LP advertisement EEE */ - val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN); + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); if (val < 0) return val; data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); @@ -1454,15 +1388,37 @@ EXPORT_SYMBOL(phy_ethtool_get_eee); */ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { - int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); + int cap, old_adv, adv, ret; if (!phydev->drv) return -EIO; + /* Get Supported EEE */ + cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + if (cap < 0) + return cap; + + old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); + if (old_adv < 0) + return old_adv; + + adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap; + /* Mask prohibited EEE modes */ - val &= ~phydev->eee_broken_modes; + adv &= ~phydev->eee_broken_modes; - phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val); + if (old_adv != adv) { + ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); + if (ret < 0) + return ret; + + /* Restart autonegotiation so the new modes get sent to the + * link partner. + */ + ret = genphy_restart_aneg(phydev); + if (ret < 0) + return ret; + } return 0; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 5198ccfa347f8b4bfb5ee5e0c69ee12fb44ec681..1219eeab69d1ff4b94fceb942e38c9b9e989ae40 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1217,7 +1217,7 @@ static int genphy_config_eee_advert(struct phy_device *phydev) * supported by the phy. 
If we read 0, EEE is not advertised. * In both cases, we don't need to continue */ - adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN); + adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); if (adv <= 0) return 0; @@ -1228,7 +1228,7 @@ static int genphy_config_eee_advert(struct phy_device *phydev) if (old_adv == adv) return 0; - phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv); + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); return 1; } diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index fb32eaf2255d84a7de2842f60f91b52cf17c1717..cef6967b039617fdd909e783ca479a8241eebd88 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 85c01247f2e3858f33e901de1a37d3eaae0941d7..6c5d5ef46f75aa9a9089ac80bbee30a7f579b016 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2476,7 +2476,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) goto team_put; } err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX, - nl_option, team_nl_option_policy); + nl_option, team_nl_option_policy, + info->extack); if (err) goto team_put; if (!opt_attrs[TEAM_ATTR_OPTION_NAME] || diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cc88cd7856f5e5ec4d3e3e309cbefe196c5b27ec..bbd707b9ef7a6a305804ed0d56c3fc0e1db7d565 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2444,18 +2444,16 @@ static struct miscdevice tun_miscdev = { /* ethtool interface */ -static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - cmd->supported = 0; - cmd->advertising = 0; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_TP; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; +static int tun_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_TP; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -2518,7 +2516,6 @@ static int tun_set_coalesce(struct net_device *dev, } static const struct ethtool_ops tun_ethtool_ops = { - .get_settings = tun_get_settings, .get_drvinfo = tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, @@ -2526,6 +2523,7 @@ static const struct ethtool_ops tun_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, + .get_link_ksettings = tun_get_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 0dd510604118bc8c26c5ec9a84410edbe16a4d8d..a3aa0a27dfe56b22121a0571cc4eaca1b2bbee03 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -136,9 +136,9 @@ static const struct ethtool_ops ax88172_ethtool_ops = { .get_eeprom_len = asix_get_eeprom_len, .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, };
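+/* The {get,set}_link_ksettings pairs above replace the legacy
+ * {get,set}_settings callbacks. The field mapping is mechanical, as the
+ * tun.c conversion above shows, e.g.:
+ *
+ *	ethtool_cmd_speed_set(cmd, SPEED_10);  ->  cmd->base.speed = SPEED_10;
+ *	cmd->duplex = DUPLEX_FULL;             ->  cmd->base.duplex = DUPLEX_FULL;
+ */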
static void ax88172_set_multicast(struct net_device *net) @@ -206,6 +206,7 @@ static const struct net_device_ops ax88172_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = asix_ioctl, @@ -301,9 +302,9 @@ static const struct ethtool_ops ax88772_ethtool_ops = { .get_eeprom_len = asix_get_eeprom_len, .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static int ax88772_link_reset(struct usbnet *dev) @@ -591,6 +592,7 @@ static const struct net_device_ops ax88772_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = asix_ioctl, @@ -775,9 +777,9 @@ static const struct ethtool_ops ax88178_ethtool_ops = { .get_eeprom_len = asix_get_eeprom_len, .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static int marvell_phy_init(struct usbnet *dev) @@ -1044,6 +1046,7 @@ static const struct net_device_ops ax88178_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = asix_set_multicast, diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 6308386b09dfeafcb12c7912b06e8acf6aaff69b..501576f538546392381471da43f0d2897df243bd 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -143,6 +143,7 @@ static const struct net_device_ops ax88172a_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = ax88172a_ioctl, diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index a3a7db0702d8d7ece5d4999ce2426bef316f0e06..51cf60092a18e33924f52c568d91a33f30828b21 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -620,16 +620,18 @@ ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom, return 0; } -static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd) +static int ax88179_get_link_ksettings(struct net_device *net, + struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); - return mii_ethtool_gset(&dev->mii, cmd); + return mii_ethtool_get_link_ksettings(&dev->mii, cmd); } -static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd) +static int ax88179_set_link_ksettings(struct net_device *net, + const struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); - return mii_ethtool_sset(&dev->mii, cmd); + return mii_ethtool_set_link_ksettings(&dev->mii, cmd); 
} static int @@ -826,11 +828,11 @@ static const struct ethtool_ops ax88179_ethtool_ops = { .set_wol = ax88179_set_wol, .get_eeprom_len = ax88179_get_eeprom_len, .get_eeprom = ax88179_get_eeprom, - .get_settings = ax88179_get_settings, - .set_settings = ax88179_set_settings, .get_eee = ax88179_get_eee, .set_eee = ax88179_set_eee, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = ax88179_get_link_ksettings, + .set_link_ksettings = ax88179_set_link_ksettings, }; static void ax88179_set_multicast(struct net_device *net) @@ -957,6 +959,7 @@ static const struct net_device_ops ax88179_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_change_mtu = ax88179_change_mtu, .ndo_set_mac_address = ax88179_set_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 0acc9b640419a2e94bc9a2d3d43a5a2a65800c8b..fce92f0e5abd56ba863a297ebb9eb6ea25ff37b1 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -688,29 +688,34 @@ static void catc_get_drvinfo(struct net_device *dev, usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info)); } -static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int catc_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; - cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; - cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = PORT_TP; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; + cmd->base.port = PORT_TP; + cmd->base.phy_address = 0; + cmd->base.autoneg = AUTONEG_DISABLE; + return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, - .get_settings = catc_get_settings, - .get_link = ethtool_op_get_link + .get_link = ethtool_op_get_link, + .get_link_ksettings = catc_get_link_ksettings, }; /* diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 3a98f3762a4c81debc023d04b9c01876c6cbdc5f..a6b997cffd3b6e4c4f039a3d3dee5f386bb17e48 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -100,6 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_change_mtu = cdc_ncm_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f317984f75360141ee606b8ca31b78baa853a1d5..bb3f71f9fbde06ef1a8d514dbb856c8aa7ff5130 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -131,8 +131,6 @@ static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 s static void 
cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx); static const struct ethtool_ops cdc_ncm_ethtool_ops = { - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, @@ -142,6 +140,8 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = { .get_sset_count = cdc_ncm_get_sset_count, .get_strings = cdc_ncm_get_strings, .get_ethtool_stats = cdc_ncm_get_ethtool_stats, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx) @@ -753,6 +753,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_change_mtu = cdc_ncm_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 0b4bdd39106b0a73e954070a42ac87be22ac1821..b91f92e4e5f22d659d89c35d8accc6ef03191b74 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -281,9 +281,9 @@ static const struct ethtool_ops dm9601_ethtool_ops = { .set_msglevel = usbnet_set_msglevel, .get_eeprom_len = dm9601_get_eeprom_len, .get_eeprom = dm9601_get_eeprom, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static void dm9601_set_multicast(struct net_device *net) @@ -343,6 +343,7 @@ static const struct net_device_ops dm9601_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = dm9601_ioctl, .ndo_set_rx_mode = dm9601_set_multicast, diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c index 4ff70b22c6eec0e0516373b02b66c71fae158028..5a43b77a6b9c60ef4f2b667ae0ffaa86aa65d4bd 100644 --- a/drivers/net/usb/int51x1.c +++ b/drivers/net/usb/int51x1.c @@ -144,6 +144,7 @@ static const struct net_device_ops int51x1_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = int51x1_set_multicast, diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 2a2c3edb6bad0b3bd257c3a101d100ad3b00cc59..37fb621fde86cb6f483151c4ffa54e76ff0bd319 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -245,8 +245,6 @@ struct kaweth_device __u16 packet_filter_bitmap; struct kaweth_ethernet_configuration configuration; - - struct net_device_stats stats; }; /**************************************************************** @@ -598,7 +596,7 @@ static void kaweth_usb_receive(struct urb *urb) struct sk_buff *skb; if (unlikely(status == -EPIPE)) { - kaweth->stats.rx_errors++; + net->stats.rx_errors++; kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -EPIPE.\n"); @@ -613,12 +611,12 @@ static void kaweth_usb_receive(struct urb *urb) } if (unlikely(status == -EPROTO || status == -ETIME || status == -EILSEQ)) { - kaweth->stats.rx_errors++; + net->stats.rx_errors++; dev_dbg(dev, "Status was -EPROTO, 
-ETIME, or -EILSEQ.\n"); return; } if (unlikely(status == -EOVERFLOW)) { - kaweth->stats.rx_errors++; + net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } spin_lock(&kaweth->device_lock); @@ -663,8 +661,8 @@ static void kaweth_usb_receive(struct urb *urb) netif_rx(skb); - kaweth->stats.rx_packets++; - kaweth->stats.rx_bytes += pkt_len; + net->stats.rx_packets++; + net->stats.rx_bytes += pkt_len; } kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); @@ -804,7 +802,7 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb, /* We now decide whether we can put our special header into the sk_buff */ if (skb_cow_head(skb, 2)) { - kaweth->stats.tx_errors++; + net->stats.tx_errors++; netif_start_queue(net); spin_unlock_irq(&kaweth->device_lock); dev_kfree_skb_any(skb); @@ -828,15 +826,15 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb, { dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res); skip: - kaweth->stats.tx_errors++; + net->stats.tx_errors++; netif_start_queue(net); dev_kfree_skb_irq(skb); } else { - kaweth->stats.tx_packets++; - kaweth->stats.tx_bytes += skb->len; + net->stats.tx_packets++; + net->stats.tx_bytes += skb->len; } spin_unlock_irq(&kaweth->device_lock); @@ -905,15 +903,6 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth) } } -/**************************************************************** - * kaweth_netdev_stats - ****************************************************************/ -static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev) -{ - struct kaweth_device *kaweth = netdev_priv(dev); - return &kaweth->stats; -} - /**************************************************************** * kaweth_tx_timeout ****************************************************************/ @@ -922,7 +911,7 @@ static void kaweth_tx_timeout(struct net_device *net) struct kaweth_device *kaweth = netdev_priv(net); dev_warn(&net->dev, "%s: Tx timed out. 
Resetting.\n", net->name); - kaweth->stats.tx_errors++; + net->stats.tx_errors++; netif_trans_update(net); usb_unlink_urb(kaweth->tx_urb); @@ -975,7 +964,6 @@ static const struct net_device_ops kaweth_netdev_ops = { .ndo_start_xmit = kaweth_start_xmit, .ndo_tx_timeout = kaweth_tx_timeout, .ndo_set_rx_mode = kaweth_set_rx_mode, - .ndo_get_stats = kaweth_netdev_stats, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 636f48f19d1eacae67c050de4fc3e651bffdf825..9eff97a650ae5e3056f20e8c0f1bb717df4773ab 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -1952,10 +1953,10 @@ static int lan8835_fixup(struct phy_device *phydev) struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */ - buf = phy_read_mmd_indirect(phydev, 0x8010, 3); + buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010); buf &= ~0x1800; buf |= 0x0800; - phy_write_mmd_indirect(phydev, 0x8010, 3, buf); + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf); /* RGMII MAC TXC Delay Enable */ ret = lan78xx_write_reg(dev, MAC_RGMII_ID, @@ -1975,11 +1976,11 @@ static int ksz9031rnx_fixup(struct phy_device *phydev) /* Micrel9301RNX PHY configuration */ /* RGMII Control Signal Pad Skew */ - phy_write_mmd_indirect(phydev, 4, 2, 0x0077); + phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077); /* RGMII RX Data Pad Skew */ - phy_write_mmd_indirect(phydev, 5, 2, 0x7777); + phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777); /* RGMII RX Clock Pad Skew */ - phy_write_mmd_indirect(phydev, 8, 2, 0x1FF); + phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF); dev->interface = PHY_INTERFACE_MODE_RGMII_RXID; diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 4f345bd4e6e29558daf29c3d472d2c0768c3202f..5a47e5510ca8243eed8f12cc86a4ec70ac42658e 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -464,9 +464,9 @@ static const struct ethtool_ops mcs7830_ethtool_ops = { .get_link = usbnet_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static const struct net_device_ops mcs7830_netdev_ops = { @@ -475,6 +475,7 @@ static const struct net_device_ops mcs7830_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = mcs7830_ioctl, .ndo_set_rx_mode = mcs7830_set_multicast, diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36674484c6fb9b73011619824f7bc60c50b9c1ad..6514c86f043eeb8777354094c4d2fd9f2ffd9c9d 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -501,13 +501,13 @@ static void read_bulk_callback(struct urb *urb) if (rx_status & 0x1e) { netif_dbg(pegasus, rx_err, net, "RX packet error %x\n", rx_status); - pegasus->stats.rx_errors++; + net->stats.rx_errors++; if (rx_status & 0x06) /* long or runt */ - pegasus->stats.rx_length_errors++; + net->stats.rx_length_errors++; if (rx_status & 0x08) - pegasus->stats.rx_crc_errors++; + net->stats.rx_crc_errors++; if (rx_status & 0x10) /* extra bits */ - pegasus->stats.rx_frame_errors++; + 
net->stats.rx_frame_errors++; goto goon; } if (pegasus->chip == 0x8513) { @@ -535,8 +535,8 @@ static void read_bulk_callback(struct urb *urb) skb_put(pegasus->rx_skb, pkt_len); pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net); netif_rx(pegasus->rx_skb); - pegasus->stats.rx_packets++; - pegasus->stats.rx_bytes += pkt_len; + net->stats.rx_packets++; + net->stats.rx_bytes += pkt_len; if (pegasus->flags & PEGASUS_UNPLUG) return; @@ -670,13 +670,13 @@ static void intr_callback(struct urb *urb) /* byte 0 == tx_status1, reg 2B */ if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL |LATE_COL|JABBER_TIMEOUT)) { - pegasus->stats.tx_errors++; + net->stats.tx_errors++; if (d[0] & TX_UNDERRUN) - pegasus->stats.tx_fifo_errors++; + net->stats.tx_fifo_errors++; if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT)) - pegasus->stats.tx_aborted_errors++; + net->stats.tx_aborted_errors++; if (d[0] & LATE_COL) - pegasus->stats.tx_window_errors++; + net->stats.tx_window_errors++; } /* d[5].LINK_STATUS lies on some adapters. @@ -685,7 +685,7 @@ static void intr_callback(struct urb *urb) */ /* bytes 3-4 == rx_lostpkt, reg 2E/2F */ - pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; + net->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; } res = usb_submit_urb(urb, GFP_ATOMIC); @@ -701,7 +701,7 @@ static void pegasus_tx_timeout(struct net_device *net) pegasus_t *pegasus = netdev_priv(net); netif_warn(pegasus, timer, net, "tx timeout\n"); usb_unlink_urb(pegasus->tx_urb); - pegasus->stats.tx_errors++; + net->stats.tx_errors++; } static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb, @@ -731,23 +731,18 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb, netif_device_detach(pegasus->net); break; default: - pegasus->stats.tx_errors++; + net->stats.tx_errors++; netif_start_queue(net); } } else { - pegasus->stats.tx_packets++; - pegasus->stats.tx_bytes += skb->len; + net->stats.tx_packets++; + net->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); return NETDEV_TX_OK; } -static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev) -{ - return &((pegasus_t *) netdev_priv(dev))->stats; -} - static inline void disable_net_traffic(pegasus_t *pegasus) { __le16 tmp = cpu_to_le16(0); @@ -953,20 +948,22 @@ static inline void pegasus_reset_wol(struct net_device *dev) } static int -pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +pegasus_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *ecmd) { pegasus_t *pegasus; pegasus = netdev_priv(dev); - mii_ethtool_gset(&pegasus->mii, ecmd); + mii_ethtool_get_link_ksettings(&pegasus->mii, ecmd); return 0; } static int -pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +pegasus_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *ecmd) { pegasus_t *pegasus = netdev_priv(dev); - return mii_ethtool_sset(&pegasus->mii, ecmd); + return mii_ethtool_set_link_ksettings(&pegasus->mii, ecmd); } static int pegasus_nway_reset(struct net_device *dev) @@ -995,14 +992,14 @@ static void pegasus_set_msglevel(struct net_device *dev, u32 v) static const struct ethtool_ops ops = { .get_drvinfo = pegasus_get_drvinfo, - .get_settings = pegasus_get_settings, - .set_settings = pegasus_set_settings, .nway_reset = pegasus_nway_reset, .get_link = pegasus_get_link, .get_msglevel = pegasus_get_msglevel, .set_msglevel = pegasus_set_msglevel, .get_wol = pegasus_get_wol, .set_wol = pegasus_set_wol, + .get_link_ksettings = pegasus_get_link_ksettings, + .set_link_ksettings = 
pegasus_set_link_ksettings, }; static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) @@ -1292,7 +1289,6 @@ static const struct net_device_ops pegasus_netdev_ops = { .ndo_do_ioctl = pegasus_ioctl, .ndo_start_xmit = pegasus_start_xmit, .ndo_set_rx_mode = pegasus_set_multicast, - .ndo_get_stats = pegasus_netdev_stats, .ndo_tx_timeout = pegasus_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index d15646244fdf06b7b474bff96922696e79caf230..9b7ea9c9167d6074abf5b883cfce955075d4c90b 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h @@ -83,7 +83,6 @@ typedef struct pegasus { struct usb_device *usb; struct usb_interface *intf; struct net_device *net; - struct net_device_stats stats; struct mii_if_info mii; unsigned flags; unsigned features; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 2474618404f5e592c0fe56d38c30c8988e1ed8ef..a3ed8115747c37e4f8639db60a90aa5aa1a60dda 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -58,12 +58,198 @@ struct qmi_wwan_state { enum qmi_wwan_flags { QMI_WWAN_FLAG_RAWIP = 1 << 0, + QMI_WWAN_FLAG_MUX = 1 << 1, }; enum qmi_wwan_quirks { QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ }; +struct qmimux_hdr { + u8 pad; + u8 mux_id; + __be16 pkt_len; +}; + +struct qmimux_priv { + struct net_device *real_dev; + u8 mux_id; +}; + +static int qmimux_open(struct net_device *dev) +{ + struct qmimux_priv *priv = netdev_priv(dev); + struct net_device *real_dev = priv->real_dev; + + if (!(priv->real_dev->flags & IFF_UP)) + return -ENETDOWN; + + if (netif_carrier_ok(real_dev)) + netif_carrier_on(dev); + return 0; +} + +static int qmimux_stop(struct net_device *dev) +{ + netif_carrier_off(dev); + return 0; +} + +static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct qmimux_priv *priv = netdev_priv(dev); + unsigned int len = skb->len; + struct qmimux_hdr *hdr; + + hdr = (struct qmimux_hdr *)skb_push(skb, sizeof(struct qmimux_hdr)); + hdr->pad = 0; + hdr->mux_id = priv->mux_id; + hdr->pkt_len = cpu_to_be16(len); + skb->dev = priv->real_dev; + return dev_queue_xmit(skb); +} + +static const struct net_device_ops qmimux_netdev_ops = { + .ndo_open = qmimux_open, + .ndo_stop = qmimux_stop, + .ndo_start_xmit = qmimux_start_xmit, +}; + +static void qmimux_setup(struct net_device *dev) +{ + dev->header_ops = NULL; /* No header */ + dev->type = ARPHRD_NONE; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + dev->netdev_ops = &qmimux_netdev_ops; + dev->destructor = free_netdev; +} + +static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) +{ + struct qmimux_priv *priv; + struct list_head *iter; + struct net_device *ldev; + + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) { + priv = netdev_priv(ldev); + if (priv->mux_id == mux_id) { + rcu_read_unlock(); + return ldev; + } + } + rcu_read_unlock(); + return NULL; +} + +static bool qmimux_has_slaves(struct usbnet *dev) +{ + return !list_empty(&dev->net->adj_list.upper); +} + +static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + unsigned int len, offset = sizeof(struct qmimux_hdr); + struct qmimux_hdr *hdr; + struct net_device *net; + struct sk_buff *skbn; + + while (offset < skb->len) { + hdr = (struct qmimux_hdr *)skb->data; + len = be16_to_cpu(hdr->pkt_len); + + /* drop the 
packet, bogus length */ + if (offset + len > skb->len) + return 0; + + /* control packet, we do not know what to do */ + if (hdr->pad & 0x80) + goto skip; + + net = qmimux_find_dev(dev, hdr->mux_id); + if (!net) + goto skip; + skbn = netdev_alloc_skb(net, len); + if (!skbn) + return 0; + skbn->dev = net; + + switch (skb->data[offset] & 0xf0) { + case 0x40: + skbn->protocol = htons(ETH_P_IP); + break; + case 0x60: + skbn->protocol = htons(ETH_P_IPV6); + break; + default: + /* not ip - do not know what to do */ + goto skip; + } + + memcpy(skb_put(skbn, len), skb->data + offset, len); + if (netif_rx(skbn) != NET_RX_SUCCESS) + return 0; + +skip: + offset += len + sizeof(struct qmimux_hdr); + } + return 1; +} + +static int qmimux_register_device(struct net_device *real_dev, u8 mux_id) +{ + struct net_device *new_dev; + struct qmimux_priv *priv; + int err; + + new_dev = alloc_netdev(sizeof(struct qmimux_priv), + "qmimux%d", NET_NAME_UNKNOWN, qmimux_setup); + if (!new_dev) + return -ENOBUFS; + + dev_net_set(new_dev, dev_net(real_dev)); + priv = netdev_priv(new_dev); + priv->mux_id = mux_id; + priv->real_dev = real_dev; + + err = register_netdevice(new_dev); + if (err < 0) + goto out_free_newdev; + + /* Account for reference in struct qmimux_priv */ + dev_hold(real_dev); + + err = netdev_upper_dev_link(real_dev, new_dev); + if (err) + goto out_unregister_netdev; + + netif_stacked_transfer_operstate(real_dev, new_dev); + + return 0; + +out_unregister_netdev: + unregister_netdevice(new_dev); + dev_put(real_dev); + +out_free_newdev: + free_netdev(new_dev); + return err; +} + +static void qmimux_unregister_device(struct net_device *dev) +{ + struct qmimux_priv *priv = netdev_priv(dev); + struct net_device *real_dev = priv->real_dev; + + netdev_upper_dev_unlink(real_dev, dev); + unregister_netdevice(dev); + + /* Get rid of the reference to real_dev */ + dev_put(real_dev); +} + static void qmi_wwan_netdev_setup(struct net_device *net) { struct usbnet *dev = netdev_priv(net); @@ -137,10 +323,114 @@ static ssize_t raw_ip_store(struct device *d, struct device_attribute *attr, co return ret; } +static ssize_t add_mux_show(struct device *d, struct device_attribute *attr, char *buf) +{ + struct net_device *dev = to_net_dev(d); + struct qmimux_priv *priv; + struct list_head *iter; + struct net_device *ldev; + ssize_t count = 0; + + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(dev, ldev, iter) { + priv = netdev_priv(ldev); + count += scnprintf(&buf[count], PAGE_SIZE - count, + "0x%02x\n", priv->mux_id); + } + rcu_read_unlock(); + return count; +} + +static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len) +{ + struct usbnet *dev = netdev_priv(to_net_dev(d)); + struct qmi_wwan_state *info = (void *)&dev->data; + u8 mux_id; + int ret; + + if (kstrtou8(buf, 0, &mux_id)) + return -EINVAL; + + /* mux_id [1 - 0x7f] range empirically found */ + if (mux_id < 1 || mux_id > 0x7f) + return -EINVAL; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (qmimux_find_dev(dev, mux_id)) { + netdev_err(dev->net, "mux_id already present\n"); + ret = -EINVAL; + goto err; + } + + /* we don't want to modify a running netdev */ + if (netif_running(dev->net)) { + netdev_err(dev->net, "Cannot change a running device\n"); + ret = -EBUSY; + goto err; + } + + ret = qmimux_register_device(dev->net, mux_id); + if (!ret) { + info->flags |= QMI_WWAN_FLAG_MUX; + ret = len; + } +err: + rtnl_unlock(); + return ret; +} + +static ssize_t del_mux_show(struct device *d, struct
device_attribute *attr, char *buf) +{ + return add_mux_show(d, attr, buf); +} + +static ssize_t del_mux_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len) +{ + struct usbnet *dev = netdev_priv(to_net_dev(d)); + struct qmi_wwan_state *info = (void *)&dev->data; + struct net_device *del_dev; + u8 mux_id; + int ret = 0; + + if (kstrtou8(buf, 0, &mux_id)) + return -EINVAL; + + if (!rtnl_trylock()) + return restart_syscall(); + + /* we don't want to modify a running netdev */ + if (netif_running(dev->net)) { + netdev_err(dev->net, "Cannot change a running device\n"); + ret = -EBUSY; + goto err; + } + + del_dev = qmimux_find_dev(dev, mux_id); + if (!del_dev) { + netdev_err(dev->net, "mux_id not present\n"); + ret = -EINVAL; + goto err; + } + qmimux_unregister_device(del_dev); + + if (!qmimux_has_slaves(dev)) + info->flags &= ~QMI_WWAN_FLAG_MUX; + ret = len; +err: + rtnl_unlock(); + return ret; +} + static DEVICE_ATTR_RW(raw_ip); +static DEVICE_ATTR_RW(add_mux); +static DEVICE_ATTR_RW(del_mux); static struct attribute *qmi_wwan_sysfs_attrs[] = { &dev_attr_raw_ip.attr, + &dev_attr_add_mux.attr, + &dev_attr_del_mux.attr, NULL, }; @@ -184,6 +474,9 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (skb->len < dev->net->hard_header_len) return 0; + if (info->flags & QMI_WWAN_FLAG_MUX) + return qmimux_rx_fixup(dev, skb); + switch (skb->data[0] & 0xf0) { case 0x40: proto = htons(ETH_P_IP); @@ -249,6 +542,7 @@ static const struct net_device_ops qmi_wwan_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = qmi_wwan_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1036,11 +1330,33 @@ static int qmi_wwan_probe(struct usb_interface *intf, return usbnet_probe(intf, id); } +static void qmi_wwan_disconnect(struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct qmi_wwan_state *info = (void *)&dev->data; + struct list_head *iter; + struct net_device *ldev; + + if (info->flags & QMI_WWAN_FLAG_MUX) { + if (!rtnl_trylock()) { + restart_syscall(); + return; + } + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) + qmimux_unregister_device(ldev); + rcu_read_unlock(); + rtnl_unlock(); + info->flags &= ~QMI_WWAN_FLAG_MUX; + } + usbnet_disconnect(intf); +} + static struct usb_driver qmi_wwan_driver = { .name = "qmi_wwan", .id_table = products, .probe = qmi_wwan_probe, - .disconnect = usbnet_disconnect, + .disconnect = qmi_wwan_disconnect, .suspend = qmi_wwan_suspend, .resume = qmi_wwan_resume, .reset_resume = qmi_wwan_resume, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 07f788c49d573fe9d4dc15e24b8f29449b4ecbe2..ddc62cb69be828a730e6ed32ecc9ee951fef8d3b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1766,6 +1766,7 @@ static int rx_bottom(struct r8152 *tp, int budget) unsigned long flags; struct list_head *cursor, *next, rx_queue; int ret = 0, work_done = 0; + struct napi_struct *napi = &tp->napi; if (!skb_queue_empty(&tp->rx_queue)) { while (work_done < budget) { @@ -1778,7 +1779,7 @@ static int rx_bottom(struct r8152 *tp, int budget) break; pkt_len = skb->len; - napi_gro_receive(&tp->napi, skb); + napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; stats->rx_bytes += pkt_len; @@ -1828,7 +1829,7 @@ static int rx_bottom(struct r8152 *tp, int budget) pkt_len -= CRC_SIZE; rx_data += sizeof(struct rx_desc); - skb = 
napi_alloc_skb(&tp->napi, pkt_len); + skb = napi_alloc_skb(napi, pkt_len); if (!skb) { stats->rx_dropped++; goto find_next_rx; @@ -1840,7 +1841,7 @@ static int rx_bottom(struct r8152 *tp, int budget) skb->protocol = eth_type_trans(skb, netdev); rtl_rx_vlan_tag(rx_desc, skb); if (work_done < budget) { - napi_gro_receive(&tp->napi, skb); + napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; stats->rx_bytes += pkt_len; @@ -3156,6 +3157,7 @@ static bool rtl8153_in_nway(struct r8152 *tp) static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; + struct napi_struct *napi = &tp->napi; u8 speed; speed = rtl8152_get_speed(tp); @@ -3165,7 +3167,7 @@ static void set_carrier(struct r8152 *tp) tp->rtl_ops.enable(tp); set_bit(RTL8152_SET_RX_MODE, &tp->flags); netif_stop_queue(netdev); - napi_disable(&tp->napi); + napi_disable(napi); netif_carrier_on(netdev); rtl_start_rx(tp); napi_enable(&tp->napi); @@ -3178,9 +3180,9 @@ static void set_carrier(struct r8152 *tp) } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); - napi_disable(&tp->napi); + napi_disable(napi); tp->rtl_ops.disable(tp); - napi_enable(&tp->napi); + napi_enable(napi); netif_info(tp, link, netdev, "carrier off\n"); } } @@ -3642,11 +3644,13 @@ static int rtl8152_runtime_suspend(struct r8152 *tp) tp->rtl_ops.autosuspend_en(tp, true); if (netif_carrier_ok(netdev)) { - napi_disable(&tp->napi); + struct napi_struct *napi = &tp->napi; + + napi_disable(napi); rtl_stop_rx(tp); rxdy_gated_en(tp, false); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); - napi_enable(&tp->napi); + napi_enable(napi); } } @@ -3662,12 +3666,14 @@ static int rtl8152_system_suspend(struct r8152 *tp) netif_device_detach(netdev); if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { + struct napi_struct *napi = &tp->napi; + clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); - napi_disable(&tp->napi); + napi_disable(napi); cancel_delayed_work_sync(&tp->schedule); tp->rtl_ops.down(tp); - napi_enable(&tp->napi); + napi_enable(napi); } return ret; @@ -3693,45 +3699,46 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) static int rtl8152_resume(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev = tp->netdev; mutex_lock(&tp->control); if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); - netif_device_attach(tp->netdev); + netif_device_attach(netdev); } - if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { + if (netif_running(netdev) && netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + struct napi_struct *napi = &tp->napi; + tp->rtl_ops.autosuspend_en(tp, false); - napi_disable(&tp->napi); + napi_disable(napi); set_bit(WORK_ENABLE, &tp->flags); - - if (netif_carrier_ok(tp->netdev)) { + if (netif_carrier_ok(netdev)) { if (rtl8152_get_speed(tp) & LINK_STATUS) { rtl_start_rx(tp); } else { - netif_carrier_off(tp->netdev); + netif_carrier_off(netdev); tp->rtl_ops.disable(tp); - netif_info(tp, link, tp->netdev, + netif_info(tp, link, netdev, "linking down\n"); } } - - napi_enable(&tp->napi); + napi_enable(napi); clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); if (!list_empty(&tp->rx_done)) napi_schedule(&tp->napi); } else { tp->rtl_ops.up(tp); - netif_carrier_off(tp->netdev); + netif_carrier_off(netdev); set_bit(WORK_ENABLE, &tp->flags); } usb_submit_urb(tp->intr_urb, GFP_KERNEL); } else if 
(test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - if (tp->netdev->flags & IFF_UP) + if (netdev->flags & IFF_UP) tp->rtl_ops.autosuspend_en(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); } @@ -3819,7 +3826,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev, } static -int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +int rtl8152_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct r8152 *tp = netdev_priv(netdev); int ret; @@ -3833,7 +3841,7 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) mutex_lock(&tp->control); - ret = mii_ethtool_gset(&tp->mii, cmd); + ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd); mutex_unlock(&tp->control); @@ -3843,7 +3851,8 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) return ret; } -static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int rtl8152_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct r8152 *tp = netdev_priv(dev); int ret; @@ -3854,11 +3863,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) mutex_lock(&tp->control); - ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); + ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex); if (!ret) { - tp->autoneg = cmd->autoneg; - tp->speed = cmd->speed; - tp->duplex = cmd->duplex; + tp->autoneg = cmd->base.autoneg; + tp->speed = cmd->base.speed; + tp->duplex = cmd->base.duplex; } mutex_unlock(&tp->control); @@ -4136,8 +4146,6 @@ static int rtl8152_set_coalesce(struct net_device *netdev, static const struct ethtool_ops ops = { .get_drvinfo = rtl8152_get_drvinfo, - .get_settings = rtl8152_get_settings, - .set_settings = rtl8152_set_settings, .get_link = ethtool_op_get_link, .nway_reset = rtl8152_nway_reset, .get_msglevel = rtl8152_get_msglevel, @@ -4151,6 +4159,8 @@ static const struct ethtool_ops ops = { .set_coalesce = rtl8152_set_coalesce, .get_eee = rtl_ethtool_get_eee, .set_eee = rtl_ethtool_set_eee, + .get_link_ksettings = rtl8152_get_link_ksettings, + .set_link_ksettings = rtl8152_set_link_ksettings, }; static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -4249,44 +4259,6 @@ static const struct net_device_ops rtl8152_netdev_ops = { .ndo_features_check = rtl8152_features_check, }; -static void r8152b_get_version(struct r8152 *tp) -{ - u32 ocp_data; - u16 version; - - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); - version = (u16)(ocp_data & VERSION_MASK); - - switch (version) { - case 0x4c00: - tp->version = RTL_VER_01; - break; - case 0x4c10: - tp->version = RTL_VER_02; - break; - case 0x5c00: - tp->version = RTL_VER_03; - tp->mii.supports_gmii = 1; - break; - case 0x5c10: - tp->version = RTL_VER_04; - tp->mii.supports_gmii = 1; - break; - case 0x5c20: - tp->version = RTL_VER_05; - tp->mii.supports_gmii = 1; - break; - case 0x5c30: - tp->version = RTL_VER_06; - tp->mii.supports_gmii = 1; - break; - default: - netif_info(tp, probe, tp->netdev, - "Unknown version 0x%04x\n", version); - break; - } -} - static void rtl8152_unload(struct r8152 *tp) { if (test_bit(RTL8152_UNPLUG, &tp->flags)) @@ -4351,14 +4323,66 @@ static int rtl_ops_init(struct r8152 *tp) return ret; } +static u8 rtl_get_version(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + u32 ocp_data = 0; + __le32 *tmp; + u8 version; + int ret; + + tmp = 
kmalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return 0; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); + if (ret > 0) + ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; + + kfree(tmp); + + switch (ocp_data) { + case 0x4c00: + version = RTL_VER_01; + break; + case 0x4c10: + version = RTL_VER_02; + break; + case 0x5c00: + version = RTL_VER_03; + break; + case 0x5c10: + version = RTL_VER_04; + break; + case 0x5c20: + version = RTL_VER_05; + break; + case 0x5c30: + version = RTL_VER_06; + break; + default: + version = RTL_VER_UNKNOWN; + dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); + break; + } + + return version; +} + static int rtl8152_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); + u8 version = rtl_get_version(intf); struct r8152 *tp; struct net_device *netdev; int ret; + if (version == RTL_VER_UNKNOWN) + return -ENODEV; + if (udev->actconfig->desc.bConfigurationValue != 1) { usb_driver_set_configuration(udev, 1); return -ENODEV; } @@ -4378,8 +4402,18 @@ static int rtl8152_probe(struct usb_interface *intf, tp->udev = udev; tp->netdev = netdev; tp->intf = intf; + tp->version = version; + + switch (version) { + case RTL_VER_01: + case RTL_VER_02: + tp->mii.supports_gmii = 0; + break; + default: + tp->mii.supports_gmii = 1; + break; + } - r8152b_get_version(tp); ret = rtl_ops_init(tp); if (ret) goto out; diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index c5b21138b7eb1723579528532deb1b7ea9c2aa72..e96e2e5673d724ea5e009b686ca32afaf921190f 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -291,6 +291,7 @@ static const struct net_device_ops rndis_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, };
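Every driver in the remainder of this series gets the same mechanical ethtool conversion; a condensed sketch of the pattern for a hypothetical driver (not taken from the patch): the legacy u32 supported mask becomes a link-mode bitmap via ethtool_convert_legacy_u32_to_link_mode(), and the scalar settings move from struct ethtool_cmd into cmd->base:

static int foo_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_Autoneg |
			SUPPORTED_TP;

	/* the fixed-size u32 mask becomes an arbitrarily wide bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	/* speed/duplex/port/autoneg now live in the base struct; the old
	 * transceiver field has no equivalent and is simply dropped
	 */
	cmd->base.port = PORT_TP;
	cmd->base.speed = SPEED_100;	/* a real driver reads the PHY here */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index c81c79110cefca9443d614679d8e7cdd4b3295c3..daaa88a66f401fbe834b126d54125b79bbcfbad4 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -791,47 +791,52 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } -static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static int rtl8150_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { rtl8150_t *dev = netdev_priv(netdev); short lpa, bmcr; + u32 supported; - ecmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->phy_address = dev->phy; + ecmd->base.port = PORT_TP; + ecmd->base.phy_address = dev->phy; get_registers(dev, BMCR, 2, &bmcr); get_registers(dev, ANLP, 2, &lpa); if (bmcr & BMCR_ANENABLE) { u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); - ethtool_cmd_speed_set(ecmd, speed); - ecmd->autoneg = AUTONEG_ENABLE; + ecmd->base.speed = speed; + ecmd->base.autoneg = AUTONEG_ENABLE; if (speed == SPEED_100) - ecmd->duplex = (lpa & LPA_100FULL) ? + ecmd->base.duplex = (lpa & LPA_100FULL) ?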
DUPLEX_FULL : DUPLEX_HALF; else - ecmd->duplex = (lpa & LPA_10FULL) ? + ecmd->base.duplex = (lpa & LPA_10FULL) ? DUPLEX_FULL : DUPLEX_HALF; } else { - ecmd->autoneg = AUTONEG_DISABLE; - ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ? - SPEED_100 : SPEED_10)); - ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? + ecmd->base.autoneg = AUTONEG_DISABLE; + ecmd->base.speed = ((bmcr & BMCR_SPEED100) ? + SPEED_100 : SPEED_10); + ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + return 0; } static const struct ethtool_ops ops = { .get_drvinfo = rtl8150_get_drvinfo, - .get_settings = rtl8150_get_settings, - .get_link = ethtool_op_get_link + .get_link = ethtool_op_get_link, + .get_link_ksettings = rtl8150_get_link_ksettings, }; static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index ac69f28d92d2360ddf3fc4ace031046db8d7d39d..2110ab3513f0446a9a090a83de66b0044b0791c9 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -199,6 +199,7 @@ static const struct net_device_ops sierra_net_device_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -648,9 +649,9 @@ static const struct ethtool_ops sierra_net_ethtool_ops = { .get_link = sierra_net_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 190de9a90f7387c5070c7f589aa18bb7d05ac5d7..d0a113743195acae86931c51eea50b94ddadd487 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -743,13 +743,13 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = { .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, .get_eeprom = smsc75xx_ethtool_get_eeprom, .set_eeprom = smsc75xx_ethtool_set_eeprom, .get_wol = smsc75xx_ethtool_get_wol, .set_wol = smsc75xx_ethtool_set_wol, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -1381,6 +1381,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 5f19fb0f025d9449d0ba20958610e0d1f083f032..765400b62168436b278d8a09fa0dd1fd8566bcb7 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -33,7 +33,7 @@ #include "smsc95xx.h" #define SMSC_CHIPNAME "smsc95xx" -#define SMSC_DRIVER_VERSION "1.0.5" +#define SMSC_DRIVER_VERSION "1.0.6" #define 
HS_USB_PKT_SIZE (512) #define FS_USB_PKT_SIZE (64) #define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) @@ -853,32 +853,32 @@ static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl) pdata->mdix_ctrl = mdix_ctrl; } -static int smsc95xx_get_settings(struct net_device *net, - struct ethtool_cmd *cmd) +static int smsc95xx_get_link_ksettings(struct net_device *net, + struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); int retval; - retval = usbnet_get_settings(net, cmd); + retval = usbnet_get_link_ksettings(net, cmd); - cmd->eth_tp_mdix = pdata->mdix_ctrl; - cmd->eth_tp_mdix_ctrl = pdata->mdix_ctrl; + cmd->base.eth_tp_mdix = pdata->mdix_ctrl; + cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl; return retval; } -static int smsc95xx_set_settings(struct net_device *net, - struct ethtool_cmd *cmd) +static int smsc95xx_set_link_ksettings(struct net_device *net, + const struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); int retval; - if (pdata->mdix_ctrl != cmd->eth_tp_mdix_ctrl) - set_mdix_status(net, cmd->eth_tp_mdix_ctrl); + if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl) + set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl); - retval = usbnet_set_settings(net, cmd); + retval = usbnet_set_link_ksettings(net, cmd); return retval; } @@ -889,8 +889,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = { .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, - .get_settings = smsc95xx_get_settings, - .set_settings = smsc95xx_set_settings, .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, .get_eeprom = smsc95xx_ethtool_get_eeprom, .set_eeprom = smsc95xx_ethtool_set_eeprom, @@ -898,6 +896,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = { .get_regs = smsc95xx_ethtool_getregs, .get_wol = smsc95xx_ethtool_get_wol, .set_wol = smsc95xx_ethtool_set_wol, + .get_link_ksettings = smsc95xx_get_link_ksettings, + .set_link_ksettings = smsc95xx_set_link_ksettings, }; static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -1248,6 +1248,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = smsc95xx_ioctl, @@ -1498,7 +1499,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev) if (ret < 0) return ret; - if (val & 0xFFFF) { + if (val & RX_FIFO_INF_USED_) { netdev_info(dev->net, "rx fifo not empty in autosuspend\n"); return -EBUSY; } diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h index 29a4d9efce7c49e02fe232c330d71bd6136f69c3..cfc704f3a46083227097407b71aa5058c7beb5ce 100644 --- a/drivers/net/usb/smsc95xx.h +++ b/drivers/net/usb/smsc95xx.h @@ -21,218 +21,280 @@ #define _SMSC95XX_H /* Tx command words */ -#define TX_CMD_A_DATA_OFFSET_ (0x001F0000) -#define TX_CMD_A_FIRST_SEG_ (0x00002000) -#define TX_CMD_A_LAST_SEG_ (0x00001000) -#define TX_CMD_A_BUF_SIZE_ (0x000007FF) +#define TX_CMD_A_DATA_OFFSET_ (0x001F0000) /* Data Start Offset */ +#define TX_CMD_A_FIRST_SEG_ (0x00002000) /* First Segment */ +#define TX_CMD_A_LAST_SEG_ (0x00001000) /* Last Segment */ +#define TX_CMD_A_BUF_SIZE_ (0x000007FF) /* Buffer Size */ -#define 
TX_CMD_B_CSUM_ENABLE (0x00004000) -#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000) -#define TX_CMD_B_DISABLE_PADDING_ (0x00001000) -#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF) +#define TX_CMD_B_CSUM_ENABLE (0x00004000) /* TX Checksum Enable */ +#define TX_CMD_B_ADD_CRC_DIS_ (0x00002000) /* Add CRC Disable */ +#define TX_CMD_B_DIS_PADDING_ (0x00001000) /* Disable Frame Padding */ +#define TX_CMD_B_FRAME_LENGTH_ (0x000007FF) /* Frame Length (bytes) */ /* Rx status word */ -#define RX_STS_FF_ (0x40000000) /* Filter Fail */ -#define RX_STS_FL_ (0x3FFF0000) /* Frame Length */ -#define RX_STS_ES_ (0x00008000) /* Error Summary */ -#define RX_STS_BF_ (0x00002000) /* Broadcast Frame */ -#define RX_STS_LE_ (0x00001000) /* Length Error */ -#define RX_STS_RF_ (0x00000800) /* Runt Frame */ -#define RX_STS_MF_ (0x00000400) /* Multicast Frame */ -#define RX_STS_TL_ (0x00000080) /* Frame too long */ -#define RX_STS_CS_ (0x00000040) /* Collision Seen */ -#define RX_STS_FT_ (0x00000020) /* Frame Type */ -#define RX_STS_RW_ (0x00000010) /* Receive Watchdog */ -#define RX_STS_ME_ (0x00000008) /* Mii Error */ -#define RX_STS_DB_ (0x00000004) /* Dribbling */ -#define RX_STS_CRC_ (0x00000002) /* CRC Error */ - -/* SCSRs */ -#define ID_REV (0x00) -#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) -#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) -#define ID_REV_CHIP_ID_9500_ (0x9500) -#define ID_REV_CHIP_ID_9500A_ (0x9E00) -#define ID_REV_CHIP_ID_9512_ (0xEC00) -#define ID_REV_CHIP_ID_9530_ (0x9530) -#define ID_REV_CHIP_ID_89530_ (0x9E08) -#define ID_REV_CHIP_ID_9730_ (0x9730) - -#define INT_STS (0x08) -#define INT_STS_TX_STOP_ (0x00020000) -#define INT_STS_RX_STOP_ (0x00010000) -#define INT_STS_PHY_INT_ (0x00008000) -#define INT_STS_TXE_ (0x00004000) -#define INT_STS_TDFU_ (0x00002000) -#define INT_STS_TDFO_ (0x00001000) -#define INT_STS_RXDF_ (0x00000800) -#define INT_STS_GPIOS_ (0x000007FF) -#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF) - -#define RX_CFG (0x0C) -#define RX_FIFO_FLUSH_ (0x00000001) - -#define TX_CFG (0x10) -#define TX_CFG_ON_ (0x00000004) -#define TX_CFG_STOP_ (0x00000002) -#define TX_CFG_FIFO_FLUSH_ (0x00000001) - -#define HW_CFG (0x14) -#define HW_CFG_BIR_ (0x00001000) -#define HW_CFG_LEDB_ (0x00000800) -#define HW_CFG_RXDOFF_ (0x00000600) -#define HW_CFG_DRP_ (0x00000040) -#define HW_CFG_MEF_ (0x00000020) -#define HW_CFG_LRST_ (0x00000008) -#define HW_CFG_PSEL_ (0x00000004) -#define HW_CFG_BCE_ (0x00000002) -#define HW_CFG_SRST_ (0x00000001) - -#define RX_FIFO_INF (0x18) - -#define PM_CTRL (0x20) -#define PM_CTL_RES_CLR_WKP_STS (0x00000200) -#define PM_CTL_DEV_RDY_ (0x00000080) -#define PM_CTL_SUS_MODE_ (0x00000060) -#define PM_CTL_SUS_MODE_0 (0x00000000) -#define PM_CTL_SUS_MODE_1 (0x00000020) -#define PM_CTL_SUS_MODE_2 (0x00000040) -#define PM_CTL_SUS_MODE_3 (0x00000060) -#define PM_CTL_PHY_RST_ (0x00000010) -#define PM_CTL_WOL_EN_ (0x00000008) -#define PM_CTL_ED_EN_ (0x00000004) -#define PM_CTL_WUPS_ (0x00000003) -#define PM_CTL_WUPS_NO_ (0x00000000) -#define PM_CTL_WUPS_ED_ (0x00000001) -#define PM_CTL_WUPS_WOL_ (0x00000002) -#define PM_CTL_WUPS_MULTI_ (0x00000003) - -#define LED_GPIO_CFG (0x24) -#define LED_GPIO_CFG_SPD_LED (0x01000000) -#define LED_GPIO_CFG_LNK_LED (0x00100000) -#define LED_GPIO_CFG_FDX_LED (0x00010000) - -#define GPIO_CFG (0x28) - -#define AFC_CFG (0x2C) - +#define RX_STS_FF_ (0x40000000) /* Filter Fail */ +#define RX_STS_FL_ (0x3FFF0000) /* Frame Length */ +#define RX_STS_ES_ (0x00008000) /* Error Summary */ +#define RX_STS_BF_ (0x00002000) /* Broadcast Frame */ +#define RX_STS_LE_ 
(0x00001000) /* Length Error */ +#define RX_STS_RF_ (0x00000800) /* Runt Frame */ +#define RX_STS_MF_ (0x00000400) /* Multicast Frame */ +#define RX_STS_TL_ (0x00000080) /* Frame too long */ +#define RX_STS_CS_ (0x00000040) /* Collision Seen */ +#define RX_STS_FT_ (0x00000020) /* Frame Type */ +#define RX_STS_RW_ (0x00000010) /* Receive Watchdog */ +#define RX_STS_ME_ (0x00000008) /* MII Error */ +#define RX_STS_DB_ (0x00000004) /* Dribbling */ +#define RX_STS_CRC_ (0x00000002) /* CRC Error */ + +/* SCSRs - System Control and Status Registers */ +/* Device ID and Revision Register */ +#define ID_REV (0x00) +#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) +#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) +#define ID_REV_CHIP_ID_9500_ (0x9500) +#define ID_REV_CHIP_ID_9500A_ (0x9E00) +#define ID_REV_CHIP_ID_9512_ (0xEC00) +#define ID_REV_CHIP_ID_9530_ (0x9530) +#define ID_REV_CHIP_ID_89530_ (0x9E08) +#define ID_REV_CHIP_ID_9730_ (0x9730) + +/* Interrupt Status Register */ +#define INT_STS (0x08) +#define INT_STS_MAC_RTO_ (0x00040000) /* MAC Reset Time Out */ +#define INT_STS_TX_STOP_ (0x00020000) /* TX Stopped */ +#define INT_STS_RX_STOP_ (0x00010000) /* RX Stopped */ +#define INT_STS_PHY_INT_ (0x00008000) /* PHY Interrupt */ +#define INT_STS_TXE_ (0x00004000) /* Transmitter Error */ +#define INT_STS_TDFU_ (0x00002000) /* TX Data FIFO Underrun */ +#define INT_STS_TDFO_ (0x00001000) /* TX Data FIFO Overrun */ +#define INT_STS_RXDF_ (0x00000800) /* RX Dropped Frame */ +#define INT_STS_GPIOS_ (0x000007FF) /* GPIOs Interrupts */ +#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF) + +/* Receive Configuration Register */ +#define RX_CFG (0x0C) +#define RX_FIFO_FLUSH_ (0x00000001) /* Receive FIFO Flush */ + +/* Transmit Configuration Register */ +#define TX_CFG (0x10) +#define TX_CFG_ON_ (0x00000004) /* Transmitter Enable */ +#define TX_CFG_STOP_ (0x00000002) /* Stop Transmitter */ +#define TX_CFG_FIFO_FLUSH_ (0x00000001) /* Transmit FIFO Flush */ + +/* Hardware Configuration Register */ +#define HW_CFG (0x14) +#define HW_CFG_BIR_ (0x00001000) /* Bulk In Empty Response */ +#define HW_CFG_LEDB_ (0x00000800) /* Activity LED 80ms Bypass */ +#define HW_CFG_RXDOFF_ (0x00000600) /* RX Data Offset */ +#define HW_CFG_SBP_ (0x00000100) /* Stall Bulk Out Pipe Dis. */ +#define HW_CFG_IME_ (0x00000080) /* Internal MII Visi. Enable */ +#define HW_CFG_DRP_ (0x00000040) /* Discard Errored RX Frame */ +#define HW_CFG_MEF_ (0x00000020) /* Mult. 
ETH Frames/USB pkt */ +#define HW_CFG_ETC_ (0x00000010) /* EEPROM Timeout Control */ +#define HW_CFG_LRST_ (0x00000008) /* Soft Lite Reset */ +#define HW_CFG_PSEL_ (0x00000004) /* External PHY Select */ +#define HW_CFG_BCE_ (0x00000002) /* Burst Cap Enable */ +#define HW_CFG_SRST_ (0x00000001) /* Soft Reset */ + +/* Receive FIFO Information Register */ +#define RX_FIFO_INF (0x18) +#define RX_FIFO_INF_USED_ (0x0000FFFF) /* RX Data FIFO Used Space */ + +/* Transmit FIFO Information Register */ +#define TX_FIFO_INF (0x1C) +#define TX_FIFO_INF_FREE_ (0x0000FFFF) /* TX Data FIFO Free Space */ + +/* Power Management Control Register */ +#define PM_CTRL (0x20) +#define PM_CTL_RES_CLR_WKP_STS (0x00000200) /* Resume Clears Wakeup STS */ +#define PM_CTL_RES_CLR_WKP_EN (0x00000100) /* Resume Clears Wkp Enables */ +#define PM_CTL_DEV_RDY_ (0x00000080) /* Device Ready */ +#define PM_CTL_SUS_MODE_ (0x00000060) /* Suspend Mode */ +#define PM_CTL_SUS_MODE_0 (0x00000000) +#define PM_CTL_SUS_MODE_1 (0x00000020) +#define PM_CTL_SUS_MODE_2 (0x00000040) +#define PM_CTL_SUS_MODE_3 (0x00000060) +#define PM_CTL_PHY_RST_ (0x00000010) /* PHY Reset */ +#define PM_CTL_WOL_EN_ (0x00000008) /* Wake On Lan Enable */ +#define PM_CTL_ED_EN_ (0x00000004) /* Energy Detect Enable */ +#define PM_CTL_WUPS_ (0x00000003) /* Wake Up Status */ +#define PM_CTL_WUPS_NO_ (0x00000000) /* No Wake Up Event Detected */ +#define PM_CTL_WUPS_ED_ (0x00000001) /* Energy Detect */ +#define PM_CTL_WUPS_WOL_ (0x00000002) /* Wake On Lan */ +#define PM_CTL_WUPS_MULTI_ (0x00000003) /* Multiple Events Occurred */ + +/* LED General Purpose IO Configuration Register */ +#define LED_GPIO_CFG (0x24) +#define LED_GPIO_CFG_SPD_LED (0x01000000) /* GPIOz as Speed LED */ +#define LED_GPIO_CFG_LNK_LED (0x00100000) /* GPIOy as Link LED */ +#define LED_GPIO_CFG_FDX_LED (0x00010000) /* GPIOx as Full Duplex LED */ + +/* General Purpose IO Configuration Register */ +#define GPIO_CFG (0x28) + +/* Automatic Flow Control Configuration Register */ +#define AFC_CFG (0x2C) +#define AFC_CFG_HI_ (0x00FF0000) /* Auto Flow Ctrl High Level */ +#define AFC_CFG_LO_ (0x0000FF00) /* Auto Flow Ctrl Low Level */ +#define AFC_CFG_BACK_DUR_ (0x000000F0) /* Back Pressure Duration */ +#define AFC_CFG_FC_MULT_ (0x00000008) /* Flow Ctrl on Mcast Frame */ +#define AFC_CFG_FC_BRD_ (0x00000004) /* Flow Ctrl on Bcast Frame */ +#define AFC_CFG_FC_ADD_ (0x00000002) /* Flow Ctrl on Addr. Decode */ +#define AFC_CFG_FC_ANY_ (0x00000001) /* Flow Ctrl on Any Frame */ /* Hi watermark = 15.5Kb (~10 mtu pkts) */ /* low watermark = 3k (~2 mtu pkts) */ /* backpressure duration = ~ 350us */ /* Apply FC on any frame. 
*/ -#define AFC_CFG_DEFAULT (0x00F830A1) - -#define E2P_CMD (0x30) -#define E2P_CMD_BUSY_ (0x80000000) -#define E2P_CMD_MASK_ (0x70000000) -#define E2P_CMD_READ_ (0x00000000) -#define E2P_CMD_EWDS_ (0x10000000) -#define E2P_CMD_EWEN_ (0x20000000) -#define E2P_CMD_WRITE_ (0x30000000) -#define E2P_CMD_WRAL_ (0x40000000) -#define E2P_CMD_ERASE_ (0x50000000) -#define E2P_CMD_ERAL_ (0x60000000) -#define E2P_CMD_RELOAD_ (0x70000000) -#define E2P_CMD_TIMEOUT_ (0x00000400) -#define E2P_CMD_LOADED_ (0x00000200) -#define E2P_CMD_ADDR_ (0x000001FF) - -#define MAX_EEPROM_SIZE (512) - -#define E2P_DATA (0x34) -#define E2P_DATA_MASK_ (0x000000FF) - -#define BURST_CAP (0x38) - +#define AFC_CFG_DEFAULT (0x00F830A1) + +/* EEPROM Command Register */ +#define E2P_CMD (0x30) +#define E2P_CMD_BUSY_ (0x80000000) /* E2P Controller Busy */ +#define E2P_CMD_MASK_ (0x70000000) /* Command Mask (see below) */ +#define E2P_CMD_READ_ (0x00000000) /* Read Location */ +#define E2P_CMD_EWDS_ (0x10000000) /* Erase/Write Disable */ +#define E2P_CMD_EWEN_ (0x20000000) /* Erase/Write Enable */ +#define E2P_CMD_WRITE_ (0x30000000) /* Write Location */ +#define E2P_CMD_WRAL_ (0x40000000) /* Write All */ +#define E2P_CMD_ERASE_ (0x50000000) /* Erase Location */ +#define E2P_CMD_ERAL_ (0x60000000) /* Erase All */ +#define E2P_CMD_RELOAD_ (0x70000000) /* Data Reload */ +#define E2P_CMD_TIMEOUT_ (0x00000400) /* Set if no resp within 30ms */ +#define E2P_CMD_LOADED_ (0x00000200) /* Valid EEPROM found */ +#define E2P_CMD_ADDR_ (0x000001FF) /* Byte aligned address */ + +#define MAX_EEPROM_SIZE (512) + +/* EEPROM Data Register */ +#define E2P_DATA (0x34) +#define E2P_DATA_MASK_ (0x000000FF) /* EEPROM Data Mask */ + +/* Burst Cap Register */ +#define BURST_CAP (0x38) +#define BURST_CAP_MASK_ (0x000000FF) /* Max burst sent by the UTX */ + +/* Configuration Straps Status Register */ #define STRAP_STATUS (0x3C) -#define STRAP_STATUS_PWR_SEL_ (0x00000020) -#define STRAP_STATUS_AMDIX_EN_ (0x00000010) -#define STRAP_STATUS_PORT_SWAP_ (0x00000008) -#define STRAP_STATUS_EEP_SIZE_ (0x00000004) -#define STRAP_STATUS_RMT_WKP_ (0x00000002) -#define STRAP_STATUS_EEP_DISABLE_ (0x00000001) - -#define GPIO_WAKE (0x64) - -#define INT_EP_CTL (0x68) -#define INT_EP_CTL_INTEP_ (0x80000000) -#define INT_EP_CTL_MACRTO_ (0x00080000) -#define INT_EP_CTL_TX_STOP_ (0x00020000) -#define INT_EP_CTL_RX_STOP_ (0x00010000) -#define INT_EP_CTL_PHY_INT_ (0x00008000) -#define INT_EP_CTL_TXE_ (0x00004000) -#define INT_EP_CTL_TDFU_ (0x00002000) -#define INT_EP_CTL_TDFO_ (0x00001000) -#define INT_EP_CTL_RXDF_ (0x00000800) -#define INT_EP_CTL_GPIOS_ (0x000007FF) - -#define BULK_IN_DLY (0x6C) - -/* MAC CSRs */ -#define MAC_CR (0x100) -#define MAC_CR_RXALL_ (0x80000000) -#define MAC_CR_RCVOWN_ (0x00800000) -#define MAC_CR_LOOPBK_ (0x00200000) -#define MAC_CR_FDPX_ (0x00100000) -#define MAC_CR_MCPAS_ (0x00080000) -#define MAC_CR_PRMS_ (0x00040000) -#define MAC_CR_INVFILT_ (0x00020000) -#define MAC_CR_PASSBAD_ (0x00010000) -#define MAC_CR_HFILT_ (0x00008000) -#define MAC_CR_HPFILT_ (0x00002000) -#define MAC_CR_LCOLL_ (0x00001000) -#define MAC_CR_BCAST_ (0x00000800) -#define MAC_CR_DISRTY_ (0x00000400) -#define MAC_CR_PADSTR_ (0x00000100) -#define MAC_CR_BOLMT_MASK (0x000000C0) -#define MAC_CR_DFCHK_ (0x00000020) -#define MAC_CR_TXEN_ (0x00000008) -#define MAC_CR_RXEN_ (0x00000004) - -#define ADDRH (0x104) - -#define ADDRL (0x108) - -#define HASHH (0x10C) - -#define HASHL (0x110) - -#define MII_ADDR (0x114) -#define MII_WRITE_ (0x02) -#define MII_BUSY_ (0x01) -#define MII_READ_ 
(0x00) /* ~of MII Write bit */ - -#define MII_DATA (0x118) - -#define FLOW (0x11C) -#define FLOW_FCPT_ (0xFFFF0000) -#define FLOW_FCPASS_ (0x00000004) -#define FLOW_FCEN_ (0x00000002) -#define FLOW_FCBSY_ (0x00000001) - -#define VLAN1 (0x120) - -#define VLAN2 (0x124) - -#define WUFF (0x128) -#define LAN9500_WUFF_NUM (4) -#define LAN9500A_WUFF_NUM (8) - -#define WUCSR (0x12C) -#define WUCSR_WFF_PTR_RST_ (0x80000000) -#define WUCSR_GUE_ (0x00000200) -#define WUCSR_WUFR_ (0x00000040) -#define WUCSR_MPR_ (0x00000020) -#define WUCSR_WAKE_EN_ (0x00000004) -#define WUCSR_MPEN_ (0x00000002) - -#define COE_CR (0x130) -#define Tx_COE_EN_ (0x00010000) -#define Rx_COE_MODE_ (0x00000002) -#define Rx_COE_EN_ (0x00000001) - -/* Vendor-specific PHY Definitions */ - +#define STRAP_STATUS_PWR_SEL_ (0x00000020) /* Device self-powered */ +#define STRAP_STATUS_AMDIX_EN_ (0x00000010) /* Auto-MDIX Enabled */ +#define STRAP_STATUS_PORT_SWAP_ (0x00000008) /* USBD+/USBD- Swapped */ +#define STRAP_STATUS_EEP_SIZE_ (0x00000004) /* EEPROM Size */ +#define STRAP_STATUS_RMT_WKP_ (0x00000002) /* Remote Wkp supported */ +#define STRAP_STATUS_EEP_DISABLE_ (0x00000001) /* EEPROM Disabled */ + +/* Data Port Select Register */ +#define DP_SEL (0x40) + +/* Data Port Command Register */ +#define DP_CMD (0x44) + +/* Data Port Address Register */ +#define DP_ADDR (0x48) + +/* Data Port Data 0 Register */ +#define DP_DATA0 (0x4C) + +/* Data Port Data 1 Register */ +#define DP_DATA1 (0x50) + +/* General Purpose IO Wake Enable and Polarity Register */ +#define GPIO_WAKE (0x64) + +/* Interrupt Endpoint Control Register */ +#define INT_EP_CTL (0x68) +#define INT_EP_CTL_INTEP_ (0x80000000) /* Always TX Interrupt PKT */ +#define INT_EP_CTL_MAC_RTO_ (0x00080000) /* MAC Reset Time Out */ +#define INT_EP_CTL_RX_FIFO_ (0x00040000) /* RX FIFO Has Frame */ +#define INT_EP_CTL_TX_STOP_ (0x00020000) /* TX Stopped */ +#define INT_EP_CTL_RX_STOP_ (0x00010000) /* RX Stopped */ +#define INT_EP_CTL_PHY_INT_ (0x00008000) /* PHY Interrupt */ +#define INT_EP_CTL_TXE_ (0x00004000) /* TX Error */ +#define INT_EP_CTL_TDFU_ (0x00002000) /* TX Data FIFO Underrun */ +#define INT_EP_CTL_TDFO_ (0x00001000) /* TX Data FIFO Overrun */ +#define INT_EP_CTL_RXDF_ (0x00000800) /* RX Dropped Frame */ +#define INT_EP_CTL_GPIOS_ (0x000007FF) /* GPIOs Interrupt Enable */ + +/* Bulk In Delay Register (units of 16.667ns, until ~1092µs) */ +#define BULK_IN_DLY (0x6C) + +/* MAC CSRs - MAC Control and Status Registers */ +/* MAC Control Register */ +#define MAC_CR (0x100) +#define MAC_CR_RXALL_ (0x80000000) /* Receive All Mode */ +#define MAC_CR_RCVOWN_ (0x00800000) /* Disable Receive Own */ +#define MAC_CR_LOOPBK_ (0x00200000) /* Loopback Operation Mode */ +#define MAC_CR_FDPX_ (0x00100000) /* Full Duplex Mode */ +#define MAC_CR_MCPAS_ (0x00080000) /* Pass All Multicast */ +#define MAC_CR_PRMS_ (0x00040000) /* Promiscuous Mode */ +#define MAC_CR_INVFILT_ (0x00020000) /* Inverse Filtering */ +#define MAC_CR_PASSBAD_ (0x00010000) /* Pass Bad Frames */ +#define MAC_CR_HFILT_ (0x00008000) /* Hash Only Filtering Mode */ +#define MAC_CR_HPFILT_ (0x00002000) /* Hash/Perfect Filt. 
Mode */ +#define MAC_CR_LCOLL_ (0x00001000) /* Late Collision Control */ +#define MAC_CR_BCAST_ (0x00000800) /* Disable Broadcast Frames */ +#define MAC_CR_DISRTY_ (0x00000400) /* Disable Retry */ +#define MAC_CR_PADSTR_ (0x00000100) /* Automatic Pad Stripping */ +#define MAC_CR_BOLMT_MASK (0x000000C0) /* BackOff Limit */ +#define MAC_CR_DFCHK_ (0x00000020) /* Deferral Check */ +#define MAC_CR_TXEN_ (0x00000008) /* Transmitter Enable */ +#define MAC_CR_RXEN_ (0x00000004) /* Receiver Enable */ + +/* MAC Address High Register */ +#define ADDRH (0x104) + +/* MAC Address Low Register */ +#define ADDRL (0x108) + +/* Multicast Hash Table High Register */ +#define HASHH (0x10C) + +/* Multicast Hash Table Low Register */ +#define HASHL (0x110) + +/* MII Access Register */ +#define MII_ADDR (0x114) +#define MII_WRITE_ (0x02) +#define MII_BUSY_ (0x01) +#define MII_READ_ (0x00) /* ~of MII Write bit */ + +/* MII Data Register */ +#define MII_DATA (0x118) + +/* Flow Control Register */ +#define FLOW (0x11C) +#define FLOW_FCPT_ (0xFFFF0000) /* Pause Time */ +#define FLOW_FCPASS_ (0x00000004) /* Pass Control Frames */ +#define FLOW_FCEN_ (0x00000002) /* Flow Control Enable */ +#define FLOW_FCBSY_ (0x00000001) /* Flow Control Busy */ + +/* VLAN1 Tag Register */ +#define VLAN1 (0x120) + +/* VLAN2 Tag Register */ +#define VLAN2 (0x124) + +/* Wake Up Frame Filter Register */ +#define WUFF (0x128) +#define LAN9500_WUFF_NUM (4) +#define LAN9500A_WUFF_NUM (8) + +/* Wake Up Control and Status Register */ +#define WUCSR (0x12C) +#define WUCSR_WFF_PTR_RST_ (0x80000000) /* WFrame Filter Pointer Rst */ +#define WUCSR_GUE_ (0x00000200) /* Global Unicast Enable */ +#define WUCSR_WUFR_ (0x00000040) /* Wakeup Frame Received */ +#define WUCSR_MPR_ (0x00000020) /* Magic Packet Received */ +#define WUCSR_WAKE_EN_ (0x00000004) /* Wakeup Frame Enable */ +#define WUCSR_MPEN_ (0x00000002) /* Magic Packet Enable */ + +/* Checksum Offload Engine Control Register */ +#define COE_CR (0x130) +#define Tx_COE_EN_ (0x00010000) /* TX Csum Offload Enable */ +#define Rx_COE_MODE_ (0x00000002) /* RX Csum Offload Mode */ +#define Rx_COE_EN_ (0x00000001) /* RX Csum Offload Enable */ + +/* Vendor-specific PHY Definitions (via MII access) */ /* EDPD NLP / crossover time configuration (LAN9500A only) */ #define PHY_EDPD_CONFIG (16) #define PHY_EDPD_CONFIG_TX_NLP_EN_ ((u16)0x8000) @@ -255,17 +317,20 @@ #define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) #define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002) +/* Control/Status Indication Register */ #define SPECIAL_CTRL_STS (27) #define SPECIAL_CTRL_STS_OVRRD_AMDIX_ ((u16)0x8000) #define SPECIAL_CTRL_STS_AMDIX_ENABLE_ ((u16)0x4000) #define SPECIAL_CTRL_STS_AMDIX_STATE_ ((u16)0x2000) +/* Interrupt Source Register */ #define PHY_INT_SRC (29) #define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080) #define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040) #define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020) #define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010) +/* Interrupt Mask Register */ #define PHY_INT_MASK (30) #define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080) #define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040) @@ -273,7 +338,7 @@ #define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010) #define PHY_INT_MASK_DEFAULT_ (PHY_INT_MASK_ANEG_COMP_ | \ PHY_INT_MASK_LINK_DOWN_) - +/* PHY Special Control/Status Register */ #define PHY_SPECIAL (31) #define PHY_SPECIAL_SPD_ ((u16)0x001C) #define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004) @@ -287,12 +352,13 @@ #define USB_VENDOR_REQUEST_GET_STATS 0xA2 /* Interrupt Endpoint status word bitfields */ -#define INT_ENP_TX_STOP_ 
((u32)BIT(17)) -#define INT_ENP_RX_STOP_ ((u32)BIT(16)) -#define INT_ENP_PHY_INT_ ((u32)BIT(15)) -#define INT_ENP_TXE_ ((u32)BIT(14)) -#define INT_ENP_TDFU_ ((u32)BIT(13)) -#define INT_ENP_TDFO_ ((u32)BIT(12)) -#define INT_ENP_RXDF_ ((u32)BIT(11)) +#define INT_ENP_MAC_RTO_ ((u32)BIT(18)) /* MAC Reset Time Out */ +#define INT_ENP_TX_STOP_ ((u32)BIT(17)) /* TX Stopped */ +#define INT_ENP_RX_STOP_ ((u32)BIT(16)) /* RX Stopped */ +#define INT_ENP_PHY_INT_ ((u32)BIT(15)) /* PHY Interrupt */ +#define INT_ENP_TXE_ ((u32)BIT(14)) /* TX Error */ +#define INT_ENP_TDFU_ ((u32)BIT(13)) /* TX FIFO Underrun */ +#define INT_ENP_TDFO_ ((u32)BIT(12)) /* TX FIFO Overrun */ +#define INT_ENP_RXDF_ ((u32)BIT(11)) /* RX Dropped Frame */ #endif /* _SMSC95XX_H */ diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index aadfe1d1c37ee67e2a7d17c759db12dad248c41d..2d316c1b851b2aceb19695e0035c94251f6ec475 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -249,9 +249,9 @@ static const struct ethtool_ops sr9700_ethtool_ops = { .set_msglevel = usbnet_set_msglevel, .get_eeprom_len = sr9700_get_eeprom_len, .get_eeprom = sr9700_get_eeprom, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static void sr9700_set_multicast(struct net_device *netdev) @@ -308,6 +308,7 @@ static const struct net_device_ops sr9700_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = sr9700_ioctl, .ndo_set_rx_mode = sr9700_set_multicast, diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index a50df0d8fb9abbd548ad6646e4a066a1211363c5..9277a0f228dfa6de355c74d2652edcf2fb1d2f4b 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -524,9 +524,9 @@ static const struct ethtool_ops sr9800_ethtool_ops = { .set_wol = sr_set_wol, .get_eeprom_len = sr_get_eeprom_len, .get_eeprom = sr_get_eeprom, - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; static int sr9800_link_reset(struct usbnet *dev) @@ -679,6 +679,7 @@ static const struct net_device_ops sr9800_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = sr_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = sr_ioctl, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 453244805c52e570394673d7a3ebd71cc62fd5ca..79048e72c1bd7d2f6c54b1a17efadc283571c408 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -82,8 +82,6 @@ // randomly generated ethernet address static u8 node_id [ETH_ALEN]; -static const char driver_name [] = "usbnet"; - /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param (msg_level, int, 0); @@ -316,6 +314,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) */ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) { + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
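For context on the per-cpu counters this hunk starts wiring up (a sketch, not part of the patch): every CPU gets a private pcpu_sw_netstats slot, and the syncp member is a sequence counter only on 32-bit SMP builds (it compiles away on 64-bit), so counting in the hot path never touches a shared lock. The writer side looks like this; usbnet_get_stats64() further down is the matching reader:

/* assumed helper for illustration; the real updates are open-coded in
 * usbnet_skb_return() and tx_complete() below
 */
static void usbnet_count_rx(struct usbnet *dev, unsigned int len)
{
	struct pcpu_sw_netstats *s = this_cpu_ptr(dev->stats64);

	u64_stats_update_begin(&s->syncp);	/* no-op on 64-bit */
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

@@ -327,8 +326,10 @@ void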
usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) if (skb->protocol == 0) skb->protocol = eth_type_trans (skb, dev->net); - dev->net->stats.rx_packets++; - dev->net->stats.rx_bytes += skb->len; + u64_stats_update_begin(&stats64->syncp); + stats64->rx_packets++; + stats64->rx_bytes += skb->len; + u64_stats_update_end(&stats64->syncp); netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", skb->len + sizeof (struct ethhdr), skb->protocol); @@ -947,18 +948,20 @@ EXPORT_SYMBOL_GPL(usbnet_open); * they'll probably want to use this base set. */ -int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd) +int usbnet_get_link_ksettings(struct net_device *net, + struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); if (!dev->mii.mdio_read) return -EOPNOTSUPP; - return mii_ethtool_gset(&dev->mii, cmd); + return mii_ethtool_get_link_ksettings(&dev->mii, cmd); } -EXPORT_SYMBOL_GPL(usbnet_get_settings); +EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings); -int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) +int usbnet_set_link_ksettings(struct net_device *net, + const struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); int retval; @@ -966,7 +969,7 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) if (!dev->mii.mdio_write) return -EOPNOTSUPP; - retval = mii_ethtool_sset(&dev->mii, cmd); + retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd); /* link speed/duplex might have changed */ if (dev->driver_info->link_reset) @@ -976,9 +979,39 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) usbnet_update_max_qlen(dev); return retval; +} +EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings); +void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) +{ + struct usbnet *dev = netdev_priv(net); + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &net->stats); + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(dev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } } -EXPORT_SYMBOL_GPL(usbnet_set_settings); +EXPORT_SYMBOL_GPL(usbnet_get_stats64); u32 usbnet_get_link (struct net_device *net) { @@ -1038,14 +1071,14 @@ EXPORT_SYMBOL_GPL(usbnet_set_msglevel); /* drivers may override default ethtool_ops in their bind() routine */ static const struct ethtool_ops usbnet_ethtool_ops = { - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = usbnet_get_link_ksettings, + .set_link_ksettings = usbnet_set_link_ksettings, }; /*-------------------------------------------------------------------------*/ @@ -1211,8 +1244,12 @@ static void tx_complete (struct urb *urb) struct usbnet *dev = entry->dev; if (urb->status == 0) { - dev->net->stats.tx_packets += entry->packets; - dev->net->stats.tx_bytes += entry->length; + struct 
pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + + u64_stats_update_begin(&stats64->syncp); + stats64->tx_packets += entry->packets; + stats64->tx_bytes += entry->length; + u64_stats_update_end(&stats64->syncp); } else { dev->net->stats.tx_errors++; @@ -1569,6 +1606,7 @@ void usbnet_disconnect (struct usb_interface *intf) usb_free_urb(dev->interrupt); kfree(dev->padding_pkt); + free_percpu(dev->stats64); free_netdev(net); } EXPORT_SYMBOL_GPL(usbnet_disconnect); @@ -1580,6 +1618,7 @@ static const struct net_device_ops usbnet_netdev_ops = { .ndo_tx_timeout = usbnet_tx_timeout, .ndo_set_rx_mode = usbnet_set_rx_mode, .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1641,6 +1680,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->intf = udev; dev->driver_info = info; dev->driver_name = name; + + dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->stats64) + goto out0; + dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); init_waitqueue_head(&dev->wait); @@ -1780,6 +1824,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) */ cancel_work_sync(&dev->kevent); del_timer_sync(&dev->delay); + free_percpu(dev->stats64); +out0: free_netdev(net); out: return status; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8c39d6d690e5e7f8ea6e522b8eb01a2db550c61a..38f0f03a29c8898131110a620e73da4776cb7020 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -45,18 +45,13 @@ static struct { { "peer_ifindex" }, }; -static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int veth_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = 0; - cmd->advertising = 0; - ethtool_cmd_speed_set(cmd, SPEED_10000); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_TP; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.port = PORT_TP; + cmd->base.autoneg = AUTONEG_DISABLE; return 0; } @@ -95,12 +90,12 @@ static void veth_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops veth_ethtool_ops = { - .get_settings = veth_get_settings, .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = veth_get_strings, .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, + .get_link_ksettings = veth_get_link_ksettings, }; static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) @@ -373,7 +368,8 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, ifmp = nla_data(nla_peer); err = rtnl_nla_parse_ifla(peer_tb, nla_data(nla_peer) + sizeof(struct ifinfomsg), - nla_len(nla_peer) - sizeof(struct ifinfomsg)); + nla_len(nla_peer) - sizeof(struct ifinfomsg), + NULL); if (err < 0) return err; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f36584616e7d6825c7e69137b4a31a3d55779688..3d0bc484b3d7c77c9fa6ca5971d7d5563d6844b4 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -33,9 +33,10 @@ static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); -static bool csum = true, gso = true; +static bool csum = true, gso = true, napi_tx; module_param(csum, bool, 0444); module_param(gso, 
bool, 0444); +module_param(napi_tx, bool, 0644); /* FIXME: MTU in config. */ #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) @@ -86,6 +87,8 @@ struct send_queue { /* Name of the send queue: output.$index */ char name[40]; + + struct napi_struct napi; }; /* Internal representation of a receive virtqueue */ @@ -239,15 +242,39 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) return p; } +static void virtqueue_napi_schedule(struct napi_struct *napi, + struct virtqueue *vq) +{ + if (napi_schedule_prep(napi)) { + virtqueue_disable_cb(vq); + __napi_schedule(napi); + } +} + +static void virtqueue_napi_complete(struct napi_struct *napi, + struct virtqueue *vq, int processed) +{ + int opaque; + + opaque = virtqueue_enable_cb_prepare(vq); + if (napi_complete_done(napi, processed) && + unlikely(virtqueue_poll(vq, opaque))) + virtqueue_napi_schedule(napi, vq); +} + static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; + struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); - /* We were probably waiting for more output buffers. */ - netif_wake_subqueue(vi->dev, vq2txq(vq)); + if (napi->weight) + virtqueue_napi_schedule(napi, vq); + else + /* We were probably waiting for more output buffers. */ + netif_wake_subqueue(vi->dev, vq2txq(vq)); } static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) @@ -936,27 +963,44 @@ static void skb_recv_done(struct virtqueue *rvq) struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; - /* Schedule NAPI, Suppress further interrupts if successful. */ - if (napi_schedule_prep(&rq->napi)) { - virtqueue_disable_cb(rvq); - __napi_schedule(&rq->napi); - } + virtqueue_napi_schedule(&rq->napi, rvq); } -static void virtnet_napi_enable(struct receive_queue *rq) +static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) { - napi_enable(&rq->napi); + napi_enable(napi); /* If all buffers were filled by other side before we napi_enabled, we - * won't get another interrupt, so process any outstanding packets - * now. virtnet_poll wants re-enable the queue, so we disable here. - * We synchronize against interrupts via NAPI_STATE_SCHED */ - if (napi_schedule_prep(&rq->napi)) { - virtqueue_disable_cb(rq->vq); - local_bh_disable(); - __napi_schedule(&rq->napi); - local_bh_enable(); + * won't get another interrupt, so process any outstanding packets now. + * Call local_bh_enable after to trigger softIRQ processing. + */ + local_bh_disable(); + virtqueue_napi_schedule(napi, vq); + local_bh_enable(); +} + +static void virtnet_napi_tx_enable(struct virtnet_info *vi, + struct virtqueue *vq, + struct napi_struct *napi) +{ + if (!napi->weight) + return; + + /* Tx napi touches cachelines on the cpu handling tx interrupts. Only + * enable the feature if this is likely affine with the transmit path. + */ + if (!vi->affinity_hint_set) { + napi->weight = 0; + return; } + + return virtnet_napi_enable(vq, napi); +} + +static void virtnet_napi_tx_disable(struct napi_struct *napi) +{ + if (napi->weight) + napi_disable(napi); } static void refill_work(struct work_struct *work) @@ -971,7 +1015,7 @@ static void refill_work(struct work_struct *work) napi_disable(&rq->napi); still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); - virtnet_napi_enable(rq); + virtnet_napi_enable(rq->vq, &rq->napi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. 
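An aside on the helper pair added above (a sketch only, restating virtqueue_napi_complete() with the race spelled out): callbacks are re-armed before NAPI is marked idle, so a buffer that arrives in between must not be missed; virtqueue_poll() re-checks after napi_complete_done() and reschedules instead of waiting for an interrupt that already fired:

static void example_napi_idle(struct napi_struct *napi,
			      struct virtqueue *vq, int done)
{
	/* re-arm first, remembering where the used ring stood */
	int opaque = virtqueue_enable_cb_prepare(vq);

	/* napi_complete_done() fails if someone already rescheduled us;
	 * if it succeeds, virtqueue_poll() tells us whether buffers
	 * slipped in after the re-arm, in which case we go straight
	 * back into polling
	 */
	if (napi_complete_done(napi, done) &&
	    unlikely(virtqueue_poll(vq, opaque)))
		virtqueue_napi_schedule(napi, vq);
}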
@@ -1007,25 +1051,68 @@ static int virtnet_receive(struct receive_queue *rq, int budget) return received; } +static void free_old_xmit_skbs(struct send_queue *sq) +{ + struct sk_buff *skb; + unsigned int len; + struct virtnet_info *vi = sq->vq->vdev->priv; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + unsigned int packets = 0; + unsigned int bytes = 0; + + while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + packets++; + + dev_kfree_skb_any(skb); + } + + /* Avoid overhead when no packets have been processed; this + * happens when called speculatively from start_xmit. + */ + if (!packets) + return; + + u64_stats_update_begin(&stats->tx_syncp); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + u64_stats_update_end(&stats->tx_syncp); +} + +static void virtnet_poll_cleantx(struct receive_queue *rq) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + unsigned int index = vq2rxq(rq->vq); + struct send_queue *sq = &vi->sq[index]; + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); + + if (!sq->napi.weight) + return; + + if (__netif_tx_trylock(txq)) { + free_old_xmit_skbs(sq); + __netif_tx_unlock(txq); + } + + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + netif_tx_wake_queue(txq); +} + static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); - unsigned int r, received; + unsigned int received; + + virtnet_poll_cleantx(rq); received = virtnet_receive(rq, budget); /* Out of packets? */ - if (received < budget) { - r = virtqueue_enable_cb_prepare(rq->vq); - if (napi_complete_done(napi, received)) { - if (unlikely(virtqueue_poll(rq->vq, r)) && - napi_schedule_prep(napi)) { - virtqueue_disable_cb(rq->vq); - __napi_schedule(napi); - } - } - } + if (received < budget) + virtqueue_napi_complete(napi, rq->vq, received); return received; } @@ -1040,40 +1127,29 @@ static int virtnet_open(struct net_device *dev) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); - virtnet_napi_enable(&vi->rq[i]); + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } return 0; } -static void free_old_xmit_skbs(struct send_queue *sq) +static int virtnet_poll_tx(struct napi_struct *napi, int budget) { - struct sk_buff *skb; - unsigned int len; + struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); - unsigned int packets = 0; - unsigned int bytes = 0; - - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); - bytes += skb->len; - packets++; + __netif_tx_lock(txq, raw_smp_processor_id()); + free_old_xmit_skbs(sq); + __netif_tx_unlock(txq); - dev_kfree_skb_any(skb); - } + virtqueue_napi_complete(napi, sq->vq, 0); - /* Avoid overhead when no packets have been processed - * happens when called speculatively from start_xmit. 
- */ - if (!packets) - return; + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + netif_tx_wake_queue(txq); - u64_stats_update_begin(&stats->tx_syncp); - stats->tx_bytes += bytes; - stats->tx_packets += packets; - u64_stats_update_end(&stats->tx_syncp); + return 0; } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) @@ -1125,10 +1201,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) int err; struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); bool kick = !skb->xmit_more; + bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); + if (use_napi && kick) + virtqueue_enable_cb_delayed(sq->vq); + /* timestamp packet in software */ skb_tx_timestamp(skb); @@ -1147,8 +1227,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Don't wait up for transmitted skbs to be freed. */ - skb_orphan(skb); - nf_reset(skb); + if (!use_napi) { + skb_orphan(skb); + nf_reset(skb); + } /* If running out of space, stop queue to avoid getting packets that we * are then unable to transmit. @@ -1162,7 +1244,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); - if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + if (!use_napi && + unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { @@ -1366,8 +1449,10 @@ static int virtnet_close(struct net_device *dev) /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } return 0; } @@ -1636,47 +1721,57 @@ static void virtnet_get_channels(struct net_device *dev, } /* Check if the user is trying to change anything besides speed/duplex */ -static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) +static bool +virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) { - struct ethtool_cmd diff1 = *cmd; - struct ethtool_cmd diff2 = {}; + struct ethtool_link_ksettings diff1 = *cmd; + struct ethtool_link_ksettings diff2 = {}; /* cmd is always set so we need to clear it, validate the port type * and also without autonegotiation we can ignore advertising */ - ethtool_cmd_speed_set(&diff1, 0); - diff2.port = PORT_OTHER; - diff1.advertising = 0; - diff1.duplex = 0; - diff1.cmd = 0; + diff1.base.speed = 0; + diff2.base.port = PORT_OTHER; + ethtool_link_ksettings_zero_link_mode(&diff1, advertising); + diff1.base.duplex = 0; + diff1.base.cmd = 0; + diff1.base.link_mode_masks_nwords = 0; - return !memcmp(&diff1, &diff2, sizeof(diff1)); + return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && + bitmap_empty(diff1.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS) && + bitmap_empty(diff1.link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS) && + bitmap_empty(diff1.link_modes.lp_advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int virtnet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct virtnet_info *vi = netdev_priv(dev); u32 speed; - speed = ethtool_cmd_speed(cmd); + speed = cmd->base.speed; /* don't allow custom speed and duplex */ if 
(!ethtool_validate_speed(speed) || - !ethtool_validate_duplex(cmd->duplex) || + !ethtool_validate_duplex(cmd->base.duplex) || !virtnet_validate_ethtool_cmd(cmd)) return -EINVAL; vi->speed = speed; - vi->duplex = cmd->duplex; + vi->duplex = cmd->base.duplex; return 0; } -static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int virtnet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct virtnet_info *vi = netdev_priv(dev); - ethtool_cmd_speed_set(cmd, vi->speed); - cmd->duplex = vi->duplex; - cmd->port = PORT_OTHER; + cmd->base.speed = vi->speed; + cmd->base.duplex = vi->duplex; + cmd->base.port = PORT_OTHER; return 0; } @@ -1696,8 +1791,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, .get_ts_info = ethtool_op_get_ts_info, - .get_settings = virtnet_get_settings, - .set_settings = virtnet_set_settings, + .get_link_ksettings = virtnet_get_link_ksettings, + .set_link_ksettings = virtnet_set_link_ksettings, }; static void virtnet_freeze_down(struct virtio_device *vdev) @@ -1712,8 +1807,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev) cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } } } @@ -1736,8 +1833,11 @@ static int virtnet_restore_up(struct virtio_device *vdev) if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(&vi->rq[i]); + for (i = 0; i < vi->max_queue_pairs; i++) { + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } } netif_device_attach(vi->dev); @@ -1778,7 +1878,8 @@ static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) return ret; } -static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) +static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) { unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); struct virtnet_info *vi = netdev_priv(dev); @@ -1790,16 +1891,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { - netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n"); + NL_SET_ERR_MSG(extack, "can't set XDP while host is implementing LRO, disable LRO first"); return -EOPNOTSUPP; } if (vi->mergeable_rx_bufs && !vi->any_header_sg) { - netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n"); + NL_SET_ERR_MSG(extack, "XDP expects header/data in single page, any_header_sg required"); return -EINVAL; } if (dev->mtu > max_sz) { + NL_SET_ERR_MSG(extack, "MTU too large to enable XDP"); netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); return -EINVAL; } @@ -1810,6 +1912,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog) /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { + NL_SET_ERR_MSG(extack, "Too few free TX rings available"); netdev_warn(dev, "request %i queues but max is %i\n", curr_qp + xdp_qp, vi->max_queue_pairs); return 
-ENOMEM; @@ -1871,7 +1974,7 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: - return virtnet_xdp_set(dev, xdp->prog); + return virtnet_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_attached = virtnet_xdp_query(dev); return 0; @@ -1942,6 +2045,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { napi_hash_del(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); + netif_napi_del(&vi->sq[i].napi); } /* We called napi_hash_del() before netif_napi_del(), @@ -2127,6 +2231,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); + netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, + napi_tx ? napi_weight : 0); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index f88ffafebfbfd40192fd5919ce970f1bf15b73fc..2ff27314e04739034cef408b59aed6a77cd98911 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -471,22 +471,25 @@ vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) static int -vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +vmxnet3_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | - SUPPORTED_TP; - ecmd->advertising = ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_INTERNAL; + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP); + ecmd->base.port = PORT_TP; if (adapter->link_speed) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = DUPLEX_FULL; + ecmd->base.speed = adapter->link_speed; + ecmd->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; } return 0; } @@ -880,7 +883,6 @@ vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) } static const struct ethtool_ops vmxnet3_ethtool_ops = { - .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, .get_regs_len = vmxnet3_get_regs_len, .get_regs = vmxnet3_get_regs, @@ -900,6 +902,7 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_rxfh = vmxnet3_get_rss, .set_rxfh = vmxnet3_set_rss, #endif + .get_link_ksettings = vmxnet3_get_link_ksettings, }; void vmxnet3_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7d909c8183e95a62b6f8a3182d3ce645a264e909..ceda5861da780bc64b6070acec7f37de9a8fa3e5 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -104,6 +104,23 @@ static void vrf_get_stats64(struct net_device *dev, } } +/* by default VRF devices do not have a qdisc and are expected + * to be created with only a single queue. 
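+ * A real qdisc installs an enqueue hook, so a NULL qdisc->enqueue on tx queue 0 identifies the default no-queue setup.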
+ */ +static bool qdisc_tx_is_default(const struct net_device *dev) +{ + struct netdev_queue *txq; + struct Qdisc *qdisc; + + if (dev->num_tx_queues > 1) + return false; + + txq = netdev_get_tx_queue(dev, 0); + qdisc = rcu_access_pointer(txq->qdisc); + + return !qdisc->enqueue; +} + /* Local traffic destined to local address. Reinsert the packet to rx * path, similar to loopback handling. */ @@ -357,6 +374,29 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static int vrf_finish_direct(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + struct net_device *vrf_dev = skb->dev; + + if (!list_empty(&vrf_dev->ptype_all) && + likely(skb_headroom(skb) >= ETH_HLEN)) { + struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); + + ether_addr_copy(eth->h_source, vrf_dev->dev_addr); + eth_zero_addr(eth->h_dest); + eth->h_proto = skb->protocol; + + rcu_read_lock_bh(); + dev_queue_xmit_nit(skb, vrf_dev); + rcu_read_unlock_bh(); + + skb_pull(skb, ETH_HLEN); + } + + return 1; +} + #if IS_ENABLED(CONFIG_IPV6) /* modelled after ip6_finish_output2 */ static int vrf_finish_output6(struct net *net, struct sock *sk, @@ -405,18 +445,13 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) * packet to go through device based features such as qdisc, netfilter * hooks and packet sockets with skb->dev set to vrf device. */ -static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, - struct sock *sk, - struct sk_buff *skb) +static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, + struct sk_buff *skb) { struct net_vrf *vrf = netdev_priv(vrf_dev); struct dst_entry *dst = NULL; struct rt6_info *rt6; - /* don't divert link scope packets */ - if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) - return skb; - rcu_read_lock(); rt6 = rcu_dereference(vrf->rt6); @@ -438,6 +473,55 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, return skb; } +static int vrf_output6_direct(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + skb->protocol = htons(ETH_P_IPV6); + + return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, + net, sk, skb, NULL, skb->dev, + vrf_finish_direct, + !(IPCB(skb)->flags & IPSKB_REROUTED)); +} + +static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, + struct sock *sk, + struct sk_buff *skb) +{ + struct net *net = dev_net(vrf_dev); + int err; + + skb->dev = vrf_dev; + + err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, + skb, NULL, vrf_dev, vrf_output6_direct); + + if (likely(err == 1)) + err = vrf_output6_direct(net, sk, skb); + + /* reset skb device */ + if (likely(err == 1)) + nf_reset(skb); + else + skb = NULL; + + return skb; +} + +static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, + struct sock *sk, + struct sk_buff *skb) +{ + /* don't divert link scope packets */ + if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) + return skb; + + if (qdisc_tx_is_default(vrf_dev)) + return vrf_ip6_out_direct(vrf_dev, sk, skb); + + return vrf_ip6_out_redirect(vrf_dev, skb); +} + /* holding rtnl */ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { @@ -609,18 +693,13 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) * packet to go through device based features such as qdisc, netfilter * hooks and packet sockets with skb->dev set to vrf device. 
*/ -static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, - struct sock *sk, - struct sk_buff *skb) +static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev, + struct sk_buff *skb) { struct net_vrf *vrf = netdev_priv(vrf_dev); struct dst_entry *dst = NULL; struct rtable *rth; - /* don't divert multicast */ - if (ipv4_is_multicast(ip_hdr(skb)->daddr)) - return skb; - rcu_read_lock(); rth = rcu_dereference(vrf->rth); @@ -642,6 +721,55 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, return skb; } +static int vrf_output_direct(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + skb->protocol = htons(ETH_P_IP); + + return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, + net, sk, skb, NULL, skb->dev, + vrf_finish_direct, + !(IPCB(skb)->flags & IPSKB_REROUTED)); +} + +static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, + struct sock *sk, + struct sk_buff *skb) +{ + struct net *net = dev_net(vrf_dev); + int err; + + skb->dev = vrf_dev; + + err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, + skb, NULL, vrf_dev, vrf_output_direct); + + if (likely(err == 1)) + err = vrf_output_direct(net, sk, skb); + + /* reset skb device */ + if (likely(err == 1)) + nf_reset(skb); + else + skb = NULL; + + return skb; +} + +static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, + struct sock *sk, + struct sk_buff *skb) +{ + /* don't divert multicast */ + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) + return skb; + + if (qdisc_tx_is_default(vrf_dev)) + return vrf_ip_out_direct(vrf_dev, sk, skb); + + return vrf_ip_out_redirect(vrf_dev, skb); +} + /* called with rcu lock held */ static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev, struct sock *sk, @@ -749,14 +877,24 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) { int ret; + /* do not allow loopback device to be enslaved to a VRF. + * The vrf device acts as the loopback for the vrf. 
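+ * Local traffic in a VRF is looped back through the vrf device itself + * (see the local-address handling above), so enslaving the real + * loopback device is never correct.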
+ */ + if (port_dev == dev_net(dev)->loopback_dev) + return -EOPNOTSUPP; + + port_dev->priv_flags |= IFF_L3MDEV_SLAVE; ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL); if (ret < 0) - return ret; + goto err; - port_dev->priv_flags |= IFF_L3MDEV_SLAVE; cycle_netdev(port_dev); return 0; + +err: + port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; + return ret; } static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev) @@ -978,9 +1116,11 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; - skb_push(skb, skb->mac_len); - dev_queue_xmit_nit(skb, vrf_dev); - skb_pull(skb, skb->mac_len); + if (!list_empty(&vrf_dev->ptype_all)) { + skb_push(skb, skb->mac_len); + dev_queue_xmit_nit(skb, vrf_dev); + skb_pull(skb, skb->mac_len); + } IP6CB(skb)->flags |= IP6SKB_L3SLAVE; } @@ -1021,9 +1161,11 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, vrf_rx_stats(vrf_dev, skb->len); - skb_push(skb, skb->mac_len); - dev_queue_xmit_nit(skb, vrf_dev); - skb_pull(skb, skb->mac_len); + if (!list_empty(&vrf_dev->ptype_all)) { + skb_push(skb, skb->mac_len); + dev_queue_xmit_nit(skb, vrf_dev); + skb_pull(skb, skb->mac_len); + } skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev); out: @@ -1146,11 +1288,11 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) /* fib_nl_{new,del}rule handling looks for net from skb->sk */ skb->sk = dev_net(dev)->rtnl; if (add_it) { - err = fib_nl_newrule(skb, nlh); + err = fib_nl_newrule(skb, nlh, NULL); if (err == -EEXIST) err = 0; } else { - err = fib_nl_delrule(skb, nlh); + err = fib_nl_delrule(skb, nlh, NULL); if (err == -ENOENT) err = 0; } diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c new file mode 100644 index 0000000000000000000000000000000000000000..7f0136f2dd9d6167acc9b125fb03d8c2c5f9524d --- /dev/null +++ b/drivers/net/vsockmon.c @@ -0,0 +1,170 @@ +#include <linux/module.h> +#include <linux/virtio_vsock.h> +#include <net/sock.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <uapi/linux/vsockmon.h> +#include <linux/if_arp.h> + +/* Virtio transport max packet size plus header */ +#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \ + sizeof(struct af_vsockmon_hdr)) + +struct pcpu_lstats { + u64 rx_packets; + u64 rx_bytes; + struct u64_stats_sync syncp; +}; + +static int vsockmon_dev_init(struct net_device *dev) +{ + dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); + if (!dev->lstats) + return -ENOMEM; + return 0; +} + +static void vsockmon_dev_uninit(struct net_device *dev) +{ + free_percpu(dev->lstats); +} + +struct vsockmon { + struct vsock_tap vt; +}; + +static int vsockmon_open(struct net_device *dev) +{ + struct vsockmon *vsockmon = netdev_priv(dev); + + vsockmon->vt.dev = dev; + vsockmon->vt.module = THIS_MODULE; + return vsock_add_tap(&vsockmon->vt); +} + +static int vsockmon_close(struct net_device *dev) +{ + struct vsockmon *vsockmon = netdev_priv(dev); + + return vsock_remove_tap(&vsockmon->vt); +} + +static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int len = skb->len; + struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_bytes += len; + stats->rx_packets++; + u64_stats_update_end(&stats->syncp); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void +vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + int i; + u64 bytes = 0, packets = 0; + + for_each_possible_cpu(i) { + const struct pcpu_lstats *vstats; + u64 tbytes, tpackets; + unsigned int start; + +
vstats = per_cpu_ptr(dev->lstats, i); + + do { + start = u64_stats_fetch_begin_irq(&vstats->syncp); + tbytes = vstats->rx_bytes; + tpackets = vstats->rx_packets; + } while (u64_stats_fetch_retry_irq(&vstats->syncp, start)); + + packets += tpackets; + bytes += tbytes; + } + + stats->rx_packets = packets; + stats->tx_packets = 0; + + stats->rx_bytes = bytes; + stats->tx_bytes = 0; +} + +static int vsockmon_is_valid_mtu(int new_mtu) +{ + return new_mtu >= (int)sizeof(struct af_vsockmon_hdr); +} + +static int vsockmon_change_mtu(struct net_device *dev, int new_mtu) +{ + if (!vsockmon_is_valid_mtu(new_mtu)) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops vsockmon_ops = { + .ndo_init = vsockmon_dev_init, + .ndo_uninit = vsockmon_dev_uninit, + .ndo_open = vsockmon_open, + .ndo_stop = vsockmon_close, + .ndo_start_xmit = vsockmon_xmit, + .ndo_get_stats64 = vsockmon_get_stats64, + .ndo_change_mtu = vsockmon_change_mtu, +}; + +static u32 always_on(struct net_device *dev) +{ + return 1; +} + +static const struct ethtool_ops vsockmon_ethtool_ops = { + .get_link = always_on, +}; + +static void vsockmon_setup(struct net_device *dev) +{ + dev->type = ARPHRD_VSOCKMON; + dev->priv_flags |= IFF_NO_QUEUE; + + dev->netdev_ops = &vsockmon_ops; + dev->ethtool_ops = &vsockmon_ethtool_ops; + dev->destructor = free_netdev; + + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | + NETIF_F_HIGHDMA | NETIF_F_LLTX; + + dev->flags = IFF_NOARP; + + dev->mtu = DEFAULT_MTU; +} + +static struct rtnl_link_ops vsockmon_link_ops __read_mostly = { + .kind = "vsockmon", + .priv_size = sizeof(struct vsockmon), + .setup = vsockmon_setup, +}; + +static __init int vsockmon_register(void) +{ + return rtnl_link_register(&vsockmon_link_ops); +} + +static __exit void vsockmon_unregister(void) +{ + rtnl_link_unregister(&vsockmon_link_ops); +} + +module_init(vsockmon_register); +module_exit(vsockmon_unregister); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Gerard Garcia "); +MODULE_DESCRIPTION("Vsock monitoring device. Based on nlmon device."); +MODULE_ALIAS_RTNL_LINK("vsockmon"); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index bdb6ae16d4a85bf9539199e189011bce104ba51a..328b4712683c334bf1de66a3a3789d0a04d734c3 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -276,9 +276,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, send_eth = send_ip = true; if (type == RTM_GETNEIGH) { - ndm->ndm_family = AF_INET; send_ip = !vxlan_addr_any(&rdst->remote_ip); send_eth = !is_zero_ether_addr(fdb->eth_addr); + ndm->ndm_family = send_ip ? 
rdst->remote_ip.sa.sa_family : AF_INET; } else ndm->ndm_family = AF_BRIDGE; ndm->ndm_state = fdb->state; @@ -1515,7 +1515,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, int ns_olen; int i, len; - if (dev == NULL) + if (dev == NULL || !pskb_may_pull(request, request->len)) return NULL; len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + @@ -1530,10 +1530,11 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, skb_push(reply, sizeof(struct ethhdr)); skb_reset_mac_header(reply); - ns = (struct nd_msg *)skb_transport_header(request); + ns = (struct nd_msg *)(ipv6_hdr(request) + 1); daddr = eth_hdr(request)->h_source; - ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); + ns_olen = request->len - skb_network_offset(request) - + sizeof(struct ipv6hdr) - sizeof(*ns); for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); @@ -1604,10 +1605,13 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) if (!in6_dev) goto out; + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) + goto out; + iphdr = ipv6_hdr(skb); daddr = &iphdr->daddr; - msg = (struct nd_msg *)skb_transport_header(skb); + msg = (struct nd_msg *)(iphdr + 1); if (msg->icmph.icmp6_code != 0 || msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) goto out; @@ -2242,16 +2246,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (ntohs(eth->h_proto) == ETH_P_ARP) return arp_reduce(dev, skb, vni); #if IS_ENABLED(CONFIG_IPV6) - else if (ntohs(eth->h_proto) == ETH_P_IPV6 && - pskb_may_pull(skb, sizeof(struct ipv6hdr) - + sizeof(struct nd_msg)) && - ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { - struct nd_msg *msg; - - msg = (struct nd_msg *)skb_transport_header(skb); - if (msg->icmph.icmp6_code == 0 && - msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) - return neigh_reduce(dev, skb, vni); + else if (ntohs(eth->h_proto) == ETH_P_IPV6) { + struct ipv6hdr *hdr, _hdr; + if ((hdr = skb_header_pointer(skb, + skb_network_offset(skb), + sizeof(_hdr), &_hdr)) && + hdr->nexthdr == IPPROTO_ICMPV6) + return neigh_reduce(dev, skb, vni); } #endif } @@ -2322,6 +2323,9 @@ static void vxlan_cleanup(unsigned long arg) if (f->state & (NUD_PERMANENT | NUD_NOARP)) continue; + if (f->flags & NTF_EXT_LEARNED) + continue; + timeout = f->used + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, @@ -2754,8 +2758,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, sock = vxlan_create_sock(net, ipv6, port, flags); if (IS_ERR(sock)) { - pr_info("Cannot bind port %d, err=%ld\n", ntohs(port), - PTR_ERR(sock)); kfree(vs); return ERR_CAST(sock); } @@ -2818,17 +2820,21 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) static int vxlan_sock_add(struct vxlan_dev *vxlan) { - bool ipv6 = vxlan->flags & VXLAN_F_IPV6; bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA; + bool ipv6 = vxlan->flags & VXLAN_F_IPV6 || metadata; + bool ipv4 = !ipv6 || metadata; int ret = 0; RCU_INIT_POINTER(vxlan->vn4_sock, NULL); #if IS_ENABLED(CONFIG_IPV6) RCU_INIT_POINTER(vxlan->vn6_sock, NULL); - if (ipv6 || metadata) + if (ipv6) { ret = __vxlan_sock_add(vxlan, true); + if (ret < 0 && ret != -EAFNOSUPPORT) + ipv4 = false; + } #endif - if (!ret && (!ipv6 || metadata)) + if (ipv4) ret = __vxlan_sock_add(vxlan, false); if (ret < 0) vxlan_sock_release(vxlan); @@ -2923,6 +2929,11 @@ 
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, return -EINVAL; } + if (lowerdev) { + dev->gso_max_size = lowerdev->gso_max_size; + dev->gso_max_segs = lowerdev->gso_max_segs; + } + if (conf->mtu) { int max_mtu = ETH_MAX_MTU; diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index e1dd1ec18d64c0a4eb6d16bd5c85d0dec485ee36..b9b934b7713c94e49c384acc95cc8cf4bf98d739 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -346,6 +346,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev, card->rambase == NULL) { pr_err("ioremap() failed\n"); pc300_pci_remove_one(pdev); + return -ENOMEM; } /* PLX PCI 9050 workaround for local configuration register read bug */ diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 098c814e22c8b6d7478bbc81035bb7fd9d6f4f5a..ed626f568b5802006473e077affd4f417f850917 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -1917,6 +1917,8 @@ static int adm8211_probe(struct pci_dev *pdev, dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; + wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + err = ieee80211_register_hw(dev); if (err) { printk(KERN_ERR "%s (adm8211): Cannot register device\n", diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 7a60d2e652dad6be1cab67ef64b708355e982ded..f2f4ccfdf8da0f01e49554666eaaf6e1e4dbdf26 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1689,6 +1689,8 @@ static int ar5523_probe(struct usb_interface *intf, if (error) goto out_cancel_rx_cmd; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + usb_set_intfdata(intf, hw); error = ieee80211_register_hw(hw); diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 45226dbee5ce23c773db06c0d08ece006bcba2c0..da770af830369ffa66cfe7511786e05797d559d0 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -640,6 +640,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n"); + napi_enable(&ar->napi); ath10k_ce_enable_interrupts(ar); ath10k_pci_enable_legacy_irq(ar); @@ -692,7 +693,6 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar) ath10k_err(ar, "could not wake up target CPU: %d\n", ret); goto err_ce_deinit; } - napi_enable(&ar->napi); return 0; diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 2872d347ea7819151c19d79d107976a4c130c0f4..abeee200310babce7b0d8cbd4da14765d15edbfa 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -19,12 +19,21 @@ #include "hif.h" #include "debug.h" #include "htc.h" +#include "hw.h" void ath10k_bmi_start(struct ath10k *ar) { + int ret; + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n"); ar->bmi.done_sent = false; + + /* Enable hardware clock to speed up firmware download */ + if (ar->hw_params.hw_ops->enable_pll_clk) { + ret = ar->hw_params.hw_ops->enable_pll_clk(ar); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret); + } } int ath10k_bmi_done(struct ath10k *ar) @@ -129,6 +138,69 @@ int ath10k_bmi_read_memory(struct ath10k *ar, return 0; } +int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val) +{ + struct bmi_cmd cmd; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg); + int ret; + + 
ath10k_dbg(ar, ATH10K_DBG_BMI, + "bmi write soc register 0x%08x val 0x%08x\n", + address, reg_val); + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "bmi write soc register command in progress\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER); + cmd.write_soc_reg.addr = __cpu_to_le32(address); + cmd.write_soc_reg.value = __cpu_to_le32(reg_val); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); + if (ret) { + ath10k_warn(ar, "Unable to write soc register to device: %d\n", + ret); + return ret; + } + + return 0; +} + +int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val) +{ + struct bmi_cmd cmd; + union bmi_resp resp; + u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg); + u32 resplen = sizeof(resp.read_soc_reg); + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n", + address); + + if (ar->bmi.done_sent) { + ath10k_warn(ar, "bmi read soc register command in progress\n"); + return -EBUSY; + } + + cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER); + cmd.read_soc_reg.addr = __cpu_to_le32(address); + + ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); + if (ret) { + ath10k_warn(ar, "Unable to read soc register from device: %d\n", + ret); + return ret; + } + + *reg_val = __le32_to_cpu(resp.read_soc_reg.value); + + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n", + *reg_val); + + return 0; +} + int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, const void *buffer, u32 length) { diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 7d3231acfb24939bcee74cf9b6a38a3a26a82a67..cc45b63ade15e2bbd062bef4a0eef3530c5b0e80 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -176,7 +176,8 @@ union bmi_resp { } rompatch_uninstall; struct { /* 0 = nothing executed - * otherwise = NVRAM segment return value */ + * otherwise = NVRAM segment return value + */ __le32 result; } nvram_process; u8 payload[BMI_MAX_CMDBUF_SIZE]; @@ -232,4 +233,6 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address); int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length); int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, const void *buffer, u32 length); +int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val); +int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val); #endif /* _BMI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 4045657e0a6ec75ee5e27e51b06fe95916f8ad77..ee1090ca2eac5de05796ad3c0ebe54b986e86794 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -261,8 +261,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, } /* - * Guts of ath10k_ce_send, used by both ath10k_ce_send and - * ath10k_ce_sendlist_send. + * Guts of ath10k_ce_send. * The caller takes responsibility for any needed locking. 
*/ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, @@ -1052,7 +1051,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, */ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC > + BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 0a8e29e9a0ebc734a8dd5cc1dad7ab46d2b0f080..5a0638915874451201fadfd8ea450504a792a627 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -71,6 +71,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA9887_HW_1_0_VERSION, @@ -91,6 +92,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA6174_HW_2_1_VERSION, @@ -110,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA6174_HW_2_1_VERSION, @@ -129,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA6174_HW_3_0_VERSION, @@ -148,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA6174_HW_3_2_VERSION, @@ -166,8 +171,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, - .hw_ops = &qca988x_ops, + .hw_ops = &qca6174_ops, + .hw_clk = qca6174_clk, + .target_cpu_freq = 176000000, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -193,6 +201,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .sw_decrypt_mcast_mgmt = true, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, + .spectral_bin_discard = 4, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -219,6 +228,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .sw_decrypt_mcast_mgmt = true, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, + .spectral_bin_discard = 12, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -244,6 +254,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .sw_decrypt_mcast_mgmt = true, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, + .spectral_bin_discard = 12, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -263,6 +274,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -280,8 +292,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, - .hw_ops = &qca988x_ops, + .hw_ops = &qca6174_ops, + .hw_clk = qca6174_clk, + .target_cpu_freq = 176000000, .decap_align_bytes = 4, + .spectral_bin_discard = 0, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -308,6 +323,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .sw_decrypt_mcast_mgmt = true, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, + .spectral_bin_discard = 4, }, }; @@ 
-1623,6 +1639,13 @@ static void ath10k_core_restart(struct work_struct *work) wake_up(&ar->wmi.tx_credits_wq); wake_up(&ar->peer_mapping_wq); + /* TODO: We can have one instance of cancelling coverage_class_work by + * moving it to ath10k_halt(), so that both stop() and restart() would + * call that, but it takes conf_mutex() and calling cancel_work_sync() + * while holding conf_mutex would deadlock. + */ + cancel_work_sync(&ar->set_coverage_class_work); + mutex_lock(&ar->conf_mutex); switch (ar->state) { @@ -1634,7 +1657,8 @@ static void ath10k_core_restart(struct work_struct *work) break; case ATH10K_STATE_OFF: /* this can happen if driver is being unloaded - * or if the crash happens during FW probing */ + * or if the crash happens during FW probing + */ ath10k_warn(ar, "cannot restart a device that hasn't been started\n"); break; case ATH10K_STATE_RESTARTING: @@ -2162,7 +2186,8 @@ EXPORT_SYMBOL(ath10k_core_stop); /* mac80211 manages fw/hw initialization through start/stop hooks. However in * order to know what hw capabilities should be advertised to mac80211 it is * necessary to load the firmware (and tear it down immediately since start - * hook will try to init it again) before registering */ + * hook will try to init it again) before registering + */ static int ath10k_core_probe_fw(struct ath10k *ar) { struct bmi_target_info target_info; @@ -2356,7 +2381,8 @@ void ath10k_core_unregister(struct ath10k *ar) /* We must unregister from mac80211 before we stop HTC and HIF. * Otherwise we will fail to submit commands to FW and mac80211 will be - * unhappy about callback failures. */ + * unhappy about callback failures. + */ ath10k_mac_unregister(ar); ath10k_testmode_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 88d14be7fcceb44539f80c7217e4d6b0ea27dd79..bf091514ecc6bd220af20c68eaab00528ae2ef4c 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -501,14 +501,16 @@ enum ath10k_state { * stopped in ath10k_core_restart() work holding conf_mutex. The state * RESTARTED means that the device is up and mac80211 has started hw * reconfiguration. Once mac80211 is done with the reconfiguration we - * set the state to STATE_ON in reconfig_complete(). */ + * set the state to STATE_ON in reconfig_complete(). + */ ATH10K_STATE_RESTARTING, ATH10K_STATE_RESTARTED, /* The device has crashed while restarting hw. This state is like ON * but commands are blocked in HTC and -ECOMM response is given. This * prevents completion timeouts and makes the driver more responsive to - * userspace commands. This is also prevents recursive recovery. */ + * userspace commands. This also prevents recursive recovery. + */ ATH10K_STATE_WEDGED, /* factory tests */ @@ -775,6 +777,8 @@ struct ath10k { u32 num_rf_chains; u32 max_spatial_stream; /* protected by conf_mutex */ + u32 low_5ghz_chan; + u32 high_5ghz_chan; bool ani_enabled; bool p2p; @@ -918,7 +922,8 @@ struct ath10k { struct work_struct restart_work; /* cycle count is reported twice for each visited channel during scan.
- * access protected by data_lock */ + * access protected by data_lock + */ u32 survey_last_rx_clear_count; u32 survey_last_cycle_count; struct survey_info survey[ATH10K_NUM_CHANS]; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index fb0ade3adb07de51d0f952ae63235efbeb4b4d44..4cd2a0fd49d6e53c08c7d2af5b1d1542f811affd 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -249,9 +249,6 @@ static ssize_t ath10k_read_wmi_services(struct file *file, mutex_lock(&ar->conf_mutex); - if (len > buf_len) - len = buf_len; - spin_lock_bh(&ar->data_lock); for (i = 0; i < WMI_SERVICE_MAX; i++) { enabled = test_bit(i, ar->wmi.svc_map); @@ -1819,7 +1816,7 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar, tpc_stats->num_tx_chain, tpc_stats->rate_max); - for (j = 0; j < tpc_stats->num_tx_chain ; j++) { + for (j = 0; j < WMI_TPC_FLAG; j++) { switch (j) { case WMI_TPC_TABLE_TYPE_CDD: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { @@ -1985,7 +1982,8 @@ void ath10k_debug_stop(struct ath10k *ar) /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid - * warning from del_timer(). */ + * warning from del_timer(). + */ if (ar->debug.htt_stats_mask != 0) cancel_delayed_work(&ar->debug.htt_stats_dwork); @@ -1997,6 +1995,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; + struct ath10k_vif *arvif; + + /* Just check for the first vif alone, as all the vifs will be + * sharing the same channel and if the channel is disabled, all the + * vifs will share the same 'is_started' state. + */ + arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); + if (!arvif->is_started) + return -EINVAL; ieee80211_radar_detected(ar->hw); @@ -2437,86 +2444,82 @@ int ath10k_debug_register(struct ath10k *ar) init_completion(&ar->debug.tpc_complete); init_completion(&ar->debug.fw_stats_complete); - debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, + debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar, &fops_fw_stats); - debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_fw_reset_stats); + debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar, + &fops_fw_reset_stats); - debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, + debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar, &fops_wmi_services); - debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); + debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar, + &fops_simulate_fw_crash); - debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_fw_crash_dump); + debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar, + &fops_fw_crash_dump); - debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_reg_addr); + debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar, + &fops_reg_addr); - debugfs_create_file("reg_value", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_reg_value); + debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar, + &fops_reg_value); - debugfs_create_file("mem_value", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_mem_value); + debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar, + &fops_mem_value); - 
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_chip_id); + debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar, + &fops_chip_id); - debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_htt_stats_mask); + debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar, + &fops_htt_stats_mask); - debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, + debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar, &fops_htt_max_amsdu_ampdu); - debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_fw_dbglog); + debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, + &fops_fw_dbglog); - debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, - ar, &fops_cal_data); + debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, + &fops_cal_data); - debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_ani_enable); + debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, + &fops_ani_enable); - debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_nf_cal_period); + debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, + &fops_nf_cal_period); if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { - debugfs_create_file("dfs_simulate_radar", S_IWUSR, - ar->debug.debugfs_phy, ar, - &fops_simulate_radar); + debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, + ar, &fops_simulate_radar); - debugfs_create_bool("dfs_block_radar_events", S_IWUSR, + debugfs_create_bool("dfs_block_radar_events", 0200, ar->debug.debugfs_phy, &ar->dfs_block_radar_events); - debugfs_create_file("dfs_stats", S_IRUSR, - ar->debug.debugfs_phy, ar, + debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar, &fops_dfs_stats); } - debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_pktlog_filter); + debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar, + &fops_pktlog_filter); - debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_quiet_period); + debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar, + &fops_quiet_period); - debugfs_create_file("tpc_stats", S_IRUSR, - ar->debug.debugfs_phy, ar, &fops_tpc_stats); + debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar, + &fops_tpc_stats); if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) - debugfs_create_file("btcoex", S_IRUGO | S_IWUSR, - ar->debug.debugfs_phy, ar, &fops_btcoex); + debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar, + &fops_btcoex); if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) - debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR, - ar->debug.debugfs_phy, ar, + debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar, &fops_peer_stats); - debugfs_create_file("fw_checksums", S_IRUSR, - ar->debug.debugfs_phy, ar, &fops_fw_checksums); + debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar, + &fops_fw_checksums); return 0; } diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 7353e7ea88f13f3e804213f1aa2ab8f34e41557a..d59ac6b833400b4c553390869e7a31a5c508cadf 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -372,11 +372,10 @@ static const struct file_operations fops_peer_debug_trigger 
= { void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { - debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta, - &fops_aggr_mode); - debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba); - debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp); - debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba); + debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode); + debugfs_create_file("addba", 0200, dir, sta, &fops_addba); + debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp); + debugfs_create_file("delba", 0200, dir, sta, &fops_delba); debugfs_create_file("peer_debug_trigger", 0600, dir, sta, &fops_peer_debug_trigger); } diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index b2566b06e1e1f4e6a87bb088b4e889ef6fc82bc4..6679dd9cfd12857342abf2ff87f86d835971044e 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -54,7 +54,8 @@ struct ath10k_hif_ops { int (*start)(struct ath10k *ar); /* Clean up what start() did. This does not revert to BMI phase. If - * desired so, call power_down() and power_up() */ + * desired so, call power_down() and power_up() + */ void (*stop)(struct ath10k *ar); int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, @@ -82,7 +83,8 @@ struct ath10k_hif_ops { int (*power_up)(struct ath10k *ar); /* Power down the device and free up resources. stop() must be called - * before this if start() was called earlier */ + * before this if start() was called earlier + */ void (*power_down)(struct ath10k *ar); int (*suspend)(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 9f6a915f91bfe842a1672b81a8b6046c7f9d4f1b..b7669b2e94aaec8f86683cbc4e084cca122f6625 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -119,6 +119,9 @@ int ath10k_htc_send(struct ath10k_htc *htc, credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); spin_lock_bh(&htc->tx_lock); if (ep->tx_credits < credits) { + ath10k_dbg(ar, ATH10K_DBG_HTC, + "htc insufficient credits ep %d required %d available %d\n", + eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); ret = -EAGAIN; goto err_pull; @@ -419,7 +422,8 @@ static void ath10k_htc_control_rx_complete(struct ath10k *ar, struct sk_buff *skb) { /* This is unexpected. FW is not supposed to send regular rx on this - * endpoint. */ + * endpoint. + */ ath10k_warn(ar, "unexpected htc rx\n"); kfree_skb(skb); } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 90c2f72666b831b5fb88bee6440b517a50e3bb45..6305308422c4440fb1e20df4e6b32050d75573d4 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -51,7 +51,8 @@ enum htt_h2t_msg_type { /* host-to-target */ HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6, /* This command is used for sending management frames in HTT < 3.0. - * HTT >= 3.0 uses TX_FRM for everything. */ + * HTT >= 3.0 uses TX_FRM for everything. 
+ */ HTT_H2T_MSG_TYPE_MGMT_TX = 7, HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11, @@ -910,7 +911,8 @@ struct htt_rx_test { /* payload consists of 2 lists: * a) num_ints * sizeof(__le32) - * b) num_chars * sizeof(u8) aligned to 4bytes */ + * b) num_chars * sizeof(u8) aligned to 4bytes + */ u8 payload[0]; } __packed; @@ -1307,7 +1309,8 @@ struct htt_frag_desc_bank_id { } __packed; /* real is 16 but it wouldn't fit in the max htt message size - * so we use a conservatively safe value for now */ + * so we use a conservatively safe value for now + */ #define HTT_FRAG_DESC_BANK_MAX 4 #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 @@ -1684,12 +1687,14 @@ struct ath10k_htt { DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done); /* set if host-fw communication goes haywire - * used to avoid further failures */ + * used to avoid further failures + */ bool rx_confused; atomic_t num_mpdus_ready; /* This is used to group tx/rx completions separately and process them - * in batches to reduce cache stalls */ + * in batches to reduce cache stalls + */ struct sk_buff_head rx_compl_q; struct sk_buff_head rx_in_ord_compl_q; struct sk_buff_head tx_fetch_ind_q; @@ -1725,11 +1730,13 @@ struct ath10k_htt { /* This structure layout is programmed via rx ring setup * so that FW knows how to transfer the rx descriptor to the host. - * Buffers like this are placed on the rx ring. */ + * Buffers like this are placed on the rx ring. + */ struct htt_rx_desc { union { /* This field is filled on the host using the msdu buffer - * from htt_rx_indication */ + * from htt_rx_indication + */ struct fw_rx_desc_base fw_desc; u32 pad; } __packed; @@ -1760,7 +1767,8 @@ struct htt_rx_desc { #define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc)) /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle - * aggregated traffic more nicely. */ + * aggregated traffic more nicely. + */ #define ATH10K_HTT_MAX_NUM_REFILL 100 /* diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 02a3fc81fbe3bc8204aec31c99890559fe15b708..84b6067ff6e774604722224752eb0c35dbcead21 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -177,7 +177,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) * automatically balances load wrt to CPU power. * * This probably comes at a cost of lower maximum throughput but - * improves the average and stability. */ + * improves the average and stability. 
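+ * (Each replenish round is additionally capped at + * ATH10K_HTT_MAX_NUM_REFILL buffers, see the min() below.)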
+ */ spin_lock_bh(&htt->rx_ring.lock); num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); @@ -304,7 +305,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, rx_desc = (struct htt_rx_desc *)msdu->data; /* FIXME: we must report msdu payload since this is what caller - * expects now */ + * expects now + */ skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); @@ -630,16 +632,17 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, sgi = (info3 >> 7) & 1; status->rate_idx = mcs; - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; if (sgi) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (bw) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; break; case HTT_RX_VHT: case HTT_RX_VHT_WITH_TXBF: /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 - TODO check this */ + * TODO check this + */ bw = info2 & 3; sgi = info3 & 1; group_id = (info2 >> 4) & 0x3F; @@ -686,10 +689,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, } status->rate_idx = mcs; - status->vht_nss = nss; + status->nss = nss; if (sgi) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; switch (bw) { /* 20MHZ */ @@ -697,18 +700,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, break; /* 40MHZ */ case 1: - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; break; /* 80MHZ */ case 2: - status->vht_flag |= RX_VHT_FLAG_80MHZ; + status->bw = RATE_INFO_BW_80; break; case 3: - status->vht_flag |= RX_VHT_FLAG_160MHZ; + status->bw = RATE_INFO_BW_160; break; } - status->flag |= RX_FLAG_VHT; + status->encoding = RX_ENC_VHT; break; default: break; @@ -871,13 +874,10 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, /* New PPDU starts so clear out the old per-PPDU status. */ status->freq = 0; status->rate_idx = 0; - status->vht_nss = 0; - status->vht_flag &= ~RX_VHT_FLAG_80MHZ; - status->flag &= ~(RX_FLAG_HT | - RX_FLAG_VHT | - RX_FLAG_SHORT_GI | - RX_FLAG_40MHZ | - RX_FLAG_MACTIME_END); + status->nss = 0; + status->encoding = RX_ENC_LEGACY; + status->bw = RATE_INFO_BW_20; + status->flag &= ~RX_FLAG_MACTIME_END; status->flag |= RX_FLAG_NO_SIGNAL_VAL; ath10k_htt_rx_h_signal(ar, status, rxd); @@ -930,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar, *status = *rx_status; ath10k_dbg(ar, ATH10K_DBG_DATA, - "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, ieee80211_get_SA(hdr), @@ -938,16 +938,15 @@ static void ath10k_process_rx(struct ath10k *ar, is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? - "legacy" : "", - status->flag & RX_FLAG_HT ? "ht" : "", - status->flag & RX_FLAG_VHT ? "vht" : "", - status->flag & RX_FLAG_40MHZ ? "40" : "", - status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "", - status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "", - status->flag & RX_FLAG_SHORT_GI ? "sgi " : "", + (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", + (status->encoding == RX_ENC_HT) ? "ht" : "", + (status->encoding == RX_ENC_VHT) ? "vht" : "", + (status->bw == RATE_INFO_BW_40) ? 
"40" : "", + (status->bw == RATE_INFO_BW_80) ? "80" : "", + (status->bw == RATE_INFO_BW_160) ? "160" : "", + status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", status->rate_idx, - status->vht_nss, + status->nss, status->freq, status->band, status->flag, !!(status->flag & RX_FLAG_FAILED_FCS_CRC), diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 86b427f5e2bcbc5cd8eb83ae5ddc12773bdff59b..685faac1368fdf7388c5dde865cc2de0a2646b8f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -526,7 +526,8 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) memset(req, 0, sizeof(*req)); /* currently we support only max 8 bit masks so no need to worry - * about endian support */ + * about endian support + */ req->upload_types[0] = mask; req->reset_types[0] = mask; req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; @@ -1008,7 +1009,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus - * improve performance. */ + * improve performance. + */ txbuf->htc_hdr.eid = htt->eid; txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index d9f37ee4bfdd3eee9e0a0c11e30165a8b6a7d5ea..c866ab524571ef1fc03844d832dea5b2a340c240 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -19,6 +19,7 @@ #include "hw.h" #include "hif.h" #include "wmi-ops.h" +#include "bmi.h" const struct ath10k_hw_regs qca988x_regs = { .rtc_soc_base_address = 0x00004000, @@ -72,6 +73,9 @@ const struct ath10k_hw_regs qca6174_regs = { .pcie_intr_fw_mask = 0x00000400, .pcie_intr_ce_mask_all = 0x0007f800, .pcie_intr_clr_address = 0x00000014, + .cpu_pll_init_address = 0x00404020, + .cpu_speed_address = 0x00404024, + .core_clk_div_address = 0x00404028, }; const struct ath10k_hw_regs qca99x0_regs = { @@ -187,6 +191,73 @@ const struct ath10k_hw_values qca4019_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = { + { + .refclk = 48000000, + .div = 0xe, + .rnfrac = 0x2aaa8, + .settle_time = 2400, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 19200000, + .div = 0x24, + .rnfrac = 0x2aaa8, + .settle_time = 960, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 24000000, + .div = 0x1d, + .rnfrac = 0x15551, + .settle_time = 1200, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 26000000, + .div = 0x1b, + .rnfrac = 0x4ec4, + .settle_time = 1300, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 37400000, + .div = 0x12, + .rnfrac = 0x34b49, + .settle_time = 1870, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 38400000, + .div = 0x12, + .rnfrac = 0x15551, + .settle_time = 1920, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 40000000, + .div = 0x12, + .rnfrac = 0x26665, + .settle_time = 2000, + .refdiv = 0, + .outdiv = 1, + }, + { + .refclk = 52000000, + .div = 0x1b, + .rnfrac = 0x4ec4, + .settle_time = 2600, + .refdiv = 0, + .outdiv = 1, + }, +}; + void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) { @@ -361,6 +432,195 @@ static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar, mutex_unlock(&ar->conf_mutex); } 
+/** + * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock + * @ar: the ath10k blob + * + * This function is very hardware specific: the clock initialization + * steps are very sensitive and a mistake can lead to an unexplained + * crash, so they must be done in sequence. + * + * *** Be aware of this if you plan to refactor them. *** + * + * Return: 0 if the pll was successfully enabled, otherwise -EINVAL + */ +static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar) +{ + int ret, wait_limit; + u32 clk_div_addr, pll_init_addr, speed_addr; + u32 addr, reg_val, mem_val; + struct ath10k_hw_params *hw; + const struct ath10k_hw_clk_params *hw_clk; + + hw = &ar->hw_params; + + if (ar->regs->core_clk_div_address == 0 || + ar->regs->cpu_pll_init_address == 0 || + ar->regs->cpu_speed_address == 0) + return -EINVAL; + + clk_div_addr = ar->regs->core_clk_div_address; + pll_init_addr = ar->regs->cpu_pll_init_address; + speed_addr = ar->regs->cpu_speed_address; + + /* Read efuse register to find out the right hw clock configuration */ + addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val); + if (ret) + return -EINVAL; + + /* sanitize: bail out if the hw refclk index is out of bounds */ + if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT) + return -EINVAL; + + hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)]; + + /* Set the rnfrac and outdiv params to bb_pll register */ + addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val); + if (ret) + return -EINVAL; + + reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK); + reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) | + SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV)); + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* Set the correct settle time value to pll_settle register */ + addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val); + if (ret) + return -EINVAL; + + reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK; + reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME); + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* Set the clock_ctrl div to core_clk_ctrl register */ + addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val); + if (ret) + return -EINVAL; + + reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK; + reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV); + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* Set the clock_div register */ + mem_val = 1; + ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val, + sizeof(mem_val)); + if (ret) + return -EINVAL; + + /* Configure the pll_control register */ + addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val); + if (ret) + return -EINVAL; + + reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) | + SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) | + SM(1, WLAN_PLL_CONTROL_NOPWD)); + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* busy wait (max 1s) until the rtc_sync status register indicates ready */ + wait_limit = 100000; + addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET); + do { + ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val); + if (ret) + return -EINVAL; + + if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING)) + break; + + wait_limit--; + udelay(10); + + } while (wait_limit > 0); + + if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL; + + /* Unset the pll_bypass in pll_control register */ + addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, ®_val); + if (ret) + return -EINVAL; + + reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK; + reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS); + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* busy wait (max 1s) the rtc_sync status register indicate ready */ + wait_limit = 100000; + addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET); + do { + ret = ath10k_bmi_read_soc_reg(ar, addr, ®_val); + if (ret) + return -EINVAL; + + if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING)) + break; + + wait_limit--; + udelay(10); + + } while (wait_limit > 0); + + if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING)) + return -EINVAL; + + /* Enable the hardware cpu clock register */ + addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, ®_val); + if (ret) + return -EINVAL; + + reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK; + reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD); + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* unset the nopwd from pll_control register */ + addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET); + ret = ath10k_bmi_read_soc_reg(ar, addr, ®_val); + if (ret) + return -EINVAL; + + reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK; + ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val); + if (ret) + return -EINVAL; + + /* enable the pll_init register */ + mem_val = 1; + ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val, + sizeof(mem_val)); + if (ret) + return -EINVAL; + + /* set the target clock frequency to speed register */ + ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq, + sizeof(hw->target_cpu_freq)); + if (ret) + return -EINVAL; + + return 0; +} + const struct ath10k_hw_ops qca988x_ops = { .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, }; @@ -374,3 +634,8 @@ static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd) const struct ath10k_hw_ops qca99x0_ops = { .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes, }; + +const struct ath10k_hw_ops qca6174_ops = { + .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, + .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock, +}; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index f0fda0f2b3b487147dc6eb147eb55b8b4287b15a..5b1e90bb2a4db2ddeebbd1c3383f096889e1e9a3 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -129,7 +129,7 @@ enum qca9377_chip_id_rev { #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 #define ATH10K_FW_FILE_BASE "firmware" -#define ATH10K_FW_API_MAX 5 +#define ATH10K_FW_API_MAX 6 #define ATH10K_FW_API_MIN 2 #define ATH10K_FW_API2_FILE "firmware-2.bin" @@ -141,6 +141,9 @@ enum qca9377_chip_id_rev { /* HTT id conflict fix for management frames over HTT */ #define ATH10K_FW_API5_FILE "firmware-5.bin" +/* the firmware-6.bin blob */ +#define ATH10K_FW_API6_FILE "firmware-6.bin" + #define ATH10K_FW_UTF_FILE "utf.bin" #define ATH10K_FW_UTF_API2_FILE "utf-2.bin" @@ -255,6 +258,9 @@ struct ath10k_hw_regs { u32 pcie_intr_fw_mask; u32 pcie_intr_ce_mask_all; u32 pcie_intr_clr_address; + u32 cpu_pll_init_address; + u32 cpu_speed_address; + u32 core_clk_div_address; }; extern const struct ath10k_hw_regs qca988x_regs; @@ -293,7 +299,8 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, * - raw 
appears in nwifi decap, raw and nwifi appear in ethernet decap * - raw have FCS, nwifi doesn't * - ethernet frames have 802.11 header decapped and parts (base hdr, cipher - * param, llc/snap) are aligned to 4byte boundaries each */ + * param, llc/snap) are aligned to 4byte boundaries each + */ enum ath10k_hw_txrx_mode { ATH10K_HW_TXRX_RAW = 0, @@ -363,6 +370,30 @@ enum ath10k_hw_cc_wraparound_type { ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2, }; +enum ath10k_hw_refclk_speed { + ATH10K_HW_REFCLK_UNKNOWN = -1, + ATH10K_HW_REFCLK_48_MHZ = 0, + ATH10K_HW_REFCLK_19_2_MHZ = 1, + ATH10K_HW_REFCLK_24_MHZ = 2, + ATH10K_HW_REFCLK_26_MHZ = 3, + ATH10K_HW_REFCLK_37_4_MHZ = 4, + ATH10K_HW_REFCLK_38_4_MHZ = 5, + ATH10K_HW_REFCLK_40_MHZ = 6, + ATH10K_HW_REFCLK_52_MHZ = 7, + + /* must be the last one */ + ATH10K_HW_REFCLK_COUNT, +}; + +struct ath10k_hw_clk_params { + u32 refclk; + u32 div; + u32 rnfrac; + u32 settle_time; + u32 refdiv; + u32 outdiv; +}; + struct ath10k_hw_params { u32 id; u16 dev_id; @@ -416,6 +447,13 @@ struct ath10k_hw_params { /* Number of bytes used for alignment in rx_hdr_status of rx desc. */ int decap_align_bytes; + + /* hw specific clock control parameters */ + const struct ath10k_hw_clk_params *hw_clk; + int target_cpu_freq; + + /* Number of bytes to be discarded for each FFT sample */ + int spectral_bin_discard; }; struct htt_rx_desc; @@ -424,10 +462,14 @@ struct htt_rx_desc; struct ath10k_hw_ops { int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd); void (*set_coverage_class)(struct ath10k *ar, s16 value); + int (*enable_pll_clk)(struct ath10k *ar); }; extern const struct ath10k_hw_ops qca988x_ops; extern const struct ath10k_hw_ops qca99x0_ops; +extern const struct ath10k_hw_ops qca6174_ops; + +extern const struct ath10k_hw_clk_params qca6174_clk[]; static inline int ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, @@ -847,4 +889,38 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define WAVE1_PHYCLK_USEC_MASK 0x0000007F #define WAVE1_PHYCLK_USEC_LSB 0 +/* qca6174 PLL offset/mask */ +#define SOC_CORE_CLK_CTRL_OFFSET 0x00000114 +#define SOC_CORE_CLK_CTRL_DIV_LSB 0 +#define SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007 + +#define EFUSE_OFFSET 0x0000032c +#define EFUSE_XTAL_SEL_LSB 8 +#define EFUSE_XTAL_SEL_MASK 0x00000700 + +#define BB_PLL_CONFIG_OFFSET 0x000002f4 +#define BB_PLL_CONFIG_FRAC_LSB 0 +#define BB_PLL_CONFIG_FRAC_MASK 0x0003ffff +#define BB_PLL_CONFIG_OUTDIV_LSB 18 +#define BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000 + +#define WLAN_PLL_SETTLE_OFFSET 0x0018 +#define WLAN_PLL_SETTLE_TIME_LSB 0 +#define WLAN_PLL_SETTLE_TIME_MASK 0x000007ff + +#define WLAN_PLL_CONTROL_OFFSET 0x0014 +#define WLAN_PLL_CONTROL_DIV_LSB 0 +#define WLAN_PLL_CONTROL_DIV_MASK 0x000003ff +#define WLAN_PLL_CONTROL_REFDIV_LSB 10 +#define WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00 +#define WLAN_PLL_CONTROL_BYPASS_LSB 16 +#define WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000 +#define WLAN_PLL_CONTROL_NOPWD_LSB 18 +#define WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000 + +#define RTC_SYNC_STATUS_OFFSET 0x0244 +#define RTC_SYNC_STATUS_PLL_CHANGING_LSB 5 +#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020 +/* qca6174 PLL offset/mask end */ + #endif /* _HW_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 3029f257a19a5988e13368a3922e9e55fc961258..4674ff33d3203ac1eb00c8b268bc3b2ffc7d32bc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -457,7 +457,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, 
for (;;) { /* since ath10k_install_key we can't hold data_lock all the - * time, so we try to remove the keys incrementally */ + * time, so we try to remove the keys incrementally + */ spin_lock_bh(&ar->data_lock); i = 0; list_for_each_entry(peer, &ar->peers, list) { @@ -609,7 +610,8 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity) case 2: case 3: /* Our lower layer calculations limit our precision to - 1 microsecond */ + * 1 microsecond + */ return 1; case 4: return 2; @@ -978,7 +980,8 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) arg.channel.band_center_freq2 = chandef->center_freq2; /* TODO setup this dynamically, what in case we - don't have any vifs? */ + * don't have any vifs? + */ arg.channel.mode = chan_to_phymode(chandef); arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); @@ -2373,9 +2376,10 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, } /* TODO setup this based on STA listen interval and - beacon interval. Currently we don't know - sta->listen_interval - mac80211 patch required. - Currently use 10 seconds */ + * beacon interval. Currently we don't know + * sta->listen_interval - mac80211 patch required. + * Currently use 10 seconds + */ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10); @@ -2451,6 +2455,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, enum nl80211_band band; const u16 *vht_mcs_mask; u8 ampdu_factor; + u8 max_nss, vht_mcs; + int i; if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) return; @@ -2478,7 +2484,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to * zero in VHT IE. Using it would result in degraded throughput. * arg->peer_max_mpdu at this point contains HT max_mpdu so keep - * it if VHT max_mpdu is smaller. */ + * it if VHT max_mpdu is smaller. + */ arg->peer_max_mpdu = max(arg->peer_max_mpdu, (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1); @@ -2489,6 +2496,18 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, if (sta->bandwidth == IEEE80211_STA_RX_BW_160) arg->peer_flags |= ar->wmi.peer_flags->bw160; + /* Calculate peer NSS capability from VHT capabilities if STA + * supports VHT. + */ + for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { + vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> + (2 * i) & 3; + + if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && + vht_mcs_mask[i]) + max_nss = i + 1; + } + arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->peer_vht_rates.rx_mcs_set = @@ -2779,7 +2798,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, } /* ap_sta must be accessed only within rcu section which must be left - * before calling ath10k_setup_peer_smps() which might sleep. */ + * before calling ath10k_setup_peer_smps() which might sleep. + */ ht_cap = ap_sta->ht_cap; vht_cap = ap_sta->vht_cap; @@ -3050,7 +3070,8 @@ static int ath10k_update_channel_list(struct ath10k *ar) /* FIXME: why use only legacy modes, why not any * HT/VHT modes? Would that even make any - * difference? */ + * difference? 
+ */ if (channel->band == NL80211_BAND_2GHZ) ch->mode = MODE_11G; else @@ -3114,7 +3135,8 @@ static void ath10k_regd_update(struct ath10k *ar) } /* Target allows setting up per-band regdomain but ath_common provides - * a combined one only */ + * a combined one only + */ ret = ath10k_wmi_pdev_set_regdomain(ar, regpair->reg_domain, regpair->reg_domain, /* 2ghz */ @@ -3126,6 +3148,21 @@ static void ath10k_regd_update(struct ath10k *ar) ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); } +static void ath10k_mac_update_channel_list(struct ath10k *ar, + struct ieee80211_supported_band *band) +{ + int i; + + if (ar->low_5ghz_chan && ar->high_5ghz_chan) { + for (i = 0; i < band->n_channels; i++) { + if (band->channels[i].center_freq < ar->low_5ghz_chan || + band->channels[i].center_freq > ar->high_5ghz_chan) + band->channels[i].flags |= + IEEE80211_CHAN_DISABLED; + } + } +} + static void ath10k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { @@ -3149,6 +3186,10 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, if (ar->state == ATH10K_STATE_ON) ath10k_regd_update(ar); mutex_unlock(&ar->conf_mutex); + + if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) + ath10k_mac_update_channel_list(ar, + ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); } /***************/ @@ -3644,7 +3685,8 @@ void ath10k_offchan_tx_work(struct work_struct *work) * never transmitted. We delete the peer upon tx completion. * It is unlikely that a peer for offchannel tx will already be * present. However it may be in some rare cases so account for that. - * Otherwise we might remove a legitimate peer and break stuff. */ + * Otherwise we might remove a legitimate peer and break stuff. + */ for (;;) { skb = skb_dequeue(&ar->offchan_tx_queue); @@ -4684,6 +4726,7 @@ static void ath10k_stop(struct ieee80211_hw *hw) } mutex_unlock(&ar->conf_mutex); + cancel_work_sync(&ar->set_coverage_class_work); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->restart_work); } @@ -5683,7 +5726,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } /* the peer should not disappear in mid-way (unless FW goes awry) since - * we already hold conf_mutex. we just make sure its there now. */ + * we already hold conf_mutex. we just make sure its there now. + */ spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); @@ -5695,8 +5739,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = -EOPNOTSUPP; goto exit; } else { - /* if the peer doesn't exist there is no key to disable - * anymore */ + /* if the peer doesn't exist there is no key to disable anymore */ goto exit; } } @@ -6555,7 +6598,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, long time_left; /* mac80211 doesn't care if we really xmit queued frames or not - * we'll collect those frames either way if we stop/delete vdevs */ + * we'll collect those frames either way if we stop/delete vdevs + */ if (drop) return; @@ -6606,7 +6650,8 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); /* If device failed to restart it will be in a different state, e.g. 
- * ATH10K_STATE_WEDGED */ + * ATH10K_STATE_WEDGED + */ if (ar->state == ATH10K_STATE_RESTARTED) { ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; @@ -7129,7 +7174,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, lockdep_assert_held(&ar->data_lock); WARN_ON(ctx && vifs); - WARN_ON(vifs && n_vifs != 1); + WARN_ON(vifs && !n_vifs); /* FIXME: Sort of an optimization and a workaround. Peers and vifs are * on a linked list now. Doing a lookup peer -> vif -> chanctx for each @@ -8248,6 +8293,8 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->cipher_suites = cipher_suites; ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = ieee80211_register_hw(ar->hw); if (ret) { ath10k_err(ar, "failed to register ieee80211: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 6094372307aae46b296cd1cff40498a06427d8a3..1e9806f57ee41e7b0cc01e0dd24559a78609d6c0 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -720,14 +720,16 @@ void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) { /* IMPORTANT: INTR_CLR register has to be set after * INTR_ENABLE is set to 0, otherwise interrupt can not be - * really cleared. */ + * really cleared. + */ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 0); ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); /* IMPORTANT: this extra read transaction is required to - * flush the posted write buffer. */ + * flush the posted write buffer. + */ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS); } @@ -739,7 +741,8 @@ void ath10k_pci_enable_legacy_irq(struct ath10k *ar) PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); /* IMPORTANT: this extra read transaction is required to - * flush the posted write buffer. */ + * flush the posted write buffer. + */ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS); } @@ -970,12 +973,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } remaining_bytes -= nbytes; - - if (ret) { - ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", - address, ret); - break; - } memcpy(data, data_buf, nbytes); address += nbytes; @@ -2914,7 +2911,8 @@ static int ath10k_pci_init_irq(struct ath10k *ar) * host won't know when target writes BAR to CORE_CTRL. * This write might get lost if target has NOT written BAR. * For now, fix the race by repeating the write in below - * synchronization checking. */ + * synchronization checking. 
+	 */
 	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
 			   PCIE_INTR_ENABLE_ADDRESS,
@@ -3430,6 +3428,7 @@ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
 
 /* QCA6174 3.1 firmware files */
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 034e7a54c5b2789b15ba59ff0dda2325d6c63ae2..c1022a1cf8555b97b91af020185b810a05899bf8 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -439,19 +439,22 @@ struct rx_mpdu_end {
 * c) A-MSDU subframe header (14 bytes) if applicable
 * d) LLC/SNAP (RFC1042, 8 bytes)
 *
- * In case of A-MSDU only first frame in sequence contains (a) and (b).
*/
+ * In case of A-MSDU only first frame in sequence contains (a) and (b).
+ */
 enum rx_msdu_decap_format {
 	RX_MSDU_DECAP_RAW = 0,
 
 	/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
-	 * htt_rx_desc contains the original decapped 802.11 header.
*/
+	 * htt_rx_desc contains the original decapped 802.11 header.
+	 */
 	RX_MSDU_DECAP_NATIVE_WIFI = 1,
 
 	/* Payload contains an ethernet header (struct ethhdr). */
 	RX_MSDU_DECAP_ETHERNET2_DIX = 2,
 
 	/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
-	 * total), followed by an RFC1042 header (8 bytes).
*/
+	 * total), followed by an RFC1042 header (8 bytes).
+	 */
 	RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
@@ -867,7 +870,7 @@ struct rx_ppdu_start {
 *
 * reserved_9
 *		Reserved: HW should fill with 0, FW should ignore.
-*/
+ */
 
 #define RX_PPDU_END_FLAGS_PHY_ERR             (1 << 0)
 #define RX_PPDU_END_FLAGS_RX_LOCATION         (1 << 1)
@@ -1207,7 +1210,7 @@ struct rx_ppdu_end {
 *		Every time HW sets this bit in memory FW/SW must clear this
 *		bit in memory. FW will initialize all the ppdu_done dword
 *		to 0.
-*/
+ */
 
 #define FW_RX_DESC_INFO0_DISCARD        (1 << 0)
 #define FW_RX_DESC_INFO0_FORWARD        (1 << 1)
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index c061d6958bd130f7d5756d6eaea6268397dd764e..dd9cc0939ea811d01493f9b08e2f58eb82dae957 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -56,6 +56,21 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
 	return max_exp;
 }
 
+static inline size_t ath10k_spectral_fix_bin_size(struct ath10k *ar,
+						  size_t bin_len)
+{
+	/* some chipsets report the bin size as 2^n bytes + 'm' bytes in
+	 * report mode 2. The first 2^n bytes carry inband tones and the
+	 * last 'm' bytes carry band edge detection data, mainly used for
+	 * radar detection. Strip the last 'm' bytes to make the bin size
+	 * valid. 'm' can take the possible values 4 or 12.
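+	 * e.g. a 64 bin report with 'm' = 4 arrives as 68 bytes, which
+	 * is not a power of 2 and gets trimmed back to 64 bins here.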
+ */ + if (!is_power_of_2(bin_len)) + bin_len -= ar->hw_params.spectral_bin_discard; + + return bin_len; +} + int ath10k_spectral_process_fft(struct ath10k *ar, struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_fft_report *fftr, @@ -70,18 +85,11 @@ int ath10k_spectral_process_fft(struct ath10k *ar, fft_sample = (struct fft_sample_ath10k *)&buf; + bin_len = ath10k_spectral_fix_bin_size(ar, bin_len); + if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS) return -EINVAL; - /* qca99x0 reports bin size as 68 bytes (64 bytes + 4 bytes) in - * report mode 2. First 64 bytes carries inband tones (-32 to +31) - * and last 4 byte carries band edge detection data (+32) mainly - * used in radar detection purpose. Strip last 4 byte to make bin - * size is valid one. - */ - if (bin_len == 68) - bin_len -= 4; - reg0 = __le32_to_cpu(fftr->reg0); reg1 = __le32_to_cpu(fftr->reg1); @@ -536,15 +544,15 @@ int ath10k_spectral_create(struct ath10k *ar) 1140, 2500, &rfs_spec_scan_cb, NULL); debugfs_create_file("spectral_scan_ctl", - S_IRUSR | S_IWUSR, + 0600, ar->debug.debugfs_phy, ar, &fops_spec_scan_ctl); debugfs_create_file("spectral_count", - S_IRUSR | S_IWUSR, + 0600, ar->debug.debugfs_phy, ar, &fops_spectral_count); debugfs_create_file("spectral_bins", - S_IRUSR | S_IWUSR, + 0600, ar->debug.debugfs_phy, ar, &fops_spectral_bins); diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index a47cab44d9c85cb913da046e0ccc2564b60493ce..cbac9e4252d6fdd60c8c6750fdaeaafeaa98e9e7 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -268,13 +268,13 @@ struct host_interest { #define HI_OPTION_FW_BRIDGE_SHIFT 0x04 /* -Fw Mode/SubMode Mask -|-----------------------------------------------------------------------------| -| SUB | SUB | SUB | SUB | | | | | -|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]| -| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | -|-----------------------------------------------------------------------------| -*/ + * Fw Mode/SubMode Mask + *----------------------------------------------------------------------------- + * SUB | SUB | SUB | SUB | | | | + *MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0] + * (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) + *----------------------------------------------------------------------------- + */ #define HI_OPTION_FW_MODE_BITS 0x2 #define HI_OPTION_FW_MODE_MASK 0x3 #define HI_OPTION_FW_MODE_SHIFT 0xC @@ -428,8 +428,9 @@ Fw Mode/SubMode Mask #define HI_PWR_SAVE_LPL_ENABLED 0x1 /*b1-b3 reserved*/ /*b4-b5 : dev0 LPL type : 0 - none - 1- Reduce Pwr Search - 2- Reduce Pwr Listen*/ + * 1- Reduce Pwr Search + * 2- Reduce Pwr Listen + */ /*b6-b7 : dev1 LPL type and so on for Max 8 devices*/ #define HI_PWR_SAVE_LPL_DEV0_LSB 4 #define HI_PWR_SAVE_LPL_DEV_MASK 0x3 diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 8bb36c18a7491a6d51e75dfaeb3bf0f1d12ff37e..d8564624415c9f23a8fb21ea6efda6b9fc5c6fd1 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -420,8 +420,8 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1]; int ret; - ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, - ath10k_tm_policy); + ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_policy, + NULL); if (ret) return ret; diff --git 
a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index 0a47269be289d3bee3683f068897451b5d9d0e86..87948aff1bd5a9f5a1ca7772975f82822e57ab74 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -124,7 +124,7 @@ void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature) complete(&ar->thermal.wmi_sync); } -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp, +static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath10k_thermal_show_temp, NULL, 0); static struct attribute *ath10k_hwmon_attrs[] = { @@ -191,7 +191,8 @@ int ath10k_thermal_register(struct ath10k *ar) return 0; /* Avoid linking error on devm_hwmon_device_register_with_groups, I - * guess linux/hwmon.h is missing proper stubs. */ + * guess linux/hwmon.h is missing proper stubs. + */ if (!IS_REACHABLE(CONFIG_HWMON)) return 0; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 9852c5d51139ebdaabcd943463afd258a818bde1..d4986f626c35c6e78a1170924a4044fec1948bc0 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -34,7 +34,8 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) /* If the original wait_for_completion() timed out before * {data,mgmt}_tx_completed() was called then we could complete * offchan_tx_completed for a different skb. Prevent this by using - * offchan_tx_skb. */ + * offchan_tx_skb. + */ spin_lock_bh(&ar->data_lock); if (ar->offchan_tx_skb != skb) { ath10k_warn(ar, "completed old offchannel frame\n"); diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index c7956e181f80d3c72d08b687920f95ff55daaff6..2fc3f24ff1caad9d302bc78ad2194b8cec581942 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -390,7 +390,8 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) return ret; /* FIXME There's no ACK event for Management Tx. This probably - * shouldn't be called here either. */ + * shouldn't be called here either. + */ info->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(ar->hw, msdu); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2f1743e60fa1303331fb654a79c745e725cc9eaf..6afc8d27f0d511e04ad399112fa4a837e7ca715b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3210,7 +3210,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1; /* if next SWBA has no tim_changed the tim_bitmap is garbage. 
- * we must copy the bitmap upon change and reuse it later */ + * we must copy the bitmap upon change and reuse it later + */ if (__le32_to_cpu(tim_info->tim_changed)) { int i; @@ -3529,7 +3530,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) * before telling mac80211 to decrement CSA counter * * Once CSA counter is completed stop sending beacons until - * actual channel switch is done */ + * actual channel switch is done + */ if (arvif->vif->csa_active && ieee80211_csa_is_complete(arvif->vif)) { ieee80211_csa_finish(arvif->vif); @@ -3643,6 +3645,11 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, spin_lock_bh(&ar->data_lock); ch = ar->rx_channel; + + /* fetch target operating channel during channel change */ + if (!ch) + ch = ar->tgt_oper_chan; + spin_unlock_bh(&ar->data_lock); if (!ch) { @@ -3686,7 +3693,8 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, ATH10K_DFS_STAT_INC(ar, radar_detected); /* Control radar events reporting in debugfs file - dfs_block_radar_events */ + * dfs_block_radar_events + */ if (ar->dfs_block_radar_events) { ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); return; @@ -4593,6 +4601,8 @@ ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, arg->phy_capab = ev->phy_capability; arg->num_rf_chains = ev->num_rf_chains; arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; + arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan; + arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan; arg->num_mem_reqs = ev->num_mem_reqs; arg->service_map = ev->wmi_service_bitmap; arg->service_map_len = sizeof(ev->wmi_service_bitmap); @@ -4629,6 +4639,8 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, arg->phy_capab = ev->phy_capability; arg->num_rf_chains = ev->num_rf_chains; arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; + arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan; + arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan; arg->num_mem_reqs = ev->num_mem_reqs; arg->service_map = ev->wmi_service_bitmap; arg->service_map_len = sizeof(ev->wmi_service_bitmap); @@ -4682,6 +4694,8 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) ar->phy_capability = __le32_to_cpu(arg.phy_capab); ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains); ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd); + ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan); + ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan); ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", arg.service_map, arg.service_map_len); @@ -4758,9 +4772,10 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) num_units = ar->max_num_peers + 1; } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { /* number of units to allocate is number of - * peers, 1 extra for self peer on target */ - /* this needs to be tied, host and target - * can get out of sync */ + * peers, 1 extra for self peer on target + * this needs to be tied, host and target + * can get out of sync + */ num_units = ar->max_num_peers + 1; } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { num_units = ar->max_num_vdevs + 1; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 386aa51435f1c4d57e0affa556153c4989b2c8e8..1b4865a555954f8787d30a32384c1c7337857d9d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1038,7 +1038,8 @@ enum wmi_cmd_id { WMI_STA_UAPSD_AUTO_TRIG_CMDID, /* STA 
Keep alive parameter configuration, - Requires WMI_SERVICE_STA_KEEP_ALIVE */ + * Requires WMI_SERVICE_STA_KEEP_ALIVE + */ WMI_STA_KEEPALIVE_CMD, /* misc command group */ @@ -1774,7 +1775,8 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) break; /* no default handler to allow compiler to check that the - * enum is fully handled */ + * enum is fully handled + */ }; return ""; @@ -2974,7 +2976,8 @@ struct wmi_start_scan_arg { /* When set, DFS channels will not be scanned */ #define WMI_SCAN_BYPASS_DFS_CHN 0x40 /* Different FW scan engine may choose to bail out on errors. - * Allow the driver to have influence over that. */ + * Allow the driver to have influence over that. + */ #define WMI_SCAN_CONTINUE_ON_ERROR 0x80 /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ @@ -3182,7 +3185,7 @@ struct wmi_10_4_phyerr_event { struct phyerr_radar_report { __le32 reg0; /* RADAR_REPORT_REG0_* */ - __le32 reg1; /* REDAR_REPORT_REG1_* */ + __le32 reg1; /* RADAR_REPORT_REG1_* */ } __packed; #define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000 @@ -4447,14 +4450,16 @@ enum wmi_vdev_subtype_10_4 { /* values for vdev_start_request flags */ /* * Indicates that AP VDEV uses hidden ssid. only valid for - * AP/GO */ + * AP/GO + */ #define WMI_VDEV_START_HIDDEN_SSID (1 << 0) /* * Indicates if robust management frame/management frame * protection is enabled. For GO/AP vdevs, it indicates that * it may support station/client associations with RMF enabled. * For STA/client vdevs, it indicates that sta will - * associate with AP with RMF enabled. */ + * associate with AP with RMF enabled. + */ #define WMI_VDEV_START_PMF_ENABLED (1 << 1) struct wmi_p2p_noa_descriptor { @@ -4814,7 +4819,8 @@ enum wmi_vdev_param { * An associated STA is considered unresponsive if there is no recent * TX/RX activity and downlink frames are buffered for it. Once a STA * exceeds the maximum unresponsive time, the AP will send a - * WMI_STA_KICKOUT event to the host so the STA can be deleted. */ + * WMI_STA_KICKOUT event to the host so the STA can be deleted. + */ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */ @@ -4941,7 +4947,8 @@ enum wmi_10x_vdev_param { * An associated STA is considered unresponsive if there is no recent * TX/RX activity and downlink frames are buffered for it. Once a STA * exceeds the maximum unresponsive time, the AP will send a - * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */ + * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. + */ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */ @@ -5605,12 +5612,14 @@ struct wmi_tim_info_arg { struct wmi_p2p_noa_info { /* Bit 0 - Flag to indicate an update in NOA schedule - Bits 7-1 - Reserved */ + * Bits 7-1 - Reserved + */ u8 changed; /* NOA index */ u8 index; /* Bit 0 - Opp PS state of the AP - Bits 1-7 - Ctwindow in TUs */ + * Bits 1-7 - Ctwindow in TUs + */ u8 ctwindow_oppps; /* Number of NOA descriptors */ u8 num_descriptors; @@ -6000,7 +6009,8 @@ struct wmi_main_peer_assoc_complete_cmd { struct wmi_common_peer_assoc_complete_cmd cmd; /* HT Operation Element of the peer. Five bytes packed in 2 - * INT32 array and filled from lsb to msb. */ + * INT32 array and filled from lsb to msb. 
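+	 * e.g. byte 0 of the element ends up in the lsb of
+	 * peer_ht_info[0] and byte 4 in the lsb of peer_ht_info[1].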
+ */ __le32 peer_ht_info[2]; } __packed; @@ -6335,6 +6345,8 @@ struct wmi_svc_rdy_ev_arg { __le32 num_rf_chains; __le32 eeprom_rd; __le32 num_mem_reqs; + __le32 low_5ghz_chan; + __le32 high_5ghz_chan; const __le32 *service_map; size_t service_map_len; const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index d98fd421c7ec58c3c815947a5fd8a0597eada4d5..527afcf39246079eac0f0fbcdbb4e145fda11809 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1414,10 +1414,10 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, rxs->flag |= ath5k_rx_decrypted(ah, skb, rs); switch (ah->ah_bwmode) { case AR5K_BWMODE_5MHZ: - rxs->flag |= RX_FLAG_5MHZ; + rxs->bw = RATE_INFO_BW_5; break; case AR5K_BWMODE_10MHZ: - rxs->flag |= RX_FLAG_10MHZ; + rxs->bw = RATE_INFO_BW_10; break; default: break; @@ -1425,7 +1425,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, if (rs->rs_rate == ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) - rxs->flag |= RX_FLAG_SHORTPRE; + rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE; trace_ath5k_rx(ah, skb); @@ -2564,6 +2564,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) hw->extra_tx_headroom = 2; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + /* * Mark the device as detached to avoid processing * interrupts until setup is complete. diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 363b30a549c2b69244e9dca3d0ac77ab4629a496..414b5b596efcd92b7f97a059b710668e059f218c 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -102,10 +102,8 @@ static struct ieee80211_channel ath6kl_2ghz_channels[] = { }; static struct ieee80211_channel ath6kl_5ghz_a_channels[] = { - CHAN5G(34, 0), CHAN5G(36, 0), - CHAN5G(38, 0), CHAN5G(40, 0), - CHAN5G(42, 0), CHAN5G(44, 0), - CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(36, 0), CHAN5G(40, 0), + CHAN5G(44, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), @@ -171,7 +169,7 @@ static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif) if (!stopped) return; - cfg80211_sched_scan_stopped(ar->wiphy); + cfg80211_sched_scan_stopped(ar->wiphy, 0); } static int ath6kl_set_wpa_version(struct ath6kl_vif *vif, @@ -808,9 +806,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel, WLAN_STATUS_SUCCESS, GFP_KERNEL); cfg80211_put_bss(ar->wiphy, bss); } else if (vif->sme_state == SME_CONNECTED) { + struct cfg80211_roam_info roam_info = { + .bss = bss, + .req_ie = assoc_req_ie, + .req_ie_len = assoc_req_len, + .resp_ie = assoc_resp_ie, + .resp_ie_len = assoc_resp_len, + }; /* inform roam event to cfg80211 */ - cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len, - assoc_resp_ie, assoc_resp_len, GFP_KERNEL); + cfg80211_roamed(vif->ndev, &roam_info, GFP_KERNEL); } } @@ -1505,7 +1509,6 @@ static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, - u32 *flags, struct vif_params *params) { struct ath6kl *ar = wiphy_priv(wiphy); @@ -1552,7 +1555,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy, static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, 
struct vif_params *params) { struct ath6kl_vif *vif = netdev_priv(ndev); @@ -3355,7 +3358,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy, } static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy, - struct net_device *dev) + struct net_device *dev, u64 reqid) { struct ath6kl_vif *vif = netdev_priv(dev); bool stopped; @@ -3976,7 +3979,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities)) - ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + ar->wiphy->max_sched_scan_reqs = 1; if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, ar->fw_capabilities)) diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h index 0614393dd7ae7c1f6f3de79bce6ecf3338082592..94297572914fec7d0401508c33598d13aa798362 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.h +++ b/drivers/net/wireless/ath/ath6kl/debug.h @@ -63,6 +63,7 @@ int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif); #ifdef CONFIG_ATH6KL_DEBUG +__printf(2, 3) void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...); void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask, const char *msg, const char *prefix, @@ -83,6 +84,7 @@ int ath6kl_debug_init_fs(struct ath6kl *ar); void ath6kl_debug_cleanup(struct ath6kl *ar); #else +__printf(2, 3) static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask, const char *fmt, ...) { diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c index ca1a18c86c0d79d64ad93f26f42fdaf87bcb6411..d127a08d60df65ba45fdc578ce9b44a825981c64 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c +++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c @@ -995,7 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, if (netlen < (payload_len + HTC_HDR_LENGTH)) { ath6kl_dbg(ATH6KL_DBG_HTC, - "HTC Rx: insufficient length, got:%d expected =%u\n", + "HTC Rx: insufficient length, got:%d expected =%zu\n", netlen, payload_len + HTC_HDR_LENGTH); status = -EINVAL; goto free_skb; diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c index d67170ea1038a1d35f3acc56261e68ed2051cad4..d8dcacda9add2b23e5f38bc66263190fe4ba4be1 100644 --- a/drivers/net/wireless/ath/ath6kl/testmode.c +++ b/drivers/net/wireless/ath/ath6kl/testmode.c @@ -74,8 +74,8 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, int err, buf_len; void *buf; - err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, - ath6kl_tm_policy); + err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_policy, + NULL); if (err) return err; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 84a6d12c3f8a3b45c8940dfe2eac53bee9274f3e..bfc20b45b806cea3ce1764c8be4add34a8408c1a 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -1082,7 +1082,7 @@ void ath6kl_wmi_sscan_timer(unsigned long ptr) { struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr; - cfg80211_sched_scan_results(vif->ar->wiphy); + cfg80211_sched_scan_results(vif->ar->wiphy, 0); } static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, @@ -1596,7 +1596,7 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len, rate = le32_to_cpu(ev->rate); pkts = le32_to_cpu(ev->pkts); - ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d% pkts %d intvl %ds\n", + ath6kl_dbg(ATH6KL_DBG_WMI, "TXE 
notify event: peer %pM rate %d%% pkts %d intvl %ds\n", vif->bssid, rate, pkts, vif->txe_intvl); cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index cc5bb0a76baf3985c205f17f6db912a9eaad3b6e..68fcbe03bce2c2efd789de66d526f1cab91e3d9a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -494,7 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_status = 0; rxs->rs_flags = 0; - rxs->flag = 0; + rxs->enc_flags = 0; + rxs->bw = RATE_INFO_BW_20; rxs->rs_datalen = rxsp->status2 & AR_DataLen; rxs->rs_tstamp = rxsp->status3; @@ -520,8 +521,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); - rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0; - rxs->flag |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0; + rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; + rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; rxs->evm0 = rxsp->status6; rxs->evm1 = rxsp->status7; diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 0f71146b781d12dfcb5907fdecb3843b4260ad4b..13ab6bc467753a93748e7c4def72c0292eeca179 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -254,7 +254,9 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan)) continue; - if (h) + if (ah->nf_override) + nfval = ah->nf_override; + else if (h) nfval = h[i].privNF; else nfval = default_nf; @@ -348,6 +350,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) return 0; } +EXPORT_SYMBOL(ath9k_hw_loadnf); static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 0ffa23a615682400905bf31f22b5c9942a8ce282..5e77fe1f5b0db41eb6e6226c454f8b3d7d201894 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -742,6 +742,9 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, return; } + if (!spec_priv->spec_config.enabled) + return; + ath_ps_ops(common)->wakeup(common); rxfilter = ath9k_hw_getrxfilter(ah); ath9k_hw_setrxfilter(ah, rxfilter | diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index b80e08b13b7439bac045f3242174bbd83e09e4a9..c67d0e08bd4c2bbeed447976ff05dbaf66547b54 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -181,14 +181,15 @@ int ath9k_cmn_process_rate(struct ath_common *common, sband = hw->wiphy->bands[band]; if (IS_CHAN_QUARTER_RATE(ah->curchan)) - rxs->flag |= RX_FLAG_5MHZ; + rxs->bw = RATE_INFO_BW_5; else if (IS_CHAN_HALF_RATE(ah->curchan)) - rxs->flag |= RX_FLAG_10MHZ; + rxs->bw = RATE_INFO_BW_10; if (rx_stats->rs_rate & 0x80) { /* HT rate */ - rxs->flag |= RX_FLAG_HT; - rxs->flag |= rx_stats->flag; + rxs->encoding = RX_ENC_HT; + rxs->enc_flags |= rx_stats->enc_flags; + rxs->bw = rx_stats->bw; rxs->rate_idx = rx_stats->rs_rate & 0x7f; return 0; } @@ -199,7 +200,7 @@ int ath9k_cmn_process_rate(struct ath_common 
*common, return 0; } if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { - rxs->flag |= RX_FLAG_SHORTPRE; + rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE; rxs->rate_idx = i; return 0; } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 43930c336987ee4fc802b3219a65fe33fd98073e..2e64977a8ab664c64d3ece94f5fd57ce0c13f729 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1191,6 +1191,65 @@ static const struct file_operations fops_tpc = { .llseek = default_llseek, }; +static ssize_t read_file_nf_override(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_hw *ah = sc->sc_ah; + char buf[32]; + unsigned int len; + + if (ah->nf_override == 0) + len = sprintf(buf, "off\n"); + else + len = sprintf(buf, "%d\n", ah->nf_override); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_nf_override(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_hw *ah = sc->sc_ah; + long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (strncmp("off", buf, 3) == 0) + val = 0; + else if (kstrtol(buf, 0, &val)) + return -EINVAL; + + if (val > 0) + return -EINVAL; + + if (val < -120) + return -EINVAL; + + ah->nf_override = val; + + if (ah->curchan) + ath9k_hw_loadnf(ah, ah->curchan); + + return count; +} + +static const struct file_operations fops_nf_override = { + .read = read_file_nf_override, + .write = write_file_nf_override, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + /* Ethtool support for get-stats */ #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO" @@ -1402,5 +1461,8 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->airtime_flags); + debugfs_create_file("nf_override", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, &fops_nf_override); + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index 524cbf13ca9c25d4d6fbc80d50f601d67b2c3f97..efc692ee67d4b96f989b5a60fcb97252c410deaf 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -116,12 +116,12 @@ void ath_debug_rate_stats(struct ath_softc *sc, if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats)) goto exit; - if (rxs->flag & RX_FLAG_40MHZ) + if ((rxs->bw == RATE_INFO_BW_40)) rstats->ht_stats[rxs->rate_idx].ht40_cnt++; else rstats->ht_stats[rxs->rate_idx].ht20_cnt++; - if (rxs->flag & RX_FLAG_SHORT_GI) + if (rxs->enc_flags & RX_ENC_FLAG_SHORT_GI) rstats->ht_stats[rxs->rate_idx].sgi_cnt++; else rstats->ht_stats[rxs->rate_idx].lgi_cnt++; @@ -130,7 +130,7 @@ void ath_debug_rate_stats(struct ath_softc *sc, } if (IS_CCK_RATE(rs->rs_rate)) { - if (rxs->flag & RX_FLAG_SHORTPRE) + if (rxs->enc_flags & RX_ENC_FLAG_SHORTPRE) rstats->cck_stats[rxs->rate_idx].cck_sp_cnt++; else rstats->cck_stats[rxs->rate_idx].cck_lp_cnt++; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index fb80ec86e53d07690ea186148305a18bcc7fdc9e..6ccf248145141db26f931d83b1a8327d656b516f 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -112,7 
+112,7 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size, off_t offset, u16 *data) { - if (offset > blob_size) + if (offset >= blob_size) return false; *data = blob[offset]; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 30bf722e33ede0f1a778e45d3a01875e66ff1dcb..31390af6c33e3fd4062f7fe052a25fa420af684f 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -106,7 +106,7 @@ #define AR9285_RDEXT_DEFAULT 0x1F #define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) -#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) +#define FREQ2FBIN(x, y) (u8)((y) ? ((x) - 2300) : (((x) - 4800) / 5)) #define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x)) #define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index de2d212f39ecef2c3c9c6ca2621bd21c7c5eb410..12aa8abbcba40ce2afd27535d82a378487151070 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */ + { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */ { USB_DEVICE(0x0cf3, 0x7015), .driver_info = AR9287_USB }, /* Atheros */ @@ -1219,6 +1220,9 @@ static int send_eject_command(struct usb_interface *interface) u8 bulk_out_ep; int r; + if (iface_desc->desc.bNumEndpoints < 2) + return -ENODEV; + /* Find bulk out endpoint */ for (r = 1; r >= 0; r--) { endpoint = &iface_desc->endpoint[r].desc; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index b65c1b661adeb4c86f79722d86958816072b768d..defacc6c9c995ea6b2b755a1ac11978d805006eb 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -780,6 +780,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, } SET_IEEE80211_PERM_ADDR(hw, common->macaddr); + + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); } static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index f333ef1e3e7b4171cbe0ff37a21c43f87f4937b3..b38a586ea59ad33c74255590a0f18bf807590e41 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -929,11 +929,12 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv) static inline void convert_htc_flag(struct ath_rx_status *rx_stats, struct ath_htc_rx_status *rxstatus) { - rx_stats->flag = 0; + rx_stats->enc_flags = 0; + rx_stats->bw = RATE_INFO_BW_20; if (rxstatus->rs_flags & ATH9K_RX_2040) - rx_stats->flag |= RX_FLAG_40MHZ; + rx_stats->bw = RATE_INFO_BW_40; if (rxstatus->rs_flags & ATH9K_RX_GI) - rx_stats->flag |= RX_FLAG_SHORT_GI; + rx_stats->enc_flags |= RX_ENC_FLAG_SHORT_GI; } static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats, diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9cbca1229bac02862211c14d14048f08197dd39d..4ac70827d142c9739c9987cef5cb20cf5ca58776 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ 
b/drivers/net/wireless/ath/ath9k/hw.h @@ -803,6 +803,7 @@ struct ath_hw { u32 rfkill_gpio; u32 rfkill_polarity; u32 ah_flags; + s16 nf_override; bool reset_power_on; bool htc_reset_init; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index fa4b3cc1ba22c8dcf3882dd51b738dd412ddea75..fd9a61834c17f8ac8f3612f3c89de13a78de5189 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -955,6 +955,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) ath9k_cmn_reload_chainmask(ah); SET_IEEE80211_PERM_ADDR(hw, common->macaddr); + + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); } int ath9k_init_device(u16 devid, struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index d937c39b3a0b3569a7e4a67ec24973e17c9e4211..6128c2bb23d8d1a46b94f2bd68bffdfc52bfcedc 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -535,7 +535,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_status = 0; rs->rs_flags = 0; - rs->flag = 0; + rs->enc_flags = 0; + rs->bw = RATE_INFO_BW_20; rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; rs->rs_tstamp = ads.AR_RcvTimestamp; @@ -577,15 +578,15 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); /* directly mapped flags for ieee80211_rx_status */ - rs->flag |= - (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0; - rs->flag |= - (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0; + rs->enc_flags |= + (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; + rs->enc_flags |= + (ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; if (AR_SREV_9280_20_OR_LATER(ah)) - rs->flag |= + rs->enc_flags |= (ads.ds_rxstatus3 & AR_STBC) ? 
				/* we can only Nss=1 STBC */
-				(1 << RX_FLAG_STBC_SHIFT) : 0;
+				(1 << RX_ENC_FLAG_STBC_SHIFT) : 0;
 
 	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
 		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 770fc11b41d183eae8f6b6a4599347732af88f1d..fd6aa49adadfee3594a8afd105222100e48eee92 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -16,6 +16,7 @@
 #ifndef MAC_H
 #define MAC_H
 
+#include <net/cfg80211.h>
 
 #define set11nTries(_series, _index) \
 	(SM((_series)[_index].Tries, AR_XmitDataTries##_index))
@@ -143,7 +144,8 @@ struct ath_rx_status {
 	u32 evm2;
 	u32 evm3;
 	u32 evm4;
-	u32 flag; /* see enum mac80211_rx_flags */
+	u16 enc_flags;
+	enum rate_info_bw bw;
 };
 
 struct ath_htc_rx_status {
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index aff473dfa10df6a6f8eb2140d89f0f4aff9d4e2c..7b7627f85d3a57dcdf8693cbdeb97f92a23f3d7a 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -383,6 +383,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
 			 0x10CF, /* Fujitsu */
 			 0x1783),
 	  .driver_data = ATH9K_PCI_WOW },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0034,
+			 PCI_VENDOR_ID_DELL,
+			 0x020B),
+	  .driver_data = ATH9K_PCI_WOW },
 
 	/* Killer Wireless (2x2) */
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d79837fe333f45ebd3203649d039e891aa0d0125..2197aee2bb72adcd24cb24949a6c82a142dfde7d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1037,11 +1037,11 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
 
 	rxs = IEEE80211_SKB_RXCB(skb);
 
-	is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI);
-	is_40 = !!(rxs->flag & RX_FLAG_40MHZ);
-	is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE);
+	is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI);
+	is_40 = !!(rxs->bw == RATE_INFO_BW_40);
+	is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE);
 
-	if (!!(rxs->flag & RX_FLAG_HT)) {
+	if (!!(rxs->encoding == RX_ENC_HT)) {
 		/* MCS rates */
 		airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index ffb22a04beeb748a8a7339bf93ed9c9a0fdc298e..988c8857d78c9d21abbe3b007ce605b13e04b109 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1874,6 +1874,8 @@ void *carl9170_alloc(size_t priv_size)
 	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
 		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
 
+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
 	return ar;
 
 err_nomem:
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 0c34c8729dc610fbdf7d05a9bc2a50af8c4ae158..b2166726b05d2651840ade7a1e285e3b8faf7674 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -358,7 +358,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
 	switch (mac->status & AR9170_RX_STATUS_MODULATION) {
 	case AR9170_RX_STATUS_MODULATION_CCK:
 		if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
-			status->flag |= RX_FLAG_SHORTPRE;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 		switch (head->plcp[0]) {
 		case AR9170_RX_PHY_RATE_CCK_1M:
 			status->rate_idx = 0;
@@ -423,12 +423,12 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
 	case AR9170_RX_STATUS_MODULATION_HT:
 		if (head->plcp[3] & 0x80)
-			status->flag |=
RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; if (head->plcp[6] & 0x80) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; status->rate_idx = clamp(0, 75, head->plcp[3] & 0x7f); - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; break; default: diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 43afa83a9f0c1b70ac63eb464c3722088b16e1e1..e25bfdf78c2e1b103a2a62ad23e4eb32d2b14c9b 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain) EXPORT_SYMBOL(ath_is_49ghz_allowed); /* Frequency is one where radar detection is required */ -static bool ath_is_radar_freq(u16 center_freq) +static bool ath_is_radar_freq(u16 center_freq, + struct ath_regulatory *reg) + { + if (reg->country_code == CTRY_INDIA) + return (center_freq >= 5500 && center_freq <= 5700); return (center_freq >= 5260 && center_freq <= 5700); } @@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *ch) { - if (ath_is_radar_freq(ch->center_freq) || + if (ath_is_radar_freq(ch->center_freq, reg) || (ch->flags & IEEE80211_CHAN_RADAR)) return; @@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy, } } -/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */ -static void ath_reg_apply_radar_flags(struct wiphy *wiphy) +/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */ +static void ath_reg_apply_radar_flags(struct wiphy *wiphy, + struct ath_regulatory *reg) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; @@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy) for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; - if (!ath_is_radar_freq(ch->center_freq)) + if (!ath_is_radar_freq(ch->center_freq, reg)) continue; /* We always enable radar detection/DFS on this * frequency range. Additionally we also apply on @@ -506,7 +511,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy, struct ath_common *common = container_of(reg, struct ath_common, regulatory); /* We always apply this */ - ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_radar_flags(wiphy, reg); /* * This would happen when we have sent a custom regulatory request @@ -654,7 +659,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, } wiphy_apply_custom_regulatory(wiphy, regd); - ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_radar_flags(wiphy, reg); ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); return 0; } diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig index 4b83e87f0b9450bf431b48a168901371f1cab33c..20bf967a70b969d6be6e593e7ae04330d1d85daa 100644 --- a/drivers/net/wireless/ath/wcn36xx/Kconfig +++ b/drivers/net/wireless/ath/wcn36xx/Kconfig @@ -2,7 +2,7 @@ config WCN36XX tristate "Qualcomm Atheros WCN3660/3680 support" depends on MAC80211 && HAS_DMA depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n - depends on QCOM_SMD || QCOM_SMD=n + depends on RPMSG || RPMSG=n ---help--- This module adds support for wireless adapters based on Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets. 
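The wcn36xx hunks that follow convert the driver from the qcom_smd channel API to the generic rpmsg endpoint API, which is what the relaxed RPMSG dependency above reflects. As a rough sketch of the resulting message paths, with hypothetical demo_* names (the real endpoint still comes from qcom_wcnss_open_channel(), as in the hunks below):

#include <linux/rpmsg.h>

/* rx: rpmsg delivers each message to a callback of this shape; the priv
 * pointer carries what qcom_smd_get_drvdata() used to return, which is
 * why the patch passes hw directly into qcom_wcnss_open_channel() and
 * drops the qcom_smd_set_drvdata() call.
 */
static int demo_rsp_process(struct rpmsg_device *rpdev, void *buf, int len,
			    void *priv, u32 addr)
{
	/* parse the HAL response/indication found in buf[0..len) */
	return 0;
}

/* tx: rpmsg_send() on the endpoint is a one-for-one replacement for
 * qcom_smd_send() on the old channel.
 */
static int demo_send(struct rpmsg_endpoint *ept, void *msg, int len)
{
	return rpmsg_send(ept, msg, len);
}
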
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 7a0c2e7da7f643333506e595d39fd988b535825e..d5e993dc9b238c6fd306b801083346570eec8790 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include "wcn36xx.h" @@ -337,10 +337,10 @@ static int wcn36xx_start(struct ieee80211_hw *hw) wcn36xx_smd_stop(wcn); out_free_smd_buf: kfree(wcn->hal_buf); -out_free_dxe_pool: - wcn36xx_dxe_free_mem_pools(wcn); out_free_dxe_ctl: wcn36xx_dxe_free_ctl_blks(wcn); +out_free_dxe_pool: + wcn36xx_dxe_free_mem_pools(wcn); out_smd_close: wcn36xx_smd_close(wcn); out_err: @@ -1112,6 +1112,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta); wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif); + wiphy_ext_feature_set(wcn->hw->wiphy, + NL80211_EXT_FEATURE_CQM_RSSI_LIST); + return ret; } @@ -1218,15 +1221,13 @@ static int wcn36xx_probe(struct platform_device *pdev) INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker); - wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process); + wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw); if (IS_ERR(wcn->smd_channel)) { wcn36xx_err("failed to open WLAN_CTRL channel\n"); ret = PTR_ERR(wcn->smd_channel); goto out_wq; } - qcom_smd_set_drvdata(wcn->smd_channel, hw); - addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret); if (addr && ret != ETH_ALEN) { wcn36xx_err("invalid local-mac-address\n"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 1c2966f7db7a3f7c41660027444e17f4a908c54d..9c6590d5348ad53f64d130287f51b9fe7ccf95d8 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "smd.h" struct wcn36xx_cfg_val { @@ -254,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) init_completion(&wcn->hal_rsp_compl); start = jiffies; - ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len); + ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len); if (ret) { wcn36xx_err("HAL TX failed\n"); goto out; @@ -2205,11 +2205,11 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, return ret; } -int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, - const void *buf, size_t len) +int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, + void *buf, int len, void *priv, u32 addr) { const struct wcn36xx_hal_msg_header *msg_header = buf; - struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel); + struct ieee80211_hw *hw = priv; struct wcn36xx *wcn = hw->priv; struct wcn36xx_hal_ind_msg *msg_ind; wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 8892ccd67b144903ae25cde3287e79f951e5a8c6..013fc9546f56d4b48d60a3452b4a72d3204cdaa6 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -51,7 +51,7 @@ struct wcn36xx_hal_ind_msg { }; struct wcn36xx; -struct qcom_smd_channel; +struct rpmsg_device; int wcn36xx_smd_open(struct wcn36xx *wcn); void wcn36xx_smd_close(struct wcn36xx *wcn); @@ -129,8 +129,8 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); -int 
wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel, - const void *buf, size_t len); +int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, + void *buf, int len, void *priv, u32 addr); int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 8c387a0a3c091c474e8f7dd948dc31b9060c0813..22304edc59489e3e667b78495acd9a7b9a33a2b1 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -68,7 +68,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) RX_FLAG_MMIC_STRIPPED | RX_FLAG_DECRYPTED; - wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag); + wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 7423998ddeb492cab624134fcb5d363db42093d9..b52b4da9a967bab2ad6b5298f76797f3557e8395 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -195,7 +195,7 @@ struct wcn36xx { void __iomem *ccu_base; void __iomem *dxe_base; - struct qcom_smd_channel *smd_channel; + struct rpmsg_endpoint *smd_channel; struct qcom_smem_state *tx_enable_state; unsigned tx_enable_state_bit; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 83155b5ddbfb1fe8018e59760a7c2b571215b0ae..fdaa99c541ac0a51ea9b26aa185f5ef7a9052637 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -178,9 +178,8 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, BIT(NL80211_STA_INFO_RX_DROP_MISC) | BIT(NL80211_STA_INFO_TX_FAILED); - sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; + sinfo->txrate.flags = RATE_INFO_FLAGS_60G; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); - sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; sinfo->rxrate.mcs = stats->last_mcs_rx; sinfo->rx_bytes = stats->rx_bytes; sinfo->rx_packets = stats->rx_packets; @@ -256,7 +255,7 @@ static struct wireless_dev * wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, - u32 *flags, struct vif_params *params) + struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct net_device *ndev = wil_to_ndev(wil); @@ -307,7 +306,7 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy, static int wil_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); @@ -334,11 +333,8 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_GO: break; case NL80211_IFTYPE_MONITOR: - if (flags) - wil->monitor_flags = *flags; - else - wil->monitor_flags = 0; - + if (params->flags) + wil->monitor_flags = params->flags; break; default: return -EOPNOTSUPP; @@ -390,22 +386,23 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, } mutex_unlock(&wil->p2p_wdev_mutex); - /* social scan on P2P_DEVICE is handled as p2p search */ - if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE && - wil_p2p_is_social_scan(request)) { + if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { if (!wil->p2p.p2p_dev_started) { wil_err(wil, "P2P search requested on stopped P2P device\n"); rc = -EIO; goto 
out; } - wil->scan_request = request; - wil->radio_wdev = wdev; - rc = wil_p2p_search(wil, request); - if (rc) { - wil->radio_wdev = wil_to_wdev(wil); - wil->scan_request = NULL; + /* social scan on P2P_DEVICE is handled as p2p search */ + if (wil_p2p_is_social_scan(request)) { + wil->scan_request = request; + wil->radio_wdev = wdev; + rc = wil_p2p_search(wil, request); + if (rc) { + wil->radio_wdev = wil_to_wdev(wil); + wil->scan_request = NULL; + } + goto out; } - goto out; } (void)wil_p2p_stop_discovery(wil); @@ -415,9 +412,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, for (i = 0; i < request->n_ssids; i++) { wil_dbg_misc(wil, "SSID[%d]", i); - print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, - request->ssids[i].ssid, - request->ssids[i].ssid_len); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + request->ssids[i].ssid, + request->ssids[i].ssid_len, true); } if (request->n_ssids) @@ -454,8 +451,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, } if (request->ie_len) - print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET, - request->ie, request->ie_len); + wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1, + request->ie, request->ie_len, true); else wil_dbg_misc(wil, "Scan has no IE's\n"); @@ -679,6 +676,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); if (rc == 0) { netif_carrier_on(ndev); + wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); + wil->bss = bss; /* Connect can take lots of time */ mod_timer(&wil->connect_timer, jiffies + msecs_to_jiffies(2000)); @@ -707,6 +706,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, return 0; } + wil->locally_generated_disc = true; rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, WMI_DISCONNECT_EVENTID, NULL, 0, WIL6210_DISCONNECT_TO_MS); @@ -760,7 +760,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, */ wil_dbg_misc(wil, "mgmt_tx\n"); - print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len); + wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { @@ -1093,18 +1094,18 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, static void wil_print_bcon_data(struct cfg80211_beacon_data *b) { - print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, - b->head, b->head_len); - print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET, - b->tail, b->tail_len); - print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET, - b->beacon_ies, b->beacon_ies_len); - print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET, - b->probe_resp, b->probe_resp_len); - print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET, - b->proberesp_ies, b->proberesp_ies_len); - print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET, - b->assocresp_ies, b->assocresp_ies_len); + wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1, + b->head, b->head_len, true); + wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1, + b->tail, b->tail_len, true); + wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->beacon_ies, b->beacon_ies_len, true); + wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, + b->probe_resp, b->probe_resp_len, true); + wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->proberesp_ies, b->proberesp_ies_len, true); + wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->assocresp_ies, b->assocresp_ies_len, true); } /* internal functions for device reset and starting AP */ @@ -1198,6 +1199,7 @@ static int 
_wil_cfg80211_start_ap(struct wiphy *wiphy, wil->pbss = pbss; netif_carrier_on(ndev); + wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go); if (rc) @@ -1213,6 +1215,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, wmi_pcp_stop(wil); err_pcp_start: netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); out: mutex_unlock(&wil->mutex); return rc; @@ -1298,8 +1301,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, info->dtim_period); wil_dbg_misc(wil, "PBSS %d\n", info->pbss); - print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, - info->ssid, info->ssid_len); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + info->ssid, info->ssid_len, true); wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); @@ -1319,6 +1322,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, wil_dbg_misc(wil, "stop_ap\n"); netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); @@ -1555,12 +1559,6 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); enum wmi_ps_profile_type ps_profile; - int rc; - - if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) { - wil_err(wil, "set_power_mgmt not supported\n"); - return -EOPNOTSUPP; - } wil_dbg_misc(wil, "enabled=%d, timeout=%d\n", enabled, timeout); @@ -1570,11 +1568,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, else ps_profile = WMI_PS_PROFILE_TYPE_PS_DISABLED; - rc = wmi_ps_dev_profile_cfg(wil, ps_profile); - if (rc) - wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc); - - return rc; + return wil_ps_update(wil, ps_profile); } static const struct cfg80211_ops wil_cfg80211_ops = { diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 3e8cdf12fedad5251a36aaedecf7470f80ef4d7e..5648ebbd0e1663720c2fd398d7e29fc623f9832b 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -524,9 +524,8 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; - wil_memcpy_fromio_halp_vote(wil_blob->wil, buf, - (const volatile void __iomem *) - wil_blob->blob.data + pos, count); + wil_memcpy_fromio_32(buf, (const void __iomem *) + wil_blob->blob.data + pos, count); ret = copy_to_user(user_buf, buf, count); kfree(buf); diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index f4901587c0057a9fc749e29a4e5bd7f0ccc5b1f3..e01acac88825d895dbae09fe67f6e21b0ad3fbd2 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -554,5 +554,7 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name) rc = request_firmware(&fw, name, wil_to_dev(wil)); if (!rc) release_firmware(fw); - return rc != -ENOENT; + else + wil_dbg_fw(wil, "<%s> not available: %d\n", name, rc); + return !rc; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index efb1f59aafd9956e5995eede2d606f202cc639ca..32086792dfc30bb7f23a1cc34e69ed7600965fa8 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -30,8 +30,8 @@ bool debug_fw; /* = false; */ module_param(debug_fw, bool, 0444); MODULE_PARM_DESC(debug_fw, 
" do not perform card reset. For FW debug"); -static bool oob_mode; -module_param(oob_mode, bool, 0444); +static u8 oob_mode; +module_param(oob_mode, byte, 0444); MODULE_PARM_DESC(oob_mode, " enable out of the box (OOB) mode in FW, for diagnostics and certification"); @@ -130,17 +130,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, u32 *d = dst; const volatile u32 __iomem *s = src; - /* size_t is unsigned, if (count%4 != 0) it will wrap */ - for (count += 4; count > 4; count -= 4) + for (; count >= 4; count -= 4) *d++ = __raw_readl(s++); -} -void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst, - const volatile void __iomem *src, size_t count) -{ - wil_halp_vote(wil); - wil_memcpy_fromio_32(dst, src, count); - wil_halp_unvote(wil); + if (unlikely(count)) { + /* count can be 1..3 */ + u32 tmp = __raw_readl(s); + + memcpy(d, &tmp, count); + } } void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, @@ -149,17 +147,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, volatile u32 __iomem *d = dst; const u32 *s = src; - for (count += 4; count > 4; count -= 4) + for (; count >= 4; count -= 4) __raw_writel(*s++, d++); -} -void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil, - volatile void __iomem *dst, - const void *src, size_t count) -{ - wil_halp_vote(wil); - wil_memcpy_toio_32(dst, src, count); - wil_halp_unvote(wil); + if (unlikely(count)) { + /* count can be 1..3 */ + u32 tmp = 0; + + memcpy(&tmp, s, count); + __raw_writel(tmp, d); + } } static void wil_disconnect_cid(struct wil6210_priv *wil, int cid, @@ -274,15 +271,20 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, wil_bcast_fini(wil); wil_update_net_queues_bh(wil, NULL, true); netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); if (test_bit(wil_status_fwconnected, wil->status)) { clear_bit(wil_status_fwconnected, wil->status); cfg80211_disconnected(ndev, reason_code, - NULL, 0, false, GFP_KERNEL); + NULL, 0, + wil->locally_generated_disc, + GFP_KERNEL); + wil->locally_generated_disc = false; } else if (test_bit(wil_status_fwconnecting, wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); + wil->bss = NULL; } clear_bit(wil_status_fwconnecting, wil->status); break; @@ -304,10 +306,34 @@ static void wil_disconnect_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, disconnect_worker); + struct net_device *ndev = wil_to_ndev(wil); + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_disconnect_event evt; + } __packed reply; - mutex_lock(&wil->mutex); - _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false); - mutex_unlock(&wil->mutex); + if (test_bit(wil_status_fwconnected, wil->status)) + /* connect succeeded after all */ + return; + + if (!test_bit(wil_status_fwconnecting, wil->status)) + /* already disconnected */ + return; + + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), + WIL6210_DISCONNECT_TO_MS); + if (rc) { + wil_err(wil, "disconnect error %d\n", rc); + return; + } + + wil_update_net_queues_bh(wil, NULL, true); + netif_carrier_off(ndev); + cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); + clear_bit(wil_status_fwconnecting, wil->status); } static void wil_connect_timer_fn(ulong x) @@ -547,6 +573,9 @@ int wil_priv_init(struct wil6210_priv *wil) if 
(rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT) rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT; + + wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT; + return 0; out_wmi_wq: @@ -555,6 +584,12 @@ int wil_priv_init(struct wil6210_priv *wil) return -EAGAIN; } +void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps) +{ + if (wil->platform_ops.bus_request) + wil->platform_ops.bus_request(wil->platform_handle, kbps); +} + /** * wil6210_disconnect - disconnect one connection * @wil: driver context @@ -607,13 +642,25 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) wil_w(wil, RGF_USER_USER_CPU_0, 1); } -static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable) +static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) { - wil_info(wil, "enable=%d\n", enable); - if (enable) + wil_info(wil, "oob_mode to %d\n", mode); + switch (mode) { + case 0: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE | + BIT_USER_OOB_R2_MODE); + break; + case 1: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE); wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); - else + break; + case 2: wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); + wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE); + break; + default: + wil_err(wil, "invalid oob_mode: %d\n", mode); + } } static int wil_target_reset(struct wil6210_priv *wil) @@ -856,6 +903,24 @@ void wil_abort_scan(struct wil6210_priv *wil, bool sync) } } +int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile) +{ + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_PS_CONFIG, wil->fw_capabilities)) { + wil_err(wil, "set_power_mgmt not supported\n"); + return -EOPNOTSUPP; + } + + rc = wmi_ps_dev_profile_cfg(wil, ps_profile); + if (rc) + wil_err(wil, "wmi_ps_dev_profile_cfg failed (%d)\n", rc); + else + wil->ps_profile = ps_profile; + + return rc; +} + /* * We reset all the structures, and we reset the UMAC. 
* After calling this routine, you're expected to reload @@ -901,15 +966,15 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) /* Disable device led before reset*/ wmi_led_cfg(wil, false); + mutex_lock(&wil->p2p_wdev_mutex); + wil_abort_scan(wil, false); + mutex_unlock(&wil->p2p_wdev_mutex); + /* prevent NAPI from being scheduled and prevent wmi commands */ mutex_lock(&wil->wmi_mutex); bitmap_zero(wil->status, wil_status_last); mutex_unlock(&wil->wmi_mutex); - mutex_lock(&wil->p2p_wdev_mutex); - wil_abort_scan(wil, false); - mutex_unlock(&wil->p2p_wdev_mutex); - wil_mask_irq(wil); wmi_event_flush(wil); @@ -986,6 +1051,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } + if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT) + wil_ps_update(wil, wil->ps_profile); + wil_collect_fw_info(wil); if (wil->platform_ops.notify) { @@ -1066,9 +1134,7 @@ int __wil_up(struct wil6210_priv *wil) napi_enable(&wil->napi_tx); set_bit(wil_status_napi_en, wil->status); - if (wil->platform_ops.bus_request) - wil->platform_ops.bus_request(wil->platform_handle, - WIL_MAX_BUS_REQUEST_KBPS); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); return 0; } @@ -1092,8 +1158,7 @@ int __wil_down(struct wil6210_priv *wil) set_bit(wil_status_resetting, wil->status); - if (wil->platform_ops.bus_request) - wil->platform_ops.bus_request(wil->platform_handle, 0); + wil6210_bus_request(wil, 0); wil_disable_irq(wil); if (test_and_clear_bit(wil_status_napi_en, wil->status)) { @@ -1154,6 +1219,7 @@ void wil_halp_vote(struct wil6210_priv *wil) wil->halp.ref_cnt); if (++wil->halp.ref_cnt == 1) { + reinit_completion(&wil->halp.comp); wil6210_set_halp(wil); rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); if (!rc) { diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 874c787727fe5a013acfc558a7804679ee5aa39e..b38515fc7ce7a5eea8387c143e415a8a3f2f06ef 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -211,6 +211,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(dev, "wil_if_alloc failed: %d\n", rc); return rc; } + wil->pdev = pdev; pci_set_drvdata(pdev, wil); /* rollback to if_free */ @@ -224,6 +225,21 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* rollback to err_plat */ + /* device supports 48bit addresses */ + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (rc) { + dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc); + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(dev, + "dma_set_mask_and_coherent(32) failed: %d\n", + rc); + goto err_plat; + } + } else { + wil->use_extended_dma_addr = 1; + } + rc = pci_enable_device(pdev); if (rc) { wil_err(wil, diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index a0acb2d0cb7933e21f62c98d6b96a80cb51f9ef7..2ae4fe85cc8cd2d788d56d17015c5c7989b6c01d 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -71,6 +71,11 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? 
"runtime" : "system"); + if (test_bit(wil_status_suspended, wil->status)) { + wil_dbg_pm(wil, "trying to suspend while suspended\n"); + return 0; + } + /* if netif up, hardware is alive, shut it down */ if (ndev->flags & IFF_UP) { rc = wil_down(wil); @@ -80,12 +85,24 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) } } - if (wil->platform_ops.suspend) + /* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */ + wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n"); + wil_disable_irq(wil); + + if (wil->platform_ops.suspend) { rc = wil->platform_ops.suspend(wil->platform_handle); + if (rc) { + wil_enable_irq(wil); + goto out; + } + } + + set_bit(wil_status_suspended, wil->status); out: wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); + return rc; } @@ -104,12 +121,18 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) } } + wil_dbg_pm(wil, "Enabling PCIe IRQ\n"); + wil_enable_irq(wil); + /* if netif up, bring hardware up * During open(), IFF_UP set after actual device method - * invocation. This prevent recursive call to wil_up() + * invocation. This prevent recursive call to wil_up(). + * wil_status_suspended will be cleared in wil_reset */ if (ndev->flags & IFF_UP) rc = wil_up(wil); + else + clear_bit(wil_status_suspended, wil->status); out: wil_dbg_pm(wil, "resume: %s => %d\n", diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index 3ff4f4ce9feffe24c46754c258c3deb811e69056..2e301b6b32a9ce2a81a97e77affb2f8590cfe390 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -107,13 +107,28 @@ void wil_pmc_alloc(struct wil6210_priv *wil, /* Allocate pring buffer and descriptors. * vring->va should be aligned on its size rounded up to power of 2 - * This is granted by the dma_alloc_coherent + * This is granted by the dma_alloc_coherent. + * + * HW has limitation that all vrings addresses must share the same + * upper 16 msb bits part of 48 bits address. To workaround that, + * if we are using 48 bit addresses switch to 32 bit allocation + * before allocating vring memory. + * + * There's no check for the return value of dma_set_mask_and_coherent, + * since we assume if we were able to set the mask during + * initialization in this system it will not fail if we set it again */ + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + pmc->pring_va = dma_alloc_coherent(dev, sizeof(struct vring_tx_desc) * num_descriptors, &pmc->pring_pa, GFP_KERNEL); + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + wil_dbg_misc(wil, "pmc_alloc: allocated pring %p => %pad. 
%zd x %d = total %zd bytes\n", pmc->pring_va, &pmc->pring_pa, @@ -185,7 +200,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil, release_pmc_skbs: wil_err(wil, "exit on error: Releasing skbs...\n"); - for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) { + for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) { dma_free_coherent(dev, descriptor_size, pmc->descriptors[i].va, @@ -268,7 +283,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) int i; for (i = 0; - pmc->descriptors[i].va && i < pmc->num_descriptors; i++) { + i < pmc->num_descriptors && pmc->descriptors[i].va; i++) { dma_free_coherent(dev, pmc->descriptor_size, pmc->descriptors[i].va, diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 7404b6f39c6aff08495dd53c6f463d5c20f0e15c..a43cffcf1bbf7aa4b10d039ae364cf2b82f732f1 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -343,8 +343,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) wil_err(wil, "BACK requested unsupported ba_policy == 1\n"); status = WLAN_STATUS_INVALID_QOS_PARAM; } - if (status == WLAN_STATUS_SUCCESS) - agg_wsize = wil_agg_size(wil, req_agg_wsize); + if (status == WLAN_STATUS_SUCCESS) { + if (req_agg_wsize == 0) { + wil_dbg_misc(wil, "Suggest BACK wsize %d\n", + WIL_MAX_AGG_WSIZE); + agg_wsize = WIL_MAX_AGG_WSIZE; + } else { + agg_wsize = min_t(u16, + WIL_MAX_AGG_WSIZE, req_agg_wsize); + } + } rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status, agg_amsdu, agg_wsize, agg_timeout); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 072182e527e69afe888b4ef54789495ba37d4c6a..edab4c0a900ff9ab0a022465002690bc5e3f7017 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -37,6 +37,10 @@ bool rx_align_2; module_param(rx_align_2, bool, 0444); MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); +bool rx_large_buf; +module_param(rx_large_buf, bool, 0444); +MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no"); + static inline uint wil_rx_snaplen(void) { return rx_align_2 ? 6 : 0; @@ -123,15 +127,32 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) vring->va = NULL; return -ENOMEM; } + /* vring->va should be aligned on its size rounded up to power of 2 - * This is granted by the dma_alloc_coherent + * This is granted by the dma_alloc_coherent. + * + * HW has limitation that all vrings addresses must share the same + * upper 16 msb bits part of 48 bits address. To workaround that, + * if we are using 48 bit addresses switch to 32 bit allocation + * before allocating vring memory. 
+ * + * There's no check for the return value of dma_set_mask_and_coherent, + * since we assume if we were able to set the mask during + * initialization in this system it will not fail if we set it again */ + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); if (!vring->va) { kfree(vring->ctx); vring->ctx = NULL; return -ENOMEM; } + + if (wil->use_extended_dma_addr) + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + /* initially, all descriptors are SW owned * For Tx and Rx, ownership bit is at the same location, thus * we can use any @@ -238,7 +259,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, u32 i, int headroom) { struct device *dev = wil_to_dev(wil); - unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen(); + unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen(); struct vring_rx_desc dd, *d = ⅆ volatile struct vring_rx_desc *_d = &vring->va[i].rx; dma_addr_t pa; @@ -402,7 +423,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, struct sk_buff *skb; dma_addr_t pa; unsigned int snaplen = wil_rx_snaplen(); - unsigned int sz = mtu_max + ETH_HLEN + snaplen; + unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen; u16 dmalen; u8 ftype; int cid; @@ -763,6 +784,20 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) wil_rx_refill(wil, v->size); } +static void wil_rx_buf_len_init(struct wil6210_priv *wil) +{ + wil->rx_buf_len = rx_large_buf ? + WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD; + if (mtu_max > wil->rx_buf_len) { + /* do not allow RX buffers to be smaller than mtu_max, for + * backward compatibility (mtu_max parameter was also used + * to support receiving large packets) + */ + wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max); + wil->rx_buf_len = mtu_max; + } +} + int wil_rx_init(struct wil6210_priv *wil, u16 size) { struct vring *vring = &wil->vring_rx; @@ -775,6 +810,8 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size) return -EINVAL; } + wil_rx_buf_len_init(wil); + vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 085a2dbfa21d615887c458e6385ac06cae8dcca3..b00c803a1e83c5f18cc9d0220d21363484cef147 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -32,6 +32,7 @@ extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; extern u32 vring_idle_trsh; extern bool rx_align_2; +extern bool rx_large_buf; extern bool debug_fw; extern bool disable_ap_sme; @@ -40,6 +41,7 @@ extern bool disable_ap_sme; #define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */ #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ +#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ /** @@ -139,6 +141,7 @@ struct RGF_ICR { #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) #define BIT_USER_OOB_MODE BIT(31) + #define BIT_USER_OOB_R2_MODE BIT(30) #define RGF_USER_HW_MACHINE_STATE (0x8801dc) #define HW_MACHINE_BOOT_DONE (0x3fffffd) #define RGF_USER_USER_CPU_0 (0x8801e0) @@ -409,6 +412,7 @@ enum { /* for wil6210_priv.status */ wil_status_irqen, /* FIXME: interrupts enabled - for debug */ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */ wil_status_resetting, /* reset in progress */ + wil_status_suspended, /* suspend completed, device is suspended */ wil_status_last /* keep last */ }; @@ -612,6 +616,8 @@ struct wil6210_priv { u16 channel; /* relevant in AP mode */ int sinfo_gen; u32 ap_isolate; /* no intra-BSS communication */ + struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ + int locally_generated_disc; /* relevant in STA mode */ /* interrupt moderation */ u32 tx_max_burst_duration; u32 tx_interframe_timeout; @@ -652,11 +658,13 @@ struct wil6210_priv { struct work_struct probe_client_worker; /* DMA related */ struct vring vring_rx; + unsigned int rx_buf_len; struct vring vring_tx[WIL6210_MAX_TX_RINGS]; struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; int bcast_vring; + bool use_extended_dma_addr; /* indicates whether we are using 48 bits */ /* scan */ struct cfg80211_scan_request *scan_request; @@ -686,6 +694,8 @@ struct wil6210_priv { /* High Access Latency Policy voting */ struct wil_halp halp; + enum wmi_ps_profile_type ps_profile; + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; @@ -764,6 +774,12 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) print_hex_dump_debug("DBG[ WMI]" prefix_str,\ prefix_type, rowsize, \ groupsize, buf, len, ascii) + +#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + print_hex_dump_debug("DBG[MISC]" prefix_str,\ + prefix_type, rowsize, \ + groupsize, buf, len, ascii) #else /* defined(CONFIG_DYNAMIC_DEBUG) */ static inline void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize, @@ -776,18 +792,18 @@ void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } + +static inline +void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize, + int groupsize, const void *buf, size_t len, bool ascii) +{ +} #endif /* defined(CONFIG_DYNAMIC_DEBUG) */ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, size_t count); void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, size_t count); -void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst, - const volatile void 
__iomem *src, - size_t count); -void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil, - volatile void __iomem *dst, - const void *src, size_t count); void *wil_if_alloc(struct device *dev); void wil_if_free(struct wil6210_priv *wil); @@ -795,6 +811,8 @@ int wil_if_add(struct wil6210_priv *wil); void wil_if_remove(struct wil6210_priv *wil); int wil_priv_init(struct wil6210_priv *wil); void wil_priv_deinit(struct wil6210_priv *wil); +int wil_ps_update(struct wil6210_priv *wil, + enum wmi_ps_profile_type ps_profile); int wil_reset(struct wil6210_priv *wil, bool no_fw); void wil_fw_error_recovery(struct wil6210_priv *wil); void wil_set_recovery_state(struct wil6210_priv *wil, int state); @@ -899,7 +917,7 @@ int wmi_pcp_stop(struct wil6210_priv *wil); int wmi_led_cfg(struct wil6210_priv *wil, bool enable); int wmi_abort_scan(struct wil6210_priv *wil); void wil_abort_scan(struct wil6210_priv *wil, bool sync); - +void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps); void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event); void wil_probe_client_flush(struct wil6210_priv *wil); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 1f22c19696b11914e7e79a470882f7de692753c0..814c35645b737a3664bd3047de49d4c9fc10fc69 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -518,16 +518,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) assoc_resp_ielen = 0; } - mutex_lock(&wil->mutex); if (test_bit(wil_status_resetting, wil->status) || !test_bit(wil_status_fwready, wil->status)) { wil_err(wil, "status_resetting, cancel connect event, CID %d\n", evt->cid); - mutex_unlock(&wil->mutex); /* no need for cleanup, wil_reset will do that */ return; } + mutex_lock(&wil->mutex); + if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (!test_bit(wil_status_fwconnecting, wil->status)) { @@ -565,6 +565,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (rc) { netif_carrier_off(ndev); + wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); wil_err(wil, "cfg80211_connect_result with failure\n"); cfg80211_connect_result(ndev, evt->bssid, NULL, 0, NULL, 0, @@ -572,12 +573,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) GFP_KERNEL); goto out; } else { - cfg80211_connect_result(ndev, evt->bssid, - assoc_req_ie, assoc_req_ielen, - assoc_resp_ie, assoc_resp_ielen, - WLAN_STATUS_SUCCESS, - GFP_KERNEL); + struct wiphy *wiphy = wil_to_wiphy(wil); + + cfg80211_ref_bss(wiphy, wil->bss); + cfg80211_connect_bss(ndev, evt->bssid, wil->bss, + assoc_req_ie, assoc_req_ielen, + assoc_resp_ie, assoc_resp_ielen, + WLAN_STATUS_SUCCESS, GFP_KERNEL, + NL80211_TIMEOUT_UNSPECIFIED); } + wil->bss = NULL; } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { if (rc) { @@ -626,6 +631,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, wil->sinfo_gen++; + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "status_resetting, cancel disconnect event\n"); + /* no need for cleanup, wil_reset will do that */ + return; + } + mutex_lock(&wil->mutex); wil6210_disconnect(wil, evt->bssid, reason_code, true); mutex_unlock(&wil->mutex); @@ -1393,7 +1405,8 @@ int wmi_rx_chain_add(struct wil6210_priv 
*wil, struct vring *vring) struct wmi_cfg_rx_chain_cmd cmd = { .action = WMI_RX_CHAIN_ADD, .rx_sw_ring = { - .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)), + .max_mpdu_size = cpu_to_le16( + wil_mtu2macbuf(wil->rx_buf_len)), .ring_mem_base = cpu_to_le64(vring->pa), .ring_size = cpu_to_le16(vring->size), }, @@ -1492,6 +1505,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); + wil->locally_generated_disc = true; if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd, @@ -1733,14 +1747,19 @@ int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid) void wmi_event_flush(struct wil6210_priv *wil) { + ulong flags; struct pending_wmi_event *evt, *t; wil_dbg_wmi(wil, "event_flush\n"); + spin_lock_irqsave(&wil->wmi_ev_lock, flags); + list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { list_del(&evt->list); kfree(evt); } + + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); } static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 7c9fee57aa9110695e10dc96535169a2f988c62a..f7f5f4f801e3862a8405a1a7f64702fd024efc19 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -58,6 +58,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, WMI_FW_CAPABILITY_WMI_ONLY = 5, + WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, WMI_FW_CAPABILITY_MAX, }; @@ -142,8 +143,6 @@ enum wmi_command_id { WMI_MAINTAIN_RESUME_CMDID = 0x851, WMI_RS_MGMT_CMDID = 0x852, WMI_RF_MGMT_CMDID = 0x853, - WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854, - WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855, WMI_OTP_READ_CMDID = 0x856, WMI_OTP_WRITE_CMDID = 0x857, WMI_LED_CFG_CMDID = 0x858, @@ -192,6 +191,8 @@ enum wmi_command_id { WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931, WMI_NEW_STA_CMDID = 0x935, WMI_DEL_STA_CMDID = 0x936, + WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940, + WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, @@ -438,16 +439,6 @@ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; } __packed; -/* WMI_THERMAL_THROTTLING_CTRL_CMDID */ -#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF) - -/* WMI_THERMAL_THROTTLING_CTRL_CMDID */ -struct wmi_thermal_throttling_ctrl_cmd { - __le32 time_on_usec; - __le32 time_off_usec; - __le32 max_txop_length_usec; -} __packed; - /* WMI_RF_RX_TEST_CMDID */ struct wmi_rf_rx_test_cmd { __le32 sector; @@ -549,7 +540,7 @@ struct wmi_pcp_start_cmd { u8 hidden_ssid; u8 is_go; u8 reserved0[5]; - /* abft_len override if non-0 */ + /* A-BFT length override if non-0 */ u8 abft_len; u8 disable_ap_sme; u8 network_type; @@ -910,6 +901,39 @@ struct wmi_set_mgmt_retry_limit_cmd { u8 reserved[3]; } __packed; +/* Zones: HIGH, MAX, CRITICAL */ +#define WMI_NUM_OF_TT_ZONES (3) + +struct wmi_tt_zone_limits { + /* Above this temperature this zone is active */ + u8 temperature_high; + /* Below this temperature the adjacent lower zone is active */ + u8 temperature_low; + u8 reserved[2]; +} __packed; + +/* Struct used for both configuration and status commands of thermal + * throttling + */ +struct wmi_tt_data { + /* Enable/Disable TT algorithm for baseband */ + u8 bb_enabled; + u8 reserved0[3]; + /* Define zones for baseband */ + struct wmi_tt_zone_limits 
bb_zones[WMI_NUM_OF_TT_ZONES]; + /* Enable/Disable TT algorithm for radio */ + u8 rf_enabled; + u8 reserved1[3]; + /* Define zones for all radio chips */ + struct wmi_tt_zone_limits rf_zones[WMI_NUM_OF_TT_ZONES]; +} __packed; + +/* WMI_SET_THERMAL_THROTTLING_CFG_CMDID */ +struct wmi_set_thermal_throttling_cfg_cmd { + /* Command data */ + struct wmi_tt_data tt_data; +} __packed; + /* WMI_NEW_STA_CMDID */ struct wmi_new_sta_cmd { u8 dst_mac[WMI_MAC_LEN]; @@ -1040,7 +1064,6 @@ enum wmi_event_id { WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, WMI_RS_MGMT_DONE_EVENTID = 0x1852, WMI_RF_MGMT_STATUS_EVENTID = 0x1853, - WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855, WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, WMI_RX_MGMT_PACKET_EVENTID = 0x1840, WMI_TX_MGMT_PACKET_EVENTID = 0x1841, @@ -1090,6 +1113,8 @@ enum wmi_event_id { WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924, WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930, WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931, + WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940, + WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, WMI_TOF_SESSION_END_EVENTID = 0x1991, WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, WMI_TOF_SET_LCR_EVENTID = 0x1993, @@ -1133,13 +1158,6 @@ struct wmi_rf_mgmt_status_event { __le32 rf_status; } __packed; -/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */ -struct wmi_thermal_throttling_status_event { - __le32 time_on_usec; - __le32 time_off_usec; - __le32 max_txop_length_usec; -} __packed; - /* WMI_GET_STATUS_DONE_EVENTID */ struct wmi_get_status_done_event { __le32 is_associated; @@ -2206,6 +2224,19 @@ struct wmi_tof_get_capabilities_event { __le32 aoa_supported_types; } __packed; +/* WMI_SET_THERMAL_THROTTLING_CFG_EVENTID */ +struct wmi_set_thermal_throttling_cfg_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_THERMAL_THROTTLING_CFG_EVENTID */ +struct wmi_get_thermal_throttling_cfg_event { + /* Status data */ + struct wmi_tt_data tt_data; +} __packed; + enum wmi_tof_session_end_status { WMI_TOF_SESSION_END_NO_ERROR = 0x00, WMI_TOF_SESSION_END_FAIL = 0x01, diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 0e180677c7fc5dc6030f7d577ce16eb14eb03999..09defbcedd5e4b6dd769a9883e67c7e906b8c156 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2377,6 +2377,8 @@ static int at76_init_new_device(struct at76_priv *priv, wiphy->hw_version = priv->board_type; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = ieee80211_register_hw(priv->hw); if (ret) { printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n", diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index e12f62356fd14791299160c2f63d8a0d281ad206..27b110dc8cc622c5f17f184263ff28406c5154b8 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -513,7 +513,7 @@ struct atmel_private { } station_state; int operating_mode, power_mode; - time_t last_qual; + unsigned long last_qual; int beacons_this_sec; int channel; int reg_domain, config_reg_domain; diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 52f3541ecbcfe40f3289bbba681503d974900625..d23aac7503d332cfe741442fb66c9bc7e1373923 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5598,6 +5598,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + 
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wl->hw_registred = false; hw->max_rates = 2; SET_IEEE80211_DEV(hw, dev->dev); diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index b068d5aeee24ef1c859dd4fc0c23a3c65b75bcfc..1b9c191e2a2290e8030c095e96aafeb245e64cd5 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -694,7 +694,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) if (unlikely(phystat0 & (B43_RX_PHYST0_PLCPHCF | B43_RX_PHYST0_PLCPFV))) status.flag |= RX_FLAG_FAILED_PLCP_CRC; if (phystat0 & B43_RX_PHYST0_SHORTPRMBL) - status.flag |= RX_FLAG_SHORTPRE; + status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (macstat & B43_RX_MAC_DECERR) { /* Decryption with the given key failed. * Drop the packet. We also won't be able to decrypt it with diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index cdafebb9c936b445b89ed2cf8fcab26302efb0a2..f1e3dad576292ef5b8e2f03a01a8c185c927a101 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3850,6 +3850,8 @@ static int b43legacy_wireless_init(struct ssb_device *dev) else SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + /* Get and initialize struct b43legacy_wl */ wl = hw_to_b43legacy_wl(hw); memset(wl, 0, sizeof(*wl)); diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig index ab42b1fea03c170e6728f5b71fe720aa61174053..9d99eb42d9176f0f833048b3f87a906542c9e90c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/Kconfig +++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig @@ -18,14 +18,14 @@ config BRCMSMAC module, the driver will be called brcmsmac.ko. config BRCMFMAC - tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver" + tristate "Broadcom FullMAC WLAN driver" depends on CFG80211 select BRCMUTIL ---help--- - This module adds support for embedded wireless adapters based on - Broadcom IEEE802.11n FullMAC chipsets. It has to work with at least - one of the bus interface support. If you choose to build a module, - it'll be called brcmfmac.ko. + This module adds support for wireless adapters based on Broadcom + FullMAC chipsets. It has to work with at least one of the bus + interface support. If you choose to build a module, it'll be called + brcmfmac.ko. 
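Editor's note: the bcdc.c hunks that follow move ownership of the firmware-signalling (fws) state out of the core driver and into the BCDC protocol layer, reached through the protocol's opaque private-data pointer and a small accessor. The sketch below models just that layering in plain C; the type names echo the driver's, but everything here is simplified for illustration.

#include <stdio.h>

struct fws_info {
	int flow_blocked;       /* firmware-signalling state */
};

struct proto {
	void *pd;               /* protocol-private data, opaque to the core */
};

struct pub {
	struct proto *proto;    /* the core driver only sees the proto layer */
};

struct bcdc {
	struct fws_info *fws;   /* owned by the BCDC protocol layer */
};

/* Mirrors drvr_to_fws(): the one place that knows pd's real type. */
static struct fws_info *drvr_to_fws(struct pub *drvr)
{
	struct bcdc *bcdc = drvr->proto->pd;

	return bcdc->fws;
}

int main(void)
{
	struct fws_info fws = { .flow_blocked = 0 };
	struct bcdc bcdc = { .fws = &fws };
	struct proto proto = { .pd = &bcdc };
	struct pub drvr = { .proto = &proto };

	printf("flow_blocked=%d\n", drvr_to_fws(&drvr)->flow_blocked);
	return 0;
}

The accessor keeps callers ignorant of the bcdc layout: only bcdc.c needs to know that pd points at a struct brcmf_bcdc, which is what lets fwsignal.o become a BCDC-only object in the Makefile change below.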
config BRCMFMAC_PROTO_BCDC bool diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index 0383ba559edccaaad719ff2ec08bec1606a56108..1f5a9b948abf49a2ce4baddf476c67bf6288ec5a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -25,7 +25,6 @@ brcmfmac-objs += \ chip.o \ fwil.o \ fweh.o \ - fwsignal.o \ p2p.o \ proto.o \ common.o \ @@ -36,7 +35,8 @@ brcmfmac-objs += \ vendor.o \ pno.o brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ - bcdc.o + bcdc.o \ + fwsignal.o brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ commonring.o \ flowring.o \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 384b1873e7e38dfbeac989ce7f46138548f8d750..9f2d0b0cf6e5c452ad85a3caef58cf16a8cdad46 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -103,9 +103,17 @@ struct brcmf_bcdc { u8 bus_header[BUS_HEADER_LEN]; struct brcmf_proto_bcdc_dcmd msg; unsigned char buf[BRCMF_DCMD_MAXLEN]; + struct brcmf_fws_info *fws; }; +struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr) +{ + struct brcmf_bcdc *bcdc = drvr->proto->pd; + + return bcdc->fws; +} + static int brcmf_proto_bcdc_msg(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf, uint len, bool set) @@ -330,8 +338,9 @@ static int brcmf_proto_bcdc_tx_queue_data(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb) { struct brcmf_if *ifp = brcmf_get_ifp(drvr, ifidx); + struct brcmf_bcdc *bcdc = drvr->proto->pd; - if (!brcmf_fws_queue_skbs(drvr->fws)) + if (!brcmf_fws_queue_skbs(bcdc->fws)) return brcmf_proto_txdata(drvr, ifidx, 0, skb); return brcmf_fws_process_skb(ifp, skb); @@ -345,6 +354,36 @@ brcmf_proto_bcdc_txdata(struct brcmf_pub *drvr, int ifidx, u8 offset, return brcmf_bus_txdata(drvr->bus_if, pktbuf); } +void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; + + brcmf_dbg(TRACE, "Enter\n"); + + brcmf_fws_bus_blocked(drvr, state); +} + +void +brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, + bool success) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_bcdc *bcdc = bus_if->drvr->proto->pd; + struct brcmf_if *ifp; + + /* await txstatus signal for firmware if active */ + if (brcmf_fws_fc_active(bcdc->fws)) { + if (!success) + brcmf_fws_bustxfail(bcdc->fws, txp); + } else { + if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp)) + brcmu_pkt_buf_free_skb(txp); + else + brcmf_txfinalize(ifp, txp, success); + } +} + static void brcmf_proto_bcdc_configure_addr_mode(struct brcmf_pub *drvr, int ifidx, enum proto_addr_mode addr_mode) @@ -369,6 +408,38 @@ static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp, brcmf_fws_rxreorder(ifp, skb); } +static void +brcmf_proto_bcdc_add_if(struct brcmf_if *ifp) +{ + brcmf_fws_add_interface(ifp); +} + +static void +brcmf_proto_bcdc_del_if(struct brcmf_if *ifp) +{ + brcmf_fws_del_interface(ifp); +} + +static void +brcmf_proto_bcdc_reset_if(struct brcmf_if *ifp) +{ + brcmf_fws_reset_interface(ifp); +} + +static int +brcmf_proto_bcdc_init_done(struct brcmf_pub *drvr) +{ + struct brcmf_bcdc *bcdc = drvr->proto->pd; + struct brcmf_fws_info *fws; + + fws = brcmf_fws_attach(drvr); + if (IS_ERR(fws)) + return PTR_ERR(fws); + + bcdc->fws = fws; + return 0; +} 
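Editor's note: brcmf_proto_bcdc_init_done() above relies on the kernel's error-pointer idiom: brcmf_fws_attach() returns either a usable pointer or a negative errno folded into the pointer value, which IS_ERR()/PTR_ERR() unpack. Below is a rough userspace re-creation of that convention for illustration only; the kernel's real helpers live in linux/err.h.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in a pointer value. */
static inline void *ERR_PTR(long error) { return (void *)error; }
/* Recover the errno from an error pointer. */
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
/* Error pointers occupy the top MAX_ERRNO values of the address space. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical attach function standing in for brcmf_fws_attach(). */
static void *fws_attach(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	void *fws = fws_attach(1);

	if (IS_ERR(fws))
		printf("attach failed: %ld\n", PTR_ERR(fws));
	return 0;
}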
+ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc; @@ -392,6 +463,10 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer; drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer; drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder; + drvr->proto->add_if = brcmf_proto_bcdc_add_if; + drvr->proto->del_if = brcmf_proto_bcdc_del_if; + drvr->proto->reset_if = brcmf_proto_bcdc_reset_if; + drvr->proto->init_done = brcmf_proto_bcdc_init_done; drvr->proto->pd = bcdc; drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES; @@ -406,6 +481,9 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) { - kfree(drvr->proto->pd); + struct brcmf_bcdc *bcdc = drvr->proto->pd; + drvr->proto->pd = NULL; + brcmf_fws_detach(bcdc->fws); + kfree(bcdc); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h index 6003179c0ceb286ce2a396d96eb1f436d727b80f..3b0e9eff21b5826883bd2a0377c3baeb023490c5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h @@ -19,6 +19,10 @@ #ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state); +void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, + bool success); +struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr); #else static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 5bc2ba214735af2a8f44394834e1c40c45820487..9b970dc2b922a8dd0e4c4199442f5976f7649798 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 76693df347425951397a211c863f19046dbf07f5..b55c3293c4b48054f803382f2f98c1bcf95527c4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -229,11 +229,6 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings); void brcmf_detach(struct device *dev); /* Indication from bus module that dongle should be reset */ void brcmf_dev_reset(struct device *dev); -/* Indication from bus module to change flow-control state */ -void brcmf_txflowblock(struct device *dev, bool state); - -/* Notify the bus has transferred the tx packet to firmware */ -void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 944b83cfc51978d06ccc82a9f23ce64472af3fc8..cd1d6730eab73d514db5a1b88d442607c7834e3d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -575,12 +575,11 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) * * @wiphy: wiphy device of new interface. * @name: name of the new interface. - * @flags: not used. * @params: contains mac address for AP device. */ static struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, - u32 *flags, struct vif_params *params) + struct vif_params *params) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); @@ -653,7 +652,6 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, - u32 *flags, struct vif_params *params) { struct wireless_dev *wdev; @@ -674,12 +672,12 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, case NL80211_IFTYPE_MESH_POINT: return ERR_PTR(-EOPNOTSUPP); case NL80211_IFTYPE_AP: - wdev = brcmf_ap_add_vif(wiphy, name, flags, params); + wdev = brcmf_ap_add_vif(wiphy, name, params); break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: - wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params); + wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, params); break; case NL80211_IFTYPE_UNSPECIFIED: default: @@ -764,7 +762,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, brcmf_dbg(SCAN, "scheduled scan completed\n"); cfg->internal_escan = false; if (!aborted) - cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); + cfg80211_sched_scan_results(cfg_to_wiphy(cfg), 0); } else if (scan_request) { struct cfg80211_scan_info info = { .aborted = aborted, @@ -858,7 +856,7 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) static s32 brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); @@ -3097,6 +3095,9 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, status = e->status; + if (status == BRCMF_E_STATUS_ABORT) + goto exit; + if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx); return -EPERM; @@ -3213,7 +3214,7 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req, { struct ieee80211_channel *chan; enum nl80211_band band; - int freq; + int freq, i; if (channel <= CH_MAX_2G_CHANNEL) band = NL80211_BAND_2GHZ; @@ -3228,10 +3229,22 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req, if (!chan) return -EINVAL; - req->channels[req->n_channels++] = chan; - memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len); - req->ssids[req->n_ssids++].ssid_len = ssid_len; + for (i = 0; i < req->n_channels; i++) { + if (req->channels[i] == chan) + break; + } + if (i == req->n_channels) + req->channels[req->n_channels++] = chan; + for (i = 0; i < req->n_ssids; i++) { + if (req->ssids[i].ssid_len == ssid_len && + !memcmp(req->ssids[i].ssid, ssid, ssid_len)) + break; + } + if (i == req->n_ssids) { + memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len); + req->ssids[req->n_ssids++].ssid_len = ssid_len; + } return 0; } @@ -3297,6 +3310,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, struct brcmf_pno_scanresults_le *pfn_result; u32 result_count; u32 status; + u32 datalen; brcmf_dbg(SCAN, "Enter\n"); @@ -3323,6 +3337,14 @@ 
brcmf_notify_sched_scan_results(struct brcmf_if *ifp, brcmf_err("FALSE PNO Event. (pfn_count == 0)\n"); goto out_err; } + + netinfo_start = brcmf_get_netinfo_array(pfn_result); + datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result); + if (datalen < result_count * sizeof(*netinfo)) { + brcmf_err("insufficient event data\n"); + goto out_err; + } + request = brcmf_alloc_internal_escan_request(wiphy, result_count); if (!request) { @@ -3330,17 +3352,11 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, goto out_err; } - netinfo_start = brcmf_get_netinfo_array(pfn_result); - for (i = 0; i < result_count; i++) { netinfo = &netinfo_start[i]; - if (!netinfo) { - brcmf_err("Invalid netinfo ptr. index: %d\n", - i); - err = -EINVAL; - goto out_err; - } + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN) + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN; brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n", netinfo->SSID, netinfo->channel); err = brcmf_internal_escan_add_info(request, @@ -3356,7 +3372,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, goto free_req; out_err: - cfg80211_sched_scan_stopped(wiphy); + cfg80211_sched_scan_stopped(wiphy, 0); free_req: kfree(request); return err; @@ -3389,7 +3405,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy, } static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy, - struct net_device *ndev) + struct net_device *ndev, u64 reqid) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); @@ -3591,7 +3607,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) cfg->wowl.pre_pmmode); cfg->wowl.active = false; if (cfg->wowl.nd_enabled) { - brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev); + brcmf_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev, 0); brcmf_fweh_unregister(cfg->pub, BRCMF_E_PFN_NET_FOUND); brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND, brcmf_notify_sched_scan_results); @@ -3675,7 +3691,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, /* Stop scheduled scan */ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) - brcmf_cfg80211_sched_scan_stop(wiphy, ndev); + brcmf_cfg80211_sched_scan_stop(wiphy, ndev, 0); /* end any scanning */ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) @@ -5343,6 +5359,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, struct ieee80211_supported_band *band; struct brcmf_bss_info_le *bi; struct brcmu_chan ch; + struct cfg80211_roam_info roam_info = {}; u32 freq; s32 err = 0; u8 *buf; @@ -5381,9 +5398,15 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, done: kfree(buf); - cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid, - conn_info->req_ie, conn_info->req_ie_len, - conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); + + roam_info.channel = notify_channel; + roam_info.bssid = profile->bssid; + roam_info.req_ie = conn_info->req_ie; + roam_info.req_ie_len = conn_info->req_ie_len; + roam_info.resp_ie = conn_info->resp_ie; + roam_info.resp_ie_len = conn_info->resp_ie_len; + + cfg80211_roamed(ndev, &roam_info, GFP_KERNEL); brcmf_dbg(CONN, "Report roaming result\n"); set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); @@ -6358,11 +6381,11 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) static void brcmf_wiphy_pno_params(struct wiphy *wiphy) { /* scheduled scan settings */ + wiphy->max_sched_scan_reqs = 1; wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; 
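/* Context for this hunk: cfg80211 no longer advertises scheduled-scan support through WIPHY_FLAG_SUPPORTS_SCHED_SCAN; a driver instead sets wiphy->max_sched_scan_reqs to the number of concurrent scheduled-scan requests it can service (1 preserves the old single-request behaviour), and the completion notifiers take a 64-bit request id, matching the cfg80211_sched_scan_results(wiphy, 0) and cfg80211_sched_scan_stopped(wiphy, 0) calls elsewhere in this patch. */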
wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD; - wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; } #ifdef CONFIG_PM @@ -6450,7 +6473,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) | BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST); - wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | + wiphy->flags |= WIPHY_FLAG_NETNS_OK | + WIPHY_FLAG_PS_ON_BY_DEFAULT | WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) @@ -6549,7 +6573,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) if (err) goto default_conf_out; err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype, - NULL, NULL); + NULL); if (err) goto default_conf_out; @@ -6736,6 +6760,10 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, s32 err; int i; + /* The country code gets set to "00" by default at boot, ignore */ + if (req->alpha2[0] == '0' && req->alpha2[1] == '0') + return; + /* ignore non-ISO3166 country codes */ for (i = 0; i < sizeof(req->alpha2); i++) if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 33b133f7e63aad3b5a6bb14018fd461ec4fb90c6..7a2b49587b4d32dde1af56a2979d4e1818500f84 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -161,7 +161,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) strsep(&ptr, "\n"); /* Print fw version info */ - brcmf_err("Firmware version = %s\n", buf); + brcmf_info("Firmware version = %s\n", buf); /* locate firmware version number for ethtool */ ptr = strrchr(buf, ' ') + 1; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 60da86a8d95b0190b22c76e99cfc88543939e1a1..a3d82368f1a9f9722e62920e44ef52f7ee8602ef 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -32,7 +32,6 @@ #include "p2p.h" #include "cfg80211.h" #include "fwil.h" -#include "fwsignal.h" #include "feature.h" #include "proto.h" #include "pcie.h" @@ -198,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, int ret; struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; - struct ethhdr *eh = (struct ethhdr *)(skb->data); + struct ethhdr *eh; brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); @@ -211,22 +210,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, goto done; } - /* Make sure there's enough room for any header */ - if (skb_headroom(skb) < drvr->hdrlen) { - struct sk_buff *skb2; - - brcmf_dbg(INFO, "%s: insufficient headroom\n", + /* Make sure there's enough writable headroom */ + ret = skb_cow_head(skb, drvr->hdrlen); + if (ret < 0) { + brcmf_err("%s: skb_cow_head failed\n", brcmf_ifname(ifp)); - drvr->bus_if->tx_realloc++; - skb2 = skb_realloc_headroom(skb, drvr->hdrlen); dev_kfree_skb(skb); - skb = skb2; - if (skb == NULL) { - brcmf_err("%s: skb_realloc_headroom failed\n", - brcmf_ifname(ifp)); - ret = -ENOMEM; - goto done; - } + goto done; } /* validate length for ether packet */ @@ -236,6 +226,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, goto done; } + eh = (struct ethhdr *)(skb->data); + if (eh->h_proto == htons(ETH_P_PAE)) atomic_inc(&ifp->pend_8021x_cnt); @@ -283,16 +275,6
@@ void brcmf_txflowblock_if(struct brcmf_if *ifp, spin_unlock_irqrestore(&ifp->netif_stop_lock, flags); } -void brcmf_txflowblock(struct device *dev, bool state) -{ - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; - - brcmf_dbg(TRACE, "Enter\n"); - - brcmf_fws_bus_blocked(drvr, state); -} - void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) { if (skb->pkt_type == PACKET_MULTICAST) @@ -393,24 +375,6 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success) brcmu_pkt_buf_free_skb(txp); } -void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) -{ - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_pub *drvr = bus_if->drvr; - struct brcmf_if *ifp; - - /* await txstatus signal for firmware if active */ - if (brcmf_fws_fc_active(drvr->fws)) { - if (!success) - brcmf_fws_bustxfail(drvr->fws, txp); - } else { - if (brcmf_proto_hdrpull(drvr, false, txp, &ifp)) - brcmu_pkt_buf_free_skb(txp); - else - brcmf_txfinalize(ifp, txp, success); - } -} - static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -504,8 +468,9 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) ndev->needed_headroom += drvr->hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; - /* set the mac address */ + /* set the mac address & netns */ memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN); + dev_net_set(ndev, wiphy_net(cfg_to_wiphy(drvr->config))); INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list); INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable); @@ -734,10 +699,28 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked) return; brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx, ifp->ifidx); - brcmf_fws_del_interface(ifp); + brcmf_proto_del_if(ifp->drvr, ifp); brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked); } +static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp, + const struct brcmf_event_msg *evtmsg, + void *data) +{ + int err; + + brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); + + brcmf_err("PSM's watchdog has fired!\n"); + + err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data, + evtmsg->datalen); + if (err) + brcmf_err("Failed to get memory dump, %d\n", err); + + return err; +} + #ifdef CONFIG_INET #define ARPOL_MAX_ENTRIES 8 static int brcmf_inetaddr_changed(struct notifier_block *nb, @@ -917,6 +900,10 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) goto fail; } + /* Attach to events important for core code */ + brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG, + brcmf_psm_watchdog_notify); + /* attach firmware event handler */ brcmf_fweh_attach(drvr); @@ -992,11 +979,11 @@ int brcmf_bus_started(struct device *dev) } brcmf_feat_attach(drvr); - ret = brcmf_fws_init(drvr); + ret = brcmf_proto_init_done(drvr); if (ret < 0) goto fail; - brcmf_fws_add_interface(ifp); + brcmf_proto_add_if(drvr, ifp); drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev, drvr->settings->p2p_enable); @@ -1040,10 +1027,6 @@ int brcmf_bus_started(struct device *dev) brcmf_cfg80211_detach(drvr->config); drvr->config = NULL; } - if (drvr->fws) { - brcmf_fws_del_interface(ifp); - brcmf_fws_deinit(drvr); - } brcmf_net_detach(ifp->ndev, false); if (p2p_ifp) brcmf_net_detach(p2p_ifp->ndev, false); @@ -1109,8 +1092,6 @@ void brcmf_detach(struct device *dev) brcmf_cfg80211_detach(drvr->config); - brcmf_fws_deinit(drvr); - brcmf_bus_stop(drvr->bus_if); brcmf_proto_detach(drvr); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 6aecd8dfd824d9656890b96359734aa80a03ca35..a4dd313140f37dc1fbb6955e1148f48bccf22f44 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -127,8 +127,6 @@ struct brcmf_pub { struct brcmf_fweh_info fweh; - struct brcmf_fws_info *fws; - struct brcmf_ampdu_rx_reorder *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS]; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index f4644cf371c7e058401fcba2797fb9bd11f63993..1447a8352383707f35ab04a67c32c7c3bb0d97bc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -27,8 +27,8 @@ static struct dentry *root_folder; -static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, - size_t len) +int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, + size_t len) { void *dump; size_t ramsize; @@ -54,24 +54,6 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, return 0; } -static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp, - const struct brcmf_event_msg *evtmsg, - void *data) -{ - int err; - - brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); - - brcmf_err("PSM's watchdog has fired!\n"); - - err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data, - evtmsg->datalen); - if (err) - brcmf_err("Failed to get memory dump, %d\n", err); - - return err; -} - void brcmf_debugfs_init(void) { root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); @@ -99,9 +81,7 @@ int brcmf_debug_attach(struct brcmf_pub *drvr) if (IS_ERR(drvr->dbgfs_dir)) return PTR_ERR(drvr->dbgfs_dir); - - return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG, - brcmf_debug_psm_watchdog_notify); + return 0; } void brcmf_debug_detach(struct brcmf_pub *drvr) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index 066126123e9663e89277a28d0dd2fa48ad9f0a72..fe264a5798f15f55e03741554a5dfe58e37b4892 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -59,6 +59,10 @@ void __brcmf_err(const char *func, const char *fmt, ...); } while (0) #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING) + +/* For debug/tracing purposes treat info messages as errors */ +#define brcmf_info brcmf_err + __printf(3, 4) void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...); #define brcmf_dbg(level, fmt, ...) \ @@ -77,6 +81,11 @@ do { \ #else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */ +#define brcmf_info(fmt, ...) \ + do { \ + pr_info("%s: " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + #define brcmf_dbg(level, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) #define BRCMF_DATA_ON() 0 @@ -99,6 +108,7 @@ do { \ extern int brcmf_msg_level; +struct brcmf_bus; struct brcmf_pub; #ifdef DEBUG void brcmf_debugfs_init(void); @@ -108,6 +118,8 @@ void brcmf_debug_detach(struct brcmf_pub *drvr); struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, int (*read_fn)(struct seq_file *seq, void *data)); +int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, + size_t len); #else static inline void brcmf_debugfs_init(void) { @@ -128,6 +140,12 @@ int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn, { return 0; } +static inline +int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, + size_t len) +{ + return 0; +} #endif #endif /* BRCMFMAC_DEBUG_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index c79306b575324f05a63b38a80068179db316f6a3..4eb1e1ce9aceccc9214657fa6081ba5327c021ca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -22,9 +22,9 @@ #include "core.h" #include "debug.h" #include "tracepoint.h" -#include "fwsignal.h" #include "fweh.h" #include "fwil.h" +#include "proto.h" /** * struct brcmf_fweh_queue_item - event item on event queue. @@ -172,14 +172,14 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, if (IS_ERR(ifp)) return; if (!is_p2pdev) - brcmf_fws_add_interface(ifp); + brcmf_proto_add_if(drvr, ifp); if (!drvr->fweh.evt_handler[BRCMF_E_IF]) if (brcmf_net_attach(ifp, false) < 0) return; } if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) - brcmf_fws_reset_interface(ifp); + brcmf_proto_reset_if(drvr, ifp); err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 5f1a5929cb307e8c8e9353e264b1c313210cdd88..72373e59308e8fe54cf14090efd135a045273f4a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -36,6 +36,7 @@ #include "p2p.h" #include "cfg80211.h" #include "proto.h" +#include "bcdc.h" #include "common.h" /** @@ -1586,7 +1587,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_fws_info *fws = ifp->drvr->fws; + struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); int i; u8 *credits = data; @@ -1617,7 +1618,7 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_fws_info *fws = ifp->drvr->fws; + struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); if (fws) { brcmf_fws_lock(fws); @@ -1826,7 +1827,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb) { struct brcmf_skb_reorder_data *rd; - struct brcmf_fws_info *fws = ifp->drvr->fws; + struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); u8 *signal_data; s16 data_len; u8 type; @@ -2091,8 +2092,7 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p, int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) { - struct brcmf_pub *drvr = ifp->drvr; - struct brcmf_fws_info *fws = drvr->fws; + struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); 
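/* drvr_to_fws() replaces the former ifp->drvr->fws dereference now that the fwsignal state is owned by the BCDC protocol layer instead of struct brcmf_pub; a minimal sketch of the accessor, assuming the struct brcmf_bcdc layout used elsewhere in this series (it keeps the brcmf_fws_info pointer in a member named fws, reachable through drvr->proto->pd): struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc = drvr->proto->pd; return bcdc->fws; } */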
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb); struct ethhdr *eh = (struct ethhdr *)(skb->data); int fifo = BRCMF_FWS_FIFO_BCMC; @@ -2142,10 +2142,10 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp) void brcmf_fws_add_interface(struct brcmf_if *ifp) { - struct brcmf_fws_info *fws = ifp->drvr->fws; + struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); struct brcmf_fws_mac_descriptor *entry; - if (!ifp->ndev) + if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) return; entry = &fws->desc.iface[ifp->ifidx]; @@ -2160,16 +2160,17 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) void brcmf_fws_del_interface(struct brcmf_if *ifp) { struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; + struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); if (!entry) return; - brcmf_fws_lock(ifp->drvr->fws); + brcmf_fws_lock(fws); ifp->fws_desc = NULL; brcmf_dbg(TRACE, "deleting %s\n", entry->name); brcmf_fws_macdesc_deinit(entry); - brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); - brcmf_fws_unlock(ifp->drvr->fws); + brcmf_fws_cleanup(fws, ifp->ifidx); + brcmf_fws_unlock(fws); } static void brcmf_fws_dequeue_worker(struct work_struct *worker) @@ -2243,7 +2244,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data) { struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); - struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats; + struct brcmf_fws_stats *fwstats = &(drvr_to_fws(bus_if->drvr)->stats); seq_printf(seq, "header_pulls: %u\n" @@ -2308,7 +2309,7 @@ static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data) } #endif -int brcmf_fws_init(struct brcmf_pub *drvr) +struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) { struct brcmf_fws_info *fws; struct brcmf_if *ifp; @@ -2316,17 +2317,15 @@ int brcmf_fws_init(struct brcmf_pub *drvr) int rc; u32 mode; - drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL); - if (!drvr->fws) { + fws = kzalloc(sizeof(*fws), GFP_KERNEL); + if (!fws) { rc = -ENOMEM; goto fail; } - fws = drvr->fws; - spin_lock_init(&fws->spinlock); - /* set linkage back */ + /* store drvr reference */ fws->drvr = drvr; fws->fcmode = drvr->settings->fcmode; @@ -2334,7 +2333,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr) (fws->fcmode == BRCMF_FWS_FCMODE_NONE)) { fws->avoid_queueing = true; brcmf_dbg(INFO, "FWS queueing will be avoided\n"); - return 0; + return fws; } fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); @@ -2396,6 +2395,7 @@ int brcmf_fws_init(struct brcmf_pub *drvr) brcmf_fws_hanger_init(&fws->hanger); brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0); brcmf_fws_macdesc_set_name(fws, &fws->desc.other); + brcmf_dbg(INFO, "added %s\n", fws->desc.other.name); brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, BRCMF_FWS_PSQ_LEN); @@ -2405,27 +2405,24 @@ int brcmf_fws_init(struct brcmf_pub *drvr) brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", fws->fw_signals ? 
"enabled" : "disabled", tlv); - return 0; + return fws; fail: - brcmf_fws_deinit(drvr); - return rc; + brcmf_fws_detach(fws); + return ERR_PTR(rc); } -void brcmf_fws_deinit(struct brcmf_pub *drvr) +void brcmf_fws_detach(struct brcmf_fws_info *fws) { - struct brcmf_fws_info *fws = drvr->fws; - if (!fws) return; - if (drvr->fws->fws_wq) - destroy_workqueue(drvr->fws->fws_wq); + if (fws->fws_wq) + destroy_workqueue(fws->fws_wq); /* cleanup */ brcmf_fws_lock(fws); brcmf_fws_cleanup(fws, -1); - drvr->fws = NULL; brcmf_fws_unlock(fws); /* free top structure */ @@ -2461,7 +2458,7 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked) { - struct brcmf_fws_info *fws = drvr->fws; + struct brcmf_fws_info *fws = drvr_to_fws(drvr); struct brcmf_if *ifp; int i; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index 96df66073b2a182d050dd777893ded4b4da13473..ba07bd972002025081cc1b2264054d93150bcc34 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -18,8 +18,8 @@ #ifndef FWSIGNAL_H_ #define FWSIGNAL_H_ -int brcmf_fws_init(struct brcmf_pub *drvr); -void brcmf_fws_deinit(struct brcmf_pub *drvr); +struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); +void brcmf_fws_detach(struct brcmf_fws_info *fws); bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 85d949e03f79f7c9566c7b00a9bbe99853df7f16..aa299c47bfa24e7cfc4af283efb29458ca90d11e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2141,12 +2141,11 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, * @name: name of the new interface. * @name_assign_type: origin of the interface name * @type: nl80211 interface type. - * @flags: not used. * @params: contains mac address for P2P device. 
*/ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index 8ce9447533ef8fa519caab0f985949487346464d..0e8b34d2d85cb1b3dbc0ab9716e93c78628b132c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -150,7 +150,7 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced); void brcmf_p2p_detach(struct brcmf_p2p_info *p2p); struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params); int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 6fae4cf3f6ab2876eb6ae2c3b5b07d02bcb0d3ca..f36b96dc6acdfc2160ba35e187a46356a3b41c28 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1877,6 +1877,7 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev) BRCMF_PCIE_MBDATA_TIMEOUT); if (!devinfo->mbdata_completed) { brcmf_err("Timeout on response for entering D3 substate\n"); + brcmf_bus_change_state(bus, BRCMF_BUS_UP); return -EIO; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c index 9a25e79a46cf2d256c70c905bf9d03ebbe19afe8..6c3bde83d07085e46a5418673f09df5a5790a1aa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c @@ -182,7 +182,6 @@ int brcmf_pno_clean(struct brcmf_if *ifp) int brcmf_pno_start_sched_scan(struct brcmf_if *ifp, struct cfg80211_sched_scan_request *req) { - struct brcmu_d11inf *d11inf; struct brcmf_pno_config_le pno_cfg; struct cfg80211_ssid *ssid; u16 chan; @@ -209,7 +208,6 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp, } /* configure channels to use */ - d11inf = &ifp->drvr->config->d11inf; for (i = 0; i < req->n_channels; i++) { chan = req->channels[i]->hw_value; pno_cfg.channel_list[i] = cpu_to_le16(chan); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index 34b59feedeba86b592dfb08c0d737b340b57c798..2404f8a7c31c9901b77e8929e9e2012fd2f55b2a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -44,6 +44,10 @@ struct brcmf_proto { void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]); void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb); + void (*add_if)(struct brcmf_if *ifp); + void (*del_if)(struct brcmf_if *ifp); + void (*reset_if)(struct brcmf_if *ifp); + int (*init_done)(struct brcmf_pub *drvr); void *pd; }; @@ -118,4 +122,36 @@ brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb) ifp->drvr->proto->rxreorder(ifp, skb); } +static inline void +brcmf_proto_add_if(struct brcmf_pub *drvr, struct brcmf_if *ifp) +{ + if (!drvr->proto->add_if) + return; + 
drvr->proto->add_if(ifp); +} + +static inline void +brcmf_proto_del_if(struct brcmf_pub *drvr, struct brcmf_if *ifp) +{ + if (!drvr->proto->del_if) + return; + drvr->proto->del_if(ifp); +} + +static inline void +brcmf_proto_reset_if(struct brcmf_pub *drvr, struct brcmf_if *ifp) +{ + if (!drvr->proto->reset_if) + return; + drvr->proto->reset_if(ifp); +} + +static inline int +brcmf_proto_init_done(struct brcmf_pub *drvr) +{ + if (!drvr->proto->init_done) + return 0; + return drvr->proto->init_done(drvr); +} + #endif /* BRCMFMAC_PROTO_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 65689469c5a12e2fcfd6123ca584944da79ec184..fc64b8913aa6a11c0111fec3b9d900174dc250c3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -44,6 +44,7 @@ #include "firmware.h" #include "core.h" #include "common.h" +#include "bcdc.h" #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) #define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) @@ -539,7 +540,11 @@ static int qcount[NUMPRIO]; /* Limit on rounding up frames */ static const uint max_roundup = 512; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define ALIGNMENT 8 +#else #define ALIGNMENT 4 +#endif enum brcmf_sdio_frmtype { BRCMF_SDIO_FT_NORMAL, @@ -2265,7 +2270,8 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq, bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP; skb_queue_walk_safe(pktq, pkt_next, tmp) { __skb_unlink(pkt_next, pktq); - brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0); + brcmf_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next, + ret == 0); } return ret; } @@ -2328,7 +2334,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes) if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) && bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { bus->txoff = false; - brcmf_txflowblock(bus->sdiodev->dev, false); + brcmf_proto_bcdc_txflowblock(bus->sdiodev->dev, false); } return cnt; @@ -2753,7 +2759,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt) if (pktq_len(&bus->txq) >= TXHI) { bus->txoff = true; - brcmf_txflowblock(dev, true); + brcmf_proto_bcdc_txflowblock(dev, true); } spin_unlock_bh(&bus->txq_lock); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index d93ebbdc773757adda218b16b816c30202792973..e4d545f9edeef6f119a0b67f50db679bedfcd91f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -29,6 +29,7 @@ #include "usb.h" #include "core.h" #include "common.h" +#include "bcdc.h" #define IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) @@ -482,13 +483,13 @@ static void brcmf_usb_tx_complete(struct urb *urb) req->skb); brcmf_usb_del_fromq(devinfo, req); - brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); + brcmf_proto_bcdc_txcomplete(devinfo->dev, req->skb, urb->status == 0); req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); if (devinfo->tx_freecount > devinfo->tx_high_watermark && devinfo->tx_flowblock) { - brcmf_txflowblock(devinfo->dev, false); + brcmf_proto_bcdc_txflowblock(devinfo->dev, false); devinfo->tx_flowblock = false; } spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); @@ -635,7 +636,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) 
spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); if (devinfo->tx_freecount < devinfo->tx_low_watermark && !devinfo->tx_flowblock) { - brcmf_txflowblock(dev, true); + brcmf_proto_bcdc_txflowblock(dev, true); devinfo->tx_flowblock = true; } spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 7c2a9a9bc372c19502d3bf1d7091ccbd4654b061..ddfdfe177e245cae6396145bc18500ed672f087f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -1082,6 +1082,8 @@ static int ieee_hw_init(struct ieee80211_hw *hw) * hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; */ + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + hw->rate_control_algorithm = "minstrel_ht"; hw->sta_data_size = 0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index c2a938b5904460833f77378031a01b1d8cad1b86..0a14942b821671ea9de11f30a4cba34a4fa50537 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -7092,9 +7092,9 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, rspec = brcms_c_compute_rspec(rxh, plcp); if (is_mcs_rate(rspec)) { rx_status->rate_idx = rspec & RSPEC_RATE_MASK; - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; if (rspec_is40mhz(rspec)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; } else { switch (rspec2rate(rspec)) { case BRCM_RATE_1M: @@ -7149,9 +7149,9 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, /* Determine short preamble and rate_idx */ if (is_cck_rate(rspec)) { if (rxh->PhyRxStatus_0 & PRXS0_SHORTH) - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } else if (is_ofdm_rate(rspec)) { - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } else { brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n", __func__); @@ -7159,7 +7159,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, } if (plcp3_issgi(plcp[3])) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rxh->RxStatus1 & RXS_DECERR) { rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 5ef3c5cc47c5f75e4aee7c727f4ae08e7225c98f..bbc579b647b61b6aa15b8ed3b7f4455cb8dd404c 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3539,9 +3539,6 @@ static int ipw_load(struct ipw_priv *priv) fw_img = &fw->data[le32_to_cpu(fw->boot_size) + le32_to_cpu(fw->ucode_size)]; - if (rc < 0) - goto error; - if (!priv->rxq) priv->rxq = ipw_rx_queue_alloc(priv); else diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index e8e65115feba4ca892623975a6b55e901df545aa..38bf403bb1e1ad06e45c0587832156276a176c03 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3592,6 +3592,8 @@ il3945_setup_mac(struct il_priv *il) il_leds_init(il); + wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = ieee80211_register_hw(il->hw); if (ret) { 
IL_ERR("Failed to register hw (error %d)\n", ret); diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index 03ad9b8b55f4e2e6052a1ae8b9b99bb29d2662f4..b2f35dfbc01bd6a98b92c342b56ccf6f0550ab8a 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c @@ -656,7 +656,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, rate_mask = sta->supp_rates[sband->band]; /* get user max rate if set */ - max_rate_idx = txrc->max_rate_idx; + max_rate_idx = fls(txrc->rate_idx_mask) - 1; if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1) max_rate_idx += IL_FIRST_OFDM_RATE; if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT) diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 4db327a954142459d3bfa188b94ddecbeae6846d..080ea8155b908499d8efc8cf0a33ad0e2da0493b 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -570,7 +570,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) /* set the preamble flag if appropriate */ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if ((unlikely(rx_stats->phy_count > 20))) { D_DROP("dsp size out of range [0,20]: %d\n", diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 2781f5728d0768169276d380d625695f7513e917..5d5faa3cad247b4a01b2bcf94fe10fedddbce54b 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -728,15 +728,15 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) /* set the preamble flag if appropriate */ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; /* Set up the HT phy flags */ if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; + rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; + rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { /* We know which subframes of an A-MPDU belong @@ -5799,6 +5799,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) il_leds_init(il); + wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = ieee80211_register_hw(il->hw); if (ret) { IL_ERR("Failed to register hw (error %d)\n", ret); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index a867ae7f4095b80c93ca644ebf5e130a4250c143..c055f6da11c6ab4453d427954c52fdbe9a6a37b0 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -2211,7 +2211,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, /* Get max rate if user set max rate */ if (lq_sta) { - lq_sta->max_rate_idx = txrc->max_rate_idx; + lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1; if (sband->band == NL80211_BAND_5GHZ && lq_sta->max_rate_idx != -1) lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE; diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 
92e611841200d586c65202b336d3cfe5725e06c4..411cb91c102f98a0dd1c45d1792b0fcd2588e5c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -7,6 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o +iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o iwlwifi-objs += iwl-trans.o diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 6c2d6da7eec6d9cb62056c6a7765b1b98e9ead1f..74e52f7c5aa13afd8185ce41273b35beac9ed428 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) goto done; } IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); + iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff); done: ieee80211_wake_queues(priv->hw); mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 2a04d0cd71aefc5b09022daee9e6470e924075a7..444c74371929bfaf3d372e46dde93065d3c7e562 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -213,6 +213,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, iwl_leds_init(priv); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); @@ -1143,7 +1145,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); - iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); + iwl_trans_wait_tx_queues_empty(priv->trans, scd_queues); done: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index ff44ebc5829d664d10f52c1125f0205a958364e1..ddcd8c2d66cde3b10e48c4736241dff7a66a3867 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -2720,7 +2720,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, /* Get max rate if user set max rate */ if (lq_sta) { - lq_sta->max_rate_idx = txrc->max_rate_idx; + lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1; if ((sband->band == NL80211_BAND_5GHZ) && (lq_sta->max_rate_idx != -1)) lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index dfa2041cfdac06a175dfa319d368e916e602ca7a..1ee1ba9931a7b24af74015693bbf8b1b515ebe53 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -873,7 +873,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, /* set the preamble flag if appropriate */ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) { /* @@ -887,13 +887,13 @@ static void 
iwlagn_rx_reply_rx(struct iwl_priv *priv, /* Set up the HT phy flags */ if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; + rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; + rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_GF_MSK) - rx_status.flag |= RX_FLAG_HT_GF; + rx_status.enc_flags |= RX_ENC_FLAG_HT_GF; iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, rxb, &rx_status); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index a72e58623d3ab013137f7fdf4a469de3d2d28a40..3b3e076571d6d7089ae2df09d2dfbd2008cf81e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 28 -#define IWL3168_UCODE_API_MAX 28 +#define IWL7265D_UCODE_API_MAX 29 +#define IWL3168_UCODE_API_MAX 29 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index b7953bf55f6fea72b45e7a44685ec421cca2cc94..b9718c0cf17480dc4c1ab212fdbfabecb3125775 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 28 -#define IWL8265_UCODE_API_MAX 28 +#define IWL8000_UCODE_API_MAX 30 +#define IWL8265_UCODE_API_MAX 30 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 17 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index a5f0c0bf85ec8dd7f271a8954cd6d0823acbc9b1..110ceefccc15387e0fa068a46fedf69ab6ab9a14 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015-2016 Intel Deutschland GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015-2016 Intel Deutschland GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -55,10 +55,10 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 28 +#define IWL9000_UCODE_API_MAX 30 /* Lowest firmware API version supported */ -#define IWL9000_UCODE_API_MIN 17 +#define IWL9000_UCODE_API_MIN 30 /* NVM versions */ #define IWL9000_NVM_VERSION 0x0a1d @@ -73,14 +73,14 @@ #define IWL9000_SMEM_LEN 0x68000 #define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" -#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" -#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-" +#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" +#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" #define IWL9000_MODULE_FIRMWARE(api) \ IWL9000_FW_PRE "-" __stringify(api) ".ucode" -#define IWL9260_MODULE_FIRMWARE(api) \ - IWL9260_FW_PRE "-" __stringify(api) ".ucode" -#define IWL9000LC_MODULE_FIRMWARE(api) \ - IWL9000LC_FW_PRE "-" __stringify(api) ".ucode" +#define IWL9260A_MODULE_FIRMWARE(api) \ + IWL9260A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL9260B_MODULE_FIRMWARE(api) \ + IWL9260B_FW_PRE "-" __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_9000 10 @@ -148,7 +148,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", - .fw_name_pre = IWL9260_FW_PRE, + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_next_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, .ht_params = &iwl9000_ht_params, .nvm_ver = IWL9000_NVM_VERSION, @@ -158,7 +159,8 @@ const struct iwl_cfg iwl9160_2ac_cfg = { const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", - .fw_name_pre = IWL9260_FW_PRE, + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_next_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, .ht_params = &iwl9000_ht_params, .nvm_ver = IWL9000_NVM_VERSION, @@ -168,7 +170,8 @@ const struct iwl_cfg iwl9260_2ac_cfg = { const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", - .fw_name_pre = IWL9260_FW_PRE, + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_next_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, .ht_params = &iwl9000_ht_params, .nvm_ver = IWL9000_NVM_VERSION, @@ -198,21 +201,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = { .integrated = true, }; -/* - * TODO the struct below is for internal testing only this should be - * removed by EO 2016~ - */ -const struct iwl_cfg iwl9000lc_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 9000", - .fw_name_pre = IWL9000LC_FW_PRE, - IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - .integrated = true, -}; - MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c index 15dd7f6137c8fff1bae35a9b8eb165ab8ce7d3b6..c648cfb981a3154643d3c56d11d3d5e11a9dba82 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015-2016 Intel Deutschland GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH * * 
This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015-2016 Intel Deutschland GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL_A000_UCODE_API_MAX 28 +#define IWL_A000_UCODE_API_MAX 30 /* Lowest firmware API version supported */ #define IWL_A000_UCODE_API_MIN 24 @@ -65,15 +65,16 @@ #define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */ /* Memory offsets and lengths */ -#define IWL_A000_DCCM_OFFSET 0x800000 -#define IWL_A000_DCCM_LEN 0x18000 +#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */ #define IWL_A000_DCCM2_OFFSET 0x880000 #define IWL_A000_DCCM2_LEN 0x8000 #define IWL_A000_SMEM_OFFSET 0x400000 -#define IWL_A000_SMEM_LEN 0x68000 +#define IWL_A000_SMEM_LEN 0xD0000 -#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" -#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" +#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" +#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" +#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-a0-hrcdb-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" @@ -84,7 +85,7 @@ static const struct iwl_base_params iwl_a000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000, - .num_of_queues = 31, + .num_of_queues = 512, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, @@ -121,7 +122,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = { .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ .use_tfh = true, \ - .rf_id = true + .rf_id = true, \ + .gen2 = true const struct iwl_cfg iwla000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC a000", @@ -133,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = { + .name = "Intel(R) Dual Band Wireless AC a000", + .fw_name_pre = IWL_A000_HR_CDB_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .cdb = true, +}; + const struct iwl_cfg iwla000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC a000", .fw_name_pre = IWL_A000_JF_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 94f8a51b633eb25e7c5731944c5e5f00b5673743..a12197e3ce7837fe8aeadc71719d8837e93bb9f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2016 Intel Deutschland GmbH + * Copyright (C) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2016 Intel Deutschland GmbH + * Copyright (C) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -90,16 +90,6 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_8000, }; -static inline bool iwl_has_secure_boot(u32 hw_rev, - enum iwl_device_family family) -{ - /* return 1 only for family 8000 B0 */ - if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC)) - return true; - - return false; -} - /* * LED mode * IWL_LED_DEFAULT: use device default @@ -180,7 +170,7 @@ struct iwl_base_params { apmg_wake_up_wa:1, scd_chain_ext_wa:1; - u8 num_of_queues; /* def: HW dependent */ + u16 num_of_queues; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; @@ -283,6 +273,8 @@ struct iwl_pwr_tx_backoff { * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre.ucode. + * @fw_name_pre_next_step: same as @fw_name_pre, only for next step + * (if supported) * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. * @max_inst_size: The maximal length of the fw inst section @@ -321,6 +313,8 @@ struct iwl_pwr_tx_backoff { * @vht_mu_mimo_supported: VHT MU-MIMO support * @rf_id: need to read rf_id to determine the firmware image * @integrated: discrete or integrated + * @gen2: a000 and on transport operation + * @cdb: CDB support * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -330,6 +324,7 @@ struct iwl_cfg { /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; + const char *fw_name_pre_next_step; /* params not likely to change within a device family */ const struct iwl_base_params *base_params; /* params likely to change within a device family */ @@ -365,7 +360,9 @@ struct iwl_cfg { vht_mu_mimo_supported:1, rf_id:1, integrated:1, - use_tfh:1; + use_tfh:1, + gen2:1, + cdb:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; @@ -449,13 +446,13 @@ extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; extern const struct iwl_cfg iwl8265_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; -extern const struct iwl_cfg iwl9000lc_2ac_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; extern const struct iwl_cfg iwla000_2ac_cfg_hr; +extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h new file mode 100644 index 0000000000000000000000000000000000000000..b870c09867449ed899f9886d5d88f13a06386e09 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h @@ -0,0 +1,203 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_context_info_file_h__ +#define __iwl_context_info_file_h__ + +/* maximum number of DRAM map entries supported by FW */ +#define IWL_MAX_DRAM_ENTRY 64 +#define CSR_CTXT_INFO_BA 0x40 + +/** + * enum iwl_context_info_flags - Context information control flags + * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting + * the init done for driver command that configures several system modes + * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug + * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump + * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K) + * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size + * exponent, the actual size is 2**value, valid sizes are 8-2048. + * The value is four bits long. 
Maximum valid exponent is 12 + * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the + * default is short format - not supported by the driver) + */ +enum iwl_context_info_flags { + IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0), + IWL_CTXT_INFO_EARLY_DEBUG = BIT(1), + IWL_CTXT_INFO_ENABLE_CDMP = BIT(2), + IWL_CTXT_INFO_RB_SIZE_4K = BIT(3), + IWL_CTXT_INFO_RB_CB_SIZE_POS = 4, + IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8), +}; + +/* + * struct iwl_context_info_version - version structure + * @mac_id: SKU and revision id + * @version: context information version id + * @size: the size of the context information in DWs + */ +struct iwl_context_info_version { + __le16 mac_id; + __le16 version; + __le16 size; + __le16 reserved; +} __packed; + +/* + * struct iwl_context_info_control - control structure + * @control_flags: context information flags, see &enum iwl_context_info_flags + */ +struct iwl_context_info_control { + __le32 control_flags; + __le32 reserved; +} __packed; + +/* + * struct iwl_context_info_dram - images DRAM map + * each entry in the map represents a DRAM chunk of up to 32 KB + * @umac_img: UMAC image DRAM map + * @lmac_img: LMAC image DRAM map + * @virtual_img: paged image DRAM map + */ +struct iwl_context_info_dram { + __le64 umac_img[IWL_MAX_DRAM_ENTRY]; + __le64 lmac_img[IWL_MAX_DRAM_ENTRY]; + __le64 virtual_img[IWL_MAX_DRAM_ENTRY]; +} __packed; + +/* + * struct iwl_context_info_rbd_cfg - RBDs configuration + * @free_rbd_addr: default queue free RB CB base address + * @used_rbd_addr: default queue used RB CB base address + * @status_wr_ptr: default queue used RB status write pointer + */ +struct iwl_context_info_rbd_cfg { + __le64 free_rbd_addr; + __le64 used_rbd_addr; + __le64 status_wr_ptr; +} __packed; + +/* + * struct iwl_context_info_hcmd_cfg - command queue configuration + * @cmd_queue_addr: address of command queue + * @cmd_queue_size: number of entries + */ +struct iwl_context_info_hcmd_cfg { + __le64 cmd_queue_addr; + u8 cmd_queue_size; + u8 reserved[7]; +} __packed; + +/* + * struct iwl_context_info_dump_cfg - Core Dump configuration + * @core_dump_addr: core dump (debug DRAM address) start address + * @core_dump_size: size, in DWs + */ +struct iwl_context_info_dump_cfg { + __le64 core_dump_addr; + __le32 core_dump_size; + __le32 reserved; +} __packed; + +/* + * struct iwl_context_info_pnvm_cfg - platform NVM data configuration + * @platform_nvm_addr: Platform NVM data start address + * @platform_nvm_size: size in DWs + */ +struct iwl_context_info_pnvm_cfg { + __le64 platform_nvm_addr; + __le32 platform_nvm_size; + __le32 reserved; +} __packed; + +/* + * struct iwl_context_info_early_dbg_cfg - early debug configuration for + * dumping DRAM addresses + * @early_debug_addr: early debug start address + * @early_debug_size: size in DWs + */ +struct iwl_context_info_early_dbg_cfg { + __le64 early_debug_addr; + __le32 early_debug_size; + __le32 reserved; +} __packed; + +/* + * struct iwl_context_info - device INIT configuration + * @version: version information of context info and HW + * @control: control flags of FH configurations + * @rbd_cfg: default RX queue configuration + * @hcmd_cfg: command queue configuration + * @dump_cfg: core dump data + * @edbg_cfg: early debug configuration + * @pnvm_cfg: platform nvm configuration + * @dram: firmware image addresses in DRAM + */ +struct iwl_context_info { + struct iwl_context_info_version version; + struct iwl_context_info_control control; + __le64 reserved0; + struct iwl_context_info_rbd_cfg rbd_cfg; + struct 
iwl_context_info_hcmd_cfg hcmd_cfg; + __le32 reserved1[4]; + struct iwl_context_info_dump_cfg dump_cfg; + struct iwl_context_info_early_dbg_cfg edbg_cfg; + struct iwl_context_info_pnvm_cfg pnvm_cfg; + __le32 reserved2[16]; + struct iwl_context_info_dram dram; + __le32 reserved3[16]; +} __packed; + +int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); +void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); +void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); + +#endif /* __iwl_context_info_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 4ee3b621ec27ab58c899e91c6552f15fc0d5a57a..fa120fb553737b8b5bdd96100ffb69a3679b86c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -348,7 +348,6 @@ enum { /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105000) -#define CSR_HW_RF_ID_TYPE_LC (0x00101000) #define CSR_HW_RF_ID_TYPE_HR (0x00109000) /* EEPROM REG */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index be466a074c1df8ad351c2b1ce9142a13955fae47..5cfacb0bca84df8fa603aafa44ad281af5e04fd3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -7,7 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -211,24 +211,46 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, static int iwl_request_firmware(struct iwl_drv *drv, bool first) { - const char *name_pre = drv->trans->cfg->fw_name_pre; + const struct iwl_cfg *cfg = drv->trans->cfg; char tag[8]; + const char *fw_pre_name; + + if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP) + fw_pre_name = cfg->fw_name_pre_next_step; + else + fw_pre_name = cfg->fw_name_pre; if (first) { - drv->fw_index = drv->trans->cfg->ucode_api_max; + drv->fw_index = cfg->ucode_api_max; sprintf(tag, "%d", drv->fw_index); } else { drv->fw_index--; sprintf(tag, "%d", drv->fw_index); } - if (drv->fw_index < drv->trans->cfg->ucode_api_min) { + if (drv->fw_index < cfg->ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); + + if (cfg->ucode_api_min == cfg->ucode_api_max) { + IWL_ERR(drv, "%s%d is required\n", fw_pre_name, + cfg->ucode_api_max); + } else { + IWL_ERR(drv, "minimum version required: %s%d\n", + fw_pre_name, + cfg->ucode_api_min); + IWL_ERR(drv, "maximum version supported: %s%d\n", + fw_pre_name, + cfg->ucode_api_max); + } + + IWL_ERR(drv, + "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n"); return -ENOENT; } snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", - name_pre, tag); + fw_pre_name, tag); IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); @@ -1260,7 +1282,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) - return; + goto out_free_fw; if (!ucode_raw) goto try_again; @@ -1472,7 +1494,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * or hangs loading. 
*/ if (load_module) { - err = request_module("%s", op->name); + request_module("%s", op->name); #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, @@ -1490,17 +1512,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto free; out_free_fw: - IWL_ERR(drv, "failed to allocate pci memory\n"); iwl_dealloc_ucode(drv); release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); free: - for (i = 0; i < ARRAY_SIZE(pieces->img); i++) - kfree(pieces->img[i].sec); - kfree(pieces->dbg_mem_tlv); - kfree(pieces); + if (pieces) { + for (i = 0; i < ARRAY_SIZE(pieces->img); i++) + kfree(pieces->img[i].sec); + kfree(pieces->dbg_mem_tlv); + kfree(pieces); + } } struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 33ef5372d1951b2b58618a5acb9c4b89657252eb..62f9fe926d781ee3dc87fff75b4fd270e5ab97f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -614,6 +614,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \ IWL_MAX_RX_HW_QUEUES * \ (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC)) +/* cb size is the exponent */ +#define RX_QUEUE_CB_SIZE(x) ilog2(x) #define RX_QUEUE_SIZE 256 #define RX_QUEUE_MASK 255 @@ -639,6 +641,8 @@ struct iwl_rb_status { #define TFD_QUEUE_SIZE_MAX (256) +/* cb size is the exponent - 3 */ +#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) @@ -647,7 +651,7 @@ struct iwl_rb_status { static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) { - return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF; + return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF; } /** * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index d01701ee477702272d4597a33d7c5710510f4fda..44419e82da1bf15d332d15934aba21dee3736989 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -241,6 +241,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * iteration complete notification, and the timestamp reported for RX * received during scan, are reported in TSF of the mac specified in the * scan request. + * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of + * ADD_MODIFY_STA_KEY_API_S_VER_2. + * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
* * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -250,6 +253,8 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29, + IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -344,6 +349,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38, + IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index a9f69fdd170b0fd0fb039ffb949a657c7dcefefd..9c8b09cf1f7bb89d9895b22bfca58aff95b2334d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -54,8 +54,8 @@ IWL_EXPORT_SYMBOL(iwl_write32); void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val) { trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val); - iwl_trans_write32(trans, ofs, val & 0xffffffff); - iwl_trans_write32(trans, ofs + 4, val >> 32); + iwl_trans_write32(trans, ofs, lower_32_bits(val)); + iwl_trans_write32(trans, ofs + 4, upper_32_bits(val)); } IWL_EXPORT_SYMBOL(iwl_write64); @@ -246,6 +246,9 @@ void iwl_force_nmi(struct iwl_trans *trans) DEVICE_SET_NMI_VAL_DRV); iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_HW); + } else if (trans->cfg->gen2) { + iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, + DEVICE_SET_NMI_8000_VAL); } else { iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG, DEVICE_SET_NMI_8000_VAL); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c index 88f260db3744006244509a71b938d0d2d26b4082..68412ff2112e34ec4cba0bc00a5814baec966798 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c @@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait) } IWL_EXPORT_SYMBOL(iwl_notification_wait_init); -void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt) +bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt) { bool triggered = false; @@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, } } spin_unlock(¬if_wait->notif_wait_lock); - } - if (triggered) - wake_up_all(¬if_wait->notif_waitq); + return triggered; } -IWL_EXPORT_SYMBOL(iwl_notification_wait_notify); +IWL_EXPORT_SYMBOL(iwl_notification_wait); void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h index 0f9995ed71cdef320a518cea5c2178dce4ec635a..368884be4e7c9641f80e402bb94b9e4a92d114fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h +++ 
b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -89,10 +90,10 @@ struct iwl_notif_wait_data { * * This structure is not used directly, to wait for a * notification declare it on the stack, and call - * iwlagn_init_notification_wait() with appropriate + * iwl_init_notification_wait() with appropriate * parameters. Then do whatever will cause the ucode * to notify the driver, and to wait for that then - * call iwlagn_wait_notification(). + * call iwl_wait_notification(). * * Each notification is one-shot. If at some point we * need to support multi-shot notifications (which @@ -114,10 +115,24 @@ struct iwl_notification_wait { /* caller functions */ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data); -void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data, - struct iwl_rx_packet *pkt); +bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt); void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); +static inline void +iwl_notification_notify(struct iwl_notif_wait_data *notif_data) +{ + wake_up_all(¬if_data->notif_waitq); +} + +static inline void +iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt) +{ + if (iwl_notification_wait(notif_data, pkt)) + iwl_notification_notify(notif_data); +} + /* user functions */ void __acquires(wait_entry) iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 3bd6fc1b76d4933aa11581d73302a25f55fad460..721ae6bef5daf8ac2e8ba5efb2fccb204dbc10ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -438,25 +439,16 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } -static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *ch_section, - u8 tx_chains, u8 rx_chains, bool lar_supported) +void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, + u8 tx_chains, u8 rx_chains, bool lar_supported) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) - n_channels = iwl_init_channel_map( - dev, cfg, data, - &ch_section[NVM_CHANNELS], lar_supported); - else - n_channels = iwl_init_channel_map( - dev, cfg, data, - &ch_section[NVM_CHANNELS_FAMILY_8000], - lar_supported); - + n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, + lar_supported); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -482,6 +474,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } +IWL_EXPORT_SYMBOL(iwl_init_sbands); static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) @@ -559,8 +552,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) dest[5] = hw_addr[0]; } -static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, - struct iwl_nvm_data *data) +void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data) { __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); @@ -578,6 +571,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } +IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr); static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, @@ -718,7 +712,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); lar_enabled = true; - ch_section = nvm_sw; + ch_section = &nvm_sw[NVM_CHANNELS]; } else { u16 lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_FAMILY_8000_OLD : @@ -728,7 +722,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED_FAMILY_8000); lar_enabled = data->lar_enabled; - ch_section = regulatory; + ch_section = ®ulatory[NVM_CHANNELS_FAMILY_8000]; } /* If no valid mac address was found - bail out */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 7249e5b403f4e832689ebf2f684e3de4ff6759c0..3fd6506a02ab816a6ca85213ad37b51a0f6b9e96 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. 
- * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -81,6 +82,19 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); +/** + * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on + */ +void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data); + +/** + * iwl_init_sbands - parse and set all channel profiles + */ +void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, + u8 tx_chains, u8 rx_chains, bool lar_supported); + /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 406ef301b8ab8aedd8a1e8e93e8800d53872a233..306bc967742ee9a7b5629bab00f20cab77309fa1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -114,6 +114,7 @@ #define DEVICE_SET_NMI_VAL_DRV BIT(7) #define DEVICE_SET_NMI_8000_REG 0x00a01c24 #define DEVICE_SET_NMI_8000_VAL 0x1000000 +#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 @@ -294,9 +295,6 @@ /*********************** END TX SCHEDULER *************************************/ -/* tcp checksum offload */ -#define RX_EN_CSUM (0x00a00d88) - /* Oscillator clock */ #define OSC_CLK (0xa04068) #define OSC_CLK_FORCE_CONTROL (0x8) @@ -309,6 +307,7 @@ * Note this address is cleared after MAC reset. 
*/ #define UREG_UCODE_LOAD_STATUS (0xa05c40) +#define UREG_CPU_INIT_RUN (0xa05c44) #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78) #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C) @@ -316,6 +315,8 @@ #define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) #define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) +#define LMAC2_PRPH_OFFSET (0x100000) + /* Rx FIFO */ #define RXF_SIZE_ADDR (0xa00c88) #define RXF_RD_D_SPACE (0xa00c40) @@ -378,6 +379,7 @@ #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 +#define WFPM_GP2 0xA030B4 enum { ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, @@ -398,6 +400,8 @@ enum aux_misc_master1_en { #define PREG_AUX_BUS_WPROT_0 0xA04CC0 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 +#define UMAG_SB_CPU_1_STATUS 0xA038C0 +#define UMAG_SB_CPU_2_STATUS 0xA038C4 /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index d42cab2910257d816c2281fdeb1aae1ae3dda0fc..0bde26bab15df35c2d43cd38252975cc59ebb675 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -70,8 +70,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_cfg *cfg, - const struct iwl_trans_ops *ops, - size_t dev_cmd_headroom) + const struct iwl_trans_ops *ops) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP @@ -90,15 +89,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, trans->dev = dev; trans->cfg = cfg; trans->ops = ops; - trans->dev_cmd_headroom = dev_cmd_headroom; trans->num_rx_queues = 1; snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd) - + trans->dev_cmd_headroom, + sizeof(struct iwl_device_cmd), sizeof(void *), SLAB_HWCACHE_ALIGN, NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 0296124a7f9cf013c01f46583d11d5c110185707..0ebfdbb22992c10a7375dea3cc4a9aa1783fab16 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -7,7 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -396,7 +396,10 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) * currently supports */ #define IWL_MAX_HW_QUEUES 32 +#define IWL_MAX_TVQM_QUEUES 512 + #define IWL_MAX_TID_COUNT 8 +#define IWL_MGMT_TID 15 #define IWL_FRAME_LIMIT 64 #define IWL_MAX_RX_HW_QUEUES 16 @@ -530,6 +533,44 @@ struct iwl_trans_txq_scd_cfg { int frame_limit; }; +/* Available options for &struct iwl_tx_queue_cfg_cmd */ +enum iwl_tx_queue_cfg_actions { + TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), + TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), +}; + +/** + * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command + * @sta_id: station id + * @tid: tid of the queue + * @flags: Bit 0 - on enable, off - disable, Bit 1 - short TFD format + * @cb_size: size of TFD cyclic buffer. Value is exponent - 3. + * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) + * @byte_cnt_addr: address of byte count table + * @tfdq_addr: address of TFD circular buffer + */ +struct iwl_tx_queue_cfg_cmd { + u8 sta_id; + u8 tid; + __le16 flags; + __le32 cb_size; + __le64 byte_cnt_addr; + __le64 tfdq_addr; +} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ + +/** + * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config + * @queue_number: queue number assigned to this RA/TID + * @flags: set on failure + * @write_pointer: initial value for write pointer + */ +struct iwl_tx_queue_cfg_rsp { + __le16 queue_number; + __le16 flags; + __le16 write_pointer; + __le16 reserved; +} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ + /** * struct iwl_trans_ops - transport specific operations * @@ -640,13 +681,17 @@ struct iwl_trans_ops { unsigned int queue_wdg_timeout); void (*txq_disable)(struct iwl_trans *trans, int queue, bool configure_scd); + /* a000 functions */ + int (*txq_alloc)(struct iwl_trans *trans, + struct iwl_tx_queue_cfg_cmd *cmd, + int cmd_id, + unsigned int queue_wdg_timeout); + void (*txq_free)(struct iwl_trans *trans, int queue); void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, bool shared); - dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id); - - int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); + int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm); void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, bool freeze); void (*block_txq_ptrs)(struct iwl_trans *trans, bool block); @@ -774,9 +819,6 @@ enum iwl_plat_pm_mode { * the transport must set this before calling iwl_drv_start() * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. - * @dev_cmd_headroom: room needed for the transport's private use before the - * device_cmd for Tx - for internal use only - * The user should use iwl_trans_{alloc,free}_tx_cmd.
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before * starting the firmware, used for tracing * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the @@ -827,7 +869,6 @@ struct iwl_trans { /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; - size_t dev_cmd_headroom; char dev_cmd_pool_name[50]; struct dentry *dbgfs_dir; @@ -1000,13 +1041,13 @@ iwl_trans_dump_data(struct iwl_trans *trans, static inline struct iwl_device_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { - u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); + struct iwl_device_cmd *dev_cmd_ptr = + kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); if (unlikely(dev_cmd_ptr == NULL)) return NULL; - return (struct iwl_device_cmd *) - (dev_cmd_ptr + trans->dev_cmd_headroom); + return dev_cmd_ptr; } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); @@ -1014,9 +1055,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, struct iwl_device_cmd *dev_cmd) { - u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom; - - kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr); + kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, @@ -1065,20 +1104,39 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); } -static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, - int queue, bool shared_mode) +static inline void +iwl_trans_txq_free(struct iwl_trans *trans, int queue) { - if (trans->ops->txq_set_shared_mode) - trans->ops->txq_set_shared_mode(trans, queue, shared_mode); + if (WARN_ON_ONCE(!trans->ops->txq_free)) + return; + + trans->ops->txq_free(trans, queue); } -static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans, - int queue) +static inline int +iwl_trans_txq_alloc(struct iwl_trans *trans, + struct iwl_tx_queue_cfg_cmd *cmd, + int cmd_id, + unsigned int queue_wdg_timeout) { - /* we should never be called if the trans doesn't support it */ - BUG_ON(!trans->ops->get_txq_byte_table); + might_sleep(); + + if (WARN_ON_ONCE(!trans->ops->txq_alloc)) + return -ENOTSUPP; + + if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + return -EIO; + } - return trans->ops->get_txq_byte_table(trans, queue); + return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout); +} + +static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, + int queue, bool shared_mode) +{ + if (trans->ops->txq_set_shared_mode) + trans->ops->txq_set_shared_mode(trans, queue, shared_mode); } static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, @@ -1137,15 +1195,15 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans, trans->ops->block_txq_ptrs(trans, block); } -static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, - u32 txqs) +static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, + u32 txqs) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } - return trans->ops->wait_tx_queue_empty(trans, txqs); + return trans->ops->wait_tx_queues_empty(trans, txqs); } static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1248,8 
+1306,7 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_cfg *cfg, - const struct iwl_trans_ops *ops, - size_t dev_cmd_headroom); + const struct iwl_trans_ops *ops); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index 7cb68f6ed1b0d49f58c152d796cd750d13833ff8..75d35f6b041ef50ed81a020a5d007b9f8746da71 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -82,9 +84,22 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; int i, ret; u32 status; + int size; memset(&cmd, 0, sizeof(cmd)); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { + size = sizeof(cmd); + if (phyctxt->channel->band == NL80211_BAND_2GHZ || + !iwl_mvm_is_cdb_supported(mvm)) + cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); + else + cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); + } else { + size = IWL_BINDING_CMD_SIZE_V1; + } + cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); cmd.action = cpu_to_le32(action); @@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - sizeof(cmd), &cmd, &status); + size, &cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", action, ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 5bdb6c2c8390b65149c07f61f986cc8f577a43ff..49b4418e6c35dac968a57fc8a9232af4db3af090 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) + if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) return; /* No BT - reports should be disabled */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index c7eb1983c4f9a191fc20d3cec151f069e7704b5d..119a3bd92c50f72c7c47756928734b99b7397f90 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_binding_cmd binding_cmd = {}; struct iwl_time_quota_cmd quota_cmd = {}; u32 status; + int size; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { + size = sizeof(binding_cmd); + if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ || + 
!iwl_mvm_is_cdb_supported(mvm)) + binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); + else + binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); + } else { + size = IWL_BINDING_CMD_SIZE_V1; + } /* add back the PHY */ if (WARN_ON(!mvmvif->phy_ctxt)) @@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, - sizeof(binding_cmd), &binding_cmd, - &status); + size, &binding_cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to add binding: %d\n", ret); return ret; @@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, goto out; } - if (key_data.use_tkip) { + if (key_data.use_tkip && + !fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, cmd_flags, sizeof(tkip_cmd), @@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) { + if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; @@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) */ iwl_mvm_update_changed_regdom(mvm); + if (!unified_image) + /* Re-configure default SAR profile */ + iwl_mvm_sar_select_profile(mvm, 1, 1); + if (mvm->net_detect) { /* If this is a non-unified image, we restart the FW, * so no need to stop the netdetect scan. If that diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index f4d75ffe3d8a8cfae24d1cea61bf918622564ed4..5d475b4850ae331002cf35c1d42a5d61dc5e7c84 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, mvmvif->queue_params[i].uapsd); if (vif->type == NL80211_IFTYPE_STATION && - ap_sta_id != IWL_MVM_STATION_COUNT) { + ap_sta_id != IWL_MVM_INVALID_STA) { struct iwl_mvm_sta *mvm_sta; mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 077bfd8f4c0cd85505a3ef7de375bfbaf4cddef5..402846650cbec93a3a871f64381cde6b7021220d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index 480a54af453477ac7194766d9a8ada9b3f10c923..970b030ed28df8bb68d245c858ba76fe6bd87ee0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -73,7 +73,9 @@ #define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) -#define IWL_MVM_STATION_COUNT 16 +#define IWL_MVM_STATION_COUNT 16 +#define IWL_MVM_INVALID_STA 0xFF + #define IWL_MVM_TDLS_STA_COUNT 4 enum iwl_ac { @@ -155,7 +157,8 @@ enum iwl_tsf_id { * @bi_reciprocal: 
2^32 / bi * @dtim_interval: dtim transmit time in TU * @dtim_reciprocal: 2^32 / dtim_interval - * @mcast_qid: queue ID for multicast traffic + * @mcast_qid: queue ID for multicast traffic. + * NOTE: obsolete from VER2 and on + * @beacon_template: beacon template ID */ struct iwl_mac_data_ap { @@ -167,7 +170,7 @@ struct iwl_mac_data_ap { __le32 dtim_reciprocal; __le32 mcast_qid; __le32 beacon_template; -} __packed; /* AP_MAC_DATA_API_S_VER_1 */ +} __packed; /* AP_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_ibss - configuration data for IBSS MAC context diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h index 3fa43d1348a2231ed3b5ac583a69bffcf1241582..750510aff70b0e8195015c282224d2700a5c496d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd { u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ +#define IWL_NUM_GEO_PROFILES 3 + +/** + * enum iwl_geo_per_chain_offset_operation - type of operation + * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. + * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. + */ +enum iwl_geo_per_chain_offset_operation { + IWL_PER_CHAIN_OFFSET_SET_TABLES, + IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, +}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */ + +/** + * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT. + * @max_tx_power: maximum allowed tx power. + * @chain_a: tx power offset for chain a. + * @chain_b: tx power offset for chain b. + */ +struct iwl_per_chain_offset { + __le16 max_tx_power; + u8 chain_a; + u8 chain_b; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ + +struct iwl_per_chain_offset_group { + struct iwl_per_chain_offset lb; + struct iwl_per_chain_offset hb; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */ + +/** + * struct iwl_geo_tx_power_profiles_cmd - struct for GEO_TX_POWER_LIMIT cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band.
+ */ +struct iwl_geo_tx_power_profiles_cmd { + __le32 ops; + struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; +} __packed; /* GEO_TX_POWER_LIMIT */ + /** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h index ad9cc03e16c43159da41b4af9c9db132979d3dac..1b7d265ffb0acb476228b5b2a10040c09e41d61c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -227,6 +229,9 @@ enum { */ #define RATE_LEGACY_RATE_MSK 0xff +/* Bit 10 - OFDM HE */ +#define RATE_MCS_OFDM_HE_POS 10 +#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz @@ -255,18 +260,29 @@ enum { #define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK #define RATE_MCS_ANT_NUM 3 -/* Bit 17-18: (0) SS, (1) SS*2 */ +/* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 -#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS) -#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS) +#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS) + +/* Bit 18: OFDM-HE dual carrier mode */ +#define RATE_HE_DUAL_CARRIER_MODE 18 +#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE) /* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ #define RATE_MCS_BF_POS 19 #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) -/* Bit 20: (0) ZLF is off, (1) ZLF is on */ -#define RATE_MCS_ZLF_POS 20 -#define RATE_MCS_ZLF_MSK (1 << RATE_MCS_ZLF_POS) +/* + * Bit 20-21: HE guard interval and LTF type. + * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, + * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us + */ +#define RATE_MCS_HE_GI_LTF_POS 20 +#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) + +/* Bit 22-23: HE type. 
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ +#define RATE_MCS_HE_TYPE_POS 22 +#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ #define RATE_MCS_DUP_POS 24 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h index c78a0c49945981de464cda0d1d863a95a0173178..3178eb96e395098515784888259fb5c1f670ed79 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h @@ -516,7 +516,7 @@ struct iwl_scan_dwell { * scan_config_channel_flag * @channel_array: default supported channels */ -struct iwl_scan_config { +struct iwl_scan_config_v1 { __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -532,7 +532,7 @@ struct iwl_scan_config { #define SCAN_TWO_LMACS 2 -struct iwl_scan_config_cdb { +struct iwl_scan_config { __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -669,7 +669,7 @@ struct iwl_scan_req_umac { u8 n_channels; __le16 reserved; u8 data[]; - } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; @@ -679,13 +679,13 @@ struct iwl_scan_req_umac { u8 n_channels; __le16 reserved; u8 data[]; - } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */ + } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac) -#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) +#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(__le32)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 3b5150e9975d6e49e3e191b67020d5656d0cd0b4..421b9dd1fb66c557e6e552059a3fd5c62a0503e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -179,7 +179,7 @@ enum iwl_sta_key_flag { * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx - * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs + * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count @@ -214,20 +214,6 @@ enum iwl_sta_sleep_flag { STA_SLEEP_STATE_MOREDATA = BIT(2), }; -/* STA ID and color bits definitions */ -#define STA_ID_SEED (0x0f) -#define STA_ID_POS (0) -#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS) - -#define STA_COLOR_SEED (0x7) -#define STA_COLOR_POS (4) -#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS) - -#define STA_ID_N_COLOR_GET_COLOR(id_n_color) \ - (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS) -#define STA_ID_N_COLOR_GET_ID(id_n_color) \ - (((id_n_color) & STA_ID_MSK) >> STA_ID_POS) - #define STA_KEY_MAX_NUM (16) #define STA_KEY_IDX_INVALID (0xff) #define STA_KEY_MAX_DATA_KEY_NUM (4) @@ -323,6 +309,24 @@ struct iwl_mvm_add_sta_cmd_v7 { __le32 tfd_queue_msk; } __packed; /* ADD_STA_CMD_API_S_VER_7 */ +/** + * enum iwl_sta_type - FW station types + * ( REPLY_ADD_STA = 0x18 ) + * @IWL_STA_LINK: Link station - normal RX and TX traffic. + * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons + * and probe responses. + * @IWL_STA_MULTICAST: multicast traffic, + * @IWL_STA_TDLS_LINK: TDLS link station + * @IWL_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on). + */ +enum iwl_sta_type { + IWL_STA_LINK, + IWL_STA_GENERAL_PURPOSE, + IWL_STA_MULTICAST, + IWL_STA_TDLS_LINK, + IWL_STA_AUX_ACTIVITY, +}; + /** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) @@ -347,14 +351,17 @@ struct iwl_mvm_add_sta_cmd_v7 { * @sleep_tx_count: number of packets to transmit to station even though it is * asleep. Used to synchronise PS-poll and u-APSD responses while ucode * keeps track of STA sleep state. + * @station_type: type of this station. See &enum iwl_sta_type. * @sleep_state_flags: Look at %iwl_sta_sleep_flag. * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. * @beamform_flags: beam forming controls - * @tfd_queue_msk: tfd queues used by this station + * @tfd_queue_msk: tfd queues used by this station. + * Obsolete for new TX API (9 and above). * @rx_ba_window: aggregation window size - * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means - * that the queues used by this station are in the first 32. + * @sp_length: the size of the SP as it appears in the WME IE + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver + * enabled ACs.
* * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx @@ -379,37 +386,60 @@ struct iwl_mvm_add_sta_cmd { u8 remove_immediate_ba_tid; __le16 add_immediate_ba_ssn; __le16 sleep_tx_count; - __le16 sleep_state_flags; + u8 sleep_state_flags; + u8 station_type; __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; __le16 rx_ba_window; - u8 scd_queue_bank; - u8 uapsd_trigger_acs; -} __packed; /* ADD_STA_CMD_API_S_VER_8 */ + u8 sp_length; + u8 uapsd_acs; +} __packed; /* ADD_STA_CMD_API_S_VER_10 */ /** - * struct iwl_mvm_add_sta_key_cmd - add/modify sta key + * struct iwl_mvm_add_sta_key_common - add/modify sta key common part * ( REPLY_ADD_STA_KEY = 0x17 ) * @sta_id: index of station in uCode's station table * @key_offset: key offset in key storage * @key_flags: type %iwl_sta_key_flag * @key: key material data * @rx_secur_seq_cnt: RX security sequence counter for the key - * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection - * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx */ -struct iwl_mvm_add_sta_key_cmd { +struct iwl_mvm_add_sta_key_common { u8 sta_id; u8 key_offset; __le16 key_flags; u8 key[32]; u8 rx_secur_seq_cnt[16]; +} __packed; + +/** + * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key + * @common: see &struct iwl_mvm_add_sta_key_common + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + */ +struct iwl_mvm_add_sta_key_cmd_v1 { + struct iwl_mvm_add_sta_key_common common; u8 tkip_rx_tsc_byte2; u8 reserved; __le16 tkip_rx_ttak[5]; } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ +/** + * struct iwl_mvm_add_sta_key_cmd - add/modify sta key + * @common: see &struct iwl_mvm_add_sta_key_common + * @rx_mic_key: TKIP RX unicast or multicast key + * @tx_mic_key: TKIP TX key + * @transmit_seq_cnt: TSC, transmit packet number + */ +struct iwl_mvm_add_sta_key_cmd { + struct iwl_mvm_add_sta_key_common common; + __le64 rx_mic_key; + __le64 tx_mic_key; + __le64 transmit_seq_cnt; +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */ + /** * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command * @ADD_STA_SUCCESS: operation was executed successfully diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index b38cc073adcc7b0eb876bb4773dc2ad22762a8f9..81b98915b1a42e21a318d293c459015688489c7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -123,6 +124,20 @@ enum iwl_tx_flags { TX_CMD_FLG_HCCA_CHUNK = BIT(31) }; /* TX_FLAGS_BITS_API_S_VER_1 */ +/** + * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000 + * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command + * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs + * to a secured STA + * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate + * selection, retry limits and BT kill + */ +enum iwl_tx_cmd_flags { + IWL_TX_FLAGS_CMD_RATE = BIT(0), + IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), + IWL_TX_FLAGS_HIGH_PRI = BIT(2), +}; /* TX_FLAGS_BITS_API_S_VER_3 */ + /** * enum iwl_tx_pm_timeouts - pm timeout values in TX command * @PM_FRAME_NONE: no need to suspend sleep mode @@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl { TX_CMD_SEC_EXT = 0x04, TX_CMD_SEC_GCMP = 0x05, TX_CMD_SEC_KEY128 = 0x08, - TX_CMD_SEC_KEY_FROM_TABLE = 0x08, + TX_CMD_SEC_KEY_FROM_TABLE = 0x10, }; /* TODO: how does these values are OK with only 16 bit variable??? */ @@ -301,6 +316,31 @@ struct iwl_tx_cmd { struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_6 */ +struct iwl_dram_sec_info { + __le32 pn_low; + __le16 pn_high; + __le16 aux_info; +} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ + +/** + * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @offload_assist: TX offload configuration + * @flags: combination of &iwl_tx_cmd_flags + * @dram_info: FW internal DRAM storage + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + */ +struct iwl_tx_cmd_gen2 { + __le16 len; + __le16 offload_assist; + __le32 flags; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_7 */ + /* * TX response related data */ @@ -508,9 +548,11 @@ struct agg_tx_status { * @tlc_info: TLC rate info * @ra_tid: bits [3:0] = ra, bits [7:4] = tid * @frame_ctrl: frame control + * @tx_queue: TX queue for this response * @status: for non-agg: frame status TX_STATUS_* * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields * follow this one, up to frame_count. + * For version 6 TX response isn't received for aggregation at all. * * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details.
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp { u8 tlc_info; u8 ra_tid; __le16 frame_ctrl; - - struct agg_tx_status status; -} __packed; /* TX_RSP_API_S_VER_3 */ + union { + struct { + struct agg_tx_status status; + } v3;/* TX_RSP_API_S_VER_3 */ + struct { + __le16 tx_queue; + __le16 reserved2; + struct agg_tx_status status; + } v6; + }; +} __packed; /* TX_RSP_API_S_VER_6 */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA @@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif { * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue * @q_num: TFD queue number * @tfd_index: Index of first un-acked frame in the TFD queue + * @scd_queue: For debug only - the physical queue the TFD queue is bound to */ struct iwl_mvm_compressed_ba_tfd { - u8 q_num; - u8 reserved; + __le16 q_num; __le16 tfd_index; + u8 scd_queue; + u8 reserved; + __le16 reserved2; } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ /** @@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags { * @tx_rate: the rate the aggregation was sent at * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements + * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd + * for details. + * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See + * &iwl_mvm_compressed_ba_ratid for more details. */ struct iwl_mvm_compressed_ba_notif { __le32 flags; @@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif { __le16 query_frame_cnt; __le16 txed; __le16 done; + __le16 reserved; __le32 wireless_time; __le32 tx_rate; __le16 tfd_cnt; @@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd { __le16 reserved; } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ -/** - * iwl_mvm_get_scd_ssn - returns the SSN of the SCD - * @tx_resp: the Tx response from the fw (agg or non-agg) - * - * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since - * it can't know that everything will go well until the end of the AMPDU, it - * can't know in advance the number of MPDUs that will be sent in the current - * batch. This is why it writes the agg Tx response while it fetches the MPDUs. - * Hence, it can't know in advance what the SSN of the SCD will be at the end - * of the batch. This is why the SSN of the SCD is written at the end of the - * whole struct at a variable offset. This function knows how to cope with the - * variable offset and returns the SSN of the SCD. - */ -static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp) -{ - return le32_to_cpup((__le32 *)&tx_resp->status + - tx_resp->frame_count) & 0xfff; -} - /* Available options for the SCD_QUEUE_CFG HCMD */ enum iwl_scd_cfg_actions { SCD_CFG_DISABLE_QUEUE = 0x0, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index cf2b836f38881364c440be63cc7feb76ae738bdc..f545c5f9e4e38e8c1d565885b4b60222589b6230 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, CTDP_CONFIG_CMD = 0x03, TEMP_REPORTING_THRESHOLDS_CMD = 0x04, + GEO_TX_POWER_LIMIT = 0x05, CT_KILL_NOTIFICATION = 0xFE, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; enum iwl_system_subcmd_ids { SHARED_MEM_CFG_CMD = 0x0, + INIT_EXTENDED_CFG_CMD = 0x03, }; enum iwl_data_path_subcmd_ids { @@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids { NVM_ACCESS_COMPLETE = 0x0, }; -enum iwl_fmac_debug_cmds { +enum iwl_debug_cmds { LMAC_RD_WR = 0x0, UMAC_RD_WR = 0x1, + MFU_ASSERT_DUMP_NTF = 0xFE, }; /* command groups */ @@ -673,10 +676,8 @@ struct iwl_error_resp { /* Common PHY, MAC and Bindings definitions */ - #define MAX_MACS_IN_BINDING (3) #define MAX_BINDINGS (4) -#define AUX_BINDING_INDEX (3) /* Used to extract ID and color from the context dword */ #define FW_CTXT_ID_POS (0) @@ -689,7 +690,7 @@ struct iwl_error_resp { (_color << FW_CTXT_COLOR_POS)) /* Possible actions on PHYs, MACs and Bindings */ -enum { +enum iwl_phy_ctxt_action { FW_CTXT_ACTION_STUB = 0, FW_CTXT_ACTION_ADD, FW_CTXT_ACTION_MODIFY, @@ -960,6 +961,7 @@ struct iwl_time_event_notif { * @action: action to perform, one of FW_CTXT_ACTION_* * @macs: array of MAC id and colors which belong to the binding * @phy: PHY id and color which belongs to the binding + * @lmac_id: the lmac id the binding belongs to */ struct iwl_binding_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ @@ -968,7 +970,13 @@ struct iwl_binding_cmd { /* BINDING_DATA_API_S_VER_1 */ __le32 macs[MAX_MACS_IN_BINDING]; __le32 phy; -} __packed; /* BINDING_CMD_API_S_VER_1 */ + /* BINDING_CMD_API_S_VER_1 */ + __le32 lmac_id; +} __packed; /* BINDING_CMD_API_S_VER_2 */ + +#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id) +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 @@ -990,6 +998,9 @@ struct iwl_time_quota_data { * struct iwl_time_quota_cmd - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * @quotas: allocations per binding + * Note: on non-CDB the fourth one is the auxiliary mac and is + * essentially zero. + * On CDB the fourth one is a regular binding. */ struct iwl_time_quota_cmd { struct iwl_time_quota_data quotas[MAX_BINDINGS]; @@ -1230,6 +1241,25 @@ struct iwl_mfuart_load_notif { __le32 image_size; } __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/ +/** + * struct iwl_mfu_assert_dump_notif - mfuart dump logs + * ( MFU_ASSERT_DUMP_NTF = 0xfe ) + * @assert_id: mfuart assert id that caused the notif + * @curr_reset_num: number of asserts since uptime + * @index_num: current chunk id + * @parts_num: total number of chunks + * @data_size: number of data bytes sent + * @data: data buffer + */ +struct iwl_mfu_assert_dump_notif { + __le32 assert_id; + __le32 curr_reset_num; + __le16 index_num; + __le16 parts_num; + __le32 data_size; + __le32 data[0]; +} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/ + /** * struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e ) @@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 { __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ +/** + * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration + * + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) + * @txfifo_size: size of TX FIFOs + * @rxfifo1_addr: RXF1 addr + * @rxfifo1_size: RXF1 size + */ +struct iwl_shared_mem_lmac_cfg { + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_addr; + __le32 rxfifo1_size; +} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ + +/** + * struct iwl_shared_mem_cfg - shared memory configuration information from the FW + * + * @shared_mem_addr: shared memory address + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr + * @sample_buff_size: internal sample buff size + * @rxfifo2_addr: start addr of RXF2 + * @rxfifo2_size: size of RXF2 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @lmac_num: number of LMACs (1 or 2) + * @lmac_smem: per-LMAC smem data + */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; __le32 shared_mem_size; __le32 sample_buff_addr; __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; + __le32 rxfifo2_addr; + __le32 rxfifo2_size; __le32 page_buff_addr; __le32 page_buff_size; - __le32 rxfifo_addr; - __le32 internal_txfifo_addr; - __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; + __le32 lmac_num; + struct iwl_shared_mem_lmac_cfg lmac_smem[2]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ /** @@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd { __le32 reserved; } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ +/** + * enum iwl_extended_cfg_flags - commands driver may send before + * finishing init flow + * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command + * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands + * @IWL_INIT_PHY: driver is going to send PHY_DB commands + */ +enum iwl_extended_cfg_flags { + IWL_INIT_DEBUG_CFG, + IWL_INIT_NVM, + IWL_INIT_PHY, +}; + +/** + * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for + * before finishing init flows + * @init_flags: values from iwl_extended_cfg_flags + */ +struct iwl_init_extended_cfg_cmd { + __le32 init_flags; +} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index a027b11bbdb38b5040d679982762959c03df97d2..7b86a4f1b574c6f507fbde87c1276241fd534a4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, iwl_trans_release_nic_access(mvm->trans, &flags); } +static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + int i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = size; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + return; + + /* Add a TLV for the RXF */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(fifo_num); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_D_SPACE + offset)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_WR_PTR + offset)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_RD_PTR + offset)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_RD_FENCE_PTR + offset)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + RXF_SET_FENCE_MODE + offset)); + + /* Lock fence */ + iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1); + /* Set fence pointer to the same place like WR pointer */ + iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1); + /* Set fence offset */ + iwl_trans_write_prph(mvm->trans, + RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (i = 0; i < fifo_len; i++) + fifo_data[i] = iwl_trans_read_prph(mvm->trans, + RXF_FIFO_RD_FENCE_INC + + offset); + *dump_data = iwl_fw_error_next_data(*dump_data); +} + +static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + int i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = size; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + return; + + /* Add a TLV for the FIFO */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(fifo_num); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FIFO_ITEM_CNT + offset)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_WR_PTR + offset)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_RD_PTR + offset)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_FENCE_PTR + offset)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(mvm->trans, + TXF_LOCK_FENCE + offset)); + + /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ + iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset, + TXF_WR_PTR + offset); + + /* Dummy-read to advance the read pointer to the head */ + iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (i = 0; i < fifo_len; i++) + 
fifo_data[i] = iwl_trans_read_prph(mvm->trans, + TXF_READ_MODIFY_DATA + + offset); + *dump_data = iwl_fw_error_next_data(*dump_data); +} + static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; + struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg; u32 *fifo_data; u32 fifo_len; unsigned long flags; @@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) return; - /* Pull RXF data from all RXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) { - /* - * Keep aside the additional offset that might be needed for - * next RXF - */ - u32 offset_diff = RXF_DIFF_FROM_PREV * i; - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.rxfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the RXF */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_D_SPACE + - offset_diff)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_WR_PTR + - offset_diff)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_RD_PTR + - offset_diff)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_FENCE_PTR + - offset_diff)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_SET_FENCE_MODE + - offset_diff)); - - /* Lock fence */ - iwl_trans_write_prph(mvm->trans, - RXF_SET_FENCE_MODE + offset_diff, 0x1); - /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_WR2FENCE + offset_diff, 0x1); - /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_FENCE_OFFSET_ADDR + offset_diff, - 0x0); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - RXF_FIFO_RD_FENCE_INC + - offset_diff); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - - /* Pull TXF data from all TXFs */ - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) { + /* Pull RXF1 */ + iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); + /* Pull RXF2 */ + iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); + /* Pull LMAC2 RXF1 */ + if (mvm->smem_cfg.num_lmacs > 1) + iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); + + /* Pull TXF data from LMAC1 */ + for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); + iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i], + 0, i); + } - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.txfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the FIFO */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FIFO_ITEM_CNT)); - fifo_hdr->wr_ptr = - 
cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_WR_PTR)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_RD_PTR)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FENCE_PTR)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_LOCK_FENCE)); - - /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR, - TXF_WR_PTR); - - /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = iwl_trans_read_prph(mvm->trans, - TXF_READ_MODIFY_DATA); - *dump_data = iwl_fw_error_next_data(*dump_data); + /* Pull TXF data from LMAC2 */ + if (mvm->smem_cfg.num_lmacs > 1) { + for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(mvm->trans, + TXF_LARC_NUM + LMAC2_PRPH_OFFSET, + i); + iwl_mvm_dump_txf(mvm, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); + } } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; - i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i]; + fifo_len = mvm->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) @@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + - ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size)); + mvm->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(mvm->trans, @@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg; + struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg; fifo_data_len = 0; - /* Count RXF size */ - for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) { - if (!mem_cfg->rxfifo_size[i]) - continue; - + /* Count RXF2 size */ + if (mem_cfg->rxfifo2_size) { /* Add header info */ - fifo_data_len += mem_cfg->rxfifo_size[i] + + fifo_data_len += mem_cfg->rxfifo2_size + sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_fifo); } - for (i = 0; i < mem_cfg->num_txfifo_entries; i++) { - if (!mem_cfg->txfifo_size[i]) + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + if (!mem_cfg->lmac[i].rxfifo1_size) continue; /* Add header info */ - fifo_data_len += mem_cfg->txfifo_size[i] + + fifo_data_len += mem_cfg->lmac[i].rxfifo1_size + sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_fifo); } + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; + + for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { + if (!mem_cfg->lmac[i].txfifo_size[j]) + continue; + + /* Add header info */ + fifo_data_len += + mem_cfg->lmac[i].txfifo_size[j] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { for (i = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 45cb4f476e761b18dd7a2326ff01cc571e9b3df9..e6c9528eeedac795ddf41e330470969ec2ecd142 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) return 0; } +void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; + __le32 *dump_data = mfu_dump_notif->data; + int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); + int i; + + if (mfu_dump_notif->index_num == 0) + IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", + le32_to_cpu(mfu_dump_notif->assert_id)); + + for (i = 0; i < n_words; i++) + IWL_DEBUG_INFO(mvm, + "MFUART assert dump, dword %u: 0x%08x\n", + le16_to_cpu(mfu_dump_notif->index_num) * + n_words + i, + le32_to_cpu(dump_data[i])); +} + static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) { @@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) { - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + struct iwl_trans *trans = mvm->trans; + + if (trans->cfg->gen2) + IWL_ERR(mvm, + "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", + iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), + iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS)); + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", - iwl_read_prph(mvm->trans, SB_CPU_1_STATUS), - iwl_read_prph(mvm->trans, SB_CPU_2_STATUS)); + iwl_read_prph(trans, SB_CPU_1_STATUS), + iwl_read_prph(trans, SB_CPU_2_STATUS)); mvm->cur_ucode = old_type; return ret; } @@ -669,6 +698,82 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return 0; } +static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +{ + struct iwl_notification_wait init_wait; + struct iwl_nvm_access_complete_cmd nvm_complete = {}; + struct iwl_init_extended_cfg_cmd init_cfg = { + .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), + }; + static const u16 init_complete[] = { + INIT_COMPLETE_NOTIF, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_init_notification_wait(&mvm->notif_wait, + &init_wait, + init_complete, + ARRAY_SIZE(init_complete), + iwl_wait_init_complete, + NULL); + + /* Will also start the device */ + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); + if (ret) { + IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + goto error; + } + + /* Send init config command to mark that we are sending NVM access + * commands + */ + ret = iwl_mvm_send_cmd_pdu(mvm, 
WIDE_ID(SYSTEM_GROUP, + INIT_EXTENDED_CFG_CMD), 0, + sizeof(init_cfg), &init_cfg); + if (ret) { + IWL_ERR(mvm, "Failed to run init config command: %d\n", + ret); + goto error; + } + + /* Read the NVM only at driver load time, no need to do this twice */ + if (read_nvm) { + /* Read nvm */ + ret = iwl_nvm_init(mvm, true); + if (ret) { + IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); + goto error; + } + } + + /* In case we read the NVM from external file, load it to the NIC */ + if (mvm->nvm_file_name) + iwl_mvm_load_nvm_to_nic(mvm); + + ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); + if (WARN_ON(ret)) + goto error; + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, + NVM_ACCESS_COMPLETE), 0, + sizeof(nvm_complete), &nvm_complete); + if (ret) { + IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", + ret); + goto error; + } + + /* We wait for the INIT complete notification */ + return iwl_wait_notification(&mvm->notif_wait, &init_wait, + MVM_UCODE_ALIVE_TIMEOUT); + +error: + iwl_remove_notification(&mvm->notif_wait, &init_wait); + return ret; +} + static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd phy_cfg_cmd; @@ -697,6 +802,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) }; int ret; + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_run_unified_mvm_ucode(mvm, true); + lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->calibrating)) @@ -803,97 +911,31 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) return ret; } -int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) -{ - struct iwl_notification_wait init_wait; - struct iwl_nvm_access_complete_cmd nvm_complete = {}; - static const u16 init_complete[] = { - INIT_COMPLETE_NOTIF, - }; - int ret; - - lockdep_assert_held(&mvm->mutex); - - iwl_init_notification_wait(&mvm->notif_wait, - &init_wait, - init_complete, - ARRAY_SIZE(init_complete), - iwl_wait_init_complete, - NULL); - - /* Will also start the device */ - ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); - if (ret) { - IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - goto error; - } - - /* TODO: remove when integrating context info */ - ret = iwl_mvm_init_paging(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to init paging: %d\n", - ret); - goto error; - } - - /* Read the NVM only at driver load time, no need to do this twice */ - if (read_nvm) { - /* Read nvm */ - ret = iwl_nvm_init(mvm, true); - if (ret) { - IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; - } - } - - /* In case we read the NVM from external file, load it to the NIC */ - if (mvm->nvm_file_name) - iwl_mvm_load_nvm_to_nic(mvm); - - ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); - if (WARN_ON(ret)) - goto error; - - ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, - NVM_ACCESS_COMPLETE), 0, - sizeof(nvm_complete), &nvm_complete); - if (ret) { - IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", - ret); - goto error; - } - - /* We wait for the INIT complete notification */ - return iwl_wait_notification(&mvm->notif_wait, &init_wait, - MVM_UCODE_ALIVE_TIMEOUT); - -error: - iwl_remove_notification(&mvm->notif_wait, &init_wait); - return ret; -} - static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; - int i; + int i, lmac; + int lmac_num = le32_to_cpu(mem_cfg->lmac_num); - mvm->shared_mem_cfg.num_txfifo_entries = - ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); - for (i = 
0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) - mvm->shared_mem_cfg.txfifo_size[i] = - le32_to_cpu(mem_cfg->txfifo_size[i]); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) - mvm->shared_mem_cfg.rxfifo_size[i] = - le32_to_cpu(mem_cfg->rxfifo_size[i]); + if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) + return; + + mvm->smem_cfg.num_lmacs = lmac_num; + mvm->smem_cfg.num_txfifo_entries = + ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); + mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); - BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != - sizeof(mem_cfg->internal_txfifo_size)); + for (lmac = 0; lmac < lmac_num; lmac++) { + struct iwl_shared_mem_lmac_cfg *lmac_cfg = + &mem_cfg->lmac_smem[lmac]; - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); - i++) - mvm->shared_mem_cfg.internal_txfifo_size[i] = - le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) + mvm->smem_cfg.lmac[lmac].txfifo_size[i] = + le32_to_cpu(lmac_cfg->txfifo_size[i]); + mvm->smem_cfg.lmac[lmac].rxfifo1_size = + le32_to_cpu(lmac_cfg->rxfifo1_size); + } } static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, @@ -902,25 +944,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data; int i; - mvm->shared_mem_cfg.num_txfifo_entries = - ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); + mvm->smem_cfg.num_lmacs = 1; + + mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) - mvm->shared_mem_cfg.txfifo_size[i] = + mvm->smem_cfg.lmac[0].txfifo_size[i] = le32_to_cpu(mem_cfg->txfifo_size[i]); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) - mvm->shared_mem_cfg.rxfifo_size[i] = - le32_to_cpu(mem_cfg->rxfifo_size[i]); + + mvm->smem_cfg.lmac[0].rxfifo1_size = + le32_to_cpu(mem_cfg->rxfifo_size[0]); + mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); /* new API has more data, from rxfifo_addr field and on */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != + BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) != sizeof(mem_cfg->internal_txfifo_size)); for (i = 0; - i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); i++) - mvm->shared_mem_cfg.internal_txfifo_size[i] = + mvm->smem_cfg.internal_txfifo_size[i] = le32_to_cpu(mem_cfg->internal_txfifo_size[i]); } } @@ -969,85 +1013,94 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) sizeof(cmd), &cmd); } -#define ACPI_WRDS_METHOD "WRDS" -#define ACPI_WRDS_WIFI (0x07) -#define ACPI_WRDS_TABLE_SIZE 10 +#ifdef CONFIG_ACPI +#define ACPI_WRDS_METHOD "WRDS" +#define ACPI_EWRD_METHOD "EWRD" +#define ACPI_WGDS_METHOD "WGDS" +#define ACPI_WIFI_DOMAIN (0x07) +#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2) +#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \ + IWL_MVM_SAR_TABLE_SIZE + 3) +#define ACPI_WGDS_WIFI_DATA_SIZE 18 +#define ACPI_WGDS_NUM_BANDS 2 +#define ACPI_WGDS_TABLE_SIZE 3 + +static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm, + union acpi_object *table, + struct iwl_mvm_sar_profile *profile, + bool enabled) +{ + int i; -struct iwl_mvm_sar_table { - bool enabled; - u8 values[ACPI_WRDS_TABLE_SIZE]; -}; + profile->enabled = enabled; -#ifdef CONFIG_ACPI -static int iwl_mvm_sar_get_wrds(struct 
iwl_mvm *mvm, union acpi_object *wrds, - struct iwl_mvm_sar_table *sar_table) + for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) { + if ((table[i].type != ACPI_TYPE_INTEGER) || + (table[i].integer.value > U8_MAX)) + return -EINVAL; + + profile->table[i] = table[i].integer.value; + } + + return 0; +} + +static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm, + union acpi_object *data, + int data_size) { - union acpi_object *data_pkg; - u32 i; + int i; + union acpi_object *wifi_pkg; - /* We need at least two packages, one for the revision and one + /* + * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid * (i.e. it is an integer set to 0). - */ - if (wrds->type != ACPI_TYPE_PACKAGE || - wrds->package.count < 2 || - wrds->package.elements[0].type != ACPI_TYPE_INTEGER || - wrds->package.elements[0].integer.value != 0) { - IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n"); - return -EINVAL; + */ + if (data->type != ACPI_TYPE_PACKAGE || + data->package.count < 2 || + data->package.elements[0].type != ACPI_TYPE_INTEGER || + data->package.elements[0].integer.value != 0) { + IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n"); + return ERR_PTR(-EINVAL); } /* loop through all the packages to find the one for WiFi */ - for (i = 1; i < wrds->package.count; i++) { + for (i = 1; i < data->package.count; i++) { union acpi_object *domain; - data_pkg = &wrds->package.elements[i]; + wifi_pkg = &data->package.elements[i]; /* Skip anything that is not a package with the right * amount of elements (i.e. domain_type, - * enabled/disabled plus the sar table size. + * enabled/disabled plus the actual data size. */ - if (data_pkg->type != ACPI_TYPE_PACKAGE || - data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2) + if (wifi_pkg->type != ACPI_TYPE_PACKAGE || + wifi_pkg->package.count != data_size) continue; - domain = &data_pkg->package.elements[0]; + domain = &wifi_pkg->package.elements[0]; if (domain->type == ACPI_TYPE_INTEGER && - domain->integer.value == ACPI_WRDS_WIFI) + domain->integer.value == ACPI_WIFI_DOMAIN) break; - data_pkg = NULL; + wifi_pkg = NULL; } - if (!data_pkg) - return -ENOENT; - - if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) - return -EINVAL; - - sar_table->enabled = !!(data_pkg->package.elements[1].integer.value); - - for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) { - union acpi_object *entry; - - entry = &data_pkg->package.elements[i + 2]; - if ((entry->type != ACPI_TYPE_INTEGER) || - (entry->integer.value > U8_MAX)) - return -EINVAL; - - sar_table->values[i] = entry->integer.value; - } + if (!wifi_pkg) + return ERR_PTR(-ENOENT); - return 0; + return wifi_pkg; } -static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, - struct iwl_mvm_sar_table *sar_table) +static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) { + union acpi_object *wifi_pkg, *table; acpi_handle root_handle; acpi_handle handle; struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; + bool enabled; int ret; root_handle = ACPI_HANDLE(mvm->dev); @@ -1072,62 +1125,307 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, return -ENOENT; } - ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table); + wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer, + ACPI_WRDS_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out_free; + } + + enabled = 
!!(wifi_pkg->package.elements[1].integer.value); + + /* position of the actual table */ + table = &wifi_pkg->package.elements[2]; + + /* The profile from WRDS is officially profile 1, but goes + * into sar_profiles[0] (because we don't have a profile 0). + */ + ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0], + enabled); + +out_free: kfree(wrds.pointer); + return ret; +} +static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) +{ + union acpi_object *wifi_pkg; + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + bool enabled; + int i, n_profiles, ret; + + root_handle = ACPI_HANDLE(mvm->dev); + if (!root_handle) { + IWL_DEBUG_RADIO(mvm, + "Could not retrieve root port ACPI handle\n"); + return -ENOENT; + } + + /* Get the method's handle */ + status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD, + &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "EWRD method not found\n"); + return -ENOENT; + } + + /* Call EWRD with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &ewrd); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status); + return -ENOENT; + } + + wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer, + ACPI_EWRD_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) || + (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) { + ret = -EINVAL; + goto out_free; + } + + enabled = !!(wifi_pkg->package.elements[1].integer.value); + n_profiles = wifi_pkg->package.elements[2].integer.value; + + /* in case of BIOS bug */ + if (n_profiles <= 0) { + ret = -EINVAL; + goto out_free; + } + + for (i = 0; i < n_profiles; i++) { + /* the tables start at element 3; a static local here would + * keep its value across calls, so derive pos from i instead + */ + int pos = 3 + i * IWL_MVM_SAR_TABLE_SIZE; + + /* The EWRD profiles officially go from 2 to 4, but we + * save them in sar_profiles[1-3] (because we don't + * have profile 0). So in the array we start from 1.
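+ * For example, with IWL_MVM_SAR_TABLE_SIZE == 10, EWRD profile 2 is + * read from package elements 3..12 into sar_profiles[1], and profile + * 3 from elements 13..22 into sar_profiles[2].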
+ */ + ret = iwl_mvm_sar_set_profile(mvm, + &wifi_pkg->package.elements[pos], + &mvm->sar_profiles[i + 1], + enabled); + if (ret < 0) + break; + } + +out_free: + kfree(ewrd.pointer); return ret; } -#else /* CONFIG_ACPI */ -static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, - struct iwl_mvm_sar_table *sar_table) + +static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm, + struct iwl_mvm_geo_table *geo_table) { - return -ENOENT; + union acpi_object *wifi_pkg; + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + int i, ret; + + root_handle = ACPI_HANDLE(mvm->dev); + if (!root_handle) { + IWL_DEBUG_RADIO(mvm, + "Could not retrieve root port ACPI handle\n"); + return -ENOENT; + } + + /* Get the method's handle */ + status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD, + &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "WGDS method not found\n"); + return -ENOENT; + } + + /* Call WGDS with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &wgds); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status); + return -ENOENT; + } + + wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer, + ACPI_WGDS_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) { + union acpi_object *entry; + + entry = &wifi_pkg->package.elements[i + 1]; + if ((entry->type != ACPI_TYPE_INTEGER) || + (entry->integer.value > U8_MAX)) { + /* don't leak the ACPI buffer on a bad entry */ + ret = -EINVAL; + goto out_free; + } + + geo_table->values[i] = entry->integer.value; + } + ret = 0; + +out_free: + kfree(wgds.pointer); + return ret; } -#endif /* CONFIG_ACPI */ -static int iwl_mvm_sar_init(struct iwl_mvm *mvm) +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { - struct iwl_mvm_sar_table sar_table; struct iwl_dev_tx_power_cmd cmd = { .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; - int ret, i, j, idx; + int i, j, idx; + int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; int len = sizeof(cmd); + BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2); + BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS != + IWL_MVM_SAR_TABLE_SIZE); + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v3); - ret = iwl_mvm_sar_get_table(mvm, &sar_table); + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + struct iwl_mvm_sar_profile *prof; + + /* don't allow SAR to be disabled (profile 0 means disable) */ + if (profs[i] == 0) + return -EPERM; + + /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */ + if (profs[i] > IWL_MVM_SAR_PROFILE_NUM) + return -EINVAL; + + /* profiles go from 1 to 4, so decrement to access the array */ + prof = &mvm->sar_profiles[profs[i] - 1]; + + /* if the profile is disabled, do nothing */ + if (!prof->enabled) { + IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n", + profs[i]); + /* if one of the profiles is disabled, we fail all */ + return -ENOENT; + } + + IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); + for (j = 0; j < IWL_NUM_SUB_BANDS; j++) { + idx = (i * IWL_NUM_SUB_BANDS) + j; + cmd.v3.per_chain_restriction[i][j] = + cpu_to_le16(prof->table[idx]); + IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", + j, prof->table[idx]); + } + } + + IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); + + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); +} + +static int
iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) +{ + struct iwl_mvm_geo_table geo_table; + struct iwl_geo_tx_power_profiles_cmd cmd = { + .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES), + }; + int ret, i, j, idx; + u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); + + ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table); if (ret < 0) { IWL_DEBUG_RADIO(mvm, - "SAR BIOS table invalid or unavailable. (%d)\n", + "Geo SAR BIOS table invalid or unavailable. (%d)\n", ret); /* we don't fail if the table is not available */ return 0; } - if (!sar_table.enabled) - return 0; + IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); - IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); + BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * + ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); - BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS != - ACPI_WRDS_TABLE_SIZE); + for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) { + struct iwl_per_chain_offset *chain = + (struct iwl_per_chain_offset *)&cmd.table[i]; - for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { - IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); - for (j = 0; j < IWL_NUM_SUB_BANDS; j++) { - idx = (i * IWL_NUM_SUB_BANDS) + j; - cmd.v3.per_chain_restriction[i][j] = - cpu_to_le16(sar_table.values[idx]); - IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", - j, sar_table.values[idx]); + for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) { + u8 *value; + + idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE + + j * ACPI_WGDS_TABLE_SIZE; + value = &geo_table.values[idx]; + chain[j].max_tx_power = cpu_to_le16(value[0]); + chain[j].chain_a = value[1]; + chain[j].chain_b = value[2]; + IWL_DEBUG_RADIO(mvm, + "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", + i, j, value[1], value[2], value[0]); } } + return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd); +} - ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); - if (ret) - IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret); +#else /* CONFIG_ACPI */ +static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + +static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) +{ + return -ENOENT; +} + +static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) +{ + return 0; +} +#endif /* CONFIG_ACPI */ + +static int iwl_mvm_sar_init(struct iwl_mvm *mvm) +{ + int ret; + + ret = iwl_mvm_sar_get_wrds_table(mvm); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "WRDS SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* if not available, don't fail and don't bother with EWRD */ + return 0; + } + + ret = iwl_mvm_sar_get_ewrd_table(mvm); + /* if EWRD is not available, we can still use WRDS, so don't fail */ + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "EWRD SAR BIOS table invalid or unavailable. 
(%d)\n", + ret); + + /* choose profile 1 (WRDS) as default for both chains */ + ret = iwl_mvm_sar_select_profile(mvm, 1, 1); + + /* if we don't have profile 0 from BIOS, just skip it */ + if (ret == -ENOENT) + return 0; return ret; } @@ -1219,7 +1517,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* Init RSS configuration */ - if (iwl_mvm_has_new_rx_api(mvm)) { + /* TODO - remove a000 disablement when we have RXQ config API */ + if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", @@ -1229,10 +1528,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); @@ -1313,10 +1612,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (iwl_mvm_is_csum_supported(mvm) && - mvm->cfg->features & NETIF_F_RXCSUM) - iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3); - /* allow FW/transport low power modes if not during restart */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1325,6 +1620,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + ret = iwl_mvm_sar_geo_init(mvm); + if (ret) + goto error; + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: @@ -1362,7 +1661,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) goto error; /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); /* Add auxiliary station for scanning */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index c5734e1a02d27ee3d15ec59ef6dd19430e6aed71..0f1831b419159b606967889ff8ea20c7ef86f0f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -467,13 +467,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, queue = IWL_MVM_DQA_GCAST_QUEUE; } + /* + * For TVQM this will be overwritten later with the FW assigned + * queue value (when queue is enabled). 
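+ * (Under TVQM the FW allocates the actual queue dynamically when it + * is enabled, via the iwl_mvm_tvqm_enable_txq() path.)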
+ */ + mvmvif->cab_queue = queue; vif->cab_queue = queue; } else { vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } - mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; @@ -901,7 +907,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, /* Allocate sniffer station */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, - vif->type); + vif->type, IWL_STA_GENERAL_PURPOSE); if (ret) return ret; @@ -1222,7 +1228,9 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period)); - ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_STA_TYPE)) + ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); /* * Only set the beacon time when the MAC is being added, when we @@ -1442,6 +1450,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *beacon_notify_hdr; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; + struct agg_tx_status *agg_status; u16 status; lockdep_assert_held(&mvm->mutex); @@ -1449,7 +1458,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, beacon_notify_hdr = &beacon->beacon_notify_hdr; mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); - status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK; + agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); + status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", status, beacon_notify_hdr->failure_frame, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 486dcceed17a4f3a5b1ff0084d8a1f6b768199cd..a67aa1f5a51c544f4ff6b352f0009300feea227e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -6,8 +6,8 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,7 +33,8 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -619,7 +620,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; /* we create the 802.11 header and zero length SSID IE. 
*/ @@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->sta_id == IWL_MVM_STATION_COUNT || + if (mvmsta->sta_id == IWL_MVM_INVALID_STA || mvmsta->sta_id != mvm->d0i3_ap_sta_id) goto out; @@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->uploaded = false; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); @@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); @@ -1351,6 +1352,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_release; } + if (iwl_mvm_is_dqa_supported(mvm)) { + /* + * Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, + 0, vif->type, + IWL_STA_MULTICAST); + if (ret) + goto out_release; + } + iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; } @@ -1465,7 +1478,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, * already marked as draining, so to complete the draining, we * just need to wait until the transport is empty. */ - iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk); + iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk); } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { @@ -1516,6 +1529,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->noa_duration = 0; } #endif + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } @@ -1952,7 +1966,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); } - } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. @@ -1966,8 +1980,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_ERR(mvm, "failed to remove AP station\n"); if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) @@ -2098,11 +2112,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_remove; + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) + goto out_unbind; + /* Send the bcast station. 
At this stage the TBTT and DTIM time events * are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) - goto out_unbind; + goto out_rm_mcast; /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2132,6 +2150,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); +out_rm_mcast: + iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: @@ -2177,7 +2197,20 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); + + /* + * This is not very nice, but the simplest: + * For older FWs removing the mcast sta before the bcast station may + * cause assert 0x2b00. + * This is fixed in later FW (which will stop beaconing when removing + * bcast station). + * So make the order of removal depend on the TLV + */ + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); @@ -2343,6 +2376,9 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) continue; + if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) + continue; + __set_bit(tid_data->txq_id, &txqs); if (iwl_mvm_tid_queued(tid_data) == 0) @@ -2368,7 +2404,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, */ break; case STA_NOTIFY_AWAKE: - if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) @@ -3711,7 +3747,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, int err; u32 noa_duration; - err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy); + err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, + NULL); if (err) return err; @@ -3938,7 +3975,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) @@ -3964,7 +4001,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held */ - iwl_trans_wait_tx_queue_empty(mvm->trans, msk); + iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } } @@ -4195,7 +4232,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_has_new_rx_api(mvm)) + /* TODO - remove a000 disablement when we have RXQ config API */ + if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm)) return; notif->cookie = mvm->queue_sync_cookie; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 73a216524af2984a07aa321f152df12013314e9c..4e74a6b90e70626d6e0e8bb8092796f078c7d55d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -380,6 +380,8 @@ struct iwl_mvm_vif { bool associated; u8 ap_assoc_sta_count; + u16 cab_queue; + bool uploaded; bool ap_ibss_active; bool pm_enabled; @@ -407,6 +409,7 @@ struct iwl_mvm_vif { struct iwl_mvm_time_event_data hs_time_event_data; struct iwl_mvm_int_sta bcast_sta; + struct iwl_mvm_int_sta mcast_sta; /* * Assigned while mac80211 has the interface in a channel context, @@ -603,10 +606,15 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; +#define MAX_NUM_LMAC 2 struct iwl_mvm_shared_mem_cfg { + int num_lmacs; int num_txfifo_entries; - u32 txfifo_size[TX_FIFO_MAX_NUM]; - u32 rxfifo_size[RX_FIFO_MAX_NUM]; + struct { + u32 txfifo_size[TX_FIFO_MAX_NUM]; + u32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + u32 rxfifo2_size; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; @@ -625,6 +633,7 @@ struct iwl_mvm_shared_mem_cfg { * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU * it is the time of last received sub-frame * @removed: prevent timer re-arming + * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context */ @@ -640,6 +649,7 @@ struct iwl_mvm_reorder_buffer { unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF]; struct timer_list reorder_timer; bool removed; + bool valid; spinlock_t lock; struct iwl_mvm *mvm; } ____cacheline_aligned_in_smp; @@ -707,8 +717,25 @@ enum iwl_mvm_queue_status { }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) +#define IWL_MVM_INVALID_QUEUE 0xFFFF + #define IWL_MVM_NUM_CIPHERS 10 +#ifdef CONFIG_ACPI +#define IWL_MVM_SAR_TABLE_SIZE 10 +#define IWL_MVM_SAR_PROFILE_NUM 4 +#define IWL_MVM_GEO_TABLE_SIZE 18 + +struct iwl_mvm_sar_profile { + bool enabled; + u8 table[IWL_MVM_SAR_TABLE_SIZE]; +}; + +struct iwl_mvm_geo_table { + u8 values[IWL_MVM_GEO_TABLE_SIZE]; +}; +#endif + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -761,9 +788,9 @@ struct iwl_mvm { u64 on_time_scan; } radio_stats, accu_radio_stats; + u8 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; + struct { - /* Map to HW queue */ - u32 hw_queue_to_mac80211; u8 hw_queue_refcount; u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ @@ -975,7 +1002,10 @@ struct iwl_mvm { #endif /* Tx queues */ - u8 aux_queue; + u16 aux_queue; + u16 probe_queue; + u16 p2p_dev_queue; + u8 first_agg_queue; u8 last_agg_queue; @@ -1018,7 +1048,7 @@ struct iwl_mvm { } peer; } tdls_cs; - struct iwl_mvm_shared_mem_cfg shared_mem_cfg; + struct iwl_mvm_shared_mem_cfg smem_cfg; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; @@ -1035,6 +1065,9 @@ struct iwl_mvm { bool drop_bcn_ap_mode; struct delayed_work cs_tx_unblock_dwork; +#ifdef CONFIG_ACPI + struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM]; +#endif }; /* 
Extract MVM priv from op_mode and _hw */ @@ -1222,13 +1255,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* * TODO: - * The issue of how to determine CDB support is still not well defined. - * It may be that it will be for all next HW devices and it may be per - * FW compilation and it may also differ between different devices. - * For now take a ride on the new TX API and get back to it when - * it is well defined. + * The issue of how to determine CDB APIs and usage is still not fully + * defined. + * There is a compilation for CDB and non-CDB FW, but there may + * be also runtime check. + * For now there is a TLV for checking compilation mode, but a + * runtime check will also have to be here - once defined. */ - return iwl_mvm_has_new_tx_api(mvm); + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT); +} + +static inline struct agg_tx_status* +iwl_mvm_get_agg_status(struct iwl_mvm *mvm, + struct iwl_mvm_tx_resp *tx_resp) +{ + if (iwl_mvm_has_new_tx_api(mvm)) + return &tx_resp->v6.status; + else + return &tx_resp->v3.status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) @@ -1271,7 +1316,6 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm); ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); -int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, @@ -1389,6 +1433,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); @@ -1668,6 +1714,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout); +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, + u8 sta_id, u8 tid, unsigned int timeout); + /* * Disable a TXQ. * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. 
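A minimal sketch (an assumption, not part of this hunk): with the v3/v6 status union in iwl_mvm_tx_resp and the iwl_mvm_get_agg_status() helper above, the iwl_mvm_get_scd_ssn() helper removed from fw-api-tx.h can be rebuilt version-agnostically, e.g. next to the TX response handlers:

static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
				      struct iwl_mvm_tx_resp *tx_resp)
{
	/* the SCD SSN is still written right after the variable-length
	 * array of per-frame statuses, wherever that array sits in each
	 * API version; each agg_tx_status entry is one dword wide
	 */
	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
			    tx_resp->frame_count) & 0xfff;
}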
@@ -1701,7 +1750,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { - iwl_free_fw_paging(mvm); + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_free_fw_paging(mvm); mvm->ucode_loaded = false; iwl_trans_stop_device(mvm->trans); } @@ -1781,6 +1831,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, u32 size); void iwl_mvm_reorder_timer_expired(unsigned long data); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); +bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); @@ -1797,4 +1848,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, u32 duration, u32 timeout); bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); +#ifdef CONFIG_ACPI +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); +#else +static inline +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) +{ + return -ENOENT; +} +#endif /* CONFIG_ACPI */ + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index eade099b6dbf43c7378acb60057952549a6ba39f..283c41df622cf1bb7693ef1b223ae5822074e59a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -817,6 +817,11 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) { + IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n"); + return; + } + if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4cd72d4cdc47c4de8fac948d22c59a6848e381d0..9ffff6ed813386418800cf38906b91989c5b6f52 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_SYNC), RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, + iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, @@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), + HCMD_NAME(INIT_EXTENDED_CFG_CMD), }; /* Please keep this array *SORTED* by hex value. 
@@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), + HCMD_NAME(GEO_TX_POWER_LIMIT), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; @@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { + HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_PM_NOTIF), @@ -459,6 +464,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(RX_QUEUES_NOTIFICATION), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { + HCMD_NAME(MFU_ASSERT_DUMP_NTF), +}; + /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ @@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } } else { mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; } @@ -732,10 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); - if (iwl_mvm_has_new_tx_api(mvm)) - err = iwl_run_unified_mvm_ucode(mvm, true); - else - err = iwl_run_init_mvm_ucode(mvm, true); + err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -1033,7 +1044,7 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) unsigned long mq; spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; + mq = mvm->hw_queue_to_mac80211[hw_queue]; spin_unlock_bh(&mvm->queue_info_lock); iwl_mvm_stop_mac_queues(mvm, mq); @@ -1063,7 +1074,7 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) unsigned long mq; spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; + mq = mvm->hw_queue_to_mac80211[hw_queue]; spin_unlock_bh(&mvm->queue_info_lock); iwl_mvm_start_mac_queues(mvm, mq); @@ -1256,7 +1267,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, u8 tid; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) + mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)) return false; mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); @@ -1344,7 +1355,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, struct ieee80211_sta *ap_sta; struct iwl_mvm_sta *mvm_ap_sta; - if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT) + if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA) return; rcu_read_lock(); @@ -1414,7 +1425,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; } else { WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; mvm->d0i3_offloading = false; } @@ -1427,7 +1438,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) return ret; /* configure wowlan configuration only if needed */ - if (mvm->d0i3_ap_sta_id != 
IWL_MVM_STATION_COUNT) { + if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) { /* wake on beacons only if beacon storing isn't supported */ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING)) @@ -1504,7 +1515,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) spin_lock_bh(&mvm->d0i3_tx_lock); - if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT) + if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA) goto out; IWL_DEBUG_RPM(mvm, "re-enqueue packets\n"); @@ -1542,7 +1553,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) } clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); wake_up(&mvm->d0i3_exit_waitq); - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; if (wake_queues) ieee80211_wake_queues(mvm->hw); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 95138830b9f8e4ee8ba83b68278f0478e313f47d..d59efe804356168c0ab87fbb7a3a0633b33666ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { + enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY; + lockdep_assert_held(&mvm->mutex); + /* In CDB mode we cannot modify PHY context between bands so... */ + if (iwl_mvm_has_new_tx_api(mvm) && + ctxt->channel->band != chandef->chan->band) { + int ret; + + /* ... remove it here ...*/ + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_REMOVE, 0); + if (ret) + return ret; + + /* ... 
and proceed to add it again */ + action = FW_CTXT_ACTION_ADD; + } + ctxt->channel = chandef->chan; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, - FW_CTXT_ACTION_MODIFY, 0); + action, 0); } void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index ce907c58ebf6d83b632313f5af9c134a5ea2e109..7788eefcd2bdd3066c0b40d5f5575e44f2102c0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -826,7 +826,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, if (is_siso(rate) && rate->stbc) { /* To enable STBC we need to set both a flag and ANT_AB */ ucode_rate |= RATE_MCS_ANT_AB_MSK; - ucode_rate |= RATE_MCS_VHT_STBC_MSK; + ucode_rate |= RATE_MCS_STBC_MSK; } ucode_rate |= rate->bw; @@ -873,7 +873,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, rate->sgi = true; if (ucode_rate & RATE_MCS_LDPC_MSK) rate->ldpc = true; - if (ucode_rate & RATE_MCS_VHT_STBC_MSK) + if (ucode_rate & RATE_MCS_STBC_MSK) rate->stbc = true; if (ucode_rate & RATE_MCS_BF_MSK) rate->bfer = true; @@ -3641,13 +3641,12 @@ int rs_pretty_print_rate(char *buf, const u32 rate) bw = "BAD BW"; } - return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n", + return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n", type, rs_pretty_ant(ant), bw, mcs, nss, (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ", - (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "", + (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", - (rate & RATE_MCS_BF_MSK) ? "BF " : "", - (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : ""); + (rate & RATE_MCS_BF_MSK) ? "BF " : ""); } /** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 20473df79c945f8f48024785d9cac5c00508f9cc..fd1dd06c4f182902c32ed3ba1cf30e1fac1d8b3b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { - unsigned int hdrlen, fraglen; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + unsigned int fraglen; + + /* + * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len, + * but those are all multiples of 4 long) all goes away, but we + * want the *end* of it, which is going to be the start of the IP + * header, to be aligned when it gets pulled in. + * The beginning of the skb->data is aligned on at least a 4-byte + * boundary after allocation. Everything here is aligned at least + * on a 2-byte boundary so we can just take hdrlen & 3 and pad by + * the result. + */ + skb_reserve(skb, hdrlen & 3); /* If frame is small enough to fit in skb->head, pull it completely. 
* If not, only pull ieee80211_hdr (including crypto if present, and @@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ - hdrlen = (len <= skb_tailroom(skb)) ? len : - sizeof(*hdr) + crypt_len + 8; + hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8; memcpy(skb_put(skb, hdrlen), hdr, hdrlen); fraglen = len - hdrlen; @@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT; - if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { + if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; @@ -398,7 +410,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { /* @@ -415,42 +427,49 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: - rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; + rx_status->bw = RATE_INFO_BW_160; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->flag |= RX_FLAG_HT_GF; + rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->flag |= RX_FLAG_LDPC; + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->vht_nss = + rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status->flag |= RX_FLAG_VHT; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->encoding = RX_ENC_VHT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->vht_flag |= RX_VHT_FLAG_BF; + rx_status->enc_flags |= RX_ENC_FLAG_BF; } else { - rx_status->rate_idx = - iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status->band); + int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + + if (WARN(rate < 0 || rate > 0xFF, + "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band)) { + kfree_skb(skb); + return; + } + rx_status->rate_idx = rate; } #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, .mvm = mvm, }; int 
expected_size; + int i; + u8 *energy; + __le32 *bytes, *air_time; if (iwl_mvm_is_cdb_supported(mvm)) expected_size = sizeof(*stats); @@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, else expected_size = sizeof(struct iwl_notif_statistics_v10); - if (iwl_rx_packet_payload_len(pkt) != expected_size) - goto invalid; + if (iwl_rx_packet_payload_len(pkt) != expected_size) { + IWL_ERR(mvm, "received invalid statistics size (%d)!\n", + iwl_rx_packet_payload_len(pkt)); + return; + } data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = @@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, le64_to_cpu(stats->general.common.on_time_scan); data.general = &stats->general; - if (iwl_mvm_has_new_rx_api(mvm)) { - int i; - u8 *energy; - __le32 *bytes, *air_time; - - if (!iwl_mvm_is_cdb_supported(mvm)) { - struct iwl_notif_statistics_v11 *v11 = - (void *)&pkt->data; - - energy = (void *)&v11->load_stats.avg_energy; - bytes = (void *)&v11->load_stats.byte_count; - air_time = (void *)&v11->load_stats.air_time; - } else { - energy = (void *)&stats->load_stats.avg_energy; - bytes = (void *)&stats->load_stats.byte_count; - air_time = (void *)&stats->load_stats.air_time; - } - - rcu_read_lock(); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - struct iwl_mvm_sta *sta; - - if (!energy[i]) - continue; - - sta = iwl_mvm_sta_from_staid_rcu(mvm, i); - if (!sta) - continue; - sta->avg_energy = energy[i]; - } - rcu_read_unlock(); - } iwl_mvm_rx_stats_check_trigger(mvm, pkt); @@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); - return; - invalid: - IWL_ERR(mvm, "received invalid statistics size (%d)!\n", - iwl_rx_packet_payload_len(pkt)); + + if (!iwl_mvm_has_new_rx_api(mvm)) + return; + + if (!iwl_mvm_is_cdb_supported(mvm)) { + struct iwl_notif_statistics_v11 *v11 = + (void *)&pkt->data; + + energy = (void *)&v11->load_stats.avg_energy; + bytes = (void *)&v11->load_stats.byte_count; + air_time = (void *)&v11->load_stats.air_time; + } else { + energy = (void *)&stats->load_stats.avg_energy; + bytes = (void *)&stats->load_stats.byte_count; + air_time = (void *)&stats->load_stats.air_time; + } + + rcu_read_lock(); + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { + struct iwl_mvm_sta *sta; + + if (!energy[i]) + continue; + + sta = iwl_mvm_sta_from_staid_rcu(mvm, i); + if (!sta) + continue; + sta->avg_energy = energy[i]; + } + rcu_read_unlock(); } void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d79e9c2a2654aed22fd8c359cb27a923cdcc5e50..966cd7543629b3c2dd6d9bbf33fcaffd9582ab63 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
 	int i;
 	u16 sn = 0, index = 0;
 	bool expired = false;
+	bool cont = false;
 
 	spin_lock(&buf->lock);
 
@@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
 	for (i = 0; i < buf->buf_size ; i++) {
 		index = (buf->head_sn + i) % buf->buf_size;
 
-		if (skb_queue_empty(&buf->entries[index]))
+		if (skb_queue_empty(&buf->entries[index])) {
+			/*
+			 * If there is a hole and the next frame didn't expire,
+			 * we want to break and not advance the SN.
+			 */
+			cont = false;
 			continue;
-		if (!time_after(jiffies, buf->reorder_time[index] +
-				RX_REORDER_BUF_TIMEOUT_MQ))
+		}
+		if (!cont && !time_after(jiffies, buf->reorder_time[index] +
					 RX_REORDER_BUF_TIMEOUT_MQ))
 			break;
+
 		expired = true;
+		/* continue until the next hole after these expired frames */
+		cont = true;
 		sn = ieee80211_sn_add(buf->head_sn, i + 1);
 	}
 
@@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 		return false;
 
 	baid_data = rcu_dereference(mvm->baid_map[baid]);
-	if (WARN(!baid_data,
-		 "Received baid %d, but no data exists for this BAID\n", baid))
+	if (!baid_data) {
+		WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
+		     "Received baid %d, but no data exists for this BAID\n",
+		     baid);
 		return false;
+	}
+
 	if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
 		 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
 		 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
@@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 
 	spin_lock_bh(&buffer->lock);
 
+	if (!buffer->valid) {
+		if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+			spin_unlock_bh(&buffer->lock);
+			return false;
+		}
+		buffer->valid = true;
+	}
+
 	if (ieee80211_is_back_req(hdr->frame_control)) {
 		iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
 		goto drop;
@@ -727,7 +749,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	return true;
 }
 
-static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+				    u32 reorder_data, u8 baid)
 {
 	unsigned long now = jiffies;
 	unsigned long timeout;
@@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
 
 	rcu_read_lock();
 	data = rcu_dereference(mvm->baid_map[baid]);
-	if (WARN_ON(!data))
+	if (!data) {
+		WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
 		goto out;
+	}
 
 	if (!data->timeout)
 		goto out;
@@ -799,7 +824,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	}
 	/* set the preamble flag if appropriate */
 	if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
-		rx_status->flag |= RX_FLAG_SHORTPRE;
+		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 
 	if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
 		rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
@@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
 		u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
 
-		if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
+		if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
 			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
 			if (IS_ERR(sta))
 				sta = NULL;
@@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct
napi_struct *napi, if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); - rcu_read_unlock(); - return; + goto out; } /* * Our hardware de-aggregates AMSDUs but copies the mac header * as it to the de-aggregated MPDUs. We need to turn off the * AMSDU bit in the QoS control ourselves. + * In addition, HW reverses addr3 and addr4 - reverse it back. */ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { + int i; u8 *qc = ieee80211_get_qos_ctl(hdr); + u8 mac_addr[ETH_ALEN]; *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - if (!(desc->amsdu_info & - IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) - rx_status->flag |= RX_FLAG_AMSDU_MORE; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1]; + ether_addr_copy(hdr->addr3, mac_addr); + + if (ieee80211_has_a4(hdr->frame_control)) { + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = + hdr->addr4[ETH_ALEN - i - 1]; + ether_addr_copy(hdr->addr4, mac_addr); + } + } + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) { + u32 reorder_data = le32_to_cpu(desc->reorder_data); + + iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } - if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) - iwl_mvm_agg_rx_received(mvm, baid); } /* Set up the HT phy flags */ @@ -920,42 +958,50 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: - rx_status->vht_flag |= RX_VHT_FLAG_160MHZ; + rx_status->bw = RATE_INFO_BW_160; break; } if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->flag |= RX_FLAG_HT_GF; + rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->flag |= RX_FLAG_LDPC; + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->vht_nss = + rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; - rx_status->flag |= RX_FLAG_VHT; - rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT; + rx_status->encoding = RX_ENC_VHT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->vht_flag |= RX_VHT_FLAG_BF; + rx_status->enc_flags |= RX_ENC_FLAG_BF; } else { - rx_status->rate_idx = - iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - rx_status->band); + int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + + if (WARN(rate < 0 || rate > 0xFF, + "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band)) { + kfree_skb(skb); + goto out; + } + rx_status->rate_idx = rate; + } /* management stuff on default 
queue */ @@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); +out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 0a64efa844b7565bba9ba83666fca2316096aedd..8d1b994ae79f615c67b3fcf0fe91a372d669e99f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -81,44 +81,30 @@ enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_HIGH, }; +#define IWL_SCAN_DWELL_ACTIVE 10 +#define IWL_SCAN_DWELL_PASSIVE 110 +#define IWL_SCAN_DWELL_FRAGMENTED 44 +#define IWL_SCAN_DWELL_EXTENDED 90 + struct iwl_mvm_scan_timing_params { - u32 dwell_active; - u32 dwell_passive; - u32 dwell_fragmented; - u32 dwell_extended; u32 suspend_time; u32 max_out_time; }; static struct iwl_mvm_scan_timing_params scan_timing[] = { [IWL_SCAN_TYPE_UNASSOC] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .dwell_extended = 90, .suspend_time = 0, .max_out_time = 0, }, [IWL_SCAN_TYPE_WILD] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .dwell_extended = 90, .suspend_time = 30, .max_out_time = 120, }, [IWL_SCAN_TYPE_MILD] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, - .dwell_extended = 90, .suspend_time = 120, .max_out_time = 120, }, [IWL_SCAN_TYPE_FRAGMENTED] = { - .dwell_active = 10, - .dwell_passive = 110, - .dwell_fragmented = 44, .suspend_time = 95, .max_out_time = 44, }, @@ -294,34 +280,15 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) return max_ie_len; } -static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, - int num_res, u8 *buf, size_t buf_size) -{ - int i; - u8 *pos = buf, *end = buf + buf_size; - - for (i = 0; pos < end && i < num_res; i++) - pos += snprintf(pos, end - pos, " %u", res[i].channel); - - /* terminate the string in case the buffer was too short */ - *(buf + buf_size - 1) = '\0'; - - return buf; -} - void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; - u8 buf[256]; IWL_DEBUG_SCAN(mvm, - "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n", - notif->status, notif->scanned_channels, - iwl_mvm_dump_channel_list(notif->results, - notif->scanned_channels, buf, - sizeof(buf))); + "Scan offload iteration complete: status=0x%x scanned channels=%d\n", + notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, 
"Pass all scheduled scan results found\n"); @@ -743,10 +710,10 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) { - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; - cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; - cmd->extended_dwell = scan_timing[params->type].dwell_extended; + cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); @@ -944,13 +911,12 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) } static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, - struct iwl_scan_dwell *dwell, - struct iwl_mvm_scan_timing_params *timing) + struct iwl_scan_dwell *dwell) { - dwell->active = timing->dwell_active; - dwell->passive = timing->dwell_passive; - dwell->fragmented = timing->dwell_fragmented; - dwell->extended = timing->dwell_extended; + dwell->active = IWL_SCAN_DWELL_ACTIVE; + dwell->passive = IWL_SCAN_DWELL_PASSIVE; + dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED; + dwell->extended = IWL_SCAN_DWELL_EXTENDED; } static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) @@ -966,11 +932,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) channels[j] = band->channels[i].hw_value; } -static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags) +static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); - struct iwl_scan_config *cfg = config; + struct iwl_scan_config_v1 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); @@ -979,7 +945,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); - iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); @@ -989,11 +955,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, iwl_mvm_fill_channels(mvm, cfg->channel_array); } -static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags) +static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); - struct iwl_scan_config_cdb *cfg = config; + struct iwl_scan_config *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); @@ -1001,12 +967,16 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); cfg->out_of_channel_time[0] = cpu_to_le32(scan_timing[type].max_out_time); - cfg->out_of_channel_time[1] = - cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); - cfg->suspend_time[1] = 
cpu_to_le32(scan_timing[type].suspend_time); - iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); + if (iwl_mvm_is_cdb_supported(mvm)) { + cfg->suspend_time[1] = + cpu_to_le32(scan_timing[type].suspend_time); + cfg->out_of_channel_time[1] = + cpu_to_le32(scan_timing[type].max_out_time); + } + + iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); @@ -1033,16 +1003,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) { - IWL_DEBUG_SCAN(mvm, - "Ignoring UMAC scan config of the same type\n"); + if (type == mvm->scan_type) return 0; - } - if (iwl_mvm_is_cdb_supported(mvm)) - cmd_size = sizeof(struct iwl_scan_config_cdb); - else + if (iwl_mvm_has_new_tx_api(mvm)) cmd_size = sizeof(struct iwl_scan_config); + else + cmd_size = sizeof(struct iwl_scan_config_v1); cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); @@ -1068,13 +1035,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - if (iwl_mvm_is_cdb_supported(mvm)) { + if (iwl_mvm_has_new_tx_api(mvm)) { flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; - iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags); - } else { iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); + } else { + iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags); } cmd.data[0] = cfg; @@ -1113,22 +1080,26 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->passive_dwell = params->measurement_dwell; cmd->extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = timing->dwell_active; - cmd->passive_dwell = timing->dwell_passive; - cmd->extended_dwell = timing->dwell_extended; + cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; } - cmd->fragmented_dwell = timing->dwell_fragmented; - - if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time); - cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time); - cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time); - cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time); - cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + + if (iwl_mvm_has_new_tx_api(mvm)) { + cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v6.max_out_time[1] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[1] = + cpu_to_le32(timing->suspend_time); + } } else { - cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time); - cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time); - cmd->no_cdb.scan_priority = + cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time); + cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time); + cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } @@ -1207,8 +1178,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ? 
- (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data; + void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ? + (void *)&cmd->v6.data : (void *)&cmd->v1.data; struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; @@ -1245,12 +1216,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->cdb.channel_flags = channel_flags; - cmd->cdb.n_channels = params->n_channels; + if (iwl_mvm_has_new_tx_api(mvm)) { + cmd->v6.channel_flags = channel_flags; + cmd->v6.n_channels = params->n_channels; } else { - cmd->no_cdb.channel_flags = channel_flags; - cmd->no_cdb.n_channels = params->n_channels; + cmd->v1.channel_flags = channel_flags; + cmd->v1.n_channels = params->n_channels; } iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); @@ -1607,16 +1578,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; - u8 buf[256]; mvm->scan_start = le64_to_cpu(notif->start_tsf); IWL_DEBUG_SCAN(mvm, - "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", - notif->status, notif->scanned_channels, - iwl_mvm_dump_channel_list(notif->results, - notif->scanned_channels, buf, - sizeof(buf))); + "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n", + notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); @@ -1692,10 +1659,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) int iwl_mvm_scan_size(struct iwl_mvm *mvm) { - int base_size = IWL_SCAN_REQ_UMAC_SIZE; + int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_is_cdb_supported(mvm)) - base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB; + if (iwl_mvm_has_new_tx_api(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return base_size + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 101fb04a8573492c51a343c3d41b0eb1df12cbbe..539b06bf08031b321eed18ab6b51614d812e783c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: - if (sta_id == IWL_MVM_STATION_COUNT) { + if (sta_id == IWL_MVM_INVALID_STA) { IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n"); return -EINVAL; @@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, bool remove_vif) { enum iwl_sf_state new_state; - u8 sta_id = IWL_MVM_STATION_COUNT; + u8 sta_id = IWL_MVM_INVALID_STA; struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_active_iface_iterator_data data = { .ignore_vif = changed_vif, .sta_vif_state = SF_UNINIT, - .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, + .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA, }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9d28db7f56aa2404825c530559df62afbca5d21a..f5c786ddc52631087b56067807bb4d48e0869664 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -7,7 +7,7 @@ * * 
Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -77,9 +77,11 @@ */ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) { - return iwl_mvm_has_new_rx_api(mvm) ? - sizeof(struct iwl_mvm_add_sta_cmd) : - sizeof(struct iwl_mvm_add_sta_cmd_v7); + if (iwl_mvm_has_new_rx_api(mvm) || + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + return sizeof(struct iwl_mvm_add_sta_cmd); + else + return sizeof(struct iwl_mvm_add_sta_cmd_v7); } static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, @@ -98,7 +100,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, reserved_ids = BIT(0); /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ - for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) { + for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) { if (BIT(sta_id) & reserved_ids) continue; @@ -106,7 +108,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, lockdep_is_held(&mvm->mutex))) return sta_id; } - return IWL_MVM_STATION_COUNT; + return IWL_MVM_INVALID_STA; } /* send station add/update command to firmware */ @@ -126,12 +128,21 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 status; u32 agg_size = 0, mpdu_dens = 0; + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + add_sta_cmd.station_type = mvm_sta->sta_type; + if (!update || (flags & STA_MODIFY_QUEUES)) { - add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); - if (flags & STA_MODIFY_QUEUES) - add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; + if (!iwl_mvm_has_new_tx_api(mvm)) { + add_sta_cmd.tfd_queue_msk = + cpu_to_le32(mvm_sta->tfd_queue_msk); + + if (flags & STA_MODIFY_QUEUES) + add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; + } else { + WARN_ON(flags & STA_MODIFY_QUEUES); + } } switch (sta->bandwidth) { @@ -209,13 +220,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK); + add_sta_cmd.uapsd_acs |= BIT(AC_BK); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE); + add_sta_cmd.uapsd_acs |= BIT(AC_BE); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI); + add_sta_cmd.uapsd_acs |= BIT(AC_VI); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) - add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO); + add_sta_cmd.uapsd_acs |= BIT(AC_VO); + add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; + add_sta_cmd.sp_length = sta->max_sp ? 
sta->max_sp * 2 : 128; } status = ADD_STA_SUCCESS; @@ -337,6 +350,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, u8 sta_id; int ret; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; spin_unlock_bh(&mvm->queue_info_lock); @@ -387,6 +403,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; @@ -426,6 +445,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; @@ -447,7 +469,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); - mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ @@ -468,6 +490,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); txq_curr_ac = mvm->queue_info[queue].mac80211_ac; sta_id = mvm->queue_info[queue].ra_sta_id; @@ -475,6 +500,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, spin_unlock_bh(&mvm->queue_info_lock); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + if (WARN_ON(!mvmsta)) + return -EINVAL; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ @@ -512,6 +539,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, int i; lockdep_assert_held(&mvm->queue_info_lock); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); @@ -596,6 +625,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, unsigned long mq; int ret; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + /* * If the AC is lower than current one - FIFO needs to be redirected to * the lowest one of the streams in the queue. 
Check if this is needed @@ -617,7 +649,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; - mq = mvm->queue_info[queue].hw_queue_to_mac80211; + mq = mvm->hw_queue_to_mac80211[queue]; shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); spin_unlock_bh(&mvm->queue_info_lock); @@ -626,7 +658,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, /* Stop MAC queues and wait for this queue to empty */ iwl_mvm_stop_mac_queues(mvm, mq); - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue)); + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue); @@ -677,6 +709,37 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, return ret; } +static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, u8 ac, + int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + u8 mac_queue = mvmsta->vif->hw_queue[ac]; + int queue = -1; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_TX_QUEUES(mvm, + "Allocating queue for sta %d on tid %d\n", + mvmsta->sta_id, tid); + queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid, + wdg_timeout); + if (queue < 0) + return queue; + + IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); + + spin_lock_bh(&mvmsta->lock); + mvmsta->tid_data[tid].txq_id = queue; + mvmsta->tid_data[tid].is_tid_active = true; + mvmsta->tfd_queue_msk |= BIT(queue); + spin_unlock_bh(&mvmsta->lock); + + return 0; +} + static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid, struct ieee80211_hdr *hdr) @@ -702,6 +765,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); + spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; spin_unlock_bh(&mvmsta->lock); @@ -880,6 +946,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + spin_lock_bh(&mvm->queue_info_lock); tid_bitmap = mvm->queue_info[queue].tid_bitmap; spin_unlock_bh(&mvm->queue_info_lock); @@ -917,6 +986,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) int ssn; int ret = true; + /* queue sharing is disabled on new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->queue_info_lock); @@ -1014,7 +1087,7 @@ static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, ac = iwl_mvm_tid_to_ac_queue(tid); mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; - if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE && + if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { IWL_ERR(mvm, "Can't alloc TXQ for sta %d tid %d - dropping frame\n", @@ -1059,8 +1132,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) mutex_lock(&mvm->mutex); + /* No queue reconfiguration in TVQM mode */ + if (iwl_mvm_has_new_tx_api(mvm)) + goto alloc_queues; + /* Reconfigure queues requiring reconfiguation */ - for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) { + for 
(queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) { bool reconfig; bool change_owner; @@ -1088,6 +1165,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) iwl_mvm_change_queue_owner(mvm, queue); } +alloc_queues: /* Go over all stations with deferred traffic */ for_each_set_bit(sta_id, mvm->sta_deferred_frames, IWL_MVM_STATION_COUNT) { @@ -1116,6 +1194,10 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, int queue; bool using_inactive_queue = false, same_sta = false; + /* queue reserving is disabled on new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return 0; + /* * Check for inactive queues, so we don't reach a situation where we * can't add a STA due to a shortage in queues that doesn't really exist @@ -1191,7 +1273,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, int ac; u8 mac_queue; - if (txq_id == IEEE80211_INVAL_HW_QUEUE) + if (txq_id == IWL_MVM_INVALID_QUEUE) continue; skb_queue_head_init(&tid_data->deferred_tx_frames); @@ -1199,20 +1281,31 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, ac = tid_to_mac80211_ac[i]; mac_queue = mvm_sta->vif->hw_queue[ac]; - cfg.tid = i; - cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; - cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || - txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); + if (iwl_mvm_has_new_tx_api(mvm)) { + IWL_DEBUG_TX_QUEUES(mvm, + "Re-mapping sta %d tid %d\n", + mvm_sta->sta_id, i); + txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, + mvm_sta->sta_id, + i, wdg_timeout); + tid_data->txq_id = txq_id; + } else { + u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); - IWL_DEBUG_TX_QUEUES(mvm, - "Re-mapping sta %d tid %d to queue %d\n", - mvm_sta->sta_id, i, txq_id); + cfg.tid = i; + cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || + txq_id == + IWL_MVM_DQA_BSS_CLIENT_QUEUE); - iwl_mvm_enable_txq(mvm, txq_id, mac_queue, - IEEE80211_SEQ_TO_SN(tid_data->seq_number), - &cfg, wdg_timeout); + IWL_DEBUG_TX_QUEUES(mvm, + "Re-mapping sta %d tid %d to queue %d\n", + mvm_sta->sta_id, i, txq_id); - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; + iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, + wdg_timeout); + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; + } } atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0); @@ -1235,7 +1328,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, else sta_id = mvm_sta->sta_id; - if (sta_id == IWL_MVM_STATION_COUNT) + if (sta_id == IWL_MVM_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); @@ -1254,6 +1347,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; + mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ atomic_set(&mvm->pending_frames[sta_id], 0); @@ -1287,7 +1381,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated */ - mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); } mvm_sta->deferred_traffic_tid_map = 0; @@ -1303,7 +1397,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) @@ -1317,10 +1411,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { - WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); + WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); mvmvif->ap_sta_id = sta_id; } else { - WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); + WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); } } @@ -1486,13 +1580,13 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { - if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE) + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; ac = iwl_mvm_tid_to_ac_queue(i); iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, vif->hw_queue[ac], i, 0); - mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } } @@ -1520,8 +1614,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0); if (ret) return ret; - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, - mvm_sta->tfd_queue_msk); + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + mvm_sta->tfd_queue_msk); if (ret) return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); @@ -1571,11 +1665,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, return ret; /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; /* clear d0i3_ap_sta_id if no longer relevant */ if (mvm->d0i3_ap_sta_id == sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; } } @@ -1584,7 +1678,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * before the STA is removed. 
*/ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } @@ -1637,27 +1731,28 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype) + u32 qmask, enum nl80211_iftype iftype, + enum iwl_sta_type type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; } sta->tfd_queue_msk = qmask; + sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); return 0; } -static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta) +void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); - sta->sta_id = IWL_MVM_STATION_COUNT; + sta->sta_id = IWL_MVM_INVALID_STA; } static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, @@ -1675,8 +1770,11 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, cmd.sta_id = sta->sta_id; cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + cmd.station_type = sta->type; - cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + if (!iwl_mvm_has_new_tx_api(mvm)) + cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) @@ -1701,27 +1799,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) +static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
mvm->cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; - int ret; - - lockdep_assert_held(&mvm->mutex); - - /* Map Aux queue to fifo - needs to happen before adding Aux station */ - if (!iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - - /* Allocate aux station and assign to it the aux queue */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), - NL80211_IFTYPE_UNSPECIFIED); - if (ret) - return ret; - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_has_new_tx_api(mvm)) { + int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + mvm->aux_queue = queue; + } else if (iwl_mvm_is_dqa_supported(mvm)) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvm->aux_sta.sta_id, @@ -1732,14 +1822,44 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, wdg_timeout); + } else { + iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, + IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); } +} - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, - MAC_INDEX_AUX, 0); +int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + /* Allocate aux station and assign to it the aux queue */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), + NL80211_IFTYPE_UNSPECIFIED, + IWL_STA_AUX_ACTIVITY); if (ret) + return ret; + + /* Map Aux queue to fifo - needs to happen before adding Aux station */ + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_queue(mvm); + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, + MAC_INDEX_AUX, 0); + if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); - return ret; + return ret; + } + + /* + * For a000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + if (iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_queue(mvm); + + return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1790,39 +1910,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; + int queue; int ret; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_VO, + .sta_id = mvmvif->bcast_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm)) { - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_VO, - .sta_id = mvmvif->bcast_sta.sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - int queue; - + if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) - queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + queue = mvm->probe_queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + queue = mvm->p2p_dev_queue; else if (WARN(1, "Missing required TXQ for adding bcast STA\n")) return -EINVAL; - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, - 
wdg_timeout); bsta->tfd_queue_msk |= BIT(queue); + + iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, + &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; - if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, @@ -1831,27 +1951,21 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; /* - * In AP vif type, we also need to enable the cab_queue. However, we - * have to enable it after the ADD_STA command is sent, otherwise the - * FW will throw an assert once we send the ADD_STA command (it'll - * detect a mismatch in the tfd_queue_msk, as we can't add the - * enabled-cab_queue to the mask) + * For a000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added */ - if (iwl_mvm_is_dqa_supported(mvm) && - (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC)) { - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvmvif->bcast_sta.sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); + if (iwl_mvm_has_new_tx_api(mvm)) { + queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], + bsta->sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + + if (vif->type == NL80211_IFTYPE_AP) + mvm->probe_queue = queue; + else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + mvm->p2p_dev_queue = queue; - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, - 0, &cfg, wdg_timeout); + bsta->tfd_queue_msk |= BIT(queue); } return 0; @@ -1869,24 +1983,18 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); - if (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) { - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE, + if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) { + iwl_mvm_disable_txq(mvm, mvm->probe_queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); - mvmvif->bcast_sta.tfd_queue_msk &= - ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); + mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue); } - if (mvmvif->bcast_sta.tfd_queue_msk & - BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) { - iwl_mvm_disable_txq(mvm, - IWL_MVM_DQA_P2P_DEVICE_QUEUE, + if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) { + iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); - mvmvif->bcast_sta.tfd_queue_msk &= - ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); + mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue); } } @@ -1928,7 +2036,8 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, - ieee80211_vif_type_p2p(vif)); + ieee80211_vif_type_p2p(vif), + IWL_STA_GENERAL_PURPOSE); } /* Allocate a new station entry for the broadcast station to the given vif, @@ -1982,6 +2091,101 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } +/* + * Allocate a new station entry for the multicast station to the given vif, + * and send it to the FW. + * Note that each AP/GO mac should have its own multicast station. 
+ * + * @mvm: the mvm component + * @vif: the interface to which the multicast station is added + */ +int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; + static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; + const u8 *maddr = _maddr; + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_MCAST, + .sta_id = msta->sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (!iwl_mvm_is_dqa_supported(mvm)) + return 0; + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) + return -ENOTSUPP; + + /* + * While in previous FWs we had to exclude cab queue from TFD queue + * mask, now it is needed like any other queue. + */ + if (!iwl_mvm_has_new_tx_api(mvm) && + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, + &cfg, timeout); + msta->tfd_queue_msk |= BIT(vif->cab_queue); + } + ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, + mvmvif->id, mvmvif->color); + if (ret) { + iwl_mvm_dealloc_int_sta(mvm, msta); + return ret; + } + + /* + * Enable the cab queue after the ADD_STA command is sent. + * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG + * command with unknown station id, and for FW that doesn't support + * station API since the cab queue is not included in the + * tfd_queue_mask. + */ + if (iwl_mvm_has_new_tx_api(mvm)) { + int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, + msta->sta_id, + IWL_MAX_TID_COUNT, + timeout); + mvmvif->cab_queue = queue; + } else if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_STA_TYPE)) { + iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, + &cfg, timeout); + } + + return 0; +} + +/* + * Send the FW a request to remove the station from its internal data + * structures, and in addition remove it from the local data structure.
+ */ +int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (!iwl_mvm_is_dqa_supported(mvm)) + return 0; + + iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, + IWL_MAX_TID_COUNT, 0); + + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + + return ret; +} + #define IWL_MAX_RX_BA_SESSIONS 16 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) @@ -2059,6 +2263,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->sta_id = sta_id; + reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_head_init(&reorder_buf->entries[j]); } @@ -2226,7 +2431,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; - cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX; + if (!iwl_mvm_has_new_tx_api(mvm)) + cmd.modify_mask = STA_MODIFY_QUEUES; + cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); @@ -2310,10 +2517,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * one and mark it as reserved * 3. In DQA mode, but no traffic yet on this TID: same treatment as in * non-DQA mode, since the TXQ hasn't yet been allocated + * Don't support case 3 for the new TX path, as it is not expected to + * happen and aggregation will be offloaded soon anyway */ txq_id = mvmsta->tid_data[tid].txq_id; - if (iwl_mvm_is_dqa_supported(mvm) && - unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { + if (iwl_mvm_has_new_tx_api(mvm)) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { + ret = -ENXIO; + goto release_locks; + } + } else if (iwl_mvm_is_dqa_supported(mvm) && + unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", @@ -2409,6 +2624,20 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); + if (iwl_mvm_has_new_tx_api(mvm)) { + /* + * If there were no queue, iwl_mvm_sta_tx_agg_start() would + * have failed, so no need to check the queue's status + */ + if (buf_size < mvmsta->max_agg_bufsize) + return -ENOTSUPP; + + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); + if (ret) + return -EIO; + goto out; + } + cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; spin_lock_bh(&mvm->queue_info_lock); @@ -2430,8 +2659,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * If reconfiguring an existing queue, it first must be * drained */ - ret = iwl_trans_wait_tx_queue_empty(mvm->trans, - BIT(queue)); + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue before reconfig\n"); @@ -2466,6 +2695,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); +out: /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, @@ -2483,6 +2713,27 @@ int
iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false); } +static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + u16 txq_id) +{ + if (iwl_mvm_has_new_tx_api(mvm)) + return; + + spin_lock_bh(&mvm->queue_info_lock); + /* + * The TXQ is marked as reserved only if no traffic came through yet. + * This means no traffic has been sent on this TID (agg'd or not), so + * we no longer have use for the queue. Since it hasn't even been + * allocated through iwl_mvm_enable_txq, we can just mark it back as + * free. + */ + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; + + spin_unlock_bh(&mvm->queue_info_lock); +} + int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { @@ -2509,18 +2760,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); - spin_lock_bh(&mvm->queue_info_lock); - /* - * The TXQ is marked as reserved only if no traffic came through yet - * This means no traffic has been sent on this TID (agg'd or not), so - * we no longer have use for the queue. Since it hasn't even been - * allocated through iwl_mvm_enable_txq, so we can just mark it back as - * free. - */ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; - - spin_unlock_bh(&mvm->queue_info_lock); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); switch (tid_data->state) { case IWL_AGG_ON: @@ -2600,24 +2840,14 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); - spin_lock_bh(&mvm->queue_info_lock); - /* - * The TXQ is marked as reserved only if no traffic came through yet - * This means no traffic has been sent on this TID (agg'd or not), so - * we no longer have use for the queue. Since it hasn't even been - * allocated through iwl_mvm_enable_txq, so we can just mark it back as - * free. - */ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; - spin_unlock_bh(&mvm->queue_info_lock); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); - iwl_trans_wait_tx_queue_empty(mvm->trans, - mvmsta->tfd_queue_msk); + iwl_trans_wait_tx_queues_empty(mvm->trans, + mvmsta->tfd_queue_msk); iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); @@ -2675,7 +2905,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, * station ID, then use AP's station ID.
*/ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], @@ -2697,68 +2927,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, - struct ieee80211_key_conf *keyconf, bool mcast, + struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, u8 key_offset) { - struct iwl_mvm_add_sta_key_cmd cmd = {}; + union { + struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; + struct iwl_mvm_add_sta_key_cmd cmd; + } u = {}; __le16 key_flags; int ret; u32 status; u16 keyidx; - int i; - u8 sta_id = mvm_sta->sta_id; + u64 pn = 0; + int i, size; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS); - keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); - switch (keyconf->cipher) { + switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); - cmd.tkip_rx_tsc_byte2 = tkip_iv32; - for (i = 0; i < 5; i++) - cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + if (new_api) { + memcpy((void *)&u.cmd.tx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + memcpy((void *)&u.cmd.rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWL_MIC_KEY_SIZE); + pn = atomic64_read(&key->tx_pn); + + } else { + u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; + for (i = 0; i < 5; i++) + u.cmd_v1.tkip_rx_ttak[i] = + cpu_to_le16(tkip_p1k[i]); + } + memcpy(u.cmd.common.key, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key, key->key, key->keylen); + if (new_api) + pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); /* fall through */ case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); - memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); /* fall through */ case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key, key->key, key->keylen); + if (new_api) + pn = atomic64_read(&key->tx_pn); break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); - memcpy(cmd.key, keyconf->key, keyconf->keylen); + memcpy(u.cmd.common.key, key->key, key->keylen); } if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - cmd.key_offset = key_offset; - cmd.key_flags = key_flags; - cmd.sta_id = sta_id; + u.cmd.common.key_offset = key_offset; + u.cmd.common.key_flags = key_flags; + u.cmd.common.sta_id = mvm_sta->sta_id; + + if (new_api) { + u.cmd.transmit_seq_cnt = cpu_to_le64(pn); + size = sizeof(u.cmd); + } else { + size = sizeof(u.cmd_v1); + } status = ADD_STA_SUCCESS; if (cmd_flags & CMD_ASYNC) - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, + &u.cmd); else - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, 
sizeof(cmd), - &cmd, &status); + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, + &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: @@ -2858,7 +3117,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { + mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); @@ -2911,9 +3170,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_key_conf *keyconf, bool mcast) { - struct iwl_mvm_add_sta_key_cmd cmd = {}; + union { + struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; + struct iwl_mvm_add_sta_key_cmd cmd; + } u = {}; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS); __le16 key_flags; - int ret; + int ret, size; u32 status; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & @@ -2924,13 +3188,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - cmd.key_flags = key_flags; - cmd.key_offset = keyconf->hw_key_idx; - cmd.sta_id = sta_id; + /* + * The fields assigned here are in the same location at the start + * of the command, so we can do this union trick. + */ + u.cmd.common.key_flags = key_flags; + u.cmd.common.key_offset = keyconf->hw_key_idx; + u.cmd.common.sta_id = sta_id; + + size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), - &cmd, &status); + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, + &status); switch (status) { case ADD_STA_SUCCESS: @@ -3044,7 +3314,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; - u8 sta_id = IWL_MVM_STATION_COUNT; + u8 sta_id = IWL_MVM_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); @@ -3207,13 +3477,13 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, /* Note: this is ignored by firmware not supporting GO uAPSD */ if (more_data) - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA); + cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { mvmsta->next_status_eosp = true; - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL); + cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; } else { - cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD); + cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; } /* block the Tx queues until the FW updated the sleep Tx count */ @@ -3290,6 +3560,27 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, spin_unlock_bh(&mvm_sta->lock); } +static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + struct iwl_mvm_int_sta *sta, + bool disable) +{ + u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); + struct iwl_mvm_add_sta_cmd cmd = { + .add_modify = STA_MODE_MODIFY, + .sta_id = sta->sta_id, + .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, + .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), + .mac_id_n_color = cpu_to_le32(id), + }; + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); +} + void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable) @@ -3301,7 +3592,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* Block/unblock all the stations of the given mvmvif */ - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) @@ -3314,6 +3605,22 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + return; + + /* Need to block/unblock also multicast station */ + if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) + iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, + &mvmvif->mcast_sta, disable); + + /* + * Only unblock the broadcast station (FW blocks it for immediate + * quiet, not the driver) + */ + if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) + iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, + &mvmvif->bcast_sta, disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 1927ce6077984fff4cde0f767ee97ea42de49761..2716cb5483bf5ab9851e7dbf1e5af04b887296e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -380,6 +380,7 @@ struct iwl_mvm_rxq_dup_data { * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. 
* @max_agg_bufsize: the maximal size of the AGG buffer for this station + * @sta_type: station type * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP @@ -416,6 +417,7 @@ struct iwl_mvm_sta { u32 mac_id_n_color; u16 tid_disable_agg; u8 max_agg_bufsize; + enum iwl_sta_type sta_type; bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; @@ -453,10 +455,12 @@ iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or * broadcast) * @sta_id: the index of the station in the fw (will be replaced by id_n_color) + * @type: station type * @tfd_queue_msk: the tfd queues used by the station */ struct iwl_mvm_int_sta { u32 sta_id; + enum iwl_sta_type type; u32 tfd_queue_msk; }; @@ -532,10 +536,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype); + u32 qmask, enum nl80211_iftype iftype, + enum iwl_sta_type type); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 9f160fc58cd079395cafeff5995574d22225a161..df7cd87199ea298565729a79c561117cbcc5edc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) @@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) @@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) /* populate TDLS peer data */ cnt = 0; - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta) || !sta->tdls) @@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); if (state == IWL_MVM_TDLS_SW_IDLE) - mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; } void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) @@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, /* get the existing peer if it's there */ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && - mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); @@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) goto out; sta = rcu_dereference_protected( @@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, sta->addr, chandef->chan->center_freq, chandef->width); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) { + if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "Existing peer. 
Can't start switch with %pM\n", sta->addr); @@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) { + if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); goto out; } @@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) wait_for_phy = true; - mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; + mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; dev_kfree_skb(mvm->tdls_cs.peer.skb); mvm->tdls_cs.peer.skb = NULL; @@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && - mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { + mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *cur_sta; /* make sure it's the same peer */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index a1947d6f3a2cc1e676663885d65bde5b31c11773..16ce8a56b5b94d9b5356011a98e19888be8faef9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm) if (IWL_MVM_TOF_IS_RESPONDER) { tof_data->responder_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); - tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; + tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index bec7d9c46087d3c8fed48d5858f4f116797eceed..f9cbd197246f7ba6ba9e5e0af816cbec54eb9790 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) struct iwl_mvm_sta *mvmsta; int i, err; - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); if (!mvmsta) continue; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 1ba0a6f55503665d14b1c1e9a129e37503b9f33f..bcaceb64a6e8c230c5127ee5cc4b790b1f4f1d57 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = TX_CMD; + + if (iwl_mvm_has_new_tx_api(mvm)) { + struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info); + + /* padding is inserted later in transport */ + /* FIXME - check for AMSDU may need to be removed */ + if (ieee80211_hdrlen(hdr->frame_control) % 4 && + !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) + offload_assist |= BIT(TX_CMD_OFFLD_PAD); + + cmd->offload_assist |= cpu_to_le16(offload_assist); + + /* Total # bytes to be transmitted */ + cmd->len = cpu_to_le16((u16)skb->len); + + /* Copy MAC header from skb into command buffer */ + memcpy(cmd->hdr, hdr, hdrlen); + + if (!info->control.hw_key) + cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS); + + /* For data packets 
rate info comes from the fw */ + if (ieee80211_is_data(hdr->frame_control) && sta) + goto out; + + cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE); + cmd->rate_n_flags = + cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); + + goto out; + } + tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) @@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdrlen); + +out: return dev_cmd; } @@ -514,21 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, */ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || ieee80211_is_deauth(fc)) - return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + return mvm->probe_queue; if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); - return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + return mvm->probe_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) - return IWL_MVM_DQA_P2P_DEVICE_QUEUE; + return mvm->p2p_dev_queue; if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; WARN_ON_ONCE(1); - return IWL_MVM_DQA_P2P_DEVICE_QUEUE; + return mvm->p2p_dev_queue; default: WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); return -1; @@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info info; struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); int queue; @@ -594,11 +630,13 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (queue < 0) return -1; + if (queue == info.control.vif->cab_queue) + queue = mvmvif->cab_queue; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && is_multicast_ether_addr(hdr->addr1)) { u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); - if (ap_sta_id != IWL_MVM_STATION_COUNT) + if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; } else if (iwl_mvm_is_dqa_supported(mvm) && info.control.vif->type == NL80211_IFTYPE_STATION && @@ -616,11 +654,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; @@ -713,7 +746,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * fifo to be able to send bursts. 
*/ max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->shared_mem_cfg.txfifo_size[txf] - 256); + mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, @@ -862,6 +895,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) unsigned long now = jiffies; int tid; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return false; + for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) @@ -881,11 +917,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; struct iwl_device_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; - u8 txq_id = info->hw_queue; + u16 txq_id = info->hw_queue; bool is_ampdu = false; int hdrlen; @@ -896,7 +931,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, @@ -904,8 +939,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (!dev_cmd) goto drop; - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; - /* * we handle that entirely ourselves -- for uAPSD the firmware * will always send a notification, and for PS-Poll responses @@ -926,18 +959,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) goto drop_unlock_sta; - seq_number = mvmsta->tid_data[tid].seq_number; - seq_number &= IEEE80211_SCTL_SEQ; - hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(seq_number); is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; if (WARN_ON_ONCE(is_ampdu && mvmsta->tid_data[tid].state != IWL_AGG_ON)) goto drop_unlock_sta; + + seq_number = mvmsta->tid_data[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + + if (!iwl_mvm_has_new_tx_api(mvm)) { + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + /* update the tx_cmd hdr as it was already copied */ + tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; + } } if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) txq_id = mvmsta->tid_data[tid].txq_id; + if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { /* default to TID 0 for non-QoS packets */ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; @@ -945,17 +987,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; } - /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, hdrlen); - WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ - if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE || + if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || !mvmsta->tid_data[tid].is_tid_active) && iwl_mvm_is_dqa_supported(mvm)) { /* If TXQ needs to be allocated... 
*/ - if (txq_id == IEEE80211_INVAL_HW_QUEUE) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); /* @@ -967,6 +1006,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, return 0; } + /* queue should always be active in new TX path */ + WARN_ON(iwl_mvm_has_new_tx_api(mvm)); + /* If we are here - TXQ exists and needs to be re-activated */ spin_lock(&mvm->queue_info_lock); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; @@ -977,7 +1019,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id); } - if (iwl_mvm_is_dqa_supported(mvm)) { + if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; @@ -1036,7 +1078,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); @@ -1245,6 +1287,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, } } +/** + * iwl_mvm_get_scd_ssn - returns the SSN of the SCD + * @tx_resp: the Tx response from the fw (agg or non-agg) + * + * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since + * it can't know that everything will go well until the end of the AMPDU, it + * can't know in advance the number of MPDUs that will be sent in the current + * batch. This is why it writes the agg Tx response while it fetches the MPDUs. + * Hence, it can't know in advance what the SSN of the SCD will be at the end + * of the batch. This is why the SSN of the SCD is written at the end of the + * whole struct at a variable offset. This function knows how to cope with the + * variable offset and returns the SSN of the SCD. 
+ */ +static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, + struct iwl_mvm_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + + tx_resp->frame_count) & 0xfff; +} + static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -1254,8 +1316,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); - u32 status = le16_to_cpu(tx_resp->status.status); - u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); + struct agg_tx_status *agg_status = + iwl_mvm_get_agg_status(mvm, tx_resp); + u32 status = le16_to_cpu(agg_status->status); + u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); struct iwl_mvm_sta *mvmsta; struct sk_buff_head skbs; u8 skb_freed = 0; @@ -1264,6 +1328,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, __skb_queue_head_init(&skbs); + if (iwl_mvm_has_new_tx_api(mvm)) + txq_id = le16_to_cpu(tx_resp->v6.tx_queue); + seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ @@ -1388,7 +1455,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, if (!IS_ERR(sta)) { mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (tid != IWL_TID_NON_QOS) { + if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; bool send_eosp_ndp = false; @@ -1520,7 +1587,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; - struct agg_tx_status *frame_status = &tx_resp->status; + struct agg_tx_status *frame_status = + iwl_mvm_get_agg_status(mvm, tx_resp); int i; for (i = 0; i < tx_resp->frame_count; i++) { @@ -1722,6 +1790,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_res->reduced_txp; + if (!le16_to_cpu(ba_res->tfd_cnt)) + goto out; + /* * TODO: * When supporting multi TID aggregations - we need to move @@ -1730,12 +1801,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * This will go together with SN and AddBA offload and cannot * be handled properly for now. */ - WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1); - iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid, - (int)ba_res->tfd[0].q_num, + WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1); + tid = ba_res->ra_tid[0].tid; + if (tid == IWL_MGMT_TID) + tid = IWL_MAX_TID_COUNT; + iwl_mvm_tx_reclaim(mvm, sta_id, tid, + (int)(le16_to_cpu(ba_res->tfd[0].q_num)), le16_to_cpu(ba_res->tfd[0].tfd_index), &ba_info, le32_to_cpu(ba_res->tx_rate)); +out: IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", sta_id, le32_to_cpu(ba_res->flags), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index dedea96a8e0ff7cd3c609d9e5a45976705b54451..8f4f176e204e588567f7fc8552f9982cc65c80b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -591,6 +592,10 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) lockdep_assert_held(&mvm->queue_info_lock); + /* This should not be hit with new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -ENOSPC; + /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].hw_queue_refcount == 0 && @@ -627,6 +632,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, }; int ret; + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + spin_lock_bh(&mvm->queue_info_lock); if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, "Trying to reconfig unallocated queue %d\n", queue)) { @@ -644,50 +652,94 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, return ret; } -void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout) +static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, + int mac80211_queue, u8 sta_id, u8 tid) { bool enable_queue = true; spin_lock_bh(&mvm->queue_info_lock); /* Make sure this TID isn't already enabled */ - if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { + if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", - queue, cfg->tid); - return; + queue, tid); + return false; } /* Update mappings and refcounts */ if (mvm->queue_info[queue].hw_queue_refcount > 0) enable_queue = false; - mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + mvm->queue_info[queue].hw_queue_refcount++; - mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); - mvm->queue_info[queue].ra_sta_id = cfg->sta_id; + mvm->queue_info[queue].tid_bitmap |= BIT(tid); + mvm->queue_info[queue].ra_sta_id = sta_id; if (enable_queue) { - if (cfg->tid != IWL_MAX_TID_COUNT) + if (tid != IWL_MAX_TID_COUNT) mvm->queue_info[queue].mac80211_ac = - tid_to_mac80211_ac[cfg->tid]; + tid_to_mac80211_ac[tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; - mvm->queue_info[queue].txq_tid = cfg->tid; + mvm->queue_info[queue].txq_tid = tid; } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211); + mvm->hw_queue_to_mac80211[queue]); spin_unlock_bh(&mvm->queue_info_lock); + return enable_queue; +} + +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, + u8 sta_id, u8 tid, unsigned int timeout) +{ + struct iwl_tx_queue_cfg_cmd cmd = { + .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), + .sta_id = sta_id, + .tid = tid, + }; + int queue; + + if (cmd.tid == IWL_MAX_TID_COUNT) + cmd.tid = IWL_MGMT_TID; + queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, + 
SCD_QUEUE_CFG, timeout); + + if (queue < 0) { + IWL_DEBUG_TX_QUEUES(mvm, + "Failed allocating TXQ for sta %d tid %d, ret: %d\n", + sta_id, tid, queue); + return queue; + } + + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", + queue, sta_id, tid); + + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d (mac80211 map:0x%x)\n", + queue, mvm->hw_queue_to_mac80211[queue]); + + return queue; +} + +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, + u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + /* Send the enabling command if we need to */ - if (enable_queue) { + if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, + cfg->sta_id, cfg->tid)) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, @@ -701,7 +753,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); @@ -718,6 +771,16 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, bool remove_mac_queue = true; int ret; + if (iwl_mvm_has_new_tx_api(mvm)) { + spin_lock_bh(&mvm->queue_info_lock); + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_trans_txq_free(mvm->trans, queue); + + return 0; + } + spin_lock_bh(&mvm->queue_info_lock); if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { @@ -744,7 +807,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, } if (remove_mac_queue) - mvm->queue_info[queue].hw_queue_to_mac80211 &= + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); mvm->queue_info[queue].hw_queue_refcount--; @@ -757,7 +820,7 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211); + mvm->hw_queue_to_mac80211[queue]); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) { @@ -771,16 +834,16 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].hw_queue_refcount || mvm->queue_info[queue].tid_bitmap || - mvm->queue_info[queue].hw_queue_to_mac80211, + mvm->hw_queue_to_mac80211[queue], "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->queue_info[queue].hw_queue_to_mac80211, + mvm->hw_queue_to_mac80211[queue], mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].hw_queue_refcount = 0; mvm->queue_info[queue].tid_bitmap = 0; - mvm->queue_info[queue].hw_queue_to_mac80211 = 0; + mvm->hw_queue_to_mac80211[queue] = 0; /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; @@ -789,11 +852,11 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, iwl_trans_txq_disable(mvm->trans, queue, false); ret = 
iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, - sizeof(cmd), &cmd); + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); + if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); - return ret; } @@ -816,7 +879,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) .data = { lq, }, }; - if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT)) + if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; return iwl_mvm_send_cmd(mvm, &cmd); @@ -1006,6 +1069,35 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) return bss_iter_data.vif; } +struct iwl_sta_iter_data { + bool assoc; +}; + +static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_sta_iter_data *data = _data; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (vif->bss_conf.assoc) + data->assoc = true; +} + +bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) +{ + struct iwl_sta_iter_data data = { + .assoc = false, + }; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_sta_iface_iterator, + &data); + return data.assoc; +} + unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q) @@ -1088,6 +1180,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->queue_info_lock); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { /* If some TFDs are still queued - don't mark TID as inactive */ @@ -1114,8 +1209,8 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; - mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; - mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue); + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); mvm->queue_info[queue].hw_queue_refcount--; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); mvmsta->tid_data[tid].is_tid_active = false; @@ -1135,7 +1230,7 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - mvm->queue_info[queue].hw_queue_to_mac80211 |= + mvm->hw_queue_to_mac80211[queue] |= BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); } @@ -1154,6 +1249,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) unsigned long now = jiffies; int i; + if (iwl_mvm_has_new_tx_api(mvm)) + return; + spin_lock_bh(&mvm->queue_info_lock); for (i = 0; i < IWL_MAX_HW_QUEUES; i++) if (mvm->queue_info[i].hw_queue_refcount > 0) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c new file mode 100644 index 0000000000000000000000000000000000000000..b1f43397bb59a72dbefd7fc1aab72d8a6de3d7b4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -0,0 +1,281 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-context-info.h" +#include "internal.h" +#include "iwl-prph.h" + +static int iwl_pcie_get_num_sections(const struct fw_img *fw, + int start) +{ + int i = 0; + + while (start < fw->num_sec && + fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && + fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { + start++; + i++; + } + + return i; +} + +static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, + const struct fw_desc *sec, + struct iwl_dram_data *dram) +{ + dram->block = dma_alloc_coherent(trans->dev, sec->len, + &dram->physical, + GFP_KERNEL); + if (!dram->block) + return -ENOMEM; + + dram->size = sec->len; + memcpy(dram->block, sec->data, sec->len); + + return 0; +} + +static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + int i; + + if (!dram->fw) { + WARN_ON(dram->fw_cnt); + return; + } + + for (i = 0; i < dram->fw_cnt; i++) + dma_free_coherent(trans->dev, dram->fw[i].size, + dram->fw[i].block, dram->fw[i].physical); + + kfree(dram->fw); + dram->fw_cnt = 0; +} + +void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + int i; + + if (!dram->paging) { + WARN_ON(dram->paging_cnt); + return; + } + + /* free paging */ + for (i = 0; i < dram->paging_cnt; i++) + dma_free_coherent(trans->dev, dram->paging[i].size, + dram->paging[i].block, + dram->paging[i].physical); + + kfree(dram->paging); + dram->paging_cnt = 0; +} + +static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info *ctxt_info) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram; + int i, ret, lmac_cnt, umac_cnt, paging_cnt; + + lmac_cnt = iwl_pcie_get_num_sections(fw, 0); + /* add 1 due to separator */ + umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1); + /* add 2 due to separators */ + paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2); + + dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL); + if (!dram->fw) + return -ENOMEM; + dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL); + if (!dram->paging) + return -ENOMEM; + + /* initialize lmac sections */ + for (i = 0; i < lmac_cnt; i++) { + ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i], + &dram->fw[dram->fw_cnt]); + if (ret) + return ret; + ctxt_dram->lmac_img[i] = + cpu_to_le64(dram->fw[dram->fw_cnt].physical); + dram->fw_cnt++; + } + + /* initialize umac sections */ + for (i = 0; i < umac_cnt; i++) { + /* access FW with +1 to make up for lmac separator */ + ret = iwl_pcie_ctxt_info_alloc_dma(trans, + &fw->sec[dram->fw_cnt + 1], + &dram->fw[dram->fw_cnt]); + if (ret) + return ret; + ctxt_dram->umac_img[i] = + cpu_to_le64(dram->fw[dram->fw_cnt].physical); + dram->fw_cnt++; + } + + /* + * Initialize paging. + * Paging memory isn't stored in dram->fw like the umac and lmac - it + * is stored separately. + * This is because the timing of its release is different - + * while fw memory can be released on alive, the paging memory can be + * freed only when the device goes down.
+ * Given that, the logic here in accessing the fw image is a bit + * different - fw_cnt isn't changing so loop counter is added to it. + */ + for (i = 0; i < paging_cnt; i++) { + /* access FW with +2 to make up for lmac & umac separators */ + int fw_idx = dram->fw_cnt + i + 2; + + ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx], + &dram->paging[i]); + if (ret) + return ret; + + ctxt_dram->virtual_img[i] = + cpu_to_le64(dram->paging[i].physical); + dram->paging_cnt++; + } + + return 0; +} + +int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, + const struct fw_img *fw) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_context_info *ctxt_info; + struct iwl_context_info_rbd_cfg *rx_cfg; + u32 control_flags = 0; + int ret; + + ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), + &trans_pcie->ctxt_info_dma_addr, + GFP_KERNEL); + if (!ctxt_info) + return -ENOMEM; + + ctxt_info->version.version = 0; + ctxt_info->version.mac_id = + cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + /* size is in DWs */ + ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); + + BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF); + control_flags = IWL_CTXT_INFO_RB_SIZE_4K | + IWL_CTXT_INFO_TFD_FORMAT_LONG | + RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << + IWL_CTXT_INFO_RB_CB_SIZE_POS; + ctxt_info->control.control_flags = cpu_to_le32(control_flags); + + /* initialize RX default queue */ + rx_cfg = &ctxt_info->rbd_cfg; + rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); + rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); + rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); + + /* initialize TX command queue */ + ctxt_info->hcmd_cfg.cmd_queue_addr = + cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); + ctxt_info->hcmd_cfg.cmd_queue_size = + TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX); + + /* allocate ucode sections in dram and set addresses */ + ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); + if (ret) { + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), + ctxt_info, trans_pcie->ctxt_info_dma_addr); + return ret; + } + + trans_pcie->ctxt_info = ctxt_info; + + iwl_enable_interrupts(trans); + + /* Configure debug, if exists */ + if (trans->dbg_dest_tlv) + iwl_pcie_apply_destination(trans); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); + iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); + + /* Context info will be released upon alive or failure to get one */ + + return 0; +} + +void iwl_pcie_ctxt_info_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->ctxt_info) + return; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), + trans_pcie->ctxt_info, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_dma_addr = 0; + trans_pcie->ctxt_info = NULL; + + iwl_pcie_ctxt_info_free_fw_img(trans); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index ba8a81cb0e2b7118ae761f9be68e4157477c26e5..e51760e752d48cf8e559bc918d71f928b46917b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, + 
{IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, /* 9000 Series */ @@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, /* a000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)}, #endif /* CONFIG_IWLMVM */ {0} @@ -667,18 +672,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = cfg_7265d; } - if (iwl_trans->cfg->rf_id) { - if (cfg == &iwl9460_2ac_cfg && - iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) { - cfg = &iwl9000lc_2ac_cfg; - iwl_trans->cfg = cfg; - } - - if (cfg == &iwla000_2ac_cfg_hr && - iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) { - cfg = &iwla000_2ac_cfg_jf; - iwl_trans->cfg = cfg; - } + if (iwl_trans->cfg->rf_id && + (cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) && + iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) { + cfg = &iwla000_2ac_cfg_jf; + iwl_trans->cfg = cfg; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 10937309641a5f097e5232acffca07665adf1316..fd4faaaa1484bbe3303f7be448ad63326c9cfb1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -2,7 +2,7 @@ * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -205,11 +205,11 @@ struct iwl_cmd_meta { * into the buffer regardless of whether it should be mapped or not. * This indicates how big the first TB must be to include the scratch buffer * and the assigned PN. - * Since PN location is 16 bytes at offset 24, it's 40 now. + * Since PN location is 8 bytes at offset 12, it's 20 now. * If we make it bigger then allocations will be bigger and copy slower, so * that's probably not useful. 
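
The 40-to-20 shrink follows directly from the new PN placement quoted in this comment: a 12-byte offset plus 8 bytes of PN. A quick arithmetic check (plain userspace C, not driver code):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int pn_offset = 12, pn_len = 8;	/* figures from the comment above */
	int first_tb = pn_offset + pn_len;

	/* prints "first TB: 20 bytes, aligned: 64" */
	printf("first TB: %d bytes, aligned: %d\n",
	       first_tb, ALIGN_UP(first_tb, 64));
	return 0;
}

The 64-byte alignment of IWL_FIRST_TB_SIZE_ALIGN still rounds the buffer up to one cacheline, so the first TB remains a single 64-byte fetch.
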
*/ -#define IWL_FIRST_TB_SIZE 40 +#define IWL_FIRST_TB_SIZE 20 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { @@ -237,11 +237,11 @@ struct iwl_pcie_first_tb_buf { * @stuck_timer: timer that fires if queue gets stuck * @trans_pcie: pointer back to transport (for timer) * @need_update: indicates need to update read/write index - * @active: stores if queue is active * @ampdu: true if this queue is an ampdu queue for an specific RA/TID * @wd_timeout: queue watchdog timeout (jiffies) - per queue * @frozen: tx stuck queue timer is frozen * @frozen_expiry_remainder: remember how long until the timer fires + * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) * @write_ptr: 1-st empty entry (index) host_w * @read_ptr: last used entry (index) host_r * @dma_addr: physical addr for BD's @@ -277,11 +277,11 @@ struct iwl_txq { struct iwl_trans_pcie *trans_pcie; bool need_update; bool frozen; - u8 active; bool ampdu; int block; unsigned long wd_timeout; struct sk_buff_head overflow_q; + struct iwl_dma_ptr bc_tbl; int write_ptr; int read_ptr; @@ -314,12 +314,44 @@ enum iwl_shared_irq_flags { IWL_SHARED_IRQ_FIRST_RSS = BIT(1), }; +/** + * struct iwl_dram_data + * @physical: page phy pointer + * @block: pointer to the allocated block/page + * @size: size of the block/page + */ +struct iwl_dram_data { + dma_addr_t physical; + void *block; + int size; +}; + +/** + * struct iwl_self_init_dram - dram data used by self init process + * @fw: lmac and umac dram data + * @fw_cnt: total number of items in array + * @paging: paging dram data + * @paging_cnt: total number of items in array + */ +struct iwl_self_init_dram { + struct iwl_dram_data *fw; + int fw_cnt; + struct iwl_dram_data *paging; + int paging_cnt; +}; + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing + * @ctxt_info: context information for FW self init + * @ctxt_info_dma_addr: dma addr of context information + * @init_dram: DRAM data of firmware image (including paging). + * Context information addresses will be taken from here. + * This is driver's local copy for keeping track of size and + * count for allocating and freeing the memory. 
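
The two array/count pairs documented here follow one bookkeeping rule: the count is bumped only after a successful allocation, so teardown can free exactly count entries and nothing else. A userspace analogue of the pattern, with malloc() standing in for dma_alloc_coherent():

#include <stdlib.h>

struct dram_data { void *block; int size; };
struct self_init_dram { struct dram_data *fw; int fw_cnt; };

static int alloc_all(struct self_init_dram *d, int want, int sz)
{
	d->fw = calloc(want, sizeof(*d->fw));
	if (!d->fw)
		return -1;
	for (int i = 0; i < want; i++) {
		d->fw[d->fw_cnt].block = malloc(sz);
		if (!d->fw[d->fw_cnt].block)
			return -1;	/* fw_cnt still counts only live blocks */
		d->fw[d->fw_cnt].size = sz;
		d->fw_cnt++;
	}
	return 0;
}

static void free_all(struct self_init_dram *d)
{
	for (int i = 0; i < d->fw_cnt; i++)	/* frees exactly what was counted */
		free(d->fw[i].block);
	free(d->fw);
	d->fw = NULL;
	d->fw_cnt = 0;
}

int main(void)
{
	struct self_init_dram d = { 0 };

	alloc_all(&d, 4, 4096);	/* even on failure, fw_cnt stays accurate */
	free_all(&d);
	return 0;
}
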
* @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @scd_bc_tbls: pointer to the byte count table of the scheduler @@ -357,6 +389,9 @@ struct iwl_trans_pcie { struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; + struct iwl_context_info *ctxt_info; + dma_addr_t ctxt_info_dma_addr; + struct iwl_self_init_dram init_dram; struct iwl_trans *trans; struct net_device napi_dev; @@ -378,9 +413,10 @@ struct iwl_trans_pcie { struct iwl_dma_ptr scd_bc_tbls; struct iwl_dma_ptr kw; - struct iwl_txq *txq; - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + struct iwl_txq *txq_memory; + struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; /* PCI bus related data */ struct pci_dev *pci_dev; @@ -454,6 +490,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); * RX ******************************************************/ int iwl_pcie_rx_init(struct iwl_trans *trans); +int iwl_pcie_gen2_rx_init(struct iwl_trans *trans); irqreturn_t iwl_pcie_msix_isr(int irq, void *data); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); @@ -474,6 +511,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); * TX / HCMD ******************************************************/ int iwl_pcie_tx_init(struct iwl_trans *trans); +int iwl_pcie_gen2_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); @@ -484,7 +522,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode); -dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq); void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, @@ -616,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } +static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, + struct iwl_txq *txq, int idx) +{ + return txq->tfds + trans_pcie->tfd_size * idx; +} + static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -719,4 +762,41 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); +/* common functions that are used by gen2 transport */ +void iwl_pcie_apm_config(struct iwl_trans *trans); +int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); +void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); +bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans); +void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); +int iwl_queue_space(const struct iwl_txq *q); +int iwl_pcie_apm_stop_master(struct iwl_trans *trans); +void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); +int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue); +int iwl_pcie_txq_alloc(struct iwl_trans *trans, + struct iwl_txq *txq, int slots_num, bool cmd_queue); +int iwl_pcie_alloc_dma_ptr(struct 
iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size); +void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); +void iwl_pcie_apply_destination(struct iwl_trans *trans); + +/* transport gen 2 exported functions */ +int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill); +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); +int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, + struct iwl_tx_queue_cfg_cmd *cmd, + int cmd_id, + unsigned int timeout); +void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); +int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int txq_id); +int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); +void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, + bool low_power); +void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power); +void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id); +void iwl_pcie_gen2_tx_free(struct iwl_trans *trans); +void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index de94dfdf2ec9972ee7c78e695b26f04f3a022154..1da2de205cdfef5fa6f5747e1e567f9af8aad7bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2,7 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -880,7 +880,7 @@ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) return 0; } -int iwl_pcie_rx_init(struct iwl_trans *trans) +static int _iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *def_rxq; @@ -958,20 +958,40 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); + return 0; +} + +int iwl_pcie_rx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret = _iwl_pcie_rx_init(trans); + + if (ret) + return ret; + if (trans->cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); else - iwl_pcie_rx_hw_init(trans, def_rxq); + iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); - iwl_pcie_rxq_restock(trans, def_rxq); + iwl_pcie_rxq_restock(trans, trans_pcie->rxq); - spin_lock(&def_rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); - spin_unlock(&def_rxq->lock); + spin_lock(&trans_pcie->rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); + spin_unlock(&trans_pcie->rxq->lock); return 0; } +int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) +{ + /* + * We don't configure the RFH. + * Restock will be done at alive, after firmware configured the RFH. 
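
The ordering described here - software-only RX init at start_fw, then restocking once the ALIVE interrupt confirms the firmware has set up the RFH - can be summarized as a tiny state machine (a sketch, not driver code):

#include <assert.h>

enum rx_stage { RX_OFF, RX_SW_INIT, RFH_READY, RX_RESTOCKED };
static enum rx_stage stage = RX_OFF;

static void gen2_rx_init(void)	/* start_fw: SW state only, no RFH writes */
{
	assert(stage == RX_OFF);
	stage = RX_SW_INIT;
}

static void alive_irq(void)	/* firmware booted and configured the RFH */
{
	assert(stage == RX_SW_INIT);
	stage = RFH_READY;
}

static void rxmq_restock(void)	/* only now may the driver hand out RBs */
{
	assert(stage == RFH_READY);
	stage = RX_RESTOCKED;
}

int main(void)
{
	gen2_rx_init();
	alive_irq();
	rxmq_restock();
	return 0;
}
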
+ */ + return _iwl_pcie_rx_init(trans); +} + void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1074,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; bool page_stolen = false; int max_len = PAGE_SIZE << trans_pcie->rx_page_order; u32 offset = 0; @@ -1127,7 +1147,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * Ucode should set SEQ_RX_FRAME bit if ucode-originated, * but apparently a few don't get set; catch them here. */ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); - if (reclaim) { + if (reclaim && !pkt->hdr.group_id) { int i; for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { @@ -1393,17 +1413,17 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) return; } - iwl_pcie_dump_csr(trans); - iwl_dump_fh(trans, NULL); - local_bh_disable(); /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. */ iwl_trans_fw_error(trans); local_bh_enable(); - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) - del_timer(&trans_pcie->txq[i].stuck_timer); + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + if (!trans_pcie->txq[i]) + continue; + del_timer(&trans_pcie->txq[i]->stuck_timer); + } clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans_pcie->wait_command_queue); @@ -1597,6 +1617,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) if (inta & CSR_INT_BIT_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; + if (trans->cfg->gen2) { + /* + * We can restock, since firmware configured + * the RFH + */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); + } } } @@ -1933,6 +1960,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; + if (trans->cfg->gen2) { + /* We can restock, since firmware configured the RFH */ + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); + } } /* uCode wakes up after power-down sleep */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c new file mode 100644 index 0000000000000000000000000000000000000000..ac60a282d6debc1eb2b891ef0fb2b20bf1370df0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -0,0 +1,374 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-trans.h" +#include "iwl-context-info.h" +#include "internal.h" + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) +{ + int ret = 0; + + IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + iwl_pcie_apm_config(trans); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_write_prph() + * and accesses to uCode SRAM. 
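
The clock-stabilization wait that follows uses the usual poll-with-timeout idiom. A generic sketch of that idiom with placeholder register accessors (the driver's own helper is iwl_poll_bit(), which likewise returns the elapsed time on success and a negative value on timeout):

#include <stdint.h>

/* Placeholder accessors; the driver uses iwl_read32() and udelay(). */
static uint32_t read_reg(uint32_t addr) { (void)addr; return 0x1; }
static void delay_us(int us) { (void)us; }

static int poll_bit(uint32_t addr, uint32_t bits, uint32_t mask, int timeout_us)
{
	int t = 0;

	do {
		if ((read_reg(addr) & mask) == (bits & mask))
			return t;	/* success: elapsed time */
		delay_us(10);
		t += 10;
	} while (t < timeout_us);

	return -1;	/* maps to the ret < 0 branch in the caller */
}

int main(void)
{
	return poll_bit(0x024, 0x1, 0x1, 25000) < 0;
}
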
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+		return ret;
+	}
+
+	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+	return 0;
+}
+
+static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+	if (op_mode_leave) {
+		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+			iwl_pcie_gen2_apm_init(trans);
+
+		/* inform ME that we are leaving */
+		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PREPARE |
+			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+		mdelay(1);
+		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		mdelay(5);
+	}
+
+	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+	/* Stop device's DMA activity */
+	iwl_pcie_apm_stop_master(trans);
+
+	/* Reset the entire device */
+	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	usleep_range(1000, 2000);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill, was_hw_rfkill;
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->is_down)
+		return;
+
+	trans_pcie->is_down = true;
+
+	was_hw_rfkill = iwl_is_rfkill_set(trans);
+
+	/* tell the device to stop sending interrupts */
+	iwl_disable_interrupts(trans);
+
+	/* device going down, stop using ICT table */
+	iwl_pcie_disable_ict(trans);
+
+	/*
+	 * If a HW restart happens during firmware loading,
+	 * then the firmware loading might call this function
+	 * and later it might be called again due to the
+	 * restart. So don't process again if the device is
+	 * already dead.
+	 */
+	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+		IWL_DEBUG_INFO(trans,
+			       "DEVICE_ENABLED bit was set and is now cleared\n");
+		iwl_pcie_gen2_tx_stop(trans);
+		iwl_pcie_rx_stop(trans);
+	}
+
+	iwl_pcie_ctxt_info_free_paging(trans);
+	iwl_pcie_ctxt_info_free(trans);
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/* Stop the device, and put it in low power state */
+	iwl_pcie_gen2_apm_stop(trans, false);
+
+	/* stop and reset the on-board processor */
+	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	usleep_range(1000, 2000);
+
+	/*
+	 * Upon stop, the IVAR table gets erased, so msi-x won't
+	 * work. This causes a bug in RF-KILL flows, since the interrupt
+	 * that enables radio won't fire on the correct irq, and the
+	 * driver won't be able to handle the interrupt.
+	 * Configure the IVAR table again after reset.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	/*
+	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * This is a bug in certain versions of the hardware.
+	 * Certain devices also keep sending HW RF kill interrupt all
+	 * the time, unless the interrupt is ACKed even if the interrupt
+	 * should be masked. Re-ACK all the interrupts here.
+	 */
+	iwl_disable_interrupts(trans);
+
+	/* clear all status bits */
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans->status);
+	clear_bit(STATUS_RFKILL, &trans->status);
+
+	/*
+	 * Even if we stop the HW, we still want the RF kill
+	 * interrupt
+	 */
+	iwl_enable_rfkill_int(trans);
+
+	/*
+	 * Check again since the RF kill state may have changed while
+	 * all the interrupts were disabled, in this case we couldn't
+	 * receive the RF kill interrupt and update the state in the
+	 * op_mode.
+	 * Don't call the op_mode if the rfkill state hasn't changed.
+	 * This allows the op_mode to call stop_device from the rfkill
+	 * notification without endless recursion. Under very rare
+	 * circumstances, we might have a small recursion if the rfkill
+	 * state changed exactly now while we were called from stop_device.
+	 * This is very unlikely but can happen and is supported.
+	 */
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill)
+		set_bit(STATUS_RFKILL, &trans->status);
+	else
+		clear_bit(STATUS_RFKILL, &trans->status);
+	if (hw_rfkill != was_hw_rfkill)
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+	/* re-take ownership to prevent other users from stealing the device */
+	iwl_pcie_prepare_card_hw(trans);
+}
+
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	mutex_lock(&trans_pcie->mutex);
+	_iwl_trans_pcie_gen2_stop_device(trans, low_power);
+	mutex_unlock(&trans_pcie->mutex);
+}
+
+static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
+	spin_lock(&trans_pcie->irq_lock);
+	iwl_pcie_gen2_apm_init(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+
+	iwl_op_mode_nic_config(trans->op_mode);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (iwl_pcie_gen2_rx_init(trans))
+		return -ENOMEM;
+
+	/* Allocate or reset and init all Tx and Command queues */
+	if (iwl_pcie_gen2_tx_init(trans))
+		return -ENOMEM;
+
+	/* enable shadow regs in HW */
+	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+	IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+
+	return 0;
+}
+
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_pcie_reset_ict(trans);
+
+	/* make sure all queues are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* now that we got alive we can free the fw image & the context info.
+	 * paging memory is not included since FW will still use it
+	 */
+	iwl_pcie_ctxt_info_free(trans);
+}
+
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+				 const struct fw_img *fw, bool run_in_rfkill)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill;
+	int ret;
+
+	/* This may fail if AMT took ownership of the device */
+	if (iwl_pcie_prepare_card_hw(trans)) {
+		IWL_WARN(trans, "Exit HW not ready\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	iwl_enable_rfkill_int(trans);
+
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	/*
+	 * We enabled the RF-Kill interrupt and the handler may very
+	 * well be running.
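
The is_down flag set above, together with the mutex taken in the wrapper, makes stop idempotent; start_fw later checks the same flag under the same mutex, so it refuses to race a teardown. A minimal pthread sketch of that guard (userspace analogue, not driver code):

#include <pthread.h>
#include <stdbool.h>

struct dev {
	pthread_mutex_t mutex;
	bool is_down;
};

static void do_stop(struct dev *d)	/* caller holds d->mutex */
{
	if (d->is_down)		/* second stop (e.g. via rfkill) is a no-op */
		return;
	d->is_down = true;
	/* ... actual teardown ... */
}

static int do_start(struct dev *d)	/* caller holds d->mutex */
{
	if (d->is_down)		/* refuse to start over a torn-down device */
		return -1;
	return 0;
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, false };
	int ret;

	pthread_mutex_lock(&d.mutex);
	do_stop(&d);
	do_stop(&d);		/* idempotent: no double teardown */
	ret = do_start(&d);	/* -1: a stop already ran */
	pthread_mutex_unlock(&d.mutex);
	return ret == -1 ? 0 : 1;
}
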
Disable the interrupts to make sure no other + * interrupt can be fired. + */ + iwl_disable_interrupts(trans); + + /* Make sure it finished running */ + iwl_pcie_synchronize_irqs(trans); + + mutex_lock(&trans_pcie->mutex); + + /* If platform's RF_KILL switch is NOT set to KILL */ + hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = -EIO; + goto out; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(trans, CSR_INT, 0xFFFFFFFF); + + ret = iwl_pcie_gen2_nic_init(trans); + if (ret) { + IWL_ERR(trans, "Unable to init nic\n"); + goto out; + } + + ret = iwl_pcie_ctxt_info_init(trans, fw); + if (ret) + goto out; + + /* re-check RF-Kill state since we may have missed the interrupt */ + hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + if (hw_rfkill && !run_in_rfkill) + ret = -ERFKILL; + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7f05fc56587add6336fc4b453b2282e6ff8c4730..70acf850a9f19f9750296ae2019d8f4a8c277b67 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -7,7 +7,7 @@ * * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -201,7 +201,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 -static void iwl_pcie_apm_config(struct iwl_trans *trans) +void iwl_pcie_apm_config(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; @@ -448,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); } -static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) +int iwl_pcie_apm_stop_master(struct iwl_trans *trans) { int ret = 0; @@ -567,7 +567,7 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) } /* Note: returns standard 0/-ERROR code */ -static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) +int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; int t = 0; @@ -636,29 +636,6 @@ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); } -static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans, - u32 dst_addr, dma_addr_t phy_addr, - u32 byte_cnt) -{ - /* Stop DMA channel */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0); - - /* Configure SRAM address */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR, - dst_addr); - - /* Configure DRAM address - 64 bit */ - iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr); - - /* Configure byte count to transfer */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt); - - /* Enable the DRAM2SRAM to start */ - iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP | - TFH_SRV_DMA_TO_DRIVER | - TFH_SRV_DMA_START); -} - static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) @@ -672,12 +649,8 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, if (!iwl_trans_grab_nic_access(trans, &flags)) return -EIO; - if (trans->cfg->use_tfh) - iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr, - byte_cnt); - else - iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, - byte_cnt); + iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, + byte_cnt); iwl_trans_release_nic_access(trans, &flags); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, @@ -747,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, return ret; } -/* - * Driver Takes the ownership on secure machine before FW load - * and prevent race with the BT load. - * W/A for ROM bug. (should be remove in the next Si step) - */ -static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans) -{ - u32 val, loop = 1000; - - /* - * Check the RSA semaphore is accessible. - * If the HW isn't locked and the rsa semaphore isn't accessible, - * we are in trouble. 
-	 */
-	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
-	if (val & (BIT(1) | BIT(17))) {
-		IWL_DEBUG_INFO(trans,
-			       "can't access the RSA semaphore it is write protected\n");
-		return 0;
-	}
-
-	/* take ownership on the AUX IF */
-	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
-	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
-
-	do {
-		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
-		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
-		if (val == 0x1) {
-			iwl_write_prph(trans, RSA_ENABLE, 0);
-			return 0;
-		}
-
-		udelay(10);
-		loop--;
-	} while (loop > 0);
-
-	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
-	return -EIO;
-}
-
 static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 					   const struct fw_img *image,
 					   int cpu,
@@ -828,15 +760,10 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 			return ret;
 
 		/* Notify ucode of loaded section number and status */
-		if (trans->cfg->use_tfh) {
-			val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
-			val = val | (sec_num << shift_param);
-			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
-		} else {
-			val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
-			val = val | (sec_num << shift_param);
-			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
-		}
+		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+		val = val | (sec_num << shift_param);
+		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+
 		sec_num = (sec_num << 1) | 0x1;
 	}
 
@@ -904,7 +831,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 	return 0;
 }
 
-static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+void iwl_pcie_apply_destination(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
@@ -1042,10 +969,15 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 	if (trans->dbg_dest_tlv)
 		iwl_pcie_apply_destination(trans);
 
-	/* TODO: remove in the next Si step */
-	ret = iwl_pcie_rsa_race_bug_wa(trans);
-	if (ret)
-		return ret;
+	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
+			iwl_read_prph(trans, WFPM_GP2));
+
+	/*
+	 * Set a default value. On resume, reading the values that were
+	 * zeroed can provide debug data on the resume flow.
+	 * This is for debugging only and has no functional impact.
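
The WFPM write below is a debug canary: seed a known pattern before the flow under test, then check it afterwards (the matching "WFPM value upon resume" read is added to the d3_resume path further down). A toy version of the technique, with a plain variable standing in for the register:

#include <stdint.h>
#include <stdio.h>

static uint32_t scratch_reg;	/* stands in for the WFPM_GP2 register */

int main(void)
{
	scratch_reg = 0x01010101;	/* seeded before FW load / suspend */

	/* ... the flow under test runs here and may reset the register ... */

	if (scratch_reg != 0x01010101)
		printf("scratch was clobbered: 0x%08X\n", scratch_reg);
	return 0;
}
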
+ */ + iwl_write_prph(trans, WFPM_GP2, 0x01010101); /* configure the ucode to be ready to get the secured image */ /* release CPU reset */ @@ -1062,7 +994,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, &first_ucode_section); } -static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) +bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) { bool hw_rfkill = iwl_is_rfkill_set(trans); @@ -1147,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); } -static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) +void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; @@ -1299,7 +1231,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } -static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) +void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1423,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) lockdep_assert_held(&trans_pcie->mutex); - if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - _iwl_trans_pcie_stop_device(trans, true); + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { + if (trans->cfg->gen2) + _iwl_trans_pcie_gen2_stop_device(trans, true); + else + _iwl_trans_pcie_stop_device(trans, true); + } } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, @@ -1527,6 +1463,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, } } + IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", + iwl_read_prph(trans, WFPM_GP2)); + val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET) *status = IWL_D3_STATUS_RESET; @@ -1828,7 +1767,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); - iwl_pcie_tx_free(trans); + if (trans->cfg->gen2) + iwl_pcie_gen2_tx_free(trans); + else + iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); if (trans_pcie->msix_enabled) { @@ -1998,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, int queue; for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = &trans_pcie->txq[queue]; + struct iwl_txq *txq = trans_pcie->txq[queue]; unsigned long now; spin_lock_bh(&txq->lock); @@ -2050,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) int i; for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = &trans_pcie->txq[i]; + struct iwl_txq *txq = trans_pcie->txq[i]; if (i == trans_pcie->cmd_queue) continue; @@ -2075,48 +2017,32 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u32 scd_sram_addr; - u8 buf[16]; - int cnt; + u32 txq_id = txq->id; + u32 status; + bool active; + u8 fifo; - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->read_ptr, txq->write_ptr); - - if (trans->cfg->use_tfh) + if (trans->cfg->use_tfh) { + IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, + txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ return; + } - scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->id); - iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - 
iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, - iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); - - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); - - if (cnt & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; + status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); + fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - cnt, active ? "" : "in", fifo, tbl_dw, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & - (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); - } + IWL_ERR(trans, + "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", + txq_id, active ? "" : "in", fifo, + jiffies_to_msecs(txq->wd_timeout), + txq->read_ptr, txq->write_ptr, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) @@ -2139,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) continue; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); - txq = &trans_pcie->txq[cnt]; + txq = trans_pcie->txq[cnt]; wr_ptr = ACCESS_ONCE(txq->write_ptr); while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && @@ -2330,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; - if (!trans_pcie->txq) + if (!trans_pcie->txq_memory) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); @@ -2338,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, return -ENOMEM; for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - txq = &trans_pcie->txq[cnt]; + txq = trans_pcie->txq[cnt]; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", cnt, txq->read_ptr, txq->write_ptr, @@ -2755,7 +2681,7 @@ static struct iwl_trans_dump_data { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs; @@ -2890,21 +2816,43 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) } #endif /* CONFIG_PM_SLEEP */ +#define IWL_TRANS_COMMON_OPS \ + .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ + .write8 = iwl_trans_pcie_write8, \ + .write32 = iwl_trans_pcie_write32, \ + .read32 = iwl_trans_pcie_read32, \ + .read_prph = iwl_trans_pcie_read_prph, \ + .write_prph = iwl_trans_pcie_write_prph, \ + .read_mem = iwl_trans_pcie_read_mem, \ + .write_mem = iwl_trans_pcie_write_mem, \ + .configure = iwl_trans_pcie_configure, \ + .set_pmi = iwl_trans_pcie_set_pmi, \ + .grab_nic_access = 
iwl_trans_pcie_grab_nic_access, \ + .release_nic_access = iwl_trans_pcie_release_nic_access, \ + .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ + .ref = iwl_trans_pcie_ref, \ + .unref = iwl_trans_pcie_unref, \ + .dump_data = iwl_trans_pcie_dump_data, \ + .wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \ + .d3_suspend = iwl_trans_pcie_d3_suspend, \ + .d3_resume = iwl_trans_pcie_d3_resume + +#ifdef CONFIG_PM_SLEEP +#define IWL_TRANS_PM_OPS \ + .suspend = iwl_trans_pcie_suspend, \ + .resume = iwl_trans_pcie_resume, +#else +#define IWL_TRANS_PM_OPS +#endif /* CONFIG_PM_SLEEP */ + static const struct iwl_trans_ops trans_ops_pcie = { + IWL_TRANS_COMMON_OPS, + IWL_TRANS_PM_OPS .start_hw = iwl_trans_pcie_start_hw, - .op_mode_leave = iwl_trans_pcie_op_mode_leave, .fw_alive = iwl_trans_pcie_fw_alive, .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, - .d3_suspend = iwl_trans_pcie_d3_suspend, - .d3_resume = iwl_trans_pcie_d3_resume, - -#ifdef CONFIG_PM_SLEEP - .suspend = iwl_trans_pcie_suspend, - .resume = iwl_trans_pcie_resume, -#endif /* CONFIG_PM_SLEEP */ - .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, @@ -2913,31 +2861,27 @@ static const struct iwl_trans_ops trans_ops_pcie = { .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, - .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table, - .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, - .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, +}; + +static const struct iwl_trans_ops trans_ops_pcie_gen2 = { + IWL_TRANS_COMMON_OPS, + IWL_TRANS_PM_OPS + .start_hw = iwl_trans_pcie_start_hw, + .fw_alive = iwl_trans_pcie_gen2_fw_alive, + .start_fw = iwl_trans_pcie_gen2_start_fw, + .stop_device = iwl_trans_pcie_gen2_stop_device, - .write8 = iwl_trans_pcie_write8, - .write32 = iwl_trans_pcie_write32, - .read32 = iwl_trans_pcie_read32, - .read_prph = iwl_trans_pcie_read_prph, - .write_prph = iwl_trans_pcie_write_prph, - .read_mem = iwl_trans_pcie_read_mem, - .write_mem = iwl_trans_pcie_write_mem, - .configure = iwl_trans_pcie_configure, - .set_pmi = iwl_trans_pcie_set_pmi, - .grab_nic_access = iwl_trans_pcie_grab_nic_access, - .release_nic_access = iwl_trans_pcie_release_nic_access, - .set_bits_mask = iwl_trans_pcie_set_bits_mask, - - .ref = iwl_trans_pcie_ref, - .unref = iwl_trans_pcie_unref, - - .dump_data = iwl_trans_pcie_dump_data, + .send_cmd = iwl_trans_pcie_gen2_send_hcmd, + + .tx = iwl_trans_pcie_gen2_tx, + .reclaim = iwl_trans_pcie_reclaim, + + .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, + .txq_free = iwl_trans_pcie_dyn_txq_free, }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, @@ -2952,8 +2896,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (ret) return ERR_PTR(ret); - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, cfg, &trans_ops_pcie, 0); + if (cfg->gen2) + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie_gen2); + else + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie); if (!trans) return ERR_PTR(-ENOMEM); @@ -3028,7 +2976,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - trans->dev = &pdev->dev; trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); diff --git 
a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
new file mode 100644
index 0000000000000000000000000000000000000000..9fb46a6f47cf416e55d4416b228be50cdd460e4e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -0,0 +1,1018 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pm_runtime.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "mvm/fw-api.h"
+
+ /*
+ * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int txq_id;
+
+	/*
+	 * This function can be called before the op_mode has disabled the
+	 * queues. This happens when we have an rfkill interrupt.
+	 * Since we stop Tx altogether - mark the queues as stopped.
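
This file manages queues through the per-queue pointer table introduced in internal.h above (txq_memory plus txq[]): gen2 entries are allocated on demand and may be NULL, so every walker checks before touching a slot, as the loop just below does. A compact sketch of that layout (MAX_QUEUES is a stand-in for IWL_MAX_TVQM_QUEUES):

#include <stddef.h>

#define MAX_QUEUES	512	/* stand-in for IWL_MAX_TVQM_QUEUES */

struct txq { int id; };

struct trans {
	struct txq *txq_memory;		/* gen1: one contiguous block */
	struct txq *txq[MAX_QUEUES];	/* both: per-queue pointers */
};

static void stop_all(struct trans *t)
{
	for (size_t i = 0; i < MAX_QUEUES; i++) {
		if (!t->txq[i])		/* gen2 queues are sparse */
			continue;
		/* unmap/stop t->txq[i] here */
	}
}

int main(void)
{
	static struct trans t;	/* zeroed: every slot NULL, loop touches none */

	stop_all(&t);
	return 0;
}
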
+	 */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
+		if (!trans_pcie->txq[txq_id])
+			continue;
+		iwl_pcie_gen2_txq_unmap(trans, txq_id);
+	}
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+					  int num_tbs)
+{
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+	int write_ptr = txq->write_ptr;
+	u8 filled_tfd_size, num_fetch_chunks;
+	u16 len = byte_cnt;
+	__le16 bc_ent;
+
+	len = DIV_ROUND_UP(len, 4);
+
+	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+		return;
+
+	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+			  num_tbs * sizeof(struct iwl_tfh_tb);
+	/*
+	 * filled_tfd_size contains the number of filled bytes in the TFD.
+	 * Dividing it by 64 will give the number of chunks to fetch
+	 * to SRAM - 0 for one chunk, 1 for two, and so on.
+	 * If, for example, the TFD contains only 3 TBs then 32 bytes
+	 * of the TFD are used, and only one chunk of 64 bytes should
+	 * be fetched.
+	 */
+	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+	scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
+}
+
+/*
+ * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+					 struct iwl_txq *txq)
+{
+	lockdep_assert_held(&txq->lock);
+
+	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+	/*
+	 * if not in power-save mode, uCode will never sleep when we're
+	 * trying to tx (during RFKILL, we're not trying to tx).
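
The byte-count entry above packs two fields, as its comment explains: the frame length in dwords in the low 12 bits (hence the 0xFFF guard) and the extra-fetch-chunk count in the top 4 bits. A worked example with illustrative TFD header and TB sizes (the real values come from struct iwl_tfh_tfd / iwl_tfh_tb):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int byte_cnt = 1200, num_tbs = 3;
	unsigned int tfd_hdr = 8;	/* illustrative offsetof(..., tbs) */
	unsigned int tb_size = 10;	/* illustrative sizeof(iwl_tfh_tb) */
	unsigned int len = (byte_cnt + 3) / 4;		/* DIV_ROUND_UP: dwords */
	unsigned int filled = tfd_hdr + num_tbs * tb_size;
	unsigned int chunks = (filled + 63) / 64 - 1;	/* 0 == one 64B fetch */
	uint16_t bc_ent = (uint16_t)(len | (chunks << 12));

	/* prints "len=300 chunks=0 bc_ent=0x012c" */
	printf("len=%u chunks=%u bc_ent=0x%04x\n", len, chunks, bc_ent);
	return 0;
}
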
+ */ + iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); +} + +static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + return le16_to_cpu(tfd->num_tbs) & 0x1f; +} + +static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i, num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); + + if (num_tbs >= trans_pcie->max_tbs) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + return; + } + + /* first TB is never freed - it's the bidirectional DMA data */ + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + } + + tfd->num_tbs = 0; +} + +static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int rd_ptr = txq->read_ptr; + int idx = get_cmd_index(txq, rd_ptr); + + lockdep_assert_held(&txq->lock); + + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ + iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr)); + + /* free SKB */ + if (txq->entries) { + struct sk_buff *skb; + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } + } +} + +static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd, dma_addr_t addr, + u16 len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); + struct iwl_tfh_tb *tb = &tfd->tbs[idx]; + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans_pcie->max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +static +struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_tfh_tfd *tfd = + iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + dma_addr_t tb_phys; + int i, len, tb1_len, tb2_len, hdr_len; + void *tb1_addr; + + memset(tfd, 0, sizeof(*tfd)); + + tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + IWL_FIRST_TB_SIZE); + + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* there must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + + /* + * The second TB 
(tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) + + ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE; + + tb1_len = ALIGN(len, 4); + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); + + /* set up TFD's third entry to point to remainder of skb's head */ + hdr_len = ieee80211_hdrlen(hdr->frame_control); + tb2_len = skb_headlen(skb) - hdr_len; + + if (tb2_len > 0) { + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); + } + + /* set up the remaining entries to point to the data */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, + skb_frag_size(frag)); + + out_meta->tbs |= BIT(tb_idx); + } + + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, + IWL_FIRST_TB_SIZE + tb1_len, + skb->data + hdr_len, tb2_len); + trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len, + skb->len - hdr_len); + + return tfd; + +out_err: + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_cmd *dev_cmd, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; + void *tfd; + + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && + __skb_linearize(skb)) + return -ENOMEM; + + spin_lock(&txq->lock); + + /* Set up driver data for this TFD */ + txq->entries[txq->write_ptr].skb = skb; + txq->entries[txq->write_ptr].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(txq->write_ptr))); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[txq->write_ptr].meta; + out_meta->flags = 0; + + tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); + if (!tfd) { + spin_unlock(&txq->lock); + return -1; + } + + /* Set up entry for this TFD in Tx byte-count array */ + iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len), + iwl_pcie_gen2_get_num_tbs(trans, tfd)); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr) { + if (txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); + iwl_trans_ref(trans); + } + + /* Tell device the write index *just past* this latest filled TFD */ + txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 
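
The lines around this point are classic ring-buffer flow control: advance the write pointer with a wrap, tell the hardware, then stop the queue when free space falls under the high mark. The space math, sketched with an illustrative queue size and the usual one-slot reservation so that read == write can only mean "empty" (iwl_queue_space() applies the same idea with extra slack):

#include <stdio.h>

#define Q_SIZE		256	/* illustrative stand-in for TFD_QUEUE_SIZE_MAX */

static int queue_space(int read_ptr, int write_ptr)
{
	int used = (write_ptr - read_ptr) & (Q_SIZE - 1);

	return Q_SIZE - used - 1;	/* one slot reserved: full != empty */
}

int main(void)
{
	int high_mark = Q_SIZE / 4;
	int space = queue_space(250, 10);	/* write pointer has wrapped */

	/* prints "space=239 stop=0" */
	printf("space=%d stop=%d\n", space, space < high_mark);
	return 0;
}
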
+	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+	if (iwl_queue_space(txq) < txq->high_mark)
+		iwl_stop_queue(trans, txq);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually.
+	 */
+	spin_unlock(&txq->lock);
+	return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport private data
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
+ * command queue.
+ */
+static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+				      struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
+	unsigned long flags;
+	void *dup_buf = NULL;
+	dma_addr_t phys_addr;
+	int idx, i, cmd_pos;
+	u16 copy_size, cmd_size, tb0_size;
+	bool had_nocopy = false;
+	u8 group_id = iwl_cmd_groupid(cmd->id);
+	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+	struct iwl_tfh_tfd *tfd =
+		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+
+	memset(tfd, 0, sizeof(*tfd));
+
+	copy_size = sizeof(struct iwl_cmd_header_wide);
+	cmd_size = sizeof(struct iwl_cmd_header_wide);
+
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		cmddata[i] = cmd->data[i];
+		cmdlen[i] = cmd->len[i];
+
+		if (!cmd->len[i])
+			continue;
+
+		/* need at least IWL_FIRST_TB_SIZE copied */
+		if (copy_size < IWL_FIRST_TB_SIZE) {
+			int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+			if (copy > cmdlen[i])
+				copy = cmdlen[i];
+			cmdlen[i] -= copy;
+			cmddata[i] += copy;
+			copy_size += copy;
+		}
+
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+			had_nocopy = true;
+			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+			/*
+			 * This is also a chunk that isn't copied
+			 * to the static buffer so set had_nocopy.
+			 */
+			had_nocopy = true;
+
+			/* only allowed once */
+			if (WARN_ON(dup_buf)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+
+			dup_buf = kmemdup(cmddata[i], cmdlen[i],
+					  GFP_ATOMIC);
+			if (!dup_buf)
+				return -ENOMEM;
+		} else {
+			/* NOCOPY must not be followed by normal! */
+			if (WARN_ON(had_nocopy)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+			copy_size += cmdlen[i];
+		}
+		cmd_size += cmd->len[i];
+	}
+
+	/*
+	 * If any of the command structures end up being larger than the
+	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
+	 * separate TFDs, then we will need to increase the size of the buffers
+	 */
+	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+		 "Command %s (%#x) is too large (%d bytes)\n",
+		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
+		idx = -EINVAL;
+		goto free_dup_buf;
+	}
+
+	spin_lock_bh(&txq->lock);
+
+	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ?
2 : 1)) { + spin_unlock_bh(&txq->lock); + + IWL_ERR(trans, "No space in command queue\n"); + iwl_op_mode_cmd_queue_full(trans->op_mode); + idx = -ENOSPC; + goto free_dup_buf; + } + + idx = get_cmd_index(txq, txq->write_ptr); + out_cmd = txq->entries[idx].cmd; + out_meta = &txq->entries[idx].meta; + + /* re-initialize to NULL */ + memset(out_meta, 0, sizeof(*out_meta)); + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + + /* set up the header */ + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(txq->write_ptr)); + + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + + /* and copy the data that needs to be copied */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + int copy; + + if (!cmd->len[i]) + continue; + + /* copy everything if not nocopy/dup */ + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) { + copy = cmd->len[i]; + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + copy_size += copy; + continue; + } + + /* + * Otherwise we need at least IWL_FIRST_TB_SIZE copied + * in total (for bi-directional DMA), but copy up to what + * we can fit into the payload for debug dump purposes. + */ + copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + + /* However, treat copy_size the proper way, we need it below */ + if (copy_size < IWL_FIRST_TB_SIZE) { + copy = IWL_FIRST_TB_SIZE - copy_size; + + if (copy > cmd->len[i]) + copy = cmd->len[i]; + copy_size += copy; + } + } + + IWL_DEBUG_HC(trans, + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + iwl_get_cmd_string(trans, cmd->id), group_id, + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); + + /* start the TFD with the minimum copy bytes */ + tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); + iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), + tb0_size); + + /* map first command fragment, if any remains */ + if (copy_size > tb0_size) { + phys_addr = dma_map_single(trans->dev, + ((u8 *)&out_cmd->hdr) + tb0_size, + copy_size - tb0_size, + DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + idx = -ENOMEM; + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + goto out; + } + iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, + copy_size - tb0_size); + } + + /* map the remaining (adjusted) nocopy/dup fragments */ + for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { + const void *data = cmddata[i]; + + if (!cmdlen[i]) + continue; + if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP))) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) + data = dup_buf; + phys_addr = dma_map_single(trans->dev, (void *)data, + cmdlen[i], DMA_TO_DEVICE); + if (dma_mapping_error(trans->dev, phys_addr)) { + idx = -ENOMEM; + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + goto out; + } + iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]); + } + + BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); + out_meta->flags = cmd->flags; + if 
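The chunk-scanning loop above enforces two invariants: the first IWL_FIRST_TB_SIZE bytes must come from copied data (they are DMA'd bidirectionally out of TB0), and once a NOCOPY or DUP fragment has been seen, no plain copied fragment may follow, because copied data has to form one contiguous prefix of the command. A stand-alone sketch of the ordering check; DFL_NOCOPY/DFL_DUP are hypothetical stand-ins for the IWL_HCMD_DFL_* flags:

#include <stdbool.h>
#include <stddef.h>

#define DFL_NOCOPY (1u << 0)	/* stand-in flag values */
#define DFL_DUP    (1u << 1)

static bool chunk_order_valid(const unsigned int *dataflags, size_t n)
{
	bool had_nocopy = false;

	for (size_t i = 0; i < n; i++) {
		if (dataflags[i] & (DFL_NOCOPY | DFL_DUP)) {
			/* NOCOPY and DUP on the same chunk is rejected too */
			if ((dataflags[i] & DFL_NOCOPY) && (dataflags[i] & DFL_DUP))
				return false;
			had_nocopy = true;
		} else if (had_nocopy) {
			/* a plain copied chunk after a NOCOPY/DUP one */
			return false;
		}
	}
	return true;
}

Only one DUP chunk is allowed overall; the sketch models the ordering rule only.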
(WARN_ON_ONCE(txq->entries[idx].free_buf)) + kzfree(txq->entries[idx].free_buf); + txq->entries[idx].free_buf = dup_buf; + + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + if (!(cmd->flags & CMD_SEND_IN_IDLE) && + !trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = true; + IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); + iwl_trans_ref(trans); + } + /* Increment and update queue's write index */ + txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + +out: + spin_unlock_bh(&txq->lock); +free_dup_buf: + if (idx < 0) + kfree(dup_buf); + return idx; +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) + +static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + int cmd_idx; + int ret; + + IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); + + if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + "Command %s: a command is already active!\n", cmd_str)) + return -EIO; + + IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); + + if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + pm_runtime_active(&trans_pcie->pci_dev->dev), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); + return -ETIMEDOUT; + } + } + + cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", + cmd_str, ret); + return ret; + } + + ret = wait_event_timeout(trans_pcie->wait_command_queue, + !test_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", + cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", + txq->read_ptr, txq->write_ptr); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + cmd_str); + ret = -ETIMEDOUT; + + iwl_force_nmi(trans); + iwl_trans_fw_error(trans); + + goto cancel; + } + + if (test_bit(STATUS_FW_ERROR, &trans->status)) { + IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); + dump_stack(); + ret = -EIO; + goto cancel; + } + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); + ret = -ERFKILL; + goto cancel; + } + + if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { + IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); + ret = -EIO; + goto cancel; + } + + return 0; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
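The synchronous path above is a flag-and-waitqueue handshake: the caller sets STATUS_SYNC_HCMD_ACTIVE, enqueues the command, then sleeps until the response handler clears the bit or the two-second HOST_COMPLETE_TIMEOUT expires, at which point it escalates with an NMI and a firmware-error report. A rough user-space model of the waiting side, polling where the driver would sleep on a waitqueue:

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static atomic_bool sync_hcmd_active;	/* models STATUS_SYNC_HCMD_ACTIVE */

static bool wait_hcmd_complete(int timeout_ms)
{
	struct timespec one_ms = { .tv_sec = 0, .tv_nsec = 1000000 };

	while (timeout_ms-- > 0) {
		if (!atomic_load(&sync_hcmd_active))
			return true;	/* response handler cleared the flag */
		nanosleep(&one_ms, NULL);	/* POSIX; stands in for the waitqueue */
	}
	return false;	/* the real code now forces an NMI and a fw error */
}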
+	 */
+		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+	}
+
+	if (cmd->resp_pkt) {
+		iwl_free_resp(cmd);
+		cmd->resp_pkt = NULL;
+	}
+
+	return ret;
+}
+
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+				  struct iwl_host_cmd *cmd)
+{
+	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+	    test_bit(STATUS_RFKILL, &trans->status)) {
+		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+				  cmd->id);
+		return -ERFKILL;
+	}
+
+	if (cmd->flags & CMD_ASYNC) {
+		int ret;
+
+		/* An asynchronous command can not expect an SKB to be set. */
+		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+			return -EINVAL;
+
+		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+		if (ret < 0) {
+			IWL_ERR(trans,
+				"Error sending %s: enqueue_hcmd failed: %d\n",
+				iwl_get_cmd_string(trans, cmd->id), ret);
+			return ret;
+		}
+		return 0;
+	}
+
+	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
+}
+
+/*
+ * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+	spin_lock_bh(&txq->lock);
+	while (txq->write_ptr != txq->read_ptr) {
+		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+				   txq_id, txq->read_ptr);
+
+		iwl_pcie_gen2_free_tfd(trans, txq);
+		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+
+		if (txq->read_ptr == txq->write_ptr) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+			if (txq_id != trans_pcie->cmd_queue) {
+				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+					      txq->id);
+				iwl_trans_unref(trans);
+			} else if (trans_pcie->ref_cmd_in_flight) {
+				trans_pcie->ref_cmd_in_flight = false;
+				IWL_DEBUG_RPM(trans,
+					      "clear ref_cmd_in_flight\n");
+				iwl_trans_unref(trans);
+			}
+			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+		}
+	}
+	spin_unlock_bh(&txq->lock);
+
+	/* just in case - this queue may have been stopped */
+	iwl_wake_queue(trans, txq);
+}
+
+static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+					  struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct device *dev = trans->dev;
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->tfds) {
+		dma_free_coherent(dev,
+				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+				  txq->tfds, txq->dma_addr);
+		dma_free_coherent(dev,
+				  sizeof(*txq->first_tb_bufs) * txq->n_window,
+				  txq->first_tb_bufs, txq->first_tb_dma);
+	}
+
+	kfree(txq->entries);
+	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+	kfree(txq);
+}
+
+/*
+ * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	int i;
+
+	if (WARN_ON(!txq))
+		return;
+
+	iwl_pcie_gen2_txq_unmap(trans, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < txq->n_window; i++) {
+			kzfree(txq->entries[i].cmd);
+			kzfree(txq->entries[i].free_buf);
+		}
+	del_timer_sync(&txq->stuck_timer);
+
+	iwl_pcie_gen2_txq_free_memory(trans, txq);
+
+	trans_pcie->txq[txq_id] = NULL;
+
+	clear_bit(txq_id, trans_pcie->queue_used);
+}
+
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+				 struct iwl_tx_queue_cfg_cmd *cmd,
+				 int cmd_id,
+				 unsigned int timeout)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue_cfg_rsp *rsp;
+	struct iwl_txq *txq;
+	struct iwl_host_cmd hcmd = {
+		.id = cmd_id,
+		.len = { sizeof(*cmd) },
+		.data = { cmd, },
+		.flags = CMD_WANT_SKB,
+	};
+	int ret, qid;
+
+	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+	if (!txq)
+		return -ENOMEM;
+	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+				     sizeof(struct iwlagn_scd_bc_tbl));
+	if (ret) {
+		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+		kfree(txq);
+		return -ENOMEM;
+	}
+
+	ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
+	if (ret) {
+		IWL_ERR(trans, "Tx queue alloc failed\n");
+		goto error;
+	}
+	ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
+	if (ret) {
+		IWL_ERR(trans, "Tx queue init failed\n");
+		goto error;
+	}
+
+	txq->wd_timeout = msecs_to_jiffies(timeout);
+
+	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
+	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));
+
+	ret = iwl_trans_send_cmd(trans, &hcmd);
+	if (ret)
+		goto error;
+
+	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	qid = le16_to_cpu(rsp->queue_number);
+
+	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+		WARN_ONCE(1, "queue index %d unsupported", qid);
+		ret = -EIO;
+		goto error;
+	}
+
+	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+		WARN_ONCE(1, "queue %d already used", qid);
+		ret = -EIO;
+		goto error;
+	}
+
+	txq->id = qid;
+	trans_pcie->txq[qid] = txq;
+
+	/* Place first TFD at index corresponding to start sequence number */
+	txq->read_ptr = le16_to_cpu(rsp->write_pointer);
+	txq->write_ptr = le16_to_cpu(rsp->write_pointer);
+	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+			   (txq->write_ptr) | (qid << 16));
+	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+	return qid;
+
+error:
+	iwl_pcie_gen2_txq_free_memory(trans, txq);
+	return ret;
+}
+
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/*
+	 * Upon HW Rfkill - we stop the device, and then stop the queues
+	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
+	 * allow the op_mode to call txq_disable after it already called
+	 * stop_device.
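Unlike the legacy transport, where the host picks the queue number, here the firmware response names the queue: the id must be range-checked before it is used as an array index, and an id that is already live must be refused. A compact stand-alone sketch of that adoption step (note the >= bound):

#include <stdbool.h>
#include <stddef.h>

/* returns the adopted queue id, or -1 for an out-of-range or busy id */
static int adopt_fw_qid(unsigned int qid, bool *used, size_t n_queues)
{
	if (qid >= n_queues)	/* qid == n_queues would be out of bounds */
		return -1;
	if (used[qid])
		return -1;
	used[qid] = true;
	return (int)qid;
}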
+ */ + if (!test_and_clear_bit(queue, trans_pcie->queue_used)) { + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", queue); + return; + } + + iwl_pcie_gen2_txq_unmap(trans, queue); + + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); +} + +void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + + /* Free all TX queues */ + for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) { + if (!trans_pcie->txq[i]) + continue; + + iwl_pcie_gen2_txq_free(trans, i); + } +} + +int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *cmd_queue; + int txq_id = trans_pcie->cmd_queue, ret; + + /* alloc and init the command queue */ + if (!trans_pcie->txq[txq_id]) { + cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL); + if (!cmd_queue) { + IWL_ERR(trans, "Not enough memory for command queue\n"); + return -ENOMEM; + } + trans_pcie->txq[txq_id] = cmd_queue; + ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true); + if (ret) { + IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); + goto error; + } + } else { + cmd_queue = trans_pcie->txq[txq_id]; + } + + ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true); + if (ret) { + IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + trans_pcie->txq[txq_id]->id = txq_id; + set_bit(txq_id, trans_pcie->queue_used); + + return 0; + +error: + iwl_pcie_gen2_tx_free(trans); + return ret; +} + diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 911cf98681074725b449f2b281379994269312f7..386950a2d6162845ccf3aa2ca24c968750d5d2cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -2,7 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -71,7 +71,7 @@ * ***************************************************/ -static int iwl_queue_space(const struct iwl_txq *q) +int iwl_queue_space(const struct iwl_txq *q) { unsigned int max; unsigned int used; @@ -102,10 +102,9 @@ static int iwl_queue_space(const struct iwl_txq *q) /* * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ -static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id) +static int iwl_queue_init(struct iwl_txq *q, int slots_num) { q->n_window = slots_num; - q->id = id; /* slots_num must be power-of-two size, otherwise * get_cmd_index is broken. 
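That restriction exists because the driver masks instead of dividing: with a power-of-two window, index & (n_window - 1) is a cheap modulo. The idea in isolation:

#include <stdbool.h>

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

/* models get_cmd_index(): only correct when n_window is a power of two */
static unsigned int cmd_index(unsigned int n_window, unsigned int index)
{
	return index & (n_window - 1);
}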
*/ @@ -126,8 +125,8 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id) return 0; } -static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, - struct iwl_dma_ptr *ptr, size_t size) +int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, + struct iwl_dma_ptr *ptr, size_t size) { if (WARN_ON(ptr->addr)) return -EINVAL; @@ -140,8 +139,7 @@ static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, return 0; } -static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, - struct iwl_dma_ptr *ptr) +void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) { if (unlikely(!ptr->addr)) return; @@ -164,9 +162,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) } spin_unlock(&txq->lock); - IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id, - jiffies_to_msecs(txq->wd_timeout)); - iwl_trans_pcie_log_scd_error(trans, txq); iwl_force_nmi(trans); @@ -188,6 +183,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, __le16 bc_ent; struct iwl_tx_cmd *tx_cmd = (void *)txq->entries[txq->write_ptr].cmd->payload; + u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -210,26 +206,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) return; - if (trans->cfg->use_tfh) { - u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + - num_tbs * sizeof(struct iwl_tfh_tb); - /* - * filled_tfd_size contains the number of filled bytes in the - * TFD. - * Dividing it by 64 will give the number of chunks to fetch - * to SRAM- 0 for one chunk, 1 for 2 and so on. - * If, for example, TFD contains only 3 TBs then 32 bytes - * of the TFD are used, and only one chunk of 64 bytes should - * be fetched - */ - u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - } else { - u8 sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - } + bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; @@ -319,23 +296,17 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) int i; for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = &trans_pcie->txq[i]; + struct iwl_txq *txq = trans_pcie->txq[i]; spin_lock_bh(&txq->lock); - if (trans_pcie->txq[i].need_update) { + if (txq->need_update) { iwl_pcie_txq_inc_wr_ptr(trans, txq); - trans_pcie->txq[i].need_update = false; + txq->need_update = false; } spin_unlock_bh(&txq->lock); } } -static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, - struct iwl_txq *txq, int idx) -{ - return txq->tfds + trans_pcie->tfd_size * idx; -} - static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, void *_tfd, u8 idx) { @@ -368,28 +339,17 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, u8 idx, dma_addr_t addr, u16 len) { - if (trans->cfg->use_tfh) { - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; - struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx]; + struct iwl_tfd *tfd_fh = (void *)tfd; + struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); + u16 hi_n_len = len << 4; - tfd_fh->num_tbs = cpu_to_le16(idx + 1); - } else { - struct iwl_tfd *tfd_fh = (void *)tfd; - struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; - - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - if (sizeof(dma_addr_t) > 
sizeof(u32)) - hi_n_len |= ((addr >> 16) >> 16) & 0xF; + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); - tb->hi_n_len = cpu_to_le16(hi_n_len); + tb->hi_n_len = cpu_to_le16(hi_n_len); - tfd_fh->num_tbs = idx + 1; - } + tfd_fh->num_tbs = idx + 1; } static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) @@ -460,7 +420,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ -static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window @@ -522,9 +482,8 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, return num_tbs; } -static int iwl_pcie_txq_alloc(struct iwl_trans *trans, - struct iwl_txq *txq, int slots_num, - u32 txq_id) +int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; @@ -547,7 +506,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, if (!txq->entries) goto error; - if (txq_id == trans_pcie->cmd_queue) + if (cmd_queue) for (i = 0; i < slots_num; i++) { txq->entries[i].cmd = kmalloc(sizeof(struct iwl_device_cmd), @@ -573,13 +532,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, if (!txq->first_tb_bufs) goto err_free_tfds; - txq->id = txq_id; - return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); error: - if (txq->entries && txq_id == trans_pcie->cmd_queue) + if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) kfree(txq->entries[i].cmd); kfree(txq->entries); @@ -589,10 +546,9 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, } -static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, u32 txq_id) +int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; txq->need_update = false; @@ -602,13 +558,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(txq, slots_num, txq_id); + ret = iwl_queue_init(txq, slots_num); if (ret) return ret; spin_lock_init(&txq->lock); - if (txq_id == trans_pcie->cmd_queue) { + if (cmd_queue) { static struct lock_class_key iwl_pcie_cmd_queue_lock_class; lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); @@ -616,18 +572,6 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, __skb_queue_head_init(&txq->overflow_q); - /* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. 
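The iwl_pcie_tfd_set_tb() hunk above keeps only the legacy TFD format now that the TFH variant lives in the gen2 file. That format squeezes a 36-bit DMA address and a 12-bit length into 48 bits: address bits 0-31 go into tb->lo, bits 32-35 into the low nibble of hi_n_len, and the length into its upper 12 bits. A sketch of the packing:

#include <stdint.h>

/* len is assumed to fit in 12 bits, as the legacy TFD requires */
static uint16_t pack_hi_n_len(uint64_t addr, uint16_t len)
{
	uint16_t hi_n_len = len << 4;		/* length in bits 4..15 */

	hi_n_len |= (addr >> 32) & 0xf;		/* address bits 32..35 */
	return hi_n_len;
}

The driver's iwl_get_dma_hi_addr() spells the shift as ((addr >> 16) >> 16) so it stays well defined even when dma_addr_t is only 32 bits wide.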
- * Circular buffer (TFD queue in DRAM) physical base address */ - if (trans->cfg->use_tfh) - iwl_write_direct64(trans, - FH_MEM_CBBC_QUEUE(trans, txq_id), - txq->dma_addr); - else - iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), - txq->dma_addr >> 8); - return 0; } @@ -672,7 +616,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { @@ -704,7 +648,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } } - txq->active = false; while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); @@ -729,7 +672,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct device *dev = trans->dev; int i; @@ -780,9 +723,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); - if (trans->cfg->use_tfh) - return; - trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); @@ -832,9 +772,16 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; + /* + * we should never get here in gen2 trans mode return early to avoid + * having invalid accesses + */ + if (WARN_ON_ONCE(trans->cfg->gen2)) + return; + for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; if (trans->cfg->use_tfh) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), @@ -914,7 +861,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); /* This can happen: start_hw, stop_device */ - if (!trans_pcie->txq) + if (!trans_pcie->txq_memory) return 0; /* Unmap DMA from host system and free skb's */ @@ -935,15 +882,20 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + /* Tx queues */ - if (trans_pcie->txq) { + if (trans_pcie->txq_memory) { for (txq_id = 0; - txq_id < trans->cfg->base_params->num_of_queues; txq_id++) + txq_id < trans->cfg->base_params->num_of_queues; + txq_id++) { iwl_pcie_txq_free(trans, txq_id); + trans_pcie->txq[txq_id] = NULL; + } } - kfree(trans_pcie->txq); - trans_pcie->txq = NULL; + kfree(trans_pcie->txq_memory); + trans_pcie->txq_memory = NULL; iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); @@ -965,7 +917,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /*It is not allowed to alloc twice, so warn when this happens. 
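The unmap paths in both transports share one pattern: walk read_ptr toward write_ptr, freeing one TFD per step, wrapping on the power-of-two ring size. A stand-alone sketch, with RING_SIZE standing in for TFD_QUEUE_SIZE_MAX:

#define RING_SIZE 256	/* stand-in; must be a power of two */

struct ring {
	unsigned int read_ptr;
	unsigned int write_ptr;
};

static void ring_drain(struct ring *r, void (*free_entry)(unsigned int))
{
	while (r->read_ptr != r->write_ptr) {
		free_entry(r->read_ptr);
		r->read_ptr = (r->read_ptr + 1) & (RING_SIZE - 1);
	}
}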
* We cannot rely on the previous allocation, so free and fail */ - if (WARN_ON(trans_pcie->txq)) { + if (WARN_ON(trans_pcie->txq_memory)) { ret = -EINVAL; goto error; } @@ -984,9 +936,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) goto error; } - trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, - sizeof(struct iwl_txq), GFP_KERNEL); - if (!trans_pcie->txq) { + trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, + sizeof(struct iwl_txq), GFP_KERNEL); + if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = -ENOMEM; goto error; @@ -995,14 +947,17 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { - slots_num = (txq_id == trans_pcie->cmd_queue) ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], - slots_num, txq_id); + bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], + slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } + trans_pcie->txq[txq_id]->id = txq_id; } return 0; @@ -1012,6 +967,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) return ret; } + int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1019,7 +975,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) int txq_id, slots_num; bool alloc = false; - if (!trans_pcie->txq) { + if (!trans_pcie->txq_memory) { ret = iwl_pcie_tx_alloc(trans); if (ret) goto error; @@ -1040,22 +996,24 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { - slots_num = (txq_id == trans_pcie->cmd_queue) ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], - slots_num, txq_id); + bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], + slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } - } - if (trans->cfg->use_tfh) { - iwl_write_direct32(trans, TFH_TRANSFER_MODE, - TFH_TRANSFER_MAX_PENDING_REQ | - TFH_CHUNK_SIZE_128 | - TFH_CHUNK_SPLIT_MODE); - return 0; + /* + * Tell nic where to find circular buffer of TFDs for a + * given Tx queue, and enable the DMA channel used for that + * queue. 
+ * Circular buffer (TFD queue in DRAM) physical base address + */ + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), + trans_pcie->txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); @@ -1100,7 +1058,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); int last_to_free; @@ -1110,7 +1068,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, spin_lock_bh(&txq->lock); - if (!txq->active) { + if (!test_bit(txq_id, trans_pcie->queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; @@ -1257,7 +1215,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; unsigned long flags; int nfreed = 0; @@ -1324,15 +1282,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; int fifo = -1; if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); - if (cfg && trans->cfg->use_tfh) - WARN_ONCE(1, "Expected no calls to SCD configuration"); - txq->wd_timeout = msecs_to_jiffies(wdg_timeout); if (cfg) { @@ -1414,27 +1369,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, "Activate queue %d WrPtr: %d\n", txq_id, ssn & 0xff); } - - txq->active = true; } void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txq[txq_id]; txq->ampdu = !shared_mode; } -dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - return trans_pcie->scd_bc_tbls.dma + - txq * sizeof(struct iwlagn_scd_bc_tbl); -} - void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, bool configure_scd) { @@ -1443,8 +1388,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; - trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; - trans_pcie->txq[txq_id].frozen = false; + trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; + trans_pcie->txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues @@ -1458,9 +1403,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, return; } - if (configure_scd && trans->cfg->use_tfh) - WARN_ONCE(1, "Expected no calls to SCD configuration"); - if (configure_scd) { iwl_scd_txq_set_inactive(trans, txq_id); @@ -1469,7 +1411,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, } iwl_pcie_txq_unmap(trans, txq_id); - trans_pcie->txq[txq_id].ampdu = false; + trans_pcie->txq[txq_id]->ampdu = false; 
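The CBBC write that moved above into iwl_pcie_tx_init() programs the TFD ring base shifted right by 8, so the register effectively assumes a 256-byte-aligned ring; dma_alloc_coherent() returns at least page-aligned memory, so the assumption holds. The value the register receives, in isolation:

#include <assert.h>
#include <stdint.h>

static uint32_t cbbc_reg_val(uint64_t ring_dma)
{
	assert((ring_dma & 0xff) == 0);	/* ring base must be 256-byte aligned */
	return (uint32_t)(ring_dma >> 8);
}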
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1489,7 +1431,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; unsigned long flags; @@ -1774,16 +1716,15 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. */ if (WARN(txq_id != trans_pcie->cmd_queue, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans_pcie->cmd_queue, sequence, - trans_pcie->txq[trans_pcie->cmd_queue].read_ptr, - trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) { + txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, + txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; } @@ -1867,6 +1808,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; int cmd_idx; int ret; @@ -1907,8 +1849,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { - struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; - IWL_ERR(trans, "Error sending %s: time out after %dms.\n", iwl_get_cmd_string(trans, cmd->id), jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); @@ -1959,8 +1899,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, * in later, it will possibly set an invalid * address (cmd->meta.source). */ - trans_pcie->txq[trans_pcie->cmd_queue]. 
- entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; + txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { @@ -2314,7 +2253,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u16 wifi_seq; bool amsdu; - txq = &trans_pcie->txq[txq_id]; + txq = trans_pcie->txq[txq_id]; if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), "TX on unused queue %d\n", txq_id)) diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c index 7aa47069af0a771725c8c158ac2c1cae35c484f9..b2d5ec8634b5d5ee6f4f8b5e7d509672b78dd328 100644 --- a/drivers/net/wireless/intersil/orinoco/cfg.c +++ b/drivers/net/wireless/intersil/orinoco/cfg.c @@ -97,7 +97,7 @@ int orinoco_wiphy_register(struct wiphy *wiphy) } static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct orinoco_private *priv = wiphy_priv(wiphy); diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 28cf9748900108959df907190dc34d9da3057ceb..d9128bb25e85657f1f419ed603ad3f13f34939f2 100644 --- a/drivers/net/wireless/intersil/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c @@ -2283,7 +2283,7 @@ int orinoco_if_add(struct orinoco_private *priv, priv->ndev = dev; /* Report what we've done */ - dev_dbg(priv->dev, "Registerred interface %s.\n", dev->name); + dev_dbg(priv->dev, "Registered interface %s.\n", dev->name); return 0; diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 98e1380b9917f1e4889b140784d270904eec418e..132f5fbda58ba1517ed924e16e78dbf34ac17b14 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -769,18 +769,31 @@ static int ezusb_submit_in_urb(struct ezusb_priv *upriv) static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset) { - u8 res_val = reset; /* avoid argument promotion */ + int ret; + u8 *res_val = NULL; if (!upriv->udev) { err("%s: !upriv->udev", __func__); return -EFAULT; } - return usb_control_msg(upriv->udev, + + res_val = kmalloc(sizeof(*res_val), GFP_KERNEL); + + if (!res_val) + return -ENOMEM; + + *res_val = reset; /* avoid argument promotion */ + + ret = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_FW_TRANS, USB_TYPE_VENDOR | USB_RECIP_DEVICE | - USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val, - sizeof(res_val), DEF_TIMEOUT); + USB_DIR_OUT, EZUSB_CPUCS_REG, 0, res_val, + sizeof(*res_val), DEF_TIMEOUT); + + kfree(res_val); + + return ret; } static int ezusb_firmware_download(struct ezusb_priv *upriv, diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 1af7da0b386e56623f9e40ab8bfd487c10c9d637..5e1c91a80c58022fde772b71f067e93d1e0c8ca8 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -352,7 +352,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); if (hdr->rate & 0x10) - rx_status->flag |= RX_FLAG_SHORTPRE; + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) rx_status->rate_idx = (rate < 4) ? 
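One subtle fix above deserves a note: orinoco_usb used to hand usb_control_msg() a stack variable, but USB transfer buffers are DMA-mapped and must come from the heap. A kernel-style sketch of the corrected pattern; the helper name and parameters are hypothetical, only the shape of the usb_control_msg() call is real:

#include <linux/slab.h>
#include <linux/usb.h>

static int usb_send_one_byte(struct usb_device *udev, u8 request,
			     u16 value, u16 index, u8 byte, int timeout)
{
	int ret;
	u8 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-safe, unlike the stack */

	if (!buf)
		return -ENOMEM;
	*buf = byte;
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
			      value, index, buf, sizeof(*buf), timeout);
	kfree(buf);
	return ret;
}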
0 : rate - 4; else diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 50c219fb1a52b9799d502470c6a5bea08ba7174a..87444af20fc5eb183784f9497869a59f8b6100b7 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -350,6 +350,7 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = { CHAN5G(5785), /* Channel 157 */ CHAN5G(5805), /* Channel 161 */ CHAN5G(5825), /* Channel 165 */ + CHAN5G(5845), /* Channel 169 */ }; static const struct ieee80211_rate hwsim_rates[] = { @@ -389,7 +390,7 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, u32 val; err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, - hwsim_vendor_test_policy); + hwsim_vendor_test_policy, NULL); if (err) return err; if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) @@ -525,6 +526,11 @@ struct mac80211_hwsim_data { struct ieee80211_vif *hw_scan_vif; int scan_chan_idx; u8 scan_addr[ETH_ALEN]; + struct { + struct ieee80211_channel *channel; + unsigned long next_start, start, end; + } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) + + ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_channel *channel; u64 beacon_int /* beacon interval in us */; @@ -552,8 +558,6 @@ struct mac80211_hwsim_data { /* wmediumd portid responsible for netgroup of this radio */ u32 wmediumd; - int power_level; - /* difference between this hw's clock and the real clock, in usecs */ s64 tsf_offset; s64 bcn_delta; @@ -1188,20 +1192,22 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { rx_status.rate_idx = ieee80211_rate_get_vht_mcs(&info->control.rates[0]); - rx_status.vht_nss = + rx_status.nss = ieee80211_rate_get_vht_nss(&info->control.rates[0]); - rx_status.flag |= RX_FLAG_VHT; + rx_status.encoding = RX_ENC_VHT; } else { rx_status.rate_idx = info->control.rates[0].idx; if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) - rx_status.flag |= RX_FLAG_HT; + rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - rx_status.flag |= RX_FLAG_40MHZ; + rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - rx_status.flag |= RX_FLAG_SHORT_GI; + rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate real signal strength (and optional packet loss) */ - rx_status.signal = data->power_level - 50; + rx_status.signal = -50; + if (info->control.vif) + rx_status.signal += info->control.vif->bss_conf.txpower; if (data->ps != PS_DISABLED) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); @@ -1576,6 +1582,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) [IEEE80211_SMPS_STATIC] = "static", [IEEE80211_SMPS_DYNAMIC] = "dynamic", }; + int idx; if (conf->chandef.chan) wiphy_debug(hw->wiphy, @@ -1598,11 +1605,34 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); - data->channel = conf->chandef.chan; + WARN_ON(conf->chandef.chan && data->use_chanctx); + + mutex_lock(&data->mutex); + if (data->scanning && conf->chandef.chan) { + for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { + if (data->survey_data[idx].channel == data->channel) { + data->survey_data[idx].start = + data->survey_data[idx].next_start; + data->survey_data[idx].end = jiffies; + break; + } + } - WARN_ON(data->channel && data->use_chanctx); + data->channel = conf->chandef.chan; + + for (idx = 0; idx < 
	     ARRAY_SIZE(data->survey_data); idx++) {
+			if (data->survey_data[idx].channel &&
+			    data->survey_data[idx].channel != data->channel)
+				continue;
+			data->survey_data[idx].channel = data->channel;
+			data->survey_data[idx].next_start = jiffies;
+			break;
+		}
+	} else {
+		data->channel = conf->chandef.chan;
+	}
+	mutex_unlock(&data->mutex);
-	data->power_level = conf->power_level;
 	if (!data->started || !data->beacon_int)
 		tasklet_hrtimer_cancel(&data->beacon_timer);
 	else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
@@ -1787,28 +1817,37 @@ static int mac80211_hwsim_conf_tx(
 	return 0;
 }
 
-static int mac80211_hwsim_get_survey(
-	struct ieee80211_hw *hw, int idx,
-	struct survey_info *survey)
+static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
+				     struct survey_info *survey)
 {
-	struct ieee80211_conf *conf = &hw->conf;
-
-	wiphy_debug(hw->wiphy, "%s (idx=%d)\n", __func__, idx);
+	struct mac80211_hwsim_data *hwsim = hw->priv;
 
-	if (idx != 0)
+	if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data))
 		return -ENOENT;
 
-	/* Current channel */
-	survey->channel = conf->chandef.chan;
+	mutex_lock(&hwsim->mutex);
+	survey->channel = hwsim->survey_data[idx].channel;
+	if (!survey->channel) {
+		mutex_unlock(&hwsim->mutex);
+		return -ENOENT;
+	}
 
 	/*
-	 * Magically conjured noise level --- this is only ok for simulated hardware.
+	 * Magically conjured dummy values --- this is only ok for simulated hardware.
 	 *
-	 * A real driver which cannot determine the real channel noise MUST NOT
-	 * report any noise, especially not a magically conjured one :-)
+	 * A real driver which cannot determine real values MUST NOT
+	 * report any, especially not magically conjured ones :-)
 	 */
-	survey->filled = SURVEY_INFO_NOISE_DBM;
+	survey->filled = SURVEY_INFO_NOISE_DBM |
+			 SURVEY_INFO_TIME |
+			 SURVEY_INFO_TIME_BUSY;
 	survey->noise = -92;
+	survey->time =
+		jiffies_to_msecs(hwsim->survey_data[idx].end -
+				 hwsim->survey_data[idx].start);
+	/* report 12.5% of channel time is used */
+	survey->time_busy = survey->time / 8;
+	mutex_unlock(&hwsim->mutex);
 
 	return 0;
 }
@@ -1852,7 +1891,7 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
 	int err, ps;
 
 	err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len,
-			hwsim_testmode_policy);
+			hwsim_testmode_policy, NULL);
 	if (err)
 		return err;
 
@@ -1986,6 +2025,10 @@ static void hw_scan_work(struct work_struct *work)
 		}
 		ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
 					     msecs_to_jiffies(dwell));
+		hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan;
+		hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies;
+		hwsim->survey_data[hwsim->scan_chan_idx].end =
+			jiffies + msecs_to_jiffies(dwell);
 	hwsim->scan_chan_idx++;
 	mutex_unlock(&hwsim->mutex);
 }
@@ -2011,6 +2054,7 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
 			     hw_req->req.mac_addr_mask);
 	else
 		memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN);
+	memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
 	mutex_unlock(&hwsim->mutex);
 
 	wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
@@ -2057,6 +2101,7 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
 	memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
 	hwsim->scanning = true;
+	memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
 
 out:
 	mutex_unlock(&hwsim->mutex);
@@ -2207,7 +2252,6 @@ static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"d_tx_failed",
 	"d_ps_mode",
 	"d_group",
-	"d_tx_power",
 };
 
 #define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats)
 
@@ -2244,7 +2288,6 @@ static void
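get_survey() now reports per-channel airtime from the jiffies stamps collected while hwsim scans or camps on a channel; since the radio is simulated, busy time is simply declared to be an eighth (12.5%) of the elapsed time. The arithmetic in a kernel-style helper (the helper itself is hypothetical):

#include <linux/jiffies.h>
#include <net/cfg80211.h>

static void hwsim_fill_survey_time(struct survey_info *survey,
				   unsigned long start, unsigned long end)
{
	survey->time = jiffies_to_msecs(end - start);
	survey->time_busy = survey->time / 8;	/* pretend 12.5% busy */
}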
mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, data[i++] = ar->tx_failed; data[i++] = ar->ps; data[i++] = ar->group; - data[i++] = ar->power_level; WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); } @@ -2438,6 +2481,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, goto failed; } + /* ieee80211_alloc_hw_nm may have used a default name */ + param->hwname = wiphy_name(hw->wiphy); + if (info) net = genl_info_net(info); else @@ -2645,6 +2691,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, if (param->no_vif) ieee80211_hw_set(hw, NO_AUTO_VIF); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + err = ieee80211_register_hw(hw); if (err < 0) { printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n", diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index 39f22467ca2a219809cd35c38a2f5c28d15595bc..3f5eda591dba7c24bb2848b4495febcca1922b87 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -57,12 +57,12 @@ enum hwsim_tx_control_flags { * @HWSIM_CMD_REGISTER: request to register and received all broadcasted * frames by any mac80211_hwsim radio device. * @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user - * space, uses: + * space, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER, * %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE, * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE, %HWSIM_ATTR_FREQ (optional) * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to - * kernel, uses: + * kernel, uses: * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS, * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE * @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters, diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 3f97acb57e66f1d20fa73a13edbf78382ee3f38f..a0463fef79b016635b8cbd59978fe4c94b10a735 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -1657,7 +1657,7 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, */ static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct lbs_private *priv = wiphy_priv(wiphy); diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index c3a53cd6988e730ccddd13802672b7b863457e06..7b4955cc38db3d9b19376d0fde19c1ada14139f9 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1181,6 +1181,10 @@ static int if_spi_probe(struct spi_device *spi) /* Initialize interrupt handling stuff. 
*/ card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0); + if (!card->workqueue) { + err = -ENOMEM; + goto remove_card; + } INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); INIT_WORK(&card->resume_work, if_spi_resume_worker); @@ -1209,6 +1213,7 @@ static int if_spi_probe(struct spi_device *spi) free_irq(spi->irq, card); terminate_workqueue: destroy_workqueue(card->workqueue); +remove_card: lbs_remove_card(priv); /* will call free_netdev */ free_card: free_if_spi_card(card); diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 54e426c1e405fd68b294d7fb3fd52607136c4d89..d8033311798929599b60f557c155567e9310a345 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -641,6 +641,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev) BIT(NL80211_IFTYPE_ADHOC); skb_queue_head_init(&priv->bc_ps_buf); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + SET_IEEE80211_DEV(hw, dmdev); INIT_WORK(&priv->cmd_work, lbtf_cmd_work); diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index 43dccd5b0291f98fcf184822d8a50374c142fc9c..366eb4991a7d88ffbd2965a43449f74e1ab7e551 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -153,7 +153,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REPORT_REQUEST); cmd->size = cpu_to_le16(S_DS_GEN); - le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_chan_rpt_req)); + le16_unaligned_add_cpu(&cmd->size, + sizeof(struct host_cmd_ds_chan_rpt_req)); cr_req->chan_desc.start_freq = cpu_to_le16(MWIFIEX_A_BAND_START_FREQ); cr_req->chan_desc.chan_num = radar_params->chandef->chan->hw_value; diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 1e3bd435a694534f4a450b32e8b0bd69702e5269..7ec06bf13413bfaf6fb1153cc9a2a34a14e51c1f 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -594,6 +594,24 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) return 0; } +static void mwifiex_reg_apply_radar_flags(struct wiphy *wiphy) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + unsigned int i; + + if (!wiphy->bands[NL80211_BAND_5GHZ]) + return; + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + for (i = 0; i < sband->n_channels; i++) { + chan = &sband->channels[i]; + if ((!(chan->flags & IEEE80211_CHAN_DISABLED)) && + (chan->flags & IEEE80211_CHAN_RADAR)) + chan->flags |= IEEE80211_CHAN_NO_IR; + } +} + /* * CFG802.11 regulatory domain callback function. 
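mwifiex_reg_apply_radar_flags() above compensates for missing DFS support: any 5 GHz channel that is still enabled but marked as radar gets IEEE80211_CHAN_NO_IR, so the stack will not initiate transmissions there. The flag logic in isolation, with stand-in values for the IEEE80211_CHAN_* bits:

#include <stdbool.h>

#define CHAN_DISABLED (1u << 0)	/* stand-ins for IEEE80211_CHAN_* */
#define CHAN_RADAR    (1u << 1)
#define CHAN_NO_IR    (1u << 2)

static unsigned int apply_radar_flags(unsigned int flags)
{
	if (!(flags & CHAN_DISABLED) && (flags & CHAN_RADAR))
		flags |= CHAN_NO_IR;	/* no initiating radiation on DFS channels */
	return flags;
}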
* @@ -613,6 +631,7 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, mwifiex_dbg(adapter, INFO, "info: cfg80211 regulatory domain callback for %c%c\n", request->alpha2[0], request->alpha2[1]); + mwifiex_reg_apply_radar_flags(wiphy); switch (request->initiator) { case NL80211_REGDOM_SET_BY_DRIVER: @@ -916,7 +935,7 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, static int mwifiex_change_vif_to_p2p(struct net_device *dev, enum nl80211_iftype curr_iftype, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct mwifiex_private *priv; @@ -988,7 +1007,7 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, static int mwifiex_change_vif_to_sta_adhoc(struct net_device *dev, enum nl80211_iftype curr_iftype, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct mwifiex_private *priv; @@ -1047,7 +1066,7 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev, static int mwifiex_change_vif_to_ap(struct net_device *dev, enum nl80211_iftype curr_iftype, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct mwifiex_private *priv; @@ -1103,7 +1122,7 @@ mwifiex_change_vif_to_ap(struct net_device *dev, static int mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); @@ -1124,10 +1143,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return mwifiex_change_vif_to_p2p(dev, curr_iftype, - type, flags, params); + type, params); case NL80211_IFTYPE_AP: return mwifiex_change_vif_to_ap(dev, curr_iftype, type, - flags, params); + params); case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as IBSS\n", dev->name); @@ -1154,10 +1173,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return mwifiex_change_vif_to_p2p(dev, curr_iftype, - type, flags, params); + type, params); case NL80211_IFTYPE_AP: return mwifiex_change_vif_to_ap(dev, curr_iftype, type, - flags, params); + params); case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as STA\n", dev->name); @@ -1175,13 +1194,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype, - type, flags, - params); + type, params); break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return mwifiex_change_vif_to_p2p(dev, curr_iftype, - type, flags, params); + type, params); case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as AP\n", dev->name); @@ -1214,14 +1232,13 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, if (mwifiex_cfg80211_deinit_p2p(priv)) return -EFAULT; return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype, - type, flags, - params); + type, params); break; case NL80211_IFTYPE_AP: if (mwifiex_cfg80211_deinit_p2p(priv)) return -EFAULT; return mwifiex_change_vif_to_ap(dev, curr_iftype, type, - flags, params); + params); case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as P2P\n", dev->name); @@ -2036,7 +2053,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv 
= mwifiex_netdev_get_priv(dev); if (!mwifiex_stop_bg_scan(priv)) - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; @@ -2304,7 +2321,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); if (!mwifiex_stop_bg_scan(priv)) - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); @@ -2513,7 +2530,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, priv->scan_block = false; if (!mwifiex_stop_bg_scan(priv)) - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy, 0); user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); if (!user_scan_cfg) @@ -2528,9 +2545,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, priv->random_mac[i] |= get_random_int() & ~(request->mac_addr_mask[i]); } + ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); + } else { + eth_zero_addr(priv->random_mac); } - ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); user_scan_cfg->num_ssids = request->n_ssids; user_scan_cfg->ssid_list = request->ssids; @@ -2701,7 +2720,7 @@ mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy, * previous bgscan configuration in the firmware */ static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy, - struct net_device *dev) + struct net_device *dev, u64 reqid) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); @@ -2822,7 +2841,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, - u32 *flags, struct vif_params *params) { struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); @@ -3997,8 +4015,8 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, if (!priv) return -EINVAL; - err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, - mwifiex_tm_policy); + err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy, + NULL); if (err) return err; @@ -4279,7 +4297,6 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | - WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -4298,6 +4315,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + wiphy->max_sched_scan_reqs = 1; wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 25a7475702f7fa9166098e9ca97e9cdd268682a8..0c3b217247b145a9f823f9f73d1233070e87f542 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -242,7 +242,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, mwifiex_dbg(adapter, CMD, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, - le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)), + get_unaligned_le16((u8 *)host_cmd + S_DS_GEN), cmd_size, 
le16_to_cpu(host_cmd->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size); @@ -286,7 +286,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM; adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code; adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] = - le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)); + get_unaligned_le16((u8 *)host_cmd + S_DS_GEN); /* Clear BSS_NO_BITS from HostCmd */ cmd_code &= HostCmd_CMD_ID_MASK; diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index cb6a1a81d44e213c7315c4e64a7e2051b4919a62..6cf9ab9133ea37145c7ccb59d527b1e41b5cf2ea 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -31,17 +31,35 @@ struct rfc_1042_hdr { u8 llc_ctrl; u8 snap_oui[3]; __be16 snap_type; -}; +} __packed; struct rx_packet_hdr { struct ethhdr eth803_hdr; struct rfc_1042_hdr rfc1042_hdr; -}; +} __packed; struct tx_packet_hdr { struct ethhdr eth803_hdr; struct rfc_1042_hdr rfc1042_hdr; -}; +} __packed; + +struct mwifiex_fw_header { + __le32 dnld_cmd; + __le32 base_addr; + __le32 data_length; + __le32 crc; +} __packed; + +struct mwifiex_fw_data { + struct mwifiex_fw_header header; + __le32 seq_num; + u8 data[1]; +} __packed; + +#define MWIFIEX_FW_DNLD_CMD_1 0x1 +#define MWIFIEX_FW_DNLD_CMD_5 0x5 +#define MWIFIEX_FW_DNLD_CMD_6 0x6 +#define MWIFIEX_FW_DNLD_CMD_7 0x7 #define B_SUPPORTED_RATES 5 #define G_SUPPORTED_RATES 9 @@ -707,7 +725,7 @@ struct uap_txpd { u8 reserved1[2]; u8 tx_token_id; u8 reserved[2]; -}; +} __packed; struct uap_rxpd { u8 bss_type; @@ -723,7 +741,7 @@ struct uap_rxpd { u8 ht_info; u8 reserved[3]; u8 flags; -}; +} __packed; struct mwifiex_fw_chan_stats { u8 chan_num; @@ -987,7 +1005,7 @@ struct mwifiex_ps_param { __le16 adhoc_wake_period; __le16 mode; __le16 delay_to_ps; -}; +} __packed; #define HS_DEF_WAKE_INTERVAL 100 #define HS_DEF_INACTIVITY_TIMEOUT 50 @@ -996,7 +1014,7 @@ struct mwifiex_ps_param_in_hs { struct mwifiex_ie_types_header header; __le32 hs_wake_int; __le32 hs_inact_timeout; -}; +} __packed; #define BITMAP_AUTO_DS 0x01 #define BITMAP_STA_PS 0x10 @@ -1062,7 +1080,7 @@ struct host_cmd_ds_802_11_rssi_info { __le16 nbcn; __le16 reserved[9]; long long reserved_1; -}; +} __packed; struct host_cmd_ds_802_11_rssi_info_rsp { __le16 action; @@ -1077,12 +1095,12 @@ struct host_cmd_ds_802_11_rssi_info_rsp { __le16 bcn_rssi_avg; __le16 bcn_nf_avg; long long tsf_bcn; -}; +} __packed; struct host_cmd_ds_802_11_mac_address { __le16 action; u8 mac_addr[ETH_ALEN]; -}; +} __packed; struct host_cmd_ds_mac_control { __le32 action; @@ -1230,7 +1248,7 @@ struct host_cmd_ds_802_11_get_log { __le32 wep_icv_err_cnt[4]; __le32 bcn_rcv_cnt; __le32 bcn_miss_cnt; -}; +} __packed; /* Enumeration for rate format */ enum _mwifiex_rate_format { @@ -1368,12 +1386,12 @@ struct host_cmd_ds_rf_ant_mimo { __le16 tx_ant_mode; __le16 action_rx; __le16 rx_ant_mode; -}; +} __packed; struct host_cmd_ds_rf_ant_siso { __le16 action; __le16 ant_mode; -}; +} __packed; struct host_cmd_ds_tdls_oper { __le16 tdls_action; @@ -1383,13 +1401,13 @@ struct host_cmd_ds_tdls_oper { struct mwifiex_tdls_config { __le16 enable; -}; +} __packed; struct mwifiex_tdls_config_cs_params { u8 unit_time; u8 thr_otherlink; u8 thr_directlink; -}; +} __packed; struct mwifiex_tdls_init_cs_params { u8 peer_mac[ETH_ALEN]; @@ -1404,7 +1422,7 @@ struct mwifiex_tdls_init_cs_params { struct mwifiex_tdls_stop_cs_params { u8 
peer_mac[ETH_ALEN]; -}; +} __packed; struct host_cmd_ds_tdls_config { __le16 tdls_action; @@ -1709,7 +1727,7 @@ struct mwifiex_ie_types_local_pwr_constraint { struct mwifiex_ie_types_wmm_param_set { struct mwifiex_ie_types_header header; u8 wmm_ie[1]; -}; +} __packed; struct mwifiex_ie_types_mgmt_frame { struct mwifiex_ie_types_header header; @@ -1834,7 +1852,7 @@ struct host_cmd_ds_mem_access { __le16 reserved; __le32 addr; __le32 value; -}; +} __packed; struct mwifiex_ie_types_qos_info { struct mwifiex_ie_types_header header; diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index c488c3068abc53e8991720405057634772e4afdd..922e3d69fd84748d72f39c0ba4b2592b468a8101 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -131,9 +131,10 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv, sizeof(struct mwifiex_ie)); } - le16_add_cpu(&ie_list->len, - le16_to_cpu(priv->mgmt_ie[index].ie_length) + - MWIFIEX_IE_HDR_SIZE); + le16_unaligned_add_cpu(&ie_list->len, + le16_to_cpu( + priv->mgmt_ie[index].ie_length) + + MWIFIEX_IE_HDR_SIZE); input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE; } @@ -172,21 +173,21 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv, le16_to_cpu(beacon_ie->ie_length); memcpy(pos, beacon_ie, len); pos += len; - le16_add_cpu(&ap_custom_ie->len, len); + le16_unaligned_add_cpu(&ap_custom_ie->len, len); } if (pr_ie) { len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE + le16_to_cpu(pr_ie->ie_length); memcpy(pos, pr_ie, len); pos += len; - le16_add_cpu(&ap_custom_ie->len, len); + le16_unaligned_add_cpu(&ap_custom_ie->len, len); } if (ar_ie) { len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE + le16_to_cpu(ar_ie->ie_length); memcpy(pos, ar_ie, len); pos += len; - le16_add_cpu(&ap_custom_ie->len, len); + le16_unaligned_add_cpu(&ap_custom_ie->len, len); } ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie); @@ -242,7 +243,7 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len, vs_ie = (struct ieee_types_header *)vendor_ie; memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length), vs_ie, vs_ie->len + 2); - le16_add_cpu(&ie->ie_length, vs_ie->len + 2); + le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2); ie->mgmt_subtype_mask = cpu_to_le16(mask); ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK); } diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 536ab834b12625f8ac7ccce1b309b465f82db31b..48e154e1865df321e12f4f160d51085d599d8721 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -91,6 +91,8 @@ struct wep_key { #define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf #define MWIFIEX_PRIO_BK 2 #define MWIFIEX_PRIO_VI 5 +#define MWIFIEX_SUPPORTED_CHANNELS 2 +#define MWIFIEX_OPERATING_CLASSES 16 struct mwifiex_uap_bss_param { u8 channel; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index b62e03d11c2e27c240d02a4e7813958da562aca8..dd87b9ff64c371911a74308be55e93e3a46cdd43 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -17,6 +17,8 @@ * this warranty disclaimer. 
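The __packed annotations added in fw.h and the get_unaligned_le16() conversions in cmdevt.c address the same problem: once these wire-format headers can sit at arbitrary byte offsets inside a DMA buffer, dereferencing a cast __le16 * is an unaligned access that strict-alignment CPUs can fault on. A minimal user-space sketch of the idea, assuming nothing beyond the C standard library; rd_le16() is a hypothetical stand-in for the kernel's get_unaligned_le16() from <asm/unaligned.h>:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise little-endian load: safe at any alignment. */
static uint16_t rd_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
        /* A 1-byte type field pushes the 16-bit length to an odd offset,
         * exactly what __packed layouts produce on the wire.
         */
        uint8_t frame[] = { 0x02, 0x34, 0x12 };

        /* *(uint16_t *)(frame + 1) would be an unaligned dereference;
         * the byte-wise read below is well-defined everywhere.
         */
        printf("len=0x%04x\n", rd_le16(frame + 1));     /* len=0x1234 */
        return 0;
}

The same byte-wise discipline is why le16_unaligned_add_cpu(), added to util.h later in this series, grows TLV length fields through get/put_unaligned_le16() instead of le16_add_cpu().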
*/ +#include <linux/suspend.h> + #include "main.h" #include "wmm.h" #include "cfg80211.h" @@ -147,7 +149,6 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) kfree(adapter->regd); - vfree(adapter->chan_stats); kfree(adapter); return 0; } @@ -511,7 +512,7 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) * - Download the correct firmware to card * - Issue the init commands to firmware */ -static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) +static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) { int ret; char fmt[64]; @@ -594,7 +595,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) rtnl_lock(); /* Create station interface by default */ wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM, - NL80211_IFTYPE_STATION, NULL, NULL); + NL80211_IFTYPE_STATION, NULL); if (IS_ERR(wdev)) { mwifiex_dbg(adapter, ERROR, "cannot create default STA interface\n"); @@ -604,7 +605,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (driver_mode & MWIFIEX_DRIVER_MODE_UAP) { wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM, - NL80211_IFTYPE_AP, NULL, NULL); + NL80211_IFTYPE_AP, NULL); if (IS_ERR(wdev)) { mwifiex_dbg(adapter, ERROR, "cannot create AP interface\n"); @@ -615,8 +616,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (driver_mode & MWIFIEX_DRIVER_MODE_P2P) { wdev = mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d", NET_NAME_ENUM, - NL80211_IFTYPE_P2P_CLIENT, NULL, - NULL); + NL80211_IFTYPE_P2P_CLIENT, NULL); if (IS_ERR(wdev)) { mwifiex_dbg(adapter, ERROR, "cannot create p2p client interface\n"); @@ -631,6 +631,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) goto done; err_add_intf: + vfree(adapter->chan_stats); wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: @@ -664,11 +665,18 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) mwifiex_free_adapter(adapter); /* Tell all current and future waiters we're finished */ complete_all(fw_done); - return; + + return init_failed ? -EIO : 0; +} + +static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) +{ + _mwifiex_fw_dpc(firmware, context); } /* - * This function initializes the hardware and gets firmware. + * This function gets the firmware and (if called asynchronously) kicks off the + * HW init when done. */ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter, bool req_fw_nowait) @@ -691,20 +699,15 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter, ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name, adapter->dev, GFP_KERNEL, adapter, mwifiex_fw_dpc); - if (ret < 0) - mwifiex_dbg(adapter, ERROR, - "request_firmware_nowait error %d\n", ret); } else { ret = request_firmware(&adapter->firmware, adapter->fw_name, adapter->dev); - if (ret < 0) - mwifiex_dbg(adapter, ERROR, - "request_firmware error %d\n", ret); - else - mwifiex_fw_dpc(adapter->firmware, (void *)adapter); } + if (ret < 0) + mwifiex_dbg(adapter, ERROR, "request_firmware%s error %d\n", + req_fw_nowait ?
"_nowait" : "", ret); return ret; } @@ -745,7 +748,7 @@ mwifiex_close(struct net_device *dev) mwifiex_dbg(priv->adapter, INFO, "aborting bgscan on ndo_stop\n"); mwifiex_stop_bg_scan(priv); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); + cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); } return 0; @@ -1413,6 +1416,7 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); rtnl_unlock(); } + vfree(adapter->chan_stats); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); exit_return: @@ -1426,6 +1430,8 @@ EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); int mwifiex_reinit_sw(struct mwifiex_adapter *adapter) { + int ret; + mwifiex_init_lock_list(adapter); if (adapter->if_ops.up_dev) adapter->if_ops.up_dev(adapter); @@ -1435,6 +1441,7 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) init_waitqueue_head(&adapter->init_wait_q); adapter->is_suspended = false; adapter->hs_activated = false; + adapter->is_cmd_timedout = 0; init_waitqueue_head(&adapter->hs_activate_wait_q); init_waitqueue_head(&adapter->cmd_wait_q.wait); adapter->cmd_wait_q.status = 0; @@ -1472,9 +1479,15 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter) "%s: firmware init failed\n", __func__); goto err_init_fw; } + + /* _mwifiex_fw_dpc() does its own cleanup */ + ret = _mwifiex_fw_dpc(adapter->firmware, adapter); + if (ret) { + pr_err("Failed to bring up adapter: %d\n", ret); + return ret; + } mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); - complete_all(adapter->fw_done); return 0; err_init_fw: @@ -1502,14 +1515,13 @@ static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv) { struct mwifiex_adapter *adapter = priv; - if (adapter->irq_wakeup >= 0) { - dev_dbg(adapter->dev, "%s: wake by wifi", __func__); - adapter->wake_by_wifi = true; - disable_irq_nosync(irq); - } + dev_dbg(adapter->dev, "%s: wake by wifi", __func__); + adapter->wake_by_wifi = true; + disable_irq_nosync(irq); /* Notify PM core we are wakeup source */ pm_wakeup_event(adapter->dev, 0); + pm_system_wakeup(); return IRQ_HANDLED; } @@ -1714,6 +1726,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); rtnl_unlock(); } + vfree(adapter->chan_stats); wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); @@ -1742,7 +1755,7 @@ void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask, struct va_format vaf; va_list args; - if (!adapter->dev || !(adapter->debug_mask & mask)) + if (!(adapter->debug_mask & mask)) return; va_start(args, fmt); @@ -1750,7 +1763,10 @@ void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask, vaf.fmt = fmt; vaf.va = &args; - dev_info(adapter->dev, "%pV", &vaf); + if (adapter->dev) + dev_info(adapter->dev, "%pV", &vaf); + else + pr_info("%pV", &vaf); va_end(args); } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 5c8297207f339559461f43cee1dc9b0585a23042..bb2a467d8b133dd21dbe7fd918a77e82bac46992 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1359,7 +1359,7 @@ mwifiex_netdev_get_priv(struct net_device *dev) */ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb) { - return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT); + return (get_unaligned_le32(skb->data) == PKT_TYPE_MGMT); } /* This function retrieves channel closed for operation by Channel @@ -1529,7 +1529,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, const 
char *name, unsigned char name_assign_type, enum nl80211_iftype type, - u32 *flags, struct vif_params *params); int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index b8c990d10d6ecb11491cbf5ed345ecbdd7dc0359..ac62bce50e964b900135cdc600dcdb9032928a3f 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -119,7 +119,7 @@ static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter, */ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) { - u32 *cookie_addr; + u32 cookie_value; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -127,11 +127,11 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) return true; if (card->sleep_cookie_vbase) { - cookie_addr = (u32 *)card->sleep_cookie_vbase; + cookie_value = get_unaligned_le32(card->sleep_cookie_vbase); mwifiex_dbg(adapter, INFO, "info: ACCESS_HW: sleep cookie=0x%x\n", - *cookie_addr); - if (*cookie_addr == FW_AWAKE_COOKIE) + cookie_value); + if (cookie_value == FW_AWAKE_COOKIE) return true; } @@ -294,8 +294,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) if (!adapter || !adapter->priv_num) return; - cancel_work_sync(&card->work); - reg = card->pcie.reg; if (reg) ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); @@ -350,22 +348,16 @@ MODULE_DEVICE_TABLE(pci, mwifiex_ids); static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) { - struct mwifiex_adapter *adapter; - struct pcie_service_card *card; - - if (!pdev) { - pr_err("%s: PCIe device is not specified\n", __func__); - return; - } + struct pcie_service_card *card = pci_get_drvdata(pdev); + struct mwifiex_adapter *adapter = card->adapter; + int ret; - card = (struct pcie_service_card *)pci_get_drvdata(pdev); - if (!card || !card->adapter) { - pr_err("%s: Card or adapter structure is not valid (%ld)\n", - __func__, (long)card); + if (!adapter) { + dev_err(&pdev->dev, "%s: adapter structure is not valid\n", + __func__); return; } - adapter = card->adapter; mwifiex_dbg(adapter, INFO, "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n", __func__, pdev->vendor, pdev->device, @@ -379,13 +371,19 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) */ mwifiex_shutdown_sw(adapter); adapter->surprise_removed = true; + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); } else { /* Kernel stores and restores PCIe function context before and * after performing FLR respectively. 
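Collapsing the reworked hunks above into one skeleton makes the two-phase FLR contract easier to see. This is an illustrative sketch under the same names the patch uses (pci_get_drvdata(), mwifiex_shutdown_sw(), mwifiex_reinit_sw()); example_reset_notify() is hypothetical, and the dump-flag cleanup and logging are trimmed:

static void example_reset_notify(struct pci_dev *pdev, bool prepare)
{
        struct pcie_service_card *card = pci_get_drvdata(pdev);
        struct mwifiex_adapter *adapter = card->adapter;

        if (prepare) {
                /* Quiesce software state before the FLR is issued. */
                mwifiex_shutdown_sw(adapter);
                adapter->surprise_removed = true;
        } else {
                /* Function context has been restored by the PCI core;
                 * rebuild software state and re-download firmware, now
                 * checking the error that mwifiex_reinit_sw() returns.
                 */
                adapter->surprise_removed = false;
                if (mwifiex_reinit_sw(adapter))
                        dev_err(&pdev->dev, "reinit failed\n");
        }
}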
Reconfigure the software * and firmware including firmware redownload */ adapter->surprise_removed = false; - mwifiex_reinit_sw(adapter); + ret = mwifiex_reinit_sw(adapter); + if (ret) { + dev_err(&pdev->dev, "reinit failed: %d\n", ret); + return; + } } mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } @@ -447,7 +445,7 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, sizeof(sleep_cookie), PCI_DMA_FROMDEVICE); buffer = cmdrsp->data; - sleep_cookie = READ_ONCE(*(u32 *)buffer); + sleep_cookie = get_unaligned_le32(buffer); if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { mwifiex_dbg(adapter, INFO, @@ -1039,6 +1037,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) if (card && card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); + dev_kfree_skb_any(card->cmd_buf); } return 0; } @@ -1049,6 +1048,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; + u32 tmp; card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), &card->sleep_cookie_pbase); @@ -1058,11 +1058,12 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) return -ENOMEM; } /* Init val of Sleep Cookie */ - *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE; + tmp = FW_AWAKE_COOKIE; + put_unaligned(tmp, card->sleep_cookie_vbase); mwifiex_dbg(adapter, INFO, "alloc_scook: sleep cookie=0x%x\n", - *((u32 *)card->sleep_cookie_vbase)); + get_unaligned(card->sleep_cookie_vbase)); return 0; } @@ -1223,7 +1224,6 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, dma_addr_t buf_pa; struct mwifiex_pcie_buf_desc *desc = NULL; struct mwifiex_pfu_buf_desc *desc2 = NULL; - __le16 *tmp; if (!(skb->data && skb->len)) { mwifiex_dbg(adapter, ERROR, @@ -1244,10 +1244,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, adapter->data_sent = true; payload = skb->data; - tmp = (__le16 *)&payload[0]; - *tmp = cpu_to_le16((u16)skb->len); - tmp = (__le16 *)&payload[2]; - *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA); + put_unaligned_le16((u16)skb->len, payload + 0); + put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2); if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) @@ -1376,7 +1374,6 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) (card->rxbd_rdptr & reg->rx_rollover_ind))) { struct sk_buff *skb_data; u16 rx_len; - __le16 pkt_len; rd_index = card->rxbd_rdptr & reg->rx_mask; skb_data = card->rx_buf_list[rd_index]; @@ -1393,8 +1390,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) /* Get data length from interface header - * first 2 bytes for len, next 2 bytes is for type */ - pkt_len = *((__le16 *)skb_data->data); - rx_len = le16_to_cpu(pkt_len); + rx_len = get_unaligned_le16(skb_data->data); if (WARN_ON(rx_len <= INTF_HEADER_LEN || rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) { mwifiex_dbg(adapter, ERROR, @@ -1601,13 +1597,18 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) adapter->cmd_sent = true; - *(__le16 *)&payload[0] = cpu_to_le16((u16)skb->len); - *(__le16 *)&payload[2] = cpu_to_le16(MWIFIEX_TYPE_CMD); + put_unaligned_le16((u16)skb->len, &payload[0]); + put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]); if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) return -1; card->cmd_buf = skb; + /* 
+ * Need to keep a reference, since core driver might free up this + * buffer before we've unmapped it. + */ + skb_get(skb); /* To send a command, the driver will: 1. Write the 64bit physical address of the data buffer to @@ -1694,7 +1695,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) struct sk_buff *skb = card->cmdrsp_buf; int count = 0; u16 rx_len; - __le16 pkt_len; mwifiex_dbg(adapter, CMD, "info: Rx CMD Response\n"); @@ -1711,11 +1711,11 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) if (card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); + dev_kfree_skb_any(card->cmd_buf); card->cmd_buf = NULL; } - pkt_len = *((__le16 *)skb->data); - rx_len = le16_to_cpu(pkt_len); + rx_len = get_unaligned_le16(skb->data); skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); skb_trim(skb, rx_len); @@ -1856,7 +1856,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) desc = card->evtbd_ring[rdptr]; memset(desc, 0, sizeof(*desc)); - event = *(u32 *) &skb_cmd->data[INTF_HEADER_LEN]; + event = get_unaligned_le32(&skb_cmd->data[INTF_HEADER_LEN]); adapter->event_cause = event; /* The first 4bytes will be the event transfer header len is 2 bytes followed by type which is 2 bytes */ @@ -1965,6 +1965,94 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, return ret; } +/* Combo firmware image is a combination of + * (1) combo crc header, start with CMD5 + * (2) bluetooth image, start with CMD7, end with CMD6, data wrapped in CMD1. + * (3) wifi image. + * + * This function bypasses the header and bluetooth parts, returning + * the offset of the trailing wifi-only part. + */ + +static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, + const void *firmware, u32 firmware_len) { + const struct mwifiex_fw_data *fwdata; + u32 offset = 0, data_len, dnld_cmd; + int ret = 0; + bool cmd7_before = false; + + while (1) { + /* Check for integer and buffer overflow */ + if (offset + sizeof(fwdata->header) < sizeof(fwdata->header) || + offset + sizeof(fwdata->header) >= firmware_len) { + mwifiex_dbg(adapter, ERROR, + "extract wifi-only fw failure!\n"); + ret = -1; + goto done; + } + + fwdata = firmware + offset; + dnld_cmd = le32_to_cpu(fwdata->header.dnld_cmd); + data_len = le32_to_cpu(fwdata->header.data_length); + + /* Skip past header */ + offset += sizeof(fwdata->header); + + switch (dnld_cmd) { + case MWIFIEX_FW_DNLD_CMD_1: + if (!cmd7_before) { + mwifiex_dbg(adapter, ERROR, + "no cmd7 before cmd1!\n"); + ret = -1; + goto done; + } + if (offset + data_len < data_len) { + mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); + ret = -1; + goto done; + } + offset += data_len; + break; + case MWIFIEX_FW_DNLD_CMD_5: + /* Check for integer overflow */ + if (offset + data_len < data_len) { + mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); + ret = -1; + goto done; + } + offset += data_len; + break; + case MWIFIEX_FW_DNLD_CMD_6: + /* Check for integer overflow */ + if (offset + data_len < data_len) { + mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); + ret = -1; + goto done; + } + offset += data_len; + if (offset >= firmware_len) { + mwifiex_dbg(adapter, ERROR, + "extract wifi-only fw failure!\n"); + ret = -1; + } else { + ret = offset; + } + goto done; + case MWIFIEX_FW_DNLD_CMD_7: + cmd7_before = true; + break; + default: + mwifiex_dbg(adapter, ERROR, "unknown dnld_cmd %d\n", + dnld_cmd); + ret = -1; + goto done; + } + } + +done: + return ret; +} + /* * This function downloads the firmware to the
card. * @@ -1980,7 +2068,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, u32 firmware_len = fw->fw_len; u32 offset = 0; struct sk_buff *skb; - u32 txlen, tx_blocks = 0, tries, len; + u32 txlen, tx_blocks = 0, tries, len, val; u32 block_retry_cnt = 0; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -2007,6 +2095,24 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, goto done; } + ret = mwifiex_read_reg(adapter, PCIE_SCRATCH_13_REG, &val); + if (ret) { + mwifiex_dbg(adapter, FATAL, "Failed to read scratch register 13\n"); + goto done; + } + + /* PCIE FLR case: extract wifi part from combo firmware*/ + if (val == MWIFIEX_PCIE_FLR_HAPPENS) { + ret = mwifiex_extract_wifi_fw(adapter, firmware, firmware_len); + if (ret < 0) { + mwifiex_dbg(adapter, ERROR, "Failed to extract wifi fw\n"); + goto done; + } + offset = ret; + mwifiex_dbg(adapter, MSG, + "info: dnld wifi firmware from %d bytes\n", offset); + } + /* Perform firmware data transfer */ do { u32 ireg_intr = 0; @@ -2503,8 +2609,8 @@ mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG, - PCIE_SCRATCH_13_REG, - PCIE_SCRATCH_14_REG}; + PCIE_SCRATCH_14_REG, + PCIE_SCRATCH_15_REG}; if (!p) return 0; @@ -2874,6 +2980,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) int ret; u32 fw_status; + cancel_work_sync(&card->work); + ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, @@ -3077,12 +3185,6 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) struct pci_dev *pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - /* Bluetooth is not on pcie interface. Download Wifi only firmware - * during pcie FLR, so that bluetooth part of firmware which is - * already running doesn't get affected. - */ - strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME); - /* tx_buf_size might be changed to 3584 by firmware during * data transfer, we should reset it to default size. 
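The CMD5/CMD7/CMD1/CMD6 walk in mwifiex_extract_wifi_fw() above is easier to follow outside the driver. Below is a stand-alone sketch of the same traversal, assuming a little-endian host and a well-formed image; struct fw_hdr mirrors mwifiex_fw_header from fw.h, and both wifi_fw_offset() and the sample image are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_hdr {                 /* mirrors struct mwifiex_fw_header */
        uint32_t dnld_cmd;
        uint32_t base_addr;
        uint32_t data_length;
        uint32_t crc;
};

/* Skip the CRC header (CMD5) and the Bluetooth section (CMD7, wrapped
 * CMD1 blocks, CMD6), returning the offset of the wifi-only tail.
 */
static long wifi_fw_offset(const uint8_t *img, uint32_t len)
{
        uint32_t off = 0;
        int seen_cmd7 = 0;

        while (off + sizeof(struct fw_hdr) < len) {
                struct fw_hdr h;

                memcpy(&h, img + off, sizeof(h));       /* LE host assumed */
                off += sizeof(h);
                switch (h.dnld_cmd) {
                case 0x7:                       /* BT section starts */
                        seen_cmd7 = 1;
                        break;
                case 0x1:                       /* wrapped BT data */
                        if (!seen_cmd7)
                                return -1;
                        /* fall through */
                case 0x5:                       /* combo CRC header */
                        off += h.data_length;
                        break;
                case 0x6:                       /* BT section ends */
                        off += h.data_length;
                        return off < len ? (long)off : -1;
                default:
                        return -1;
                }
        }
        return -1;
}

int main(void)
{
        /* CMD5 | CMD7 | CMD1 (4 data bytes) | CMD6 | wifi payload */
        uint8_t img[88] = { 0 };
        struct fw_hdr h = { 0x5, 0, 0, 0 };

        memcpy(img, &h, sizeof(h));
        h.dnld_cmd = 0x7;
        memcpy(img + 16, &h, sizeof(h));
        h.dnld_cmd = 0x1;
        h.data_length = 4;
        memcpy(img + 32, &h, sizeof(h));
        h.dnld_cmd = 0x6;
        h.data_length = 0;
        memcpy(img + 52, &h, sizeof(h));
        printf("wifi fw at offset %ld\n", wifi_fw_offset(img, sizeof(img)));
        return 0;
}

Compiled as-is this prints "wifi fw at offset 68": four 16-byte headers plus the 4-byte CMD1 payload precede the wifi section, which is the offset the driver then hands to its normal download loop.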
*/ diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 00e8ee5ad4a834ed1905c5b98acee85be3ca1a32..f7ce9b6db6b41a9c4ff2493f0b11c2a67de0f6a3 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -35,7 +35,6 @@ #define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin" #define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin" #define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin" -#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin" #define PCIE_VENDOR_ID_MARVELL (0x11ab) #define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b) @@ -77,8 +76,9 @@ #define PCIE_SCRATCH_10_REG 0xCE8 #define PCIE_SCRATCH_11_REG 0xCEC #define PCIE_SCRATCH_12_REG 0xCF0 -#define PCIE_SCRATCH_13_REG 0xCF8 -#define PCIE_SCRATCH_14_REG 0xCFC +#define PCIE_SCRATCH_13_REG 0xCF4 +#define PCIE_SCRATCH_14_REG 0xCF8 +#define PCIE_SCRATCH_15_REG 0xCFC #define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C #define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C @@ -119,6 +119,8 @@ #define MWIFIEX_SLEEP_COOKIE_SIZE 4 #define MWIFIEX_MAX_DELAY_COUNT 100 +#define MWIFIEX_PCIE_FLR_HAPPENS 0xFEDCBABA + struct mwifiex_pcie_card_reg { u16 cmd_addr_lo; u16 cmd_addr_hi; @@ -217,8 +219,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, .pfu_enabled = 1, .sleep_cookie = 0, - .fw_dump_ctrl = 0xcf4, - .fw_dump_start = 0xcf8, + .fw_dump_ctrl = PCIE_SCRATCH_13_REG, + .fw_dump_start = PCIE_SCRATCH_14_REG, .fw_dump_end = 0xcff, .fw_dump_host_ready = 0xee, .fw_dump_read_done = 0xfe, @@ -254,8 +256,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, .pfu_enabled = 1, .sleep_cookie = 0, - .fw_dump_ctrl = 0xcf4, - .fw_dump_start = 0xcf8, + .fw_dump_ctrl = PCIE_SCRATCH_13_REG, + .fw_dump_start = PCIE_SCRATCH_14_REG, .fw_dump_end = 0xcff, .fw_dump_host_ready = 0xcc, .fw_dump_read_done = 0xdd, diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 181691684a08f0a1bfc6eca17292ba5fe3f1a778..ce6936d0c5c02ee4a64e5bcb5a13e11188adf07f 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -691,8 +691,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, /* Increment the TLV header length by the size appended */ - le16_add_cpu(&chan_tlv_out->header.len, - sizeof(chan_tlv_out->chan_scan_param)); + le16_unaligned_add_cpu(&chan_tlv_out->header.len, + sizeof( + chan_tlv_out->chan_scan_param)); /* * The tlv buffer length is set to the number of bytes @@ -859,6 +860,7 @@ mwifiex_config_scan(struct mwifiex_private *priv, *scan_current_only = false; if (user_scan_in) { + u8 tmpaddr[ETH_ALEN]; /* Default the ssid_filter flag to TRUE, set false under certain wildcard conditions and qualified by the existence @@ -883,8 +885,10 @@ mwifiex_config_scan(struct mwifiex_private *priv, user_scan_in->specific_bssid, sizeof(scan_cfg_out->specific_bssid)); + memcpy(tmpaddr, scan_cfg_out->specific_bssid, ETH_ALEN); + if (adapter->ext_scan && - !is_zero_ether_addr(scan_cfg_out->specific_bssid)) { + !is_zero_ether_addr(tmpaddr)) { bssid_tlv = (struct mwifiex_ie_types_bssid_list *)tlv_pos; bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID); @@ -947,8 +951,9 @@ mwifiex_config_scan(struct mwifiex_private *priv, * truncate scan results. That is not an issue with an SSID * or BSSID filter applied to the scan results in the firmware. 
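The tmpaddr copies introduced in these scan.c hunks look redundant at first glance, but they restore an alignment guarantee: the ether-address helpers in <linux/etherdevice.h> document that their argument must be u16-aligned because they compare whole words, while specific_bssid lives at an arbitrary offset inside a packed scan-config buffer. A sketch of the shape of such a word-wise helper (illustrative, not the kernel's exact body) and of the staging pattern used above; bssid_filter_set() is hypothetical:

/* Word-wise zero test: fast, but only valid when addr is 2-byte
 * aligned, which is the documented contract of is_zero_ether_addr().
 */
static inline bool zero_ether_addr_fast(const u8 *addr)
{
        return ((*(const u16 *)(addr + 0)) |
                (*(const u16 *)(addr + 2)) |
                (*(const u16 *)(addr + 4))) == 0;
}

/* Staging pattern from the hunks above: copy the packed field into a
 * naturally aligned stack array before testing it.
 */
static bool bssid_filter_set(const u8 *specific_bssid)
{
        u8 tmpaddr[ETH_ALEN];

        memcpy(tmpaddr, specific_bssid, ETH_ALEN);
        return !is_zero_ether_addr(tmpaddr);
}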
*/ + memcpy(tmpaddr, scan_cfg_out->specific_bssid, ETH_ALEN); if ((i && ssid_filter) || - !is_zero_ether_addr(scan_cfg_out->specific_bssid)) + !is_zero_ether_addr(tmpaddr)) *filtered_scan = true; if (user_scan_in->scan_chan_gap) { @@ -989,10 +994,15 @@ mwifiex_config_scan(struct mwifiex_private *priv, * If a specific BSSID or SSID is used, the number of channels in the * scan command will be increased to the absolute maximum. */ - if (*filtered_scan) + if (*filtered_scan) { *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN; - else - *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD; + } else { + if (!priv->media_connected) + *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD; + else + *max_chan_per_scan = + MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD / 2; + } if (adapter->ext_scan) { bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos; @@ -1742,7 +1752,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, if (*bytes_left >= sizeof(beacon_size)) { /* Extract & convert beacon size from command buffer */ - beacon_size = le16_to_cpu(*(__le16 *)(*bss_info)); + beacon_size = get_unaligned_le16((*bss_info)); *bytes_left -= sizeof(beacon_size); *bss_info += sizeof(beacon_size); } @@ -2369,8 +2379,9 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, temp_chan = chan_list_tlv->chan_scan_param + chan_idx; /* Increment the TLV header length by size appended */ - le16_add_cpu(&chan_list_tlv->header.len, - sizeof(chan_list_tlv->chan_scan_param)); + le16_unaligned_add_cpu(&chan_list_tlv->header.len, + sizeof( + chan_list_tlv->chan_scan_param)); temp_chan->chan_number = bgscan_cfg_in->chan_list[chan_idx].chan_number; @@ -2407,8 +2418,8 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in, chan_list_tlv-> chan_scan_param); - le16_add_cpu(&chan_list_tlv->header.len, - chan_num * + le16_unaligned_add_cpu(&chan_list_tlv->header.len, + chan_num * sizeof(chan_list_tlv->chan_scan_param[0])); } @@ -2432,7 +2443,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, /* Append vendor specific IE TLV */ mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos); - le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv); + le16_unaligned_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index a4b356d267f982b2646dc90f6eb1a2c3deac1425..0af1c6733c9257e64fbecd81e11fb76ec871452a 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -387,8 +387,6 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; - cancel_work_sync(&card->work); - mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); @@ -943,7 +941,7 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter, return -1; } - nb = le16_to_cpu(*(__le16 *) (buffer)); + nb = get_unaligned_le16((buffer)); if (nb > npayload) { mwifiex_dbg(adapter, ERROR, "%s: invalid packet, nb=%d npayload=%d\n", @@ -951,7 +949,7 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter, return -1; } - *type = le16_to_cpu(*(__le16 *) (buffer + 2)); + *type = get_unaligned_le16((buffer + 2)); return ret; } @@ -1139,7 +1137,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, __func__, blk_num, blk_size, 
total_pkt_len); break; } - pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET)); + pkt_len = get_unaligned_le16((data + + SDIO_HEADER_OFFSET)); if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) { mwifiex_dbg(adapter, ERROR, "%s: error in pkt_len,\t" @@ -1172,10 +1171,11 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb, u32 upld_typ) { u8 *cmd_buf; - __le16 *curr_ptr = (__le16 *)skb->data; - u16 pkt_len = le16_to_cpu(*curr_ptr); + u16 pkt_len; struct mwifiex_rxinfo *rx_info; + pkt_len = get_unaligned_le16(skb->data); + if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) { skb_trim(skb, pkt_len); skb_pull(skb, INTF_HEADER_LEN); @@ -1235,7 +1235,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, case MWIFIEX_TYPE_EVENT: mwifiex_dbg(adapter, EVENT, "info: --- Rx: Event ---\n"); - adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data); + adapter->event_cause = get_unaligned_le32(skb->data); if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE)) memcpy(adapter->event_body, @@ -1380,7 +1380,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, } if (card->mpa_rx.pkt_cnt == 1) - mport = adapter->ioport + port; + mport = adapter->ioport + card->mpa_rx.start_port; if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf, card->mpa_rx.buf_len, mport, 1)) @@ -1392,8 +1392,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, u32 *len_arr = card->mpa_rx.len_arr; /* get curr PKT len & type */ - pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]); - pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]); + pkt_len = get_unaligned_le16(&curr_ptr[0]); + pkt_type = get_unaligned_le16(&curr_ptr[2]); /* copy pkt to deaggr buf */ skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind], @@ -1813,7 +1813,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } if (card->mpa_tx.pkt_cnt == 1) - mport = adapter->ioport + port; + mport = adapter->ioport + card->mpa_tx.start_port; ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, card->mpa_tx.buf_len, mport); @@ -1874,8 +1874,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, /* Allocate buffer and copy payload */ blk_size = MWIFIEX_SDIO_BLOCK_SIZE; buf_block_len = (pkt_len + blk_size - 1) / blk_size; - *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len); - *(__le16 *)&payload[2] = cpu_to_le16(type); + put_unaligned_le16((u16)pkt_len, payload + 0); + put_unaligned_le16((u32)type, payload + 2); + /* * This is SDIO specific header @@ -2155,6 +2156,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + cancel_work_sync(&card->work); + kfree(card->mp_regs); kfree(card->mpa_rx.skb_arr); kfree(card->mpa_rx.len_arr); @@ -2193,6 +2196,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; struct sdio_func *func = card->func; + int ret; mwifiex_shutdown_sw(adapter); @@ -2207,7 +2211,9 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); - mwifiex_reinit_sw(adapter); + ret = mwifiex_reinit_sw(adapter); + if (ret) + dev_err(&func->dev, "reinit failed: %d\n", ret); } /* This function read/write firmware */ diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 
2f1f4d190b28429c43dc61770f53f81a09353616..83916c1439af767ca4d03c2fab046170e44492d0 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -126,19 +126,19 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv, if (cmd_action == HostCmd_ACT_GEN_GET) { snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET); snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE); - le16_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE); + le16_unaligned_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE); } else if (cmd_action == HostCmd_ACT_GEN_SET) { snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET); snmp_mib->buf_size = cpu_to_le16(sizeof(u16)); - *((__le16 *) (snmp_mib->value)) = cpu_to_le16(*ul_temp); - le16_add_cpu(&cmd->size, sizeof(u16)); + put_unaligned_le16(*ul_temp, snmp_mib->value); + le16_unaligned_add_cpu(&cmd->size, sizeof(u16)); } mwifiex_dbg(priv->adapter, CMD, "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t" "OIDSize=0x%x, Value=0x%x\n", cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size), - le16_to_cpu(*(__le16 *)snmp_mib->value)); + get_unaligned_le16(snmp_mib->value)); return 0; } @@ -1357,8 +1357,9 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq); pos += sizeof(struct mwifiex_ie_types_rssi_threshold); - le16_add_cpu(&cmd->size, - sizeof(struct mwifiex_ie_types_rssi_threshold)); + le16_unaligned_add_cpu(&cmd->size, + sizeof( + struct mwifiex_ie_types_rssi_threshold)); } if (event_bitmap & BITMASK_BCN_RSSI_HIGH) { @@ -1378,8 +1379,9 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq); pos += sizeof(struct mwifiex_ie_types_rssi_threshold); - le16_add_cpu(&cmd->size, - sizeof(struct mwifiex_ie_types_rssi_threshold)); + le16_unaligned_add_cpu(&cmd->size, + sizeof( + struct mwifiex_ie_types_rssi_threshold)); } return 0; @@ -1398,7 +1400,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv, filter = &mef_entry->filter[i]; if (!filter->filt_type) break; - *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->repeat); + put_unaligned_le32((u32)filter->repeat, stack_ptr); stack_ptr += 4; *stack_ptr = TYPE_DNUM; stack_ptr += 1; @@ -1410,8 +1412,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv, stack_ptr += 1; *stack_ptr = TYPE_BYTESEQ; stack_ptr += 1; - - *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->offset); + put_unaligned_le32((u32)filter->offset, stack_ptr); stack_ptr += 4; *stack_ptr = TYPE_DNUM; stack_ptr += 1; @@ -1683,14 +1684,15 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, sizeof(u8) + sizeof(u8)); /* Add the rule length to the command size*/ - le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) + - sizeof(struct mwifiex_ie_types_header)); + le16_unaligned_add_cpu(&cmd->size, + le16_to_cpu(rule->header.len) + + sizeof(struct mwifiex_ie_types_header)); rule = (void *)((u8 *)rule->params + length); } /* Add sizeof action, num_of_rules to total command length */ - le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16)); + le16_unaligned_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16)); return 0; } @@ -1708,7 +1710,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv, cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG); cmd->size = cpu_to_le16(S_DS_GEN); tdls_config->tdls_action = cpu_to_le16(cmd_action); - le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action)); + le16_unaligned_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action)); switch (cmd_action) { case ACT_TDLS_CS_ENABLE_CONFIG: @@ 
-1735,7 +1737,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv, return -ENOTSUPP; } - le16_add_cpu(&cmd->size, len); + le16_unaligned_add_cpu(&cmd->size, len); return 0; } @@ -1759,7 +1761,8 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER); cmd->size = cpu_to_le16(S_DS_GEN); - le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper)); + le16_unaligned_add_cpu(&cmd->size, + sizeof(struct host_cmd_ds_tdls_oper)); tdls_oper->reason = 0; memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN); @@ -1783,7 +1786,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, return -ENODATA; } - *(__le16 *)pos = cpu_to_le16(params->capability); + put_unaligned_le16(params->capability, pos); config_len += sizeof(params->capability); qos_info = params->uapsd_queues | (params->max_sp << 5); @@ -1861,7 +1864,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, return -ENOTSUPP; } - le16_add_cpu(&cmd->size, config_len); + le16_unaligned_add_cpu(&cmd->size, config_len); return 0; } @@ -2032,7 +2035,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_VERSION_EXT: cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->params.verext.version_str_sel = - (u8) (*((u32 *) data_buf)); + (u8)(get_unaligned((u32 *)data_buf)); memcpy(&cmd_ptr->params, data_buf, sizeof(struct host_cmd_ds_version_ext)); cmd_ptr->size = @@ -2043,7 +2046,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_MGMT_FRAME_REG: cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action); - cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf); + cmd_ptr->params.reg_mask.mask = cpu_to_le32( + get_unaligned((u32 *)data_buf)); cmd_ptr->size = cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) + S_DS_GEN); @@ -2063,7 +2067,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_P2P_MODE_CFG: cmd_ptr->command = cpu_to_le16(cmd_no); cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action); - cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf); + cmd_ptr->params.mode_cfg.mode = cpu_to_le16( + get_unaligned((u16 *)data_buf)); cmd_ptr->size = cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) + S_DS_GEN); @@ -2359,8 +2364,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) if (ret) return -1; - if (!disable_auto_ds && - first_sta && priv->adapter->iface_type != MWIFIEX_USB && + if (!disable_auto_ds && first_sta && priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { /* Enable auto deep sleep */ auto_ds.auto_ds = DEEP_SLEEP_ON; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 8548027abf71bbe01d2f115e938d3cc79f2da086..f1d1f56fc23ff739c3766e38bc723337a581ccb6 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -183,7 +183,7 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv, "query_type = %#x, buf size = %#x\n", oid, query_type, le16_to_cpu(smib->buf_size)); if (query_type == HostCmd_ACT_GEN_GET) { - ul_temp = le16_to_cpu(*((__le16 *) (smib->value))); + ul_temp = get_unaligned_le16(smib->value); if (data_buf) *data_buf = ul_temp; switch (oid) { @@ -741,7 +741,7 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv, struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg; if (data_buf) - *((u16 *)data_buf) = 
le16_to_cpu(mode_cfg->mode); + put_unaligned_le16(le16_to_cpu(mode_cfg->mode), data_buf); return 0; } @@ -1201,7 +1201,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_ret_802_11_scan(priv, resp); - cfg80211_sched_scan_results(priv->wdev.wiphy); + cfg80211_sched_scan_results(priv->wdev.wiphy, 0); mwifiex_dbg(adapter, CMD, "info: CMD_RESP: BG_SCAN result is ready!\n"); break; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index d63d163eb1ecaaa441d4da1861fc9038f7cd326e..839df8a9634e86c58367c7149d146d8a9bd36657 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -670,7 +670,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->dbg.num_event_deauth++; if (priv->media_connected) { reason_code = - le16_to_cpu(*(__le16 *)adapter->event_body); + get_unaligned_le16(adapter->event_body); mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -685,7 +685,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->dbg.num_event_disassoc++; if (priv->media_connected) { reason_code = - le16_to_cpu(*(__le16 *)adapter->event_body); + get_unaligned_le16(adapter->event_body); mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -695,7 +695,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->dbg.num_event_link_lost++; if (priv->media_connected) { reason_code = - le16_to_cpu(*(__le16 *)adapter->event_body); + get_unaligned_le16(adapter->event_body); mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -793,7 +793,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_BG_SCAN_STOPPED: dev_dbg(adapter->dev, "event: BGS_STOPPED\n"); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); + cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); if (priv->sched_scanning) priv->sched_scanning = false; break; @@ -923,7 +923,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->event_body); break; case EVENT_AMSDU_AGGR_CTRL: - ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); + ctrl = get_unaligned_le16(adapter->event_body); mwifiex_dbg(adapter, EVENT, "event: AMSDU_AGGR_CTRL %d\n", ctrl); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 1532ac9cee0b2f0a4bffda01073db5fa181286ca..42997e05d90fa528a5db7b64cef75f5f6283bd1d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -560,7 +560,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) #endif mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); mwifiex_stop_bg_scan(priv); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); + cfg80211_sched_scan_stopped(priv->wdev.wiphy, 0); #ifdef CONFIG_PM } #endif diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index df9704de07150d82887de1db36c99fd15d3933f7..7d0d3ff3dd4cc02e7a1729a67a743f132c36c2a6 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -349,7 +349,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv, chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT; break; } - vht_oper->center_freq_seg1_idx = + vht_oper->center_freq_seg0_idx = mwifiex_get_center_freq_index(priv, BAND_AAC, bss_desc->channel, chan_bw); @@ -431,6 +431,41 
@@ mwifiex_add_wmm_info_ie(struct mwifiex_private *priv, struct sk_buff *skb, *buf++ = qosinfo; /* U-APSD no in use */ } +static void mwifiex_tdls_add_bss_co_2040(struct sk_buff *skb) +{ + struct ieee_types_bss_co_2040 *bssco; + + bssco = (void *)skb_put(skb, sizeof(struct ieee_types_bss_co_2040)); + bssco->ieee_hdr.element_id = WLAN_EID_BSS_COEX_2040; + bssco->ieee_hdr.len = sizeof(struct ieee_types_bss_co_2040) - + sizeof(struct ieee_types_header); + bssco->bss_2040co = 0x01; +} + +static void mwifiex_tdls_add_supported_chan(struct sk_buff *skb) +{ + struct ieee_types_generic *supp_chan; + u8 chan_supp[] = {1, 11}; + + supp_chan = (void *)skb_put(skb, (sizeof(struct ieee_types_header) + + sizeof(chan_supp))); + supp_chan->ieee_hdr.element_id = WLAN_EID_SUPPORTED_CHANNELS; + supp_chan->ieee_hdr.len = sizeof(chan_supp); + memcpy(supp_chan->data, chan_supp, sizeof(chan_supp)); +} + +static void mwifiex_tdls_add_oper_class(struct sk_buff *skb) +{ + struct ieee_types_generic *reg_class; + u8 rc_list[] = {1, + 1, 2, 3, 4, 12, 22, 23, 24, 25, 27, 28, 29, 30, 32, 33}; + reg_class = (void *)skb_put(skb, (sizeof(struct ieee_types_header) + + sizeof(rc_list))); + reg_class->ieee_hdr.element_id = WLAN_EID_SUPPORTED_REGULATORY_CLASSES; + reg_class->ieee_hdr.len = sizeof(rc_list); + memcpy(reg_class->data, rc_list, sizeof(rc_list)); +} + static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, const u8 *peer, u8 action_code, u8 dialog_token, @@ -484,7 +519,9 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, } mwifiex_tdls_add_ext_capab(priv, skb); - mwifiex_tdls_add_qos_capab(skb); + mwifiex_tdls_add_bss_co_2040(skb); + mwifiex_tdls_add_supported_chan(skb); + mwifiex_tdls_add_oper_class(skb); mwifiex_add_wmm_info_ie(priv, skb, 0); break; @@ -522,7 +559,9 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, } mwifiex_tdls_add_ext_capab(priv, skb); - mwifiex_tdls_add_qos_capab(skb); + mwifiex_tdls_add_bss_co_2040(skb); + mwifiex_tdls_add_supported_chan(skb); + mwifiex_tdls_add_oper_class(skb); mwifiex_add_wmm_info_ie(priv, skb, 0); break; @@ -612,6 +651,9 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, sizeof(struct ieee_types_bss_co_2040) + sizeof(struct ieee80211_ht_operation) + sizeof(struct ieee80211_tdls_lnkie) + + (2 * (sizeof(struct ieee_types_header))) + + MWIFIEX_SUPPORTED_CHANNELS + + MWIFIEX_OPERATING_CLASSES + sizeof(struct ieee80211_wmm_param_ie) + extra_ies_len; @@ -760,7 +802,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, } mwifiex_tdls_add_ext_capab(priv, skb); + mwifiex_tdls_add_bss_co_2040(skb); + mwifiex_tdls_add_supported_chan(skb); mwifiex_tdls_add_qos_capab(skb); + mwifiex_tdls_add_oper_class(skb); break; default: mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n"); @@ -857,7 +902,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, struct mwifiex_sta_node *sta_ptr; u8 *peer, *pos, *end; u8 i, action, basic; - __le16 cap = 0; + u16 cap = 0; int ie_len = 0; if (len < (sizeof(struct ethhdr) + 3)) @@ -879,7 +924,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, pos = buf + sizeof(struct ethhdr) + 4; /* payload 1+ category 1 + action 1 + dialog 1 */ - cap = cpu_to_le16(*(u16 *)pos); + cap = get_unaligned_le16(pos); ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN; pos += 2; break; @@ -889,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, return; /* payload 1+ category 1 + 
action 1 + dialog 1 + status code 2*/ pos = buf + sizeof(struct ethhdr) + 6; - cap = cpu_to_le16(*(u16 *)pos); + cap = get_unaligned_le16(pos); ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN; pos += 2; break; @@ -909,7 +954,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, if (!sta_ptr) return; - sta_ptr->tdls_cap.capab = cap; + sta_ptr->tdls_cap.capab = cpu_to_le16(cap); for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) { if (pos + 2 + pos[1] > end) @@ -969,7 +1014,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, case WLAN_EID_AID: if (priv->adapter->is_hw_11ac_capable) sta_ptr->tdls_cap.aid = - le16_to_cpu(*(__le16 *)(pos + 2)); + get_unaligned_le16((pos + 2)); default: break; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index d24eca34ac119d6a917ac92640a41de620a53eac..e10b2a52e78fe7c04b24a62d63af1e6b7bcec682 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c @@ -202,7 +202,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) "AP EVENT: event id: %#x\n", eventcause); break; case EVENT_AMSDU_AGGR_CTRL: - ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); + ctrl = get_unaligned_le16(adapter->event_body); mwifiex_dbg(adapter, EVENT, "event: AMSDU_AGGR_CTRL %d\n", ctrl); diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 9cf3334adf4d56e2b3dcbc77bf847c028001a443..2f7705c50161404825695846bd94e8c95bfd5a7c 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -306,9 +306,17 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) } } - usb_fill_bulk_urb(ctx->urb, card->udev, - usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data, - size, mwifiex_usb_rx_complete, (void *)ctx); + if (card->rx_cmd_ep == ctx->ep && + card->rx_cmd_ep_type == USB_ENDPOINT_XFER_INT) + usb_fill_int_urb(ctx->urb, card->udev, + usb_rcvintpipe(card->udev, ctx->ep), + ctx->skb->data, size, mwifiex_usb_rx_complete, + (void *)ctx, card->rx_cmd_interval); + else + usb_fill_bulk_urb(ctx->urb, card->udev, + usb_rcvbulkpipe(card->udev, ctx->ep), + ctx->skb->data, size, mwifiex_usb_rx_complete, + (void *)ctx); if (card->rx_cmd_ep == ctx->ep) atomic_inc(&card->rx_cmd_urb_pending); @@ -424,10 +432,13 @@ static int mwifiex_usb_probe(struct usb_interface *intf, epd = &iface_desc->endpoint[i].desc; if (usb_endpoint_dir_in(epd) && usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT && - usb_endpoint_xfer_bulk(epd)) { - pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n", + (usb_endpoint_xfer_bulk(epd) || + usb_endpoint_xfer_int(epd))) { + card->rx_cmd_ep_type = usb_endpoint_type(epd); + card->rx_cmd_interval = epd->bInterval; + pr_debug("info: Rx CMD/EVT:: max pkt size: %d, addr: %d, ep_type: %d\n", le16_to_cpu(epd->wMaxPacketSize), - epd->bEndpointAddress); + epd->bEndpointAddress, card->rx_cmd_ep_type); card->rx_cmd_ep = usb_endpoint_num(epd); atomic_set(&card->rx_cmd_urb_pending, 0); } @@ -461,10 +472,16 @@ static int mwifiex_usb_probe(struct usb_interface *intf, } if (usb_endpoint_dir_out(epd) && usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT && - usb_endpoint_xfer_bulk(epd)) { + (usb_endpoint_xfer_bulk(epd) || + usb_endpoint_xfer_int(epd))) { + card->tx_cmd_ep_type = usb_endpoint_type(epd); + card->tx_cmd_interval = epd->bInterval; pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n", 
le16_to_cpu(epd->wMaxPacketSize), epd->bEndpointAddress); + pr_debug("info: Tx CMD:: max pkt size: %d, addr: %d, ep_type: %d\n", + le16_to_cpu(epd->wMaxPacketSize), + epd->bEndpointAddress, card->tx_cmd_ep_type); card->tx_cmd_ep = usb_endpoint_num(epd); atomic_set(&card->tx_cmd_urb_pending, 0); card->bulk_out_maxpktsize = @@ -884,9 +901,17 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, context->skb = skb; tx_urb = context->urb; - usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep), - data, skb->len, mwifiex_usb_tx_complete, - (void *)context); + if (ep == card->tx_cmd_ep && + card->tx_cmd_ep_type == USB_ENDPOINT_XFER_INT) + usb_fill_int_urb(tx_urb, card->udev, + usb_sndintpipe(card->udev, ep), data, + skb->len, mwifiex_usb_tx_complete, + (void *)context, card->tx_cmd_interval); + else + usb_fill_bulk_urb(tx_urb, card->udev, + usb_sndbulkpipe(card->udev, ep), data, + skb->len, mwifiex_usb_tx_complete, + (void *)context); tx_urb->transfer_flags |= URB_ZERO_PACKET; diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h index e5f204ea018bd8022b5da4f71f8416fadff6d410..e36bd63172ff9606ee61979fdb51a96330b996ef 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.h +++ b/drivers/net/wireless/marvell/mwifiex/usb.h @@ -90,6 +90,10 @@ struct usb_card_rec { struct urb_context tx_cmd; u8 mc_resync_flag; struct usb_tx_data_port port[MWIFIEX_TX_DATA_PORT]; + int rx_cmd_ep_type; + u8 rx_cmd_interval; + int tx_cmd_ep_type; + u8 tx_cmd_interval; }; struct fw_header { @@ -102,12 +106,12 @@ struct fw_header { struct fw_sync_header { __le32 cmd; __le32 seq_num; -}; +} __packed; struct fw_data { struct fw_header fw_hdr; __le32 seq_num; u8 data[1]; -}; +} __packed; #endif /*_MWIFIEX_USB_H */ diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index b1ab8da121dd8a9e015fb0a82bf7e0c5ab031749..0cd68ffc2c74dc1c849e8011b84208ebf74a6730 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -274,13 +274,13 @@ int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf, val = *((u8 *)addr); break; case 2: - val = *((u16 *)addr); + val = get_unaligned((u16 *)addr); break; case 4: - val = *((u32 *)addr); + val = get_unaligned((u32 *)addr); break; case 8: - val = *((long long *)addr); + val = get_unaligned((long long *)addr); break; default: val = -1; diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h index b541d66c01ebf47362a53573296ac34868c0fe54..c386992abcdb870eb3fef637f627f4abab33de44 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.h +++ b/drivers/net/wireless/marvell/mwifiex/util.h @@ -93,4 +93,9 @@ static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb) int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf, struct mwifiex_debug_info *info); +static inline void le16_unaligned_add_cpu(__le16 *var, u16 val) +{ + put_unaligned_le16(get_unaligned_le16(var) + val, var); +} + #endif /* !_MWIFIEX_UTIL_H_ */ diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index b1b400b59d86456ae5b75a66ba2f227b3dcb8b1c..e813b2ca740c26ae6460ebe258c3280cb9cb2472 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -994,9 +994,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status, *noise = -rxd->noise_floor; if (rxd->rate & 
MWL8K_AP_RATE_INFO_MCS_FORMAT) { - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; if (rxd->rate & MWL8K_AP_RATE_INFO_40MHZ) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; status->rate_idx = MWL8K_AP_RATE_INFO_RATEID(rxd->rate); } else { int i; @@ -1011,7 +1011,7 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status, if (rxd->channel > 14) { status->band = NL80211_BAND_5GHZ; - if (!(status->flag & RX_FLAG_HT)) + if (!(status->encoding == RX_ENC_HT)) status->rate_idx -= 5; } else { status->band = NL80211_BAND_2GHZ; @@ -1109,17 +1109,17 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE) - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (rate_info & MWL8K_STA_RATE_INFO_40MHZ) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT) - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; if (rxd->channel > 14) { status->band = NL80211_BAND_5GHZ; - if (!(status->flag & RX_FLAG_HT)) + if (!(status->encoding == RX_ENC_HT)) status->rate_idx -= 5; } else { status->band = NL80211_BAND_2GHZ; @@ -6144,6 +6144,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) if (priv->sta_macids_supported || priv->device_info->fw_image_sta) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + rc = ieee80211_register_hw(hw); if (rc) { wiphy_err(hw->wiphy, "Cannot register device\n"); diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index a6e9017662269ee06b0d6f1be0d039f83542dbf5..d3b611aaf06180d29252ffd72bb11f67467803eb 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -615,6 +615,8 @@ int mt7601u_register_device(struct mt7601u_dev *dev) wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = mt76_init_sband_2g(dev); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index 3c576392ed8994a4ec73291934f50cbc7890d059..d6dc59bb00df42e86dbd212793e118eb66d962fc 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -401,7 +401,7 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) case MT_PHY_TYPE_CCK: if (idx >= 8) { idx -= 8; - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } if (WARN_ON(idx >= 4)) @@ -410,10 +410,10 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) status->rate_idx = idx; return; case MT_PHY_TYPE_HT_GF: - status->flag |= RX_FLAG_HT_GF; + status->enc_flags |= RX_ENC_FLAG_HT_GF; /* fall through */ case MT_PHY_TYPE_HT: - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; status->rate_idx = idx; break; default: @@ -422,13 +422,13 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) } if (rate & MT_RXWI_RATE_SGI) - status->flag |= RX_FLAG_SHORT_GI; + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate & MT_RXWI_RATE_STBC) - status->flag |= 1 << RX_FLAG_STBC_SHIFT; + 
status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; if (rate & MT_RXWI_RATE_BW) - status->flag |= RX_FLAG_40MHZ; + status->bw = RATE_INFO_BW_40; } static void diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index dbdfb3f5c507165fbc2f72d884ff13bdfc5b31c6..a9f5f398b2f860c239db8f6e17899d9adcdc7f33 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - skb_reserve(skb, MT_DMA_HDR_LEN); - memcpy(skb_put(skb, len), data, len); + if (skb) { + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + } return skb; } @@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); } @@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); } diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig index de62f5dcb62f7971fbc21adcc1212a1ed9ac68aa..a1d1cfe214d2e4cef2e6f2ebb3873b1c8a9a2034 100644 --- a/drivers/net/wireless/ralink/rt2x00/Kconfig +++ b/drivers/net/wireless/ralink/rt2x00/Kconfig @@ -201,7 +201,7 @@ endif config RT2800SOC tristate "Ralink WiSoC support" - depends on SOC_RT288X || SOC_RT305X + depends on SOC_RT288X || SOC_RT305X || SOC_MT7620 select RT2X00_LIB_SOC select RT2X00_LIB_MMIO select RT2X00_LIB_CRYPTO diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 256496bfbafb2ef29b4560c9d93753f731035656..6a8c93fb6a43587614d27a734306e350ef509efe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -79,6 +79,7 @@ #define RF5372 0x5372 #define RF5390 0x5390 #define RF5392 0x5392 +#define RF7620 0x7620 /* * Chipset revisions. 
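Several hunks above replace direct dereferences of possibly unaligned little-endian fields, such as *(__le16 *)pos and *(u16 *)addr, with get_unaligned_le16()/get_unaligned(), and util.h grows a le16_unaligned_add_cpu() helper built from the same primitives. A minimal userspace sketch of the behaviour these helpers provide: byte-wise access, so the load cannot fault on strict-alignment CPUs and the result is independent of host endianness. The sketch_* names are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* Byte-wise LE16 load: legal at any alignment, same result on big-
 * and little-endian hosts. Mirrors what get_unaligned_le16() does. */
static uint16_t sketch_get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));
}

/* Counterpart of put_unaligned_le16(). */
static void sketch_put_unaligned_le16(uint16_t val, void *p)
{
        uint8_t *b = p;

        b[0] = val & 0xff;
        b[1] = val >> 8;
}

/* Analogue of the le16_unaligned_add_cpu() helper added to util.h:
 * read, add in CPU order, write back, all without aligned access. */
static void sketch_le16_unaligned_add_cpu(void *var, uint16_t val)
{
        sketch_put_unaligned_le16(sketch_get_unaligned_le16(var) + val, var);
}

int main(void)
{
        /* A LE16 capability field at an odd offset, as in a TDLS frame. */
        uint8_t buf[] = { 0x00, 0x34, 0x12 };

        printf("cap = 0x%04x\n", sketch_get_unaligned_le16(buf + 1)); /* 0x1234 */
        sketch_le16_unaligned_add_cpu(buf + 1, 1);
        printf("cap = 0x%04x\n", sketch_get_unaligned_le16(buf + 1)); /* 0x1235 */
        return 0;
}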
@@ -638,6 +639,24 @@ #define RF_CSR_CFG_WRITE FIELD32(0x00010000) #define RF_CSR_CFG_BUSY FIELD32(0x00020000) +/* + * MT7620 RF registers (reversed order) + */ +#define RF_CSR_CFG_DATA_MT7620 FIELD32(0x0000ff00) +#define RF_CSR_CFG_REGNUM_MT7620 FIELD32(0x03ff0000) +#define RF_CSR_CFG_WRITE_MT7620 FIELD32(0x00000010) +#define RF_CSR_CFG_BUSY_MT7620 FIELD32(0x00000001) + +/* undocumented registers for calibration of new MAC */ +#define RF_CONTROL0 0x0518 +#define RF_BYPASS0 0x051c +#define RF_CONTROL1 0x0520 +#define RF_BYPASS1 0x0524 +#define RF_CONTROL2 0x0528 +#define RF_BYPASS2 0x052c +#define RF_CONTROL3 0x0530 +#define RF_BYPASS3 0x0534 + /* * EFUSE_CSR: RT30x0 EEPROM */ @@ -1021,6 +1040,16 @@ #define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00) #define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000) +/* + * MIMO_PS_CFG: MIMO Power-save Configuration + */ +#define MIMO_PS_CFG 0x1210 +#define MIMO_PS_CFG_MMPS_BB_EN FIELD32(0x00000001) +#define MIMO_PS_CFG_MMPS_RX_ANT_NUM FIELD32(0x00000006) +#define MIMO_PS_CFG_MMPS_RF_EN FIELD32(0x00000008) +#define MIMO_PS_CFG_RX_STBY_POL FIELD32(0x00000010) +#define MIMO_PS_CFG_RX_RX_STBY0 FIELD32(0x00000020) + /* * EDCA_AC0_CFG: */ @@ -1095,6 +1124,12 @@ #define TX_PWR_CFG_0_OFDM6_CH1 FIELD32(0x00f00000) #define TX_PWR_CFG_0_OFDM12_CH0 FIELD32(0x0f000000) #define TX_PWR_CFG_0_OFDM12_CH1 FIELD32(0xf0000000) +/* bits for new 2T devices */ +#define TX_PWR_CFG_0B_1MBS_2MBS FIELD32(0x000000ff) +#define TX_PWR_CFG_0B_5MBS_11MBS FIELD32(0x0000ff00) +#define TX_PWR_CFG_0B_6MBS_9MBS FIELD32(0x00ff0000) +#define TX_PWR_CFG_0B_12MBS_18MBS FIELD32(0xff000000) + /* * TX_PWR_CFG_1: @@ -1117,6 +1152,11 @@ #define TX_PWR_CFG_1_MCS0_CH1 FIELD32(0x00f00000) #define TX_PWR_CFG_1_MCS2_CH0 FIELD32(0x0f000000) #define TX_PWR_CFG_1_MCS2_CH1 FIELD32(0xf0000000) +/* bits for new 2T devices */ +#define TX_PWR_CFG_1B_24MBS_36MBS FIELD32(0x000000ff) +#define TX_PWR_CFG_1B_48MBS FIELD32(0x0000ff00) +#define TX_PWR_CFG_1B_MCS0_MCS1 FIELD32(0x00ff0000) +#define TX_PWR_CFG_1B_MCS2_MCS3 FIELD32(0xff000000) /* * TX_PWR_CFG_2: @@ -1139,6 +1179,11 @@ #define TX_PWR_CFG_2_MCS8_CH1 FIELD32(0x00f00000) #define TX_PWR_CFG_2_MCS10_CH0 FIELD32(0x0f000000) #define TX_PWR_CFG_2_MCS10_CH1 FIELD32(0xf0000000) +/* bits for new 2T devices */ +#define TX_PWR_CFG_2B_MCS4_MCS5 FIELD32(0x000000ff) +#define TX_PWR_CFG_2B_MCS6_MCS7 FIELD32(0x0000ff00) +#define TX_PWR_CFG_2B_MCS8_MCS9 FIELD32(0x00ff0000) +#define TX_PWR_CFG_2B_MCS10_MCS11 FIELD32(0xff000000) /* * TX_PWR_CFG_3: @@ -1161,6 +1206,11 @@ #define TX_PWR_CFG_3_STBC0_CH1 FIELD32(0x00f00000) #define TX_PWR_CFG_3_STBC2_CH0 FIELD32(0x0f000000) #define TX_PWR_CFG_3_STBC2_CH1 FIELD32(0xf0000000) +/* bits for new 2T devices */ +#define TX_PWR_CFG_3B_MCS12_MCS13 FIELD32(0x000000ff) +#define TX_PWR_CFG_3B_MCS14 FIELD32(0x0000ff00) +#define TX_PWR_CFG_3B_STBC_MCS0_MCS1 FIELD32(0x00ff0000) +#define TX_PWR_CFG_3B_STBC_MCS2_MSC3 FIELD32(0xff000000) /* * TX_PWR_CFG_4: @@ -1171,10 +1221,13 @@ #define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00) #define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000) /* bits for 3T devices */ -#define TX_PWR_CFG_3_STBC4_CH0 FIELD32(0x0000000f) -#define TX_PWR_CFG_3_STBC4_CH1 FIELD32(0x000000f0) -#define TX_PWR_CFG_3_STBC6_CH0 FIELD32(0x00000f00) -#define TX_PWR_CFG_3_STBC6_CH1 FIELD32(0x0000f000) +#define TX_PWR_CFG_4_STBC4_CH0 FIELD32(0x0000000f) +#define TX_PWR_CFG_4_STBC4_CH1 FIELD32(0x000000f0) +#define TX_PWR_CFG_4_STBC6_CH0 FIELD32(0x00000f00) +#define TX_PWR_CFG_4_STBC6_CH1 FIELD32(0x0000f000) +/* bits for new 2T devices */ 
+#define TX_PWR_CFG_4B_STBC_MCS4_MCS5 FIELD32(0x000000ff)
+#define TX_PWR_CFG_4B_STBC_MCS6 FIELD32(0x0000ff00)
 /*
  * TX_PIN_CFG:
@@ -1201,6 +1254,8 @@
 #define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
 #define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
 #define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
+#define TX_PIN_CFG_RFRX_EN FIELD32(0x00100000)
+#define TX_PIN_CFG_RFRX_POL FIELD32(0x00200000)
 #define TX_PIN_CFG_PA_PE_A2_EN FIELD32(0x01000000)
 #define TX_PIN_CFG_PA_PE_G2_EN FIELD32(0x02000000)
 #define TX_PIN_CFG_PA_PE_A2_POL FIELD32(0x04000000)
@@ -1547,6 +1602,95 @@
 #define TX_PWR_CFG_4_EXT_STBC4_CH2 FIELD32(0x0000000f)
 #define TX_PWR_CFG_4_EXT_STBC6_CH2 FIELD32(0x00000f00)
+/* TXn_RF_GAIN_CORRECT: RF Gain Correction for each RF_ALC[3:2]
+ * Unit: 0.1 dB, Range: -3.2 dB to 3.1 dB
+ */
+#define TX0_RF_GAIN_CORRECT 0x13a0
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_0 FIELD32(0x0000003f)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_1 FIELD32(0x00003f00)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_2 FIELD32(0x003f0000)
+#define TX0_RF_GAIN_CORRECT_GAIN_CORR_3 FIELD32(0x3f000000)
+
+#define TX1_RF_GAIN_CORRECT 0x13a4
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_0 FIELD32(0x0000003f)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_1 FIELD32(0x00003f00)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_2 FIELD32(0x003f0000)
+#define TX1_RF_GAIN_CORRECT_GAIN_CORR_3 FIELD32(0x3f000000)
+
+/* TXn_RF_GAIN_ATTEN: TXn RF Gain Attenuation Level
+ * Format: 7-bit, signed value
+ * Unit: 0.5 dB, Range: -20 dB to -5 dB
+ */
+#define TX0_RF_GAIN_ATTEN 0x13a8
+#define TX0_RF_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000007f)
+#define TX0_RF_GAIN_ATTEN_LEVEL_1 FIELD32(0x00007f00)
+#define TX0_RF_GAIN_ATTEN_LEVEL_2 FIELD32(0x007f0000)
+#define TX0_RF_GAIN_ATTEN_LEVEL_3 FIELD32(0x7f000000)
+#define TX1_RF_GAIN_ATTEN 0x13ac
+#define TX1_RF_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000007f)
+#define TX1_RF_GAIN_ATTEN_LEVEL_1 FIELD32(0x00007f00)
+#define TX1_RF_GAIN_ATTEN_LEVEL_2 FIELD32(0x007f0000)
+#define TX1_RF_GAIN_ATTEN_LEVEL_3 FIELD32(0x7f000000)
+
+/* TX_ALC_CFG_0: TX Automatic Level Control Configuration 0
+ * TX_ALC_LIMIT_n: TXn upper limit
+ * TX_ALC_CH_INIT_n: TXn channel initial transmission gain
+ * Unit: 0.5 dB, Range: 0 to 23.5 dB
+ */
+#define TX_ALC_CFG_0 0x13b0
+#define TX_ALC_CFG_0_CH_INIT_0 FIELD32(0x0000003f)
+#define TX_ALC_CFG_0_CH_INIT_1 FIELD32(0x00003f00)
+#define TX_ALC_CFG_0_LIMIT_0 FIELD32(0x003f0000)
+#define TX_ALC_CFG_0_LIMIT_1 FIELD32(0x3f000000)
+
+/* TX_ALC_CFG_1: TX Automatic Level Control Configuration 1
+ * TX_TEMP_COMP: TX Power Temperature Compensation
+ * Unit: 0.5 dB, Range: -10 dB to 10 dB
+ * TXn_GAIN_FINE: TXn Gain Fine Adjustment
+ * Unit: 0.1 dB, Range: -0.8 dB to 0.7 dB
+ * RF_TOS_DLY: Sets the RF_TOS_EN assertion delay after
+ * deassertion of PA_PE.
+ * Unit: 0.25 usec
+ * TXn_RF_GAIN_ATTEN: TXn RF gain attenuation selector
+ * RF_TOS_TIMEOUT: time-out value for RF_TOS_ENABLE
+ * deassertion if RF_TOS_DONE is missing.
+ * Unit: 0.25 usec + * RF_TOS_ENABLE: TX offset calibration enable + * ROS_BUSY_EN: RX offset calibration busy enable + */ +#define TX_ALC_CFG_1 0x13b4 +#define TX_ALC_CFG_1_TX_TEMP_COMP FIELD32(0x0000003f) +#define TX_ALC_CFG_1_TX0_GAIN_FINE FIELD32(0x00000f00) +#define TX_ALC_CFG_1_TX1_GAIN_FINE FIELD32(0x0000f000) +#define TX_ALC_CFG_1_RF_TOS_DLY FIELD32(0x00070000) +#define TX_ALC_CFG_1_TX0_RF_GAIN_ATTEN FIELD32(0x00300000) +#define TX_ALC_CFG_1_TX1_RF_GAIN_ATTEN FIELD32(0x00c00000) +#define TX_ALC_CFG_1_RF_TOS_TIMEOUT FIELD32(0x3f000000) +#define TX_ALC_CFG_1_RF_TOS_ENABLE FIELD32(0x40000000) +#define TX_ALC_CFG_1_ROS_BUSY_EN FIELD32(0x80000000) + +/* TXn_BB_GAIN_ATTEN: TXn RF Gain Attenuation Level + * Format: 5-bit signed values + * Unit: 0.5 dB, Range: -8 dB to 7 dB + */ +#define TX0_BB_GAIN_ATTEN 0x13c0 +#define TX0_BB_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000001f) +#define TX0_BB_GAIN_ATTEN_LEVEL_1 FIELD32(0x00001f00) +#define TX0_BB_GAIN_ATTEN_LEVEL_2 FIELD32(0x001f0000) +#define TX0_BB_GAIN_ATTEN_LEVEL_3 FIELD32(0x1f000000) +#define TX1_BB_GAIN_ATTEN 0x13c4 +#define TX1_BB_GAIN_ATTEN_LEVEL_0 FIELD32(0x0000001f) +#define TX1_BB_GAIN_ATTEN_LEVEL_1 FIELD32(0x00001f00) +#define TX1_BB_GAIN_ATTEN_LEVEL_2 FIELD32(0x001f0000) +#define TX1_BB_GAIN_ATTEN_LEVEL_3 FIELD32(0x1f000000) + +/* TX_ALC_VGA3: TX Automatic Level Correction Variable Gain Amplifier 3 */ +#define TX_ALC_VGA3 0x13c8 +#define TX_ALC_VGA3_TX0_ALC_VGA3 FIELD32(0x0000001f) +#define TX_ALC_VGA3_TX1_ALC_VGA3 FIELD32(0x00001f00) +#define TX_ALC_VGA3_TX0_ALC_VGA2 FIELD32(0x001f0000) +#define TX_ALC_VGA3_TX1_ALC_VGA2 FIELD32(0x1f000000) + /* TX_PWR_CFG_7 */ #define TX_PWR_CFG_7 0x13d4 #define TX_PWR_CFG_7_OFDM54_CH0 FIELD32(0x0000000f) @@ -1555,6 +1699,10 @@ #define TX_PWR_CFG_7_MCS7_CH0 FIELD32(0x000f0000) #define TX_PWR_CFG_7_MCS7_CH1 FIELD32(0x00f00000) #define TX_PWR_CFG_7_MCS7_CH2 FIELD32(0x0f000000) +/* bits for new 2T devices */ +#define TX_PWR_CFG_7B_54MBS FIELD32(0x000000ff) +#define TX_PWR_CFG_7B_MCS7 FIELD32(0x00ff0000) + /* TX_PWR_CFG_8 */ #define TX_PWR_CFG_8 0x13d8 @@ -1564,12 +1712,17 @@ #define TX_PWR_CFG_8_MCS23_CH0 FIELD32(0x000f0000) #define TX_PWR_CFG_8_MCS23_CH1 FIELD32(0x00f00000) #define TX_PWR_CFG_8_MCS23_CH2 FIELD32(0x0f000000) +/* bits for new 2T devices */ +#define TX_PWR_CFG_8B_MCS15 FIELD32(0x000000ff) + /* TX_PWR_CFG_9 */ #define TX_PWR_CFG_9 0x13dc #define TX_PWR_CFG_9_STBC7_CH0 FIELD32(0x0000000f) #define TX_PWR_CFG_9_STBC7_CH1 FIELD32(0x000000f0) #define TX_PWR_CFG_9_STBC7_CH2 FIELD32(0x00000f00) +/* bits for new 2T devices */ +#define TX_PWR_CFG_9B_STBC_MCS7 FIELD32(0x000000ff) /* * RX_FILTER_CFG: RX configuration register. 
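All of the TX power and ALC registers added above are described with FIELD32() masks and manipulated through rt2x00_set_field32()/rt2x00_get_field32(), which derive the field's shift from its mask. A self-contained sketch of that mask/shift mechanic, reusing two of the masks defined above; set_field32()/get_field32() here are simplified stand-ins for the driver's accessors, not the real implementations.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* The shift is recovered from the mask's lowest set bit, so a field
 * is fully described by one FIELD32-style mask (mask must be non-zero). */
static unsigned int field32_shift(uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

static void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
        *reg = (*reg & ~mask) | ((value << field32_shift(mask)) & mask);
}

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> field32_shift(mask);
}

/* Two of the masks defined in the header hunks above. */
#define TX_PWR_CFG_7B_54MBS 0x000000ff
#define TX_PWR_CFG_7B_MCS7  0x00ff0000

int main(void)
{
        uint32_t reg = 0;

        set_field32(&reg, TX_PWR_CFG_7B_54MBS, 0x2a);
        set_field32(&reg, TX_PWR_CFG_7B_MCS7, 0x1e);
        printf("reg = 0x%08" PRIx32 ", MCS7 = 0x%02" PRIx32 "\n",
               reg, get_field32(reg, TX_PWR_CFG_7B_MCS7));
        return 0;
}

Deriving the shift from the mask keeps each field down to a single #define, which is why the reversed MT7620 bit layouts earlier in this header need nothing more than a new set of masks.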
@@ -1760,6 +1913,8 @@ #define TX_STA_FIFO_WCID FIELD32(0x0000ff00) #define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000) #define TX_STA_FIFO_MCS FIELD32(0x007f0000) +#define TX_STA_FIFO_BW FIELD32(0x00800000) +#define TX_STA_FIFO_SGI FIELD32(0x01000000) #define TX_STA_FIFO_PHYMODE FIELD32(0xc0000000) /* @@ -2135,11 +2290,14 @@ struct mac_iveiv_entry { #define RFCSR1_TX1_PD FIELD8(0x20) #define RFCSR1_RX2_PD FIELD8(0x40) #define RFCSR1_TX2_PD FIELD8(0x80) +#define RFCSR1_TX2_EN_MT7620 FIELD8(0x02) /* * RFCSR 2: */ #define RFCSR2_RESCAL_EN FIELD8(0x80) +#define RFCSR2_RX2_EN_MT7620 FIELD8(0x02) +#define RFCSR2_TX2_EN_MT7620 FIELD8(0x20) /* * RFCSR 3: @@ -2157,6 +2315,12 @@ struct mac_iveiv_entry { #define RFCSR3_BIT4 FIELD8(0x10) #define RFCSR3_BIT5 FIELD8(0x20) +/* + * RFCSR 4: + * VCOCAL_EN used by MT7620 + */ +#define RFCSR4_VCOCAL_EN FIELD8(0x80) + /* * FRCSR 5: */ @@ -2212,6 +2376,7 @@ struct mac_iveiv_entry { */ #define RFCSR13_TX_POWER FIELD8(0x1f) #define RFCSR13_DR0 FIELD8(0xe0) +#define RFCSR13_RDIV_MT7620 FIELD8(0x03) /* * RFCSR 15: @@ -2222,6 +2387,8 @@ struct mac_iveiv_entry { * RFCSR 16: */ #define RFCSR16_TXMIXER_GAIN FIELD8(0x07) +#define RFCSR16_RF_PLL_FREQ_SEL_MT7620 FIELD8(0x0F) +#define RFCSR16_SDM_MODE_MT7620 FIELD8(0xE0) /* * RFCSR 17: @@ -2234,6 +2401,8 @@ struct mac_iveiv_entry { /* RFCSR 18 */ #define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40) +/* RFCSR 19 */ +#define RFCSR19_K FIELD8(0x03) /* * RFCSR 20: @@ -2244,11 +2413,14 @@ struct mac_iveiv_entry { * RFCSR 21: */ #define RFCSR21_RX_LO2_EN FIELD8(0x08) +#define RFCSR21_BIT1 FIELD8(0x01) +#define RFCSR21_BIT8 FIELD8(0x80) /* * RFCSR 22: */ #define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01) +#define RFCSR22_FREQPLAN_D_MT7620 FIELD8(0x07) /* * RFCSR 23: @@ -2270,6 +2442,11 @@ struct mac_iveiv_entry { #define RFCSR27_R3 FIELD8(0x30) #define RFCSR27_R4 FIELD8(0x40) +/* + * RFCSR 28: + */ +#define RFCSR28_CH11_HT40 FIELD8(0x04) + /* * RFCSR 29: */ @@ -2331,6 +2508,7 @@ struct mac_iveiv_entry { */ #define RFCSR42_BIT1 FIELD8(0x01) #define RFCSR42_BIT4 FIELD8(0x08) +#define RFCSR42_TX2_EN_MT7620 FIELD8(0x40) /* * RFCSR 49: @@ -2433,6 +2611,7 @@ enum rt2800_eeprom_word { EEPROM_TSSI_BOUND_BG5, EEPROM_TXPOWER_A1, EEPROM_TXPOWER_A2, + EEPROM_TXPOWER_INIT, EEPROM_TSSI_BOUND_A1, EEPROM_TSSI_BOUND_A2, EEPROM_TSSI_BOUND_A3, @@ -2987,29 +3166,4 @@ enum rt2800_eeprom_word { */ #define BCN_TBTT_OFFSET 64 -/* - * Hardware has 255 WCID table entries. First 32 entries are reserved for - * shared keys. Since parts of the pairwise key table might be shared with - * the beacon frame buffers 6 & 7 we could only use the first 222 entries. 
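The comment and defines being removed here describe the WCID (wireless client ID) table; the rt2800lib.c hunks below pair the existing sta_ids allocation bitmap with a new wcid_to_sta[] table so the TX-done path can map a hardware WCID back to its station. A toy stand-alone model of that bookkeeping, for illustration only; the driver's version additionally relies on RCU for safe lookup:

#include <stdio.h>

#define WCID_START 33
#define WCID_END 222
#define STA_IDS_SIZE (WCID_END - WCID_START + 2)
#define BITS_PER_LONG (sizeof(unsigned long) * 8)

struct sta; /* opaque stand-in for struct ieee80211_sta */

static unsigned long sta_ids[(STA_IDS_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG];
static struct sta *wcid_to_sta[STA_IDS_SIZE];

static int test_and_set_id(int i)
{
        unsigned long bit = 1UL << (i % BITS_PER_LONG);

        if (sta_ids[i / BITS_PER_LONG] & bit)
                return 1;
        sta_ids[i / BITS_PER_LONG] |= bit;
        return 0;
}

static int wcid_alloc(struct sta *sta)
{
        int i;

        for (i = 0; i < STA_IDS_SIZE; i++) {
                if (!test_and_set_id(i)) {
                        wcid_to_sta[i] = sta;  /* lets TX-done find the STA */
                        return WCID_START + i; /* the hardware WCID */
                }
        }
        return -1;                             /* table exhausted */
}

static void wcid_free(int wcid)
{
        int i = wcid - WCID_START;

        wcid_to_sta[i] = NULL;                 /* clear before the ID is reused */
        sta_ids[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
        struct sta *sta = (struct sta *)0x1;   /* dummy pointer */
        int wcid = wcid_alloc(sta);

        printf("first WCID: %d (expect %d)\n", wcid, WCID_START);
        wcid_free(wcid);
        return 0;
}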
- */ -#define WCID_START 33 -#define WCID_END 222 -#define STA_IDS_SIZE (WCID_END - WCID_START + 2) - -/* - * RT2800 driver data structure - */ -struct rt2800_drv_data { - u8 calibration_bw20; - u8 calibration_bw40; - u8 bbp25; - u8 bbp26; - u8 txmixer_gain_24g; - u8 txmixer_gain_5g; - u8 max_psdu; - unsigned int tbtt_tick; - unsigned int ampdu_factor_cnt[4]; - DECLARE_BITMAP(sta_ids, STA_IDS_SIZE); -}; - #endif /* RT2800_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 8223a15203165d32f09c03fff245162d04968b60..d11c7b210e8133174b59688994f0c1e9c22c09ad 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -59,6 +59,9 @@ rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg)) #define WAIT_FOR_RFCSR(__dev, __reg) \ rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg)) +#define WAIT_FOR_RFCSR_MT7620(__dev, __reg) \ + rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY_MT7620, \ + (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg)) #define WAIT_FOR_MCU(__dev, __reg) \ @@ -150,19 +153,56 @@ static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev, * Wait until the RFCSR becomes available, afterwards we * can safely write the new data into the register. */ - if (WAIT_FOR_RFCSR(rt2x00dev, ®)) { - reg = 0; - rt2x00_set_field32(®, RF_CSR_CFG_DATA, value); - rt2x00_set_field32(®, RF_CSR_CFG_REGNUM, word); - rt2x00_set_field32(®, RF_CSR_CFG_WRITE, 1); - rt2x00_set_field32(®, RF_CSR_CFG_BUSY, 1); + switch (rt2x00dev->chip.rt) { + case RT6352: + if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG_DATA_MT7620, value); + rt2x00_set_field32(®, RF_CSR_CFG_REGNUM_MT7620, + word); + rt2x00_set_field32(®, RF_CSR_CFG_WRITE_MT7620, 1); + rt2x00_set_field32(®, RF_CSR_CFG_BUSY_MT7620, 1); + + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + } + break; - rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + default: + if (WAIT_FOR_RFCSR(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG_DATA, value); + rt2x00_set_field32(®, RF_CSR_CFG_REGNUM, word); + rt2x00_set_field32(®, RF_CSR_CFG_WRITE, 1); + rt2x00_set_field32(®, RF_CSR_CFG_BUSY, 1); + + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + } + break; } mutex_unlock(&rt2x00dev->csr_mutex); } +static void rt2800_rfcsr_write_bank(struct rt2x00_dev *rt2x00dev, const u8 bank, + const unsigned int reg, const u8 value) +{ + rt2800_rfcsr_write(rt2x00dev, (reg | (bank << 6)), value); +} + +static void rt2800_rfcsr_write_chanreg(struct rt2x00_dev *rt2x00dev, + const unsigned int reg, const u8 value) +{ + rt2800_rfcsr_write_bank(rt2x00dev, 4, reg, value); + rt2800_rfcsr_write_bank(rt2x00dev, 6, reg, value); +} + +static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev, + const unsigned int reg, const u8 value) +{ + rt2800_rfcsr_write_bank(rt2x00dev, 5, reg, value); + rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value); +} + static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev, const unsigned int word, u8 *value) { @@ -178,22 +218,48 @@ static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev, * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. 
*/ - if (WAIT_FOR_RFCSR(rt2x00dev, ®)) { - reg = 0; - rt2x00_set_field32(®, RF_CSR_CFG_REGNUM, word); - rt2x00_set_field32(®, RF_CSR_CFG_WRITE, 0); - rt2x00_set_field32(®, RF_CSR_CFG_BUSY, 1); + switch (rt2x00dev->chip.rt) { + case RT6352: + if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG_REGNUM_MT7620, + word); + rt2x00_set_field32(®, RF_CSR_CFG_WRITE_MT7620, 0); + rt2x00_set_field32(®, RF_CSR_CFG_BUSY_MT7620, 1); - rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); - WAIT_FOR_RFCSR(rt2x00dev, ®); - } + WAIT_FOR_RFCSR_MT7620(rt2x00dev, ®); + } + + *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA_MT7620); + break; + + default: + if (WAIT_FOR_RFCSR(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG_REGNUM, word); + rt2x00_set_field32(®, RF_CSR_CFG_WRITE, 0); + rt2x00_set_field32(®, RF_CSR_CFG_BUSY, 1); - *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA); + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + + WAIT_FOR_RFCSR(rt2x00dev, ®); + } + + *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA); + break; + } mutex_unlock(&rt2x00dev->csr_mutex); } +static void rt2800_rfcsr_read_bank(struct rt2x00_dev *rt2x00dev, const u8 bank, + const unsigned int reg, u8 *value) +{ + rt2800_rfcsr_read(rt2x00dev, (reg | (bank << 6)), value); +} + static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { @@ -250,6 +316,7 @@ static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = { [EEPROM_TSSI_BOUND_BG5] = 0x003b, [EEPROM_TXPOWER_A1] = 0x003c, [EEPROM_TXPOWER_A2] = 0x0053, + [EEPROM_TXPOWER_INIT] = 0x0068, [EEPROM_TSSI_BOUND_A1] = 0x006a, [EEPROM_TSSI_BOUND_A2] = 0x006b, [EEPROM_TSSI_BOUND_A3] = 0x006c, @@ -524,6 +591,7 @@ void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev, break; case RT5592: + case RT6352: *txwi_size = TXWI_DESC_SIZE_5WORDS; *rxwi_size = RXWI_DESC_SIZE_6WORDS; break; @@ -821,10 +889,10 @@ void rt2800_process_rxwi(struct queue_entry *entry, rt2x00_desc_read(rxwi, 1, &word); if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI)) - rxdesc->flags |= RX_FLAG_SHORT_GI; + rxdesc->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rt2x00_get_field32(word, RXWI_W1_BW)) - rxdesc->flags |= RX_FLAG_40MHZ; + rxdesc->bw = RATE_INFO_BW_40; /* * Detect RX rate, always use MCS as signal type. 
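The next hunk teaches the TX-done path to rebuild a mac80211 rate index and flags directly from the TX_STA_FIFO status word (rt2800_rate_from_status()). The index normalisation is the subtle part: CCK indices carry the short-preamble encoding as an offset of 8, and on 2.4 GHz the four CCK rates precede the OFDM rates in the rate table. A compact stand-alone rendering of just that mapping; the enum and function names are illustrative, not the driver's:

#include <stdio.h>

enum phymode { MODE_CCK, MODE_OFDM, MODE_HT_MIX, MODE_HT_GF };

/* Normalise a hardware rate index the way the hunk below does. */
static int rate_idx_from_status(enum phymode mode, int idx, int band_2ghz)
{
        switch (mode) {
        case MODE_CCK:
                if (idx >= 8)   /* strip the short-preamble encoding */
                        idx -= 8;
                break;
        case MODE_OFDM:
                if (band_2ghz)  /* skip the 4 CCK entries in the table */
                        idx += 4;
                break;
        default:                /* HT: idx is already the MCS number */
                break;
        }
        return idx;
}

int main(void)
{
        printf("CCK idx 9  -> %d\n", rate_idx_from_status(MODE_CCK, 9, 1));
        printf("OFDM idx 2 -> %d\n", rate_idx_from_status(MODE_OFDM, 2, 1));
        return 0;
}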
@@ -852,14 +920,49 @@ void rt2800_process_rxwi(struct queue_entry *entry, } EXPORT_SYMBOL_GPL(rt2800_process_rxwi); -void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi) +static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc, + u32 status, enum nl80211_band band) +{ + u8 flags = 0; + u8 idx = rt2x00_get_field32(status, TX_STA_FIFO_MCS); + + switch (rt2x00_get_field32(status, TX_STA_FIFO_PHYMODE)) { + case RATE_MODE_HT_GREENFIELD: + flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case RATE_MODE_HT_MIX: + flags |= IEEE80211_TX_RC_MCS; + break; + case RATE_MODE_OFDM: + if (band == NL80211_BAND_2GHZ) + idx += 4; + break; + case RATE_MODE_CCK: + if (idx >= 8) + idx -= 8; + break; + } + + if (rt2x00_get_field32(status, TX_STA_FIFO_BW)) + flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (rt2x00_get_field32(status, TX_STA_FIFO_SGI)) + flags |= IEEE80211_TX_RC_SHORT_GI; + + skbdesc->tx_rate_idx = idx; + skbdesc->tx_rate_flags = flags; +} + +void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi, + bool match) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct txdone_entry_desc txdesc; u32 word; u16 mcs, real_mcs; - int aggr, ampdu; + int aggr, ampdu, wcid, ack_req; /* * Obtain the status about this packet. @@ -872,6 +975,8 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi) real_mcs = rt2x00_get_field32(status, TX_STA_FIFO_MCS); aggr = rt2x00_get_field32(status, TX_STA_FIFO_TX_AGGRE); + wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID); + ack_req = rt2x00_get_field32(status, TX_STA_FIFO_TX_ACK_REQUIRED); /* * If a frame was meant to be sent as a single non-aggregated MPDU @@ -888,15 +993,22 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi) * Hence, replace the requested rate with the real tx rate to not * confuse the rate control algortihm by providing clearly wrong * data. - */ - if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) { - skbdesc->tx_rate_idx = real_mcs; + * + * FIXME: if we do not find matching entry, we tell that frame was + * posted without any retries. We need to find a way to fix that + * and provide retry count. + */ + if (unlikely((aggr == 1 && ampdu == 0 && real_mcs != mcs)) || !match) { + rt2800_rate_from_status(skbdesc, status, rt2x00dev->curr_band); mcs = real_mcs; } if (aggr == 1 || ampdu == 1) __set_bit(TXDONE_AMPDU, &txdesc.flags); + if (!ack_req) + __set_bit(TXDONE_NO_ACK_REQ, &txdesc.flags); + /* * Ralink has a retry mechanism using a global fallback * table. We setup this fallback table to try the immediate @@ -928,7 +1040,18 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi) if (txdesc.retry) __set_bit(TXDONE_FALLBACK, &txdesc.flags); - rt2x00lib_txdone(entry, &txdesc); + if (!match) { + /* RCU assures non-null sta will not be freed by mac80211. 
*/
+		rcu_read_lock();
+		if (likely(wcid >= WCID_START && wcid <= WCID_END))
+			skbdesc->sta = drv_data->wcid_to_sta[wcid - WCID_START];
+		else
+			skbdesc->sta = NULL;
+		rt2x00lib_txdone_nomatch(entry, &txdesc);
+		rcu_read_unlock();
+	} else {
+		rt2x00lib_txdone(entry, &txdesc);
+	}
 }
 EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
@@ -1468,6 +1591,7 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
 		return 0;
 	__set_bit(wcid - WCID_START, drv_data->sta_ids);
+	drv_data->wcid_to_sta[wcid - WCID_START] = sta;
 	/*
 	 * Clean up WCID attributes and write STA address to the device.
@@ -1498,6 +1622,7 @@ int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta)
 	 * get renewed when the WCID is reused.
 	 */
 	rt2800_config_wcid(rt2x00dev, NULL, wcid);
+	drv_data->wcid_to_sta[wcid - WCID_START] = NULL;
 	__clear_bit(wcid - WCID_START, drv_data->sta_ids);
 	return 0;
@@ -2753,7 +2878,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
 		rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]);
 	} else if (rt2x00_rt(rt2x00dev, RT5390) ||
-		   rt2x00_rt(rt2x00dev, RT5392)) {
+		   rt2x00_rt(rt2x00dev, RT5392) ||
+		   rt2x00_rt(rt2x00dev, RT6352)) {
 		static const char r59_non_bt[] = {0x8f, 0x8f,
 			0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
 			0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
@@ -3047,6 +3173,244 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
 	rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
 }
+static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev,
+					 struct ieee80211_conf *conf,
+					 struct rf_channel *rf,
+					 struct channel_info *info)
+{
+	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+	u8 rx_agc_fc, tx_agc_fc;
+	u8 rfcsr;
+
+	/* Frequency plan setting */
+	/* Rdiv setting (set 0x03 if Xtal==20)
+	 * R13[1:0]
+	 */
+	rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR13_RDIV_MT7620,
+			  rt2800_clk_is_20mhz(rt2x00dev) ?
3 : 0); + rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); + + /* N setting + * R20[7:0] in rf->rf1 + * R21[0] always 0 + */ + rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr); + rfcsr = (rf->rf1 & 0x00ff); + rt2800_rfcsr_write(rt2x00dev, 20, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR21_BIT1, 0); + rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); + + /* K setting (always 0) + * R16[3:0] (RF PLL freq selection) + */ + rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR16_RF_PLL_FREQ_SEL_MT7620, 0); + rt2800_rfcsr_write(rt2x00dev, 16, rfcsr); + + /* D setting (always 0) + * R22[2:0] (D=15, R22[2:0]=<111>) + */ + rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR22_FREQPLAN_D_MT7620, 0); + rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); + + /* Ksd setting + * Ksd: R17<7:0> in rf->rf2 + * R18<7:0> in rf->rf3 + * R19<1:0> in rf->rf4 + */ + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); + rfcsr = rf->rf2; + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr); + rfcsr = rf->rf3; + rt2800_rfcsr_write(rt2x00dev, 18, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 19, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR19_K, rf->rf4); + rt2800_rfcsr_write(rt2x00dev, 19, rfcsr); + + /* Default: XO=20MHz , SDM mode */ + rt2800_rfcsr_read(rt2x00dev, 16, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR16_SDM_MODE_MT7620, 0x80); + rt2800_rfcsr_write(rt2x00dev, 16, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR21_BIT8, 1); + rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR1_TX2_EN_MT7620, + rt2x00dev->default_ant.tx_chain_num != 1); + rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR2_TX2_EN_MT7620, + rt2x00dev->default_ant.tx_chain_num != 1); + rt2x00_set_field8(&rfcsr, RFCSR2_RX2_EN_MT7620, + rt2x00dev->default_ant.rx_chain_num != 1); + rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 42, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR42_TX2_EN_MT7620, + rt2x00dev->default_ant.tx_chain_num != 1); + rt2800_rfcsr_write(rt2x00dev, 42, rfcsr); + + /* RF for DC Cal BW */ + if (conf_is_ht40(conf)) { + rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04); + rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10); + } else { + rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x20); + } + + if (conf_is_ht40(conf)) { + rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x08); + rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x08); + } else { + rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x28); + rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x28); + } + + rt2800_rfcsr_read(rt2x00dev, 28, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR28_CH11_HT40, + conf_is_ht40(conf) && (rf->channel == 11)); + rt2800_rfcsr_write(rt2x00dev, 28, rfcsr); + + if (!test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) { + if (conf_is_ht40(conf)) { + rx_agc_fc = drv_data->rx_calibration_bw40; + tx_agc_fc = drv_data->tx_calibration_bw40; + } else { + rx_agc_fc = drv_data->rx_calibration_bw20; + tx_agc_fc = drv_data->tx_calibration_bw20; + } + rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rfcsr); + 
rfcsr &= (~0x3F); + rfcsr |= rx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rfcsr); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= rx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rfcsr); + rt2800_rfcsr_read_bank(rt2x00dev, 7, 6, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= rx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 7, 6, rfcsr); + rt2800_rfcsr_read_bank(rt2x00dev, 7, 7, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= rx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 7, 7, rfcsr); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= tx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rfcsr); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= tx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rfcsr); + rt2800_rfcsr_read_bank(rt2x00dev, 7, 58, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= tx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 7, 58, rfcsr); + rt2800_rfcsr_read_bank(rt2x00dev, 7, 59, &rfcsr); + rfcsr &= (~0x3F); + rfcsr |= tx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr); + } +} + +static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev, + struct ieee80211_channel *chan, + int power_level) { + u16 eeprom, target_power, max_power; + u32 mac_sys_ctrl, mac_status; + u32 reg; + u8 bbp; + int i; + + /* hardware unit is 0.5dBm, limited to 23.5dBm */ + power_level *= 2; + if (power_level > 0x2f) + power_level = 0x2f; + + max_power = chan->max_power * 2; + if (max_power > 0x2f) + max_power = 0x2f; + + rt2800_register_read(rt2x00dev, TX_ALC_CFG_0, ®); + rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_0, power_level); + rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_1, power_level); + rt2x00_set_field32(®, TX_ALC_CFG_0_LIMIT_0, max_power); + rt2x00_set_field32(®, TX_ALC_CFG_0_LIMIT_1, max_power); + + rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) { + /* init base power by eeprom target power */ + rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_INIT, + &target_power); + rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_0, target_power); + rt2x00_set_field32(®, TX_ALC_CFG_0_CH_INIT_1, target_power); + } + rt2800_register_write(rt2x00dev, TX_ALC_CFG_0, reg); + + rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, ®); + rt2x00_set_field32(®, TX_ALC_CFG_1_TX_TEMP_COMP, 0); + rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg); + + /* Save MAC SYS CTRL registers */ + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &mac_sys_ctrl); + /* Disable Tx/Rx */ + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0); + /* Check MAC Tx/Rx idle */ + for (i = 0; i < 10000; i++) { + rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, + &mac_status); + if (mac_status & 0x3) + usleep_range(50, 200); + else + break; + } + + if (i == 10000) + rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n"); + + if (chan->center_freq > 2457) { + rt2800_bbp_read(rt2x00dev, 30, &bbp); + bbp = 0x40; + rt2800_bbp_write(rt2x00dev, 30, bbp); + rt2800_rfcsr_write(rt2x00dev, 39, 0); + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) + rt2800_rfcsr_write(rt2x00dev, 42, 0xfb); + else + rt2800_rfcsr_write(rt2x00dev, 42, 0x7b); + } else { + rt2800_bbp_read(rt2x00dev, 30, &bbp); + bbp = 0x1f; + rt2800_bbp_write(rt2x00dev, 30, bbp); + rt2800_rfcsr_write(rt2x00dev, 39, 0x80); + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) + rt2800_rfcsr_write(rt2x00dev, 42, 0xdb); + else + rt2800_rfcsr_write(rt2x00dev, 42, 0x5b); + } + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, mac_sys_ctrl); + 
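rt2800_config_alc() above works in the hardware's 0.5 dB step size: the requested power and the channel's regulatory maximum are doubled and clamped to 0x2f, i.e. 23.5 dBm, matching the TX_ALC_CFG_0 field comment earlier in this patch; it then busy-polls MAC_STATUS_CFG a bounded number of times rather than waiting forever for TX/RX to go idle. A stand-alone rendering of the unit conversion (the function name is illustrative):

#include <stdio.h>

/* Convert a power level in dBm to the ALC's 0.5 dB register units,
 * capped at the hardware limit of 0x2f (23.5 dBm), as done above. */
static int alc_power_to_regunits(int dbm)
{
        int units = dbm * 2;    /* one register step = 0.5 dB */

        if (units > 0x2f)       /* hardware limit: 23.5 dBm */
                units = 0x2f;
        return units;
}

int main(void)
{
        printf("20 dBm -> 0x%02x\n", alc_power_to_regunits(20)); /* 0x28 */
        printf("30 dBm -> 0x%02x\n", alc_power_to_regunits(30)); /* 0x2f */
        return 0;
}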
+ rt2800_vco_calibration(rt2x00dev); +} + static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) @@ -3171,7 +3535,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, struct channel_info *info) { u32 reg; - unsigned int tx_pin; + u32 tx_pin; u8 bbp, rfcsr; info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, @@ -3216,6 +3580,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, case RF5592: rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info); break; + case RF7620: + rt2800_config_channel_rf7620(rt2x00dev, conf, rf, info); + break; default: rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); } @@ -3290,7 +3657,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rf->channel <= 14) { if (!rt2x00_rt(rt2x00dev, RT5390) && - !rt2x00_rt(rt2x00dev, RT5392)) { + !rt2x00_rt(rt2x00dev, RT5392) && + !rt2x00_rt(rt2x00dev, RT6352)) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 75, 0x46); @@ -3310,7 +3678,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 82, 0x94); else if (rt2x00_rt(rt2x00dev, RT3593)) rt2800_bbp_write(rt2x00dev, 82, 0x82); - else + else if (!rt2x00_rt(rt2x00dev, RT6352)) rt2800_bbp_write(rt2x00dev, 82, 0xf2); if (rt2x00_rt(rt2x00dev, RT3593)) @@ -3331,7 +3699,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_rfcsr_write(rt2x00dev, 8, 0); - tx_pin = 0; + rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin); switch (rt2x00dev->default_ant.tx_chain_num) { case 3: @@ -3380,6 +3748,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); /* mt7620 */ rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); @@ -3438,12 +3807,26 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, usleep_range(1000, 1500); } - if (rt2x00_rt(rt2x00dev, RT5592)) { + if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) { + reg = 0x10; + if (!conf_is_ht40(conf)) { + if (rt2x00_rt(rt2x00dev, RT6352) && + rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + reg |= 0x5; + } else { + reg |= 0xa; + } + } rt2800_bbp_write(rt2x00dev, 195, 141); - rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a); + rt2800_bbp_write(rt2x00dev, 196, reg); /* AGC init */ - reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain; + if (rt2x00_rt(rt2x00dev, RT6352)) + reg = 0x04; + else + reg = rf->channel <= 14 ? 
0x1c : 0x24;
+
+		reg += 2 * rt2x00dev->lna_gain;
 	rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
 	rt2800_iq_calibrate(rt2x00dev, rf->channel);
@@ -4125,6 +4508,128 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
 			   (unsigned long) regs[i]);
 }
+static void rt2800_config_txpower_rt6352(struct rt2x00_dev *rt2x00dev,
+					 struct ieee80211_channel *chan,
+					 int power_level)
+{
+	u32 reg, pwreg;
+	u16 eeprom;
+	u32 data, gdata;
+	u8 t, i;
+	enum nl80211_band band = chan->band;
+	int delta;
+
+	/* Warn user if bw_comp is set in EEPROM */
+	delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+	if (delta)
+		rt2x00_warn(rt2x00dev, "ignoring EEPROM HT40 power delta: %d\n",
+			    delta);
+
+	/* populate TX_PWR_CFG_0 up to TX_PWR_CFG_4 from EEPROM for HT20, limit
+	 * value to 0x3f and replace 0x20 by 0x21 as this is what the vendor
+	 * driver does as well, though it looks kinda wrong.
+	 * Maybe some misunderstanding of what a signed 8-bit value is? Maybe
+	 * the hardware has a problem handling 0x20, and as the code initially
+	 * used a fixed offset between HT20 and HT40 rates they had to work-
+	 * around that issue and most likely just forgot about it later on.
+	 * Maybe we should use rt2800_get_txpower_bw_comp() here as well,
+	 * however, the corresponding EEPROM value is not respected by the
+	 * vendor driver, so maybe this is rather being taken care of by the
+	 * TX ALC and the driver doesn't need to handle it...?
+	 * Though this is all very awkward, just do as they did, as that's what
+	 * board vendors expected when they populated the EEPROM...
+	 */
+	for (i = 0; i < 5; i++) {
+		rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+					      i * 2, &eeprom);
+
+		data = eeprom;
+
+		t = eeprom & 0x3f;
+		if (t == 32)
+			t++;
+
+		gdata = t;
+
+		t = (eeprom & 0x3f00) >> 8;
+		if (t == 32)
+			t++;
+
+		gdata |= (t << 8);
+
+		rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+					      (i * 2) + 1, &eeprom);
+
+		t = eeprom & 0x3f;
+		if (t == 32)
+			t++;
+
+		gdata |= (t << 16);
+
+		t = (eeprom & 0x3f00) >> 8;
+		if (t == 32)
+			t++;
+
+		gdata |= (t << 24);
+		data |= (eeprom << 16);
+
+		if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) {
+			/* HT20 */
+			if (data != 0xffffffff)
+				rt2800_register_write(rt2x00dev,
+						      TX_PWR_CFG_0 + (i * 4),
+						      data);
+		} else {
+			/* HT40 */
+			if (gdata != 0xffffffff)
+				rt2800_register_write(rt2x00dev,
+						      TX_PWR_CFG_0 + (i * 4),
+						      gdata);
+		}
+	}
+
+	/* Apparently Ralink ran out of space in the BYRATE calibration section
+	 * of the EEPROM, which is copied to the corresponding TX_PWR_CFG_x
+	 * registers. As recent 2T chips use 8-bit instead of 4-bit values for
+	 * power-offsets, more space would be needed. Ralink decided to keep the
+	 * EEPROM layout untouched and rather have some shared values covering
+	 * multiple bitrates.
+	 * Populate the registers not covered by the EEPROM in the same way the
+	 * vendor driver does.
+ */ + + /* For OFDM 54MBS use value from OFDM 48MBS */ + pwreg = 0; + rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, ®); + t = rt2x00_get_field32(reg, TX_PWR_CFG_1B_48MBS); + rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_54MBS, t); + + /* For MCS 7 use value from MCS 6 */ + rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, ®); + t = rt2x00_get_field32(reg, TX_PWR_CFG_2B_MCS6_MCS7); + rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_MCS7, t); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, pwreg); + + /* For MCS 15 use value from MCS 14 */ + pwreg = 0; + rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, ®); + t = rt2x00_get_field32(reg, TX_PWR_CFG_3B_MCS14); + rt2x00_set_field32(&pwreg, TX_PWR_CFG_8B_MCS15, t); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, pwreg); + + /* For STBC MCS 7 use value from STBC MCS 6 */ + pwreg = 0; + rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, ®); + t = rt2x00_get_field32(reg, TX_PWR_CFG_4B_STBC_MCS6); + rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg); + + rt2800_config_alc(rt2x00dev, chan, power_level); + + /* TODO: temperature compensation code! */ +} + /* * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and * BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values, @@ -4321,6 +4826,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, { if (rt2x00_rt(rt2x00dev, RT3593)) rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level); + else if (rt2x00_rt(rt2x00dev, RT6352)) + rt2800_config_txpower_rt6352(rt2x00dev, chan, power_level); else rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level); } @@ -4336,6 +4843,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) { u32 tx_pin; u8 rfcsr; + unsigned long min_sleep = 0; /* * A voltage-controlled oscillator(VCO) is an electronic oscillator @@ -4374,6 +4882,15 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); + min_sleep = 1000; + break; + case RF7620: + rt2800_rfcsr_write(rt2x00dev, 5, 0x40); + rt2800_rfcsr_write(rt2x00dev, 4, 0x0C); + rt2800_rfcsr_read(rt2x00dev, 4, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR4_VCOCAL_EN, 1); + rt2800_rfcsr_write(rt2x00dev, 4, rfcsr); + min_sleep = 2000; break; default: WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration", @@ -4381,7 +4898,8 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) return; } - usleep_range(1000, 1500); + if (min_sleep > 0) + usleep_range(min_sleep, min_sleep * 2); rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin); if (rt2x00dev->rf_channel <= 14) { @@ -4413,6 +4931,42 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) } rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); + if (rt2x00_rt(rt2x00dev, RT6352)) { + if (rt2x00dev->default_ant.rx_chain_num == 1) { + rt2800_bbp_write(rt2x00dev, 91, 0x07); + rt2800_bbp_write(rt2x00dev, 95, 0x1A); + rt2800_bbp_write(rt2x00dev, 195, 128); + rt2800_bbp_write(rt2x00dev, 196, 0xA0); + rt2800_bbp_write(rt2x00dev, 195, 170); + rt2800_bbp_write(rt2x00dev, 196, 0x12); + rt2800_bbp_write(rt2x00dev, 195, 171); + rt2800_bbp_write(rt2x00dev, 196, 0x10); + } else { + rt2800_bbp_write(rt2x00dev, 91, 0x06); + rt2800_bbp_write(rt2x00dev, 95, 0x9A); + rt2800_bbp_write(rt2x00dev, 195, 128); + rt2800_bbp_write(rt2x00dev, 196, 0xE0); + rt2800_bbp_write(rt2x00dev, 195, 170); + rt2800_bbp_write(rt2x00dev, 196, 0x30); + rt2800_bbp_write(rt2x00dev, 
195, 171); + rt2800_bbp_write(rt2x00dev, 196, 0x30); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_bbp_write(rt2x00dev, 75, 0x68); + rt2800_bbp_write(rt2x00dev, 76, 0x4C); + rt2800_bbp_write(rt2x00dev, 79, 0x1C); + rt2800_bbp_write(rt2x00dev, 80, 0x0C); + rt2800_bbp_write(rt2x00dev, 82, 0xB6); + } + + /* On 11A, We should delay and wait RF/BBP to be stable + * and the appropriate time should be 1000 micro seconds + * 2005/06/05 - On 11G, we also need this delay time. + * Otherwise it's difficult to pass the WHQL. + */ + usleep_range(1000, 1500); + } } EXPORT_SYMBOL_GPL(rt2800_vco_calibration); @@ -4511,7 +5065,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392) || - rt2x00_rt(rt2x00dev, RT5592)) + rt2x00_rt(rt2x00dev, RT5592) || + rt2x00_rt(rt2x00dev, RT6352)) vgc = 0x1c + (2 * rt2x00dev->lna_gain); else vgc = 0x2e + rt2x00dev->lna_gain; @@ -4738,7 +5293,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) 0x00000000); } } else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { + rt2x00_rt(rt2x00dev, RT5392) || + rt2x00_rt(rt2x00dev, RT6352)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); @@ -4748,6 +5304,24 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } else if (rt2x00_rt(rt2x00dev, RT5350)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); + } else if (rt2x00_rt(rt2x00dev, RT6352)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000); + rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); + rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002); + rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F); + rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606); + rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); + rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0); + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C); + rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6C6C666C); + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, + 0x3630363A); + rt2800_register_write(rt2x00dev, TX1_RF_GAIN_CORRECT, + 0x3630363A); + rt2800_register_read(rt2x00dev, TX_ALC_CFG_1, ®); + rt2x00_set_field32(®, TX_ALC_CFG_1_ROS_BUSY_EN, 0); + rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -5729,6 +6303,231 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 103, 0xc0); } +static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev, + const u8 reg, const u8 value) +{ + rt2800_bbp_write(rt2x00dev, 195, reg); + rt2800_bbp_write(rt2x00dev, 196, value); +} + +static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev, + const u8 reg, const u8 value) +{ + rt2800_bbp_write(rt2x00dev, 158, reg); + rt2800_bbp_write(rt2x00dev, 159, value); +} + +static void rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, + const u8 reg, u8 *value) +{ + rt2800_bbp_write(rt2x00dev, 158, reg); + rt2800_bbp_read(rt2x00dev, 159, value); +} + +static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev) +{ + u8 bbp; + + /* Apply Maximum Likelihood Detection (MLD) for 2 
stream case */ + rt2800_bbp_read(rt2x00dev, 105, &bbp); + rt2x00_set_field8(&bbp, BBP105_MLD, + rt2x00dev->default_ant.rx_chain_num == 2); + rt2800_bbp_write(rt2x00dev, 105, bbp); + + /* Avoid data loss and CRC errors */ + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + /* Fix I/Q swap issue */ + rt2800_bbp_read(rt2x00dev, 1, &bbp); + bbp |= 0x04; + rt2800_bbp_write(rt2x00dev, 1, bbp); + + /* BBP for G band */ + rt2800_bbp_write(rt2x00dev, 3, 0x08); + rt2800_bbp_write(rt2x00dev, 4, 0x00); /* rt2800_bbp4_mac_if_ctrl? */ + rt2800_bbp_write(rt2x00dev, 6, 0x08); + rt2800_bbp_write(rt2x00dev, 14, 0x09); + rt2800_bbp_write(rt2x00dev, 15, 0xFF); + rt2800_bbp_write(rt2x00dev, 16, 0x01); + rt2800_bbp_write(rt2x00dev, 20, 0x06); + rt2800_bbp_write(rt2x00dev, 21, 0x00); + rt2800_bbp_write(rt2x00dev, 22, 0x00); + rt2800_bbp_write(rt2x00dev, 27, 0x00); + rt2800_bbp_write(rt2x00dev, 28, 0x00); + rt2800_bbp_write(rt2x00dev, 30, 0x00); + rt2800_bbp_write(rt2x00dev, 31, 0x48); + rt2800_bbp_write(rt2x00dev, 47, 0x40); + rt2800_bbp_write(rt2x00dev, 62, 0x00); + rt2800_bbp_write(rt2x00dev, 63, 0x00); + rt2800_bbp_write(rt2x00dev, 64, 0x00); + rt2800_bbp_write(rt2x00dev, 65, 0x2C); + rt2800_bbp_write(rt2x00dev, 66, 0x1C); + rt2800_bbp_write(rt2x00dev, 67, 0x20); + rt2800_bbp_write(rt2x00dev, 68, 0xDD); + rt2800_bbp_write(rt2x00dev, 69, 0x10); + rt2800_bbp_write(rt2x00dev, 70, 0x05); + rt2800_bbp_write(rt2x00dev, 73, 0x18); + rt2800_bbp_write(rt2x00dev, 74, 0x0F); + rt2800_bbp_write(rt2x00dev, 75, 0x60); + rt2800_bbp_write(rt2x00dev, 76, 0x44); + rt2800_bbp_write(rt2x00dev, 77, 0x59); + rt2800_bbp_write(rt2x00dev, 78, 0x1E); + rt2800_bbp_write(rt2x00dev, 79, 0x1C); + rt2800_bbp_write(rt2x00dev, 80, 0x0C); + rt2800_bbp_write(rt2x00dev, 81, 0x3A); + rt2800_bbp_write(rt2x00dev, 82, 0xB6); + rt2800_bbp_write(rt2x00dev, 83, 0x9A); + rt2800_bbp_write(rt2x00dev, 84, 0x9A); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); + rt2800_bbp_write(rt2x00dev, 95, 0x9A); + rt2800_bbp_write(rt2x00dev, 96, 0x00); + rt2800_bbp_write(rt2x00dev, 103, 0xC0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); + /* FIXME BBP105 owerwrite */ + rt2800_bbp_write(rt2x00dev, 105, 0x3C); + rt2800_bbp_write(rt2x00dev, 106, 0x12); + rt2800_bbp_write(rt2x00dev, 109, 0x00); + rt2800_bbp_write(rt2x00dev, 134, 0x10); + rt2800_bbp_write(rt2x00dev, 135, 0xA6); + rt2800_bbp_write(rt2x00dev, 137, 0x04); + rt2800_bbp_write(rt2x00dev, 142, 0x30); + rt2800_bbp_write(rt2x00dev, 143, 0xF7); + rt2800_bbp_write(rt2x00dev, 160, 0xEC); + rt2800_bbp_write(rt2x00dev, 161, 0xC4); + rt2800_bbp_write(rt2x00dev, 162, 0x77); + rt2800_bbp_write(rt2x00dev, 163, 0xF9); + rt2800_bbp_write(rt2x00dev, 164, 0x00); + rt2800_bbp_write(rt2x00dev, 165, 0x00); + rt2800_bbp_write(rt2x00dev, 186, 0x00); + rt2800_bbp_write(rt2x00dev, 187, 0x00); + rt2800_bbp_write(rt2x00dev, 188, 0x00); + rt2800_bbp_write(rt2x00dev, 186, 0x00); + rt2800_bbp_write(rt2x00dev, 187, 0x01); + rt2800_bbp_write(rt2x00dev, 188, 0x00); + rt2800_bbp_write(rt2x00dev, 189, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x06); + rt2800_bbp_write(rt2x00dev, 92, 0x04); + rt2800_bbp_write(rt2x00dev, 93, 0x54); + rt2800_bbp_write(rt2x00dev, 99, 0x50); + rt2800_bbp_write(rt2x00dev, 148, 0x84); + rt2800_bbp_write(rt2x00dev, 167, 0x80); + rt2800_bbp_write(rt2x00dev, 178, 0xFF); + rt2800_bbp_write(rt2x00dev, 106, 0x13); + + /* BBP for G band GLRT function (BBP_128 ~ BBP_221) */ + rt2800_bbp_glrt_write(rt2x00dev, 0, 0x00); + 
rt2800_bbp_glrt_write(rt2x00dev, 1, 0x14); + rt2800_bbp_glrt_write(rt2x00dev, 2, 0x20); + rt2800_bbp_glrt_write(rt2x00dev, 3, 0x0A); + rt2800_bbp_glrt_write(rt2x00dev, 10, 0x16); + rt2800_bbp_glrt_write(rt2x00dev, 11, 0x06); + rt2800_bbp_glrt_write(rt2x00dev, 12, 0x02); + rt2800_bbp_glrt_write(rt2x00dev, 13, 0x07); + rt2800_bbp_glrt_write(rt2x00dev, 14, 0x05); + rt2800_bbp_glrt_write(rt2x00dev, 15, 0x09); + rt2800_bbp_glrt_write(rt2x00dev, 16, 0x20); + rt2800_bbp_glrt_write(rt2x00dev, 17, 0x08); + rt2800_bbp_glrt_write(rt2x00dev, 18, 0x4A); + rt2800_bbp_glrt_write(rt2x00dev, 19, 0x00); + rt2800_bbp_glrt_write(rt2x00dev, 20, 0x00); + rt2800_bbp_glrt_write(rt2x00dev, 128, 0xE0); + rt2800_bbp_glrt_write(rt2x00dev, 129, 0x1F); + rt2800_bbp_glrt_write(rt2x00dev, 130, 0x4F); + rt2800_bbp_glrt_write(rt2x00dev, 131, 0x32); + rt2800_bbp_glrt_write(rt2x00dev, 132, 0x08); + rt2800_bbp_glrt_write(rt2x00dev, 133, 0x28); + rt2800_bbp_glrt_write(rt2x00dev, 134, 0x19); + rt2800_bbp_glrt_write(rt2x00dev, 135, 0x0A); + rt2800_bbp_glrt_write(rt2x00dev, 138, 0x16); + rt2800_bbp_glrt_write(rt2x00dev, 139, 0x10); + rt2800_bbp_glrt_write(rt2x00dev, 140, 0x10); + rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1A); + rt2800_bbp_glrt_write(rt2x00dev, 142, 0x36); + rt2800_bbp_glrt_write(rt2x00dev, 143, 0x2C); + rt2800_bbp_glrt_write(rt2x00dev, 144, 0x26); + rt2800_bbp_glrt_write(rt2x00dev, 145, 0x24); + rt2800_bbp_glrt_write(rt2x00dev, 146, 0x42); + rt2800_bbp_glrt_write(rt2x00dev, 147, 0x40); + rt2800_bbp_glrt_write(rt2x00dev, 148, 0x30); + rt2800_bbp_glrt_write(rt2x00dev, 149, 0x29); + rt2800_bbp_glrt_write(rt2x00dev, 150, 0x4C); + rt2800_bbp_glrt_write(rt2x00dev, 151, 0x46); + rt2800_bbp_glrt_write(rt2x00dev, 152, 0x3D); + rt2800_bbp_glrt_write(rt2x00dev, 153, 0x40); + rt2800_bbp_glrt_write(rt2x00dev, 154, 0x3E); + rt2800_bbp_glrt_write(rt2x00dev, 155, 0x38); + rt2800_bbp_glrt_write(rt2x00dev, 156, 0x3D); + rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2F); + rt2800_bbp_glrt_write(rt2x00dev, 158, 0x3C); + rt2800_bbp_glrt_write(rt2x00dev, 159, 0x34); + rt2800_bbp_glrt_write(rt2x00dev, 160, 0x2C); + rt2800_bbp_glrt_write(rt2x00dev, 161, 0x2F); + rt2800_bbp_glrt_write(rt2x00dev, 162, 0x3C); + rt2800_bbp_glrt_write(rt2x00dev, 163, 0x35); + rt2800_bbp_glrt_write(rt2x00dev, 164, 0x2E); + rt2800_bbp_glrt_write(rt2x00dev, 165, 0x2F); + rt2800_bbp_glrt_write(rt2x00dev, 166, 0x49); + rt2800_bbp_glrt_write(rt2x00dev, 167, 0x41); + rt2800_bbp_glrt_write(rt2x00dev, 168, 0x36); + rt2800_bbp_glrt_write(rt2x00dev, 169, 0x39); + rt2800_bbp_glrt_write(rt2x00dev, 170, 0x30); + rt2800_bbp_glrt_write(rt2x00dev, 171, 0x30); + rt2800_bbp_glrt_write(rt2x00dev, 172, 0x0E); + rt2800_bbp_glrt_write(rt2x00dev, 173, 0x0D); + rt2800_bbp_glrt_write(rt2x00dev, 174, 0x28); + rt2800_bbp_glrt_write(rt2x00dev, 175, 0x21); + rt2800_bbp_glrt_write(rt2x00dev, 176, 0x1C); + rt2800_bbp_glrt_write(rt2x00dev, 177, 0x16); + rt2800_bbp_glrt_write(rt2x00dev, 178, 0x50); + rt2800_bbp_glrt_write(rt2x00dev, 179, 0x4A); + rt2800_bbp_glrt_write(rt2x00dev, 180, 0x43); + rt2800_bbp_glrt_write(rt2x00dev, 181, 0x50); + rt2800_bbp_glrt_write(rt2x00dev, 182, 0x10); + rt2800_bbp_glrt_write(rt2x00dev, 183, 0x10); + rt2800_bbp_glrt_write(rt2x00dev, 184, 0x10); + rt2800_bbp_glrt_write(rt2x00dev, 185, 0x10); + rt2800_bbp_glrt_write(rt2x00dev, 200, 0x7D); + rt2800_bbp_glrt_write(rt2x00dev, 201, 0x14); + rt2800_bbp_glrt_write(rt2x00dev, 202, 0x32); + rt2800_bbp_glrt_write(rt2x00dev, 203, 0x2C); + rt2800_bbp_glrt_write(rt2x00dev, 204, 0x36); + rt2800_bbp_glrt_write(rt2x00dev, 205, 0x4C); + 
rt2800_bbp_glrt_write(rt2x00dev, 206, 0x43); + rt2800_bbp_glrt_write(rt2x00dev, 207, 0x2C); + rt2800_bbp_glrt_write(rt2x00dev, 208, 0x2E); + rt2800_bbp_glrt_write(rt2x00dev, 209, 0x36); + rt2800_bbp_glrt_write(rt2x00dev, 210, 0x30); + rt2800_bbp_glrt_write(rt2x00dev, 211, 0x6E); + + /* BBP for G band DCOC function */ + rt2800_bbp_dcoc_write(rt2x00dev, 140, 0x0C); + rt2800_bbp_dcoc_write(rt2x00dev, 141, 0x00); + rt2800_bbp_dcoc_write(rt2x00dev, 142, 0x10); + rt2800_bbp_dcoc_write(rt2x00dev, 143, 0x10); + rt2800_bbp_dcoc_write(rt2x00dev, 144, 0x10); + rt2800_bbp_dcoc_write(rt2x00dev, 145, 0x10); + rt2800_bbp_dcoc_write(rt2x00dev, 146, 0x08); + rt2800_bbp_dcoc_write(rt2x00dev, 147, 0x40); + rt2800_bbp_dcoc_write(rt2x00dev, 148, 0x04); + rt2800_bbp_dcoc_write(rt2x00dev, 149, 0x04); + rt2800_bbp_dcoc_write(rt2x00dev, 150, 0x08); + rt2800_bbp_dcoc_write(rt2x00dev, 151, 0x08); + rt2800_bbp_dcoc_write(rt2x00dev, 152, 0x03); + rt2800_bbp_dcoc_write(rt2x00dev, 153, 0x03); + rt2800_bbp_dcoc_write(rt2x00dev, 154, 0x03); + rt2800_bbp_dcoc_write(rt2x00dev, 155, 0x02); + rt2800_bbp_dcoc_write(rt2x00dev, 156, 0x40); + rt2800_bbp_dcoc_write(rt2x00dev, 157, 0x40); + rt2800_bbp_dcoc_write(rt2x00dev, 158, 0x64); + rt2800_bbp_dcoc_write(rt2x00dev, 159, 0x64); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); +} + static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; @@ -5773,6 +6572,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) case RT5592: rt2800_init_bbp_5592(rt2x00dev); return; + case RT6352: + rt2800_init_bbp_6352(rt2x00dev); + break; } for (i = 0; i < EEPROM_BBP_SIZE; i++) { @@ -6228,9 +7030,9 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev) static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) { - int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0, + int tx0_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags); - int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1, + int tx1_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags); u8 rfcsr; @@ -6270,9 +7072,9 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rfcsr = 0x01; - if (!tx0_int_pa) + if (tx0_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1); - if (!tx1_int_pa) + if (tx1_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1); rt2800_rfcsr_write(rt2x00dev, 34, rfcsr); rt2800_rfcsr_write(rt2x00dev, 35, 0x03); @@ -6282,13 +7084,13 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 39, 0xc5); rt2800_rfcsr_write(rt2x00dev, 40, 0x33); rfcsr = 0x52; - if (tx0_int_pa) { + if (!tx0_ext_pa) { rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1); rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1); } rt2800_rfcsr_write(rt2x00dev, 41, rfcsr); rfcsr = 0x52; - if (tx1_int_pa) { + if (!tx1_ext_pa) { rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1); rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1); } @@ -6301,19 +7103,19 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 48, 0x14); rt2800_rfcsr_write(rt2x00dev, 49, 0x00); rfcsr = 0x2d; - if (!tx0_int_pa) + if (tx0_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1); - if (!tx1_int_pa) + if (tx1_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1); rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); - rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52)); - rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 
0x00 : 0xc0)); - rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2)); - rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0)); - rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52)); - rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0)); - rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49)); - rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0)); + rt2800_rfcsr_write(rt2x00dev, 51, (tx0_ext_pa ? 0x52 : 0x7f)); + rt2800_rfcsr_write(rt2x00dev, 52, (tx0_ext_pa ? 0xc0 : 0x00)); + rt2800_rfcsr_write(rt2x00dev, 53, (tx0_ext_pa ? 0xd2 : 0x52)); + rt2800_rfcsr_write(rt2x00dev, 54, (tx0_ext_pa ? 0xc0 : 0x1b)); + rt2800_rfcsr_write(rt2x00dev, 55, (tx1_ext_pa ? 0x52 : 0x7f)); + rt2800_rfcsr_write(rt2x00dev, 56, (tx1_ext_pa ? 0xc0 : 0x00)); + rt2800_rfcsr_write(rt2x00dev, 57, (tx0_ext_pa ? 0x49 : 0x52)); + rt2800_rfcsr_write(rt2x00dev, 58, (tx1_ext_pa ? 0xc0 : 0x1b)); rt2800_rfcsr_write(rt2x00dev, 59, 0x00); rt2800_rfcsr_write(rt2x00dev, 60, 0x00); rt2800_rfcsr_write(rt2x00dev, 61, 0x00); @@ -6844,6 +7646,617 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev) rt2800_led_open_drain_enable(rt2x00dev); } +static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev, + bool set_bw, bool is_ht40) +{ + u8 bbp_val; + + rt2800_bbp_read(rt2x00dev, 21, &bbp_val); + bbp_val |= 0x1; + rt2800_bbp_write(rt2x00dev, 21, bbp_val); + usleep_range(100, 200); + + if (set_bw) { + rt2800_bbp_read(rt2x00dev, 4, &bbp_val); + rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, 2 * is_ht40); + rt2800_bbp_write(rt2x00dev, 4, bbp_val); + usleep_range(100, 200); + } + + rt2800_bbp_read(rt2x00dev, 21, &bbp_val); + bbp_val &= (~0x1); + rt2800_bbp_write(rt2x00dev, 21, bbp_val); + usleep_range(100, 200); +} + +static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal) +{ + u8 rf_val; + + if (btxcal) + rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04); + else + rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x02); + + rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x06); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &rf_val); + rf_val |= 0x80; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rf_val); + + if (btxcal) { + rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xC1); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x20); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val); + rf_val &= (~0x3F); + rf_val |= 0x3F; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val); + rf_val &= (~0x3F); + rf_val |= 0x3F; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, 0x31); + } else { + rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xF1); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x18); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &rf_val); + rf_val &= (~0x3F); + rf_val |= 0x34; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &rf_val); + rf_val &= (~0x3F); + rf_val |= 0x34; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val); + } + + return 0; +} + +static char rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev) +{ + unsigned int cnt; + u8 bbp_val; + char cal_val; + + rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82); + + cnt = 0; + do { + usleep_range(500, 2000); + rt2800_bbp_read(rt2x00dev, 159, &bbp_val); + if (bbp_val == 0x02 || cnt == 20) + break; + + cnt++; + } while (cnt < 20); + + rt2800_bbp_dcoc_read(rt2x00dev, 0x39, &bbp_val); + cal_val 
= bbp_val & 0x7F; + if (cal_val >= 0x40) + cal_val -= 128; + + return cal_val; +} + +static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev, + bool btxcal) +{ + struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + u8 tx_agc_fc = 0, rx_agc_fc = 0, cmm_agc_fc; + u8 filter_target; + u8 tx_filter_target_20m = 0x09, tx_filter_target_40m = 0x02; + u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31; + int loop = 0, is_ht40, cnt; + u8 bbp_val, rf_val; + char cal_r32_init, cal_r32_val, cal_diff; + u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05; + u8 saverfb5r06, saverfb5r07; + u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20; + u8 saverfb5r37, saverfb5r38, saverfb5r39, saverfb5r40, saverfb5r41; + u8 saverfb5r42, saverfb5r43, saverfb5r44, saverfb5r45, saverfb5r46; + u8 saverfb5r58, saverfb5r59; + u8 savebbp159r0, savebbp159r2, savebbpr23; + u32 MAC_RF_CONTROL0, MAC_RF_BYPASS0; + + /* Save MAC registers */ + rt2800_register_read(rt2x00dev, RF_CONTROL0, &MAC_RF_CONTROL0); + rt2800_register_read(rt2x00dev, RF_BYPASS0, &MAC_RF_BYPASS0); + + /* save BBP registers */ + rt2800_bbp_read(rt2x00dev, 23, &savebbpr23); + + rt2800_bbp_dcoc_read(rt2x00dev, 0, &savebbp159r0); + rt2800_bbp_dcoc_read(rt2x00dev, 2, &savebbp159r2); + + /* Save RF registers */ + rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &saverfb5r00); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &saverfb5r01); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 3, &saverfb5r03); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 4, &saverfb5r04); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 5, &saverfb5r05); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &saverfb5r06); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &saverfb5r07); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &saverfb5r08); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 17, &saverfb5r17); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 18, &saverfb5r18); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 19, &saverfb5r19); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 20, &saverfb5r20); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 37, &saverfb5r37); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 38, &saverfb5r38); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 39, &saverfb5r39); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 40, &saverfb5r40); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 41, &saverfb5r41); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 42, &saverfb5r42); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 43, &saverfb5r43); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 44, &saverfb5r44); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 45, &saverfb5r45); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 46, &saverfb5r46); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &saverfb5r58); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &saverfb5r59); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val); + rf_val |= 0x3; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val); + rf_val |= 0x1; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rf_val); + + cnt = 0; + do { + usleep_range(500, 2000); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 1, &rf_val); + if (((rf_val & 0x1) == 0x00) || (cnt == 40)) + break; + cnt++; + } while (cnt < 40); + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 0, &rf_val); + rf_val &= (~0x3); + rf_val |= 0x1; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val); + + /* I-3 */ + rt2800_bbp_read(rt2x00dev, 23, &bbp_val); + bbp_val &= (~0x1F); + bbp_val |= 0x10; + rt2800_bbp_write(rt2x00dev, 23, bbp_val); + + do { + /* I-4,5,6,7,8,9 */ + if (loop == 0) { + is_ht40 = false; + + if (btxcal) + filter_target = tx_filter_target_20m; + 
else + filter_target = rx_filter_target_20m; + } else { + is_ht40 = true; + + if (btxcal) + filter_target = tx_filter_target_40m; + else + filter_target = rx_filter_target_40m; + } + + rt2800_rfcsr_read_bank(rt2x00dev, 5, 8, &rf_val); + rf_val &= (~0x04); + if (loop == 1) + rf_val |= 0x4; + + rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, rf_val); + + rt2800_bbp_core_soft_reset(rt2x00dev, true, is_ht40); + + rt2800_rf_lp_config(rt2x00dev, btxcal); + if (btxcal) { + tx_agc_fc = 0; + rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val); + rf_val &= (~0x7F); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val); + rf_val &= (~0x7F); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val); + } else { + rx_agc_fc = 0; + rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val); + rf_val &= (~0x7F); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val); + rf_val &= (~0x7F); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val); + } + + usleep_range(1000, 2000); + + rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val); + bbp_val &= (~0x6); + rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val); + + rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40); + + cal_r32_init = rt2800_lp_tx_filter_bw_cal(rt2x00dev); + + rt2800_bbp_dcoc_read(rt2x00dev, 2, &bbp_val); + bbp_val |= 0x6; + rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val); +do_cal: + if (btxcal) { + rt2800_rfcsr_read_bank(rt2x00dev, 5, 58, &rf_val); + rf_val &= (~0x7F); + rf_val |= tx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 59, &rf_val); + rf_val &= (~0x7F); + rf_val |= tx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val); + } else { + rt2800_rfcsr_read_bank(rt2x00dev, 5, 6, &rf_val); + rf_val &= (~0x7F); + rf_val |= rx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val); + rt2800_rfcsr_read_bank(rt2x00dev, 5, 7, &rf_val); + rf_val &= (~0x7F); + rf_val |= rx_agc_fc; + rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val); + } + + usleep_range(500, 1000); + + rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40); + + cal_r32_val = rt2800_lp_tx_filter_bw_cal(rt2x00dev); + + cal_diff = cal_r32_init - cal_r32_val; + + if (btxcal) + cmm_agc_fc = tx_agc_fc; + else + cmm_agc_fc = rx_agc_fc; + + if (((cal_diff > filter_target) && (cmm_agc_fc == 0)) || + ((cal_diff < filter_target) && (cmm_agc_fc == 0x3f))) { + if (btxcal) + tx_agc_fc = 0; + else + rx_agc_fc = 0; + } else if ((cal_diff <= filter_target) && (cmm_agc_fc < 0x3f)) { + if (btxcal) + tx_agc_fc++; + else + rx_agc_fc++; + goto do_cal; + } + + if (btxcal) { + if (loop == 0) + drv_data->tx_calibration_bw20 = tx_agc_fc; + else + drv_data->tx_calibration_bw40 = tx_agc_fc; + } else { + if (loop == 0) + drv_data->rx_calibration_bw20 = rx_agc_fc; + else + drv_data->rx_calibration_bw40 = rx_agc_fc; + } + + loop++; + } while (loop <= 1); + + rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, saverfb5r00); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, saverfb5r01); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, saverfb5r03); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r04); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, saverfb5r05); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, saverfb5r06); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, saverfb5r07); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, saverfb5r08); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19); + 
rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20); + + rt2800_rfcsr_write_bank(rt2x00dev, 5, 37, saverfb5r37); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 38, saverfb5r38); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 39, saverfb5r39); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 40, saverfb5r40); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 41, saverfb5r41); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 42, saverfb5r42); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 43, saverfb5r43); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 44, saverfb5r44); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 45, saverfb5r45); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 46, saverfb5r46); + + rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, saverfb5r58); + rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, saverfb5r59); + + rt2800_bbp_write(rt2x00dev, 23, savebbpr23); + + rt2800_bbp_dcoc_write(rt2x00dev, 0, savebbp159r0); + rt2800_bbp_dcoc_write(rt2x00dev, 2, savebbp159r2); + + rt2800_bbp_read(rt2x00dev, 4, &bbp_val); + rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, + 2 * test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)); + rt2800_bbp_write(rt2x00dev, 4, bbp_val); + + rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0); + rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0); +} + +static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev) +{ + /* Initialize RF central register to default value */ + rt2800_rfcsr_write(rt2x00dev, 0, 0x02); + rt2800_rfcsr_write(rt2x00dev, 1, 0x03); + rt2800_rfcsr_write(rt2x00dev, 2, 0x33); + rt2800_rfcsr_write(rt2x00dev, 3, 0xFF); + rt2800_rfcsr_write(rt2x00dev, 4, 0x0C); + rt2800_rfcsr_write(rt2x00dev, 5, 0x40); + rt2800_rfcsr_write(rt2x00dev, 6, 0x00); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 8, 0x00); + rt2800_rfcsr_write(rt2x00dev, 9, 0x00); + rt2800_rfcsr_write(rt2x00dev, 10, 0x00); + rt2800_rfcsr_write(rt2x00dev, 11, 0x00); + rt2800_rfcsr_write(rt2x00dev, 12, rt2x00dev->freq_offset); + rt2800_rfcsr_write(rt2x00dev, 13, 0x00); + rt2800_rfcsr_write(rt2x00dev, 14, 0x40); + rt2800_rfcsr_write(rt2x00dev, 15, 0x22); + rt2800_rfcsr_write(rt2x00dev, 16, 0x4C); + rt2800_rfcsr_write(rt2x00dev, 17, 0x00); + rt2800_rfcsr_write(rt2x00dev, 18, 0x00); + rt2800_rfcsr_write(rt2x00dev, 19, 0x00); + rt2800_rfcsr_write(rt2x00dev, 20, 0xA0); + rt2800_rfcsr_write(rt2x00dev, 21, 0x12); + rt2800_rfcsr_write(rt2x00dev, 22, 0x07); + rt2800_rfcsr_write(rt2x00dev, 23, 0x13); + rt2800_rfcsr_write(rt2x00dev, 24, 0xFE); + rt2800_rfcsr_write(rt2x00dev, 25, 0x24); + rt2800_rfcsr_write(rt2x00dev, 26, 0x7A); + rt2800_rfcsr_write(rt2x00dev, 27, 0x00); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0x05); + rt2800_rfcsr_write(rt2x00dev, 30, 0x00); + rt2800_rfcsr_write(rt2x00dev, 31, 0x00); + rt2800_rfcsr_write(rt2x00dev, 32, 0x00); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x00); + rt2800_rfcsr_write(rt2x00dev, 35, 0x00); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 37, 0x00); + rt2800_rfcsr_write(rt2x00dev, 38, 0x00); + rt2800_rfcsr_write(rt2x00dev, 39, 0x00); + rt2800_rfcsr_write(rt2x00dev, 40, 0x00); + rt2800_rfcsr_write(rt2x00dev, 41, 0xD0); + rt2800_rfcsr_write(rt2x00dev, 42, 0x5B); + rt2800_rfcsr_write(rt2x00dev, 43, 0x00); + + rt2800_rfcsr_write(rt2x00dev, 11, 0x21); + if (rt2800_clk_is_20mhz(rt2x00dev)) + rt2800_rfcsr_write(rt2x00dev, 13, 0x03); + else + rt2800_rfcsr_write(rt2x00dev, 13, 0x00); + rt2800_rfcsr_write(rt2x00dev, 14, 0x7C); + rt2800_rfcsr_write(rt2x00dev, 16, 0x80); + 
rt2800_rfcsr_write(rt2x00dev, 17, 0x99); + rt2800_rfcsr_write(rt2x00dev, 18, 0x99); + rt2800_rfcsr_write(rt2x00dev, 19, 0x09); + rt2800_rfcsr_write(rt2x00dev, 20, 0x50); + rt2800_rfcsr_write(rt2x00dev, 21, 0xB0); + rt2800_rfcsr_write(rt2x00dev, 22, 0x00); + rt2800_rfcsr_write(rt2x00dev, 23, 0x06); + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); + rt2800_rfcsr_write(rt2x00dev, 25, 0x00); + rt2800_rfcsr_write(rt2x00dev, 26, 0x5D); + rt2800_rfcsr_write(rt2x00dev, 27, 0x00); + rt2800_rfcsr_write(rt2x00dev, 28, 0x61); + rt2800_rfcsr_write(rt2x00dev, 29, 0xB5); + rt2800_rfcsr_write(rt2x00dev, 43, 0x02); + + rt2800_rfcsr_write(rt2x00dev, 28, 0x62); + rt2800_rfcsr_write(rt2x00dev, 29, 0xAD); + rt2800_rfcsr_write(rt2x00dev, 39, 0x80); + + /* Initialize RF channel register to default value */ + rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x03); + rt2800_rfcsr_write_chanreg(rt2x00dev, 1, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 2, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 3, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 4, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 5, 0x08); + rt2800_rfcsr_write_chanreg(rt2x00dev, 6, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 7, 0x51); + rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x53); + rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x16); + rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x61); + rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53); + rt2800_rfcsr_write_chanreg(rt2x00dev, 12, 0x22); + rt2800_rfcsr_write_chanreg(rt2x00dev, 13, 0x3D); + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06); + rt2800_rfcsr_write_chanreg(rt2x00dev, 15, 0x13); + rt2800_rfcsr_write_chanreg(rt2x00dev, 16, 0x22); + rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x02); + rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7); + rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x01); + rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x52); + rt2800_rfcsr_write_chanreg(rt2x00dev, 22, 0x80); + rt2800_rfcsr_write_chanreg(rt2x00dev, 23, 0xB3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 24, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 25, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 27, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x5C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0x6B); + rt2800_rfcsr_write_chanreg(rt2x00dev, 30, 0x6B); + rt2800_rfcsr_write_chanreg(rt2x00dev, 31, 0x31); + rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x5D); + rt2800_rfcsr_write_chanreg(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xE6); + rt2800_rfcsr_write_chanreg(rt2x00dev, 35, 0x55); + rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 37, 0xBB); + rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 39, 0xB3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 40, 0x03); + rt2800_rfcsr_write_chanreg(rt2x00dev, 41, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 42, 0x00); + rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xB3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xD3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5); + rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x07); + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x68); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xEF); + rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x07); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xA8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0x85); + rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x10); + 
rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x07); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6A); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0x85); + rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x10); + rt2800_rfcsr_write_chanreg(rt2x00dev, 62, 0x1C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 63, 0x00); + + rt2800_rfcsr_write_bank(rt2x00dev, 6, 45, 0xC5); + + rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x47); + rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x71); + rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x33); + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x0E); + rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x23); + rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x02); + rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x12); + rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x1C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0xEB); + rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x7D); + rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xD6); + rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x08); + rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xB3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5); + rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); + rt2800_rfcsr_write_bank(rt2x00dev, 4, 47, 0x67); + rt2800_rfcsr_write_bank(rt2x00dev, 6, 47, 0x69); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFF); + rt2800_rfcsr_write_bank(rt2x00dev, 4, 54, 0x27); + rt2800_rfcsr_write_bank(rt2x00dev, 6, 54, 0x20); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xFF); + rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x1C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x20); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xF7); + rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x09); + + rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x51); + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06); + rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7); + rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x2C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x64); + rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x51); + rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x36); + rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53); + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x16); + + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x6C); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFC); + rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1F); + rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B); + + /* Initialize RF channel register for DRQFN */ + rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xE3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xE5); + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x28); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x68); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xF7); + rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x02); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xC7); + + /* Initialize RF DC calibration register to default value */ + rt2800_rfcsr_write_dccal(rt2x00dev, 0, 0x47); + rt2800_rfcsr_write_dccal(rt2x00dev, 1, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 2, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 
7, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04); + rt2800_rfcsr_write_dccal(rt2x00dev, 9, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 10, 0x07); + rt2800_rfcsr_write_dccal(rt2x00dev, 11, 0x01); + rt2800_rfcsr_write_dccal(rt2x00dev, 12, 0x07); + rt2800_rfcsr_write_dccal(rt2x00dev, 13, 0x07); + rt2800_rfcsr_write_dccal(rt2x00dev, 14, 0x07); + rt2800_rfcsr_write_dccal(rt2x00dev, 15, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 16, 0x22); + rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 21, 0xF1); + rt2800_rfcsr_write_dccal(rt2x00dev, 22, 0x11); + rt2800_rfcsr_write_dccal(rt2x00dev, 23, 0x02); + rt2800_rfcsr_write_dccal(rt2x00dev, 24, 0x41); + rt2800_rfcsr_write_dccal(rt2x00dev, 25, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 27, 0xD7); + rt2800_rfcsr_write_dccal(rt2x00dev, 28, 0xA2); + rt2800_rfcsr_write_dccal(rt2x00dev, 29, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 30, 0x49); + rt2800_rfcsr_write_dccal(rt2x00dev, 31, 0x20); + rt2800_rfcsr_write_dccal(rt2x00dev, 32, 0x04); + rt2800_rfcsr_write_dccal(rt2x00dev, 33, 0xF1); + rt2800_rfcsr_write_dccal(rt2x00dev, 34, 0xA1); + rt2800_rfcsr_write_dccal(rt2x00dev, 35, 0x01); + rt2800_rfcsr_write_dccal(rt2x00dev, 41, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 42, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 43, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 44, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 45, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 46, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 47, 0x3E); + rt2800_rfcsr_write_dccal(rt2x00dev, 48, 0x3D); + rt2800_rfcsr_write_dccal(rt2x00dev, 49, 0x3E); + rt2800_rfcsr_write_dccal(rt2x00dev, 50, 0x3D); + rt2800_rfcsr_write_dccal(rt2x00dev, 51, 0x3E); + rt2800_rfcsr_write_dccal(rt2x00dev, 52, 0x3D); + rt2800_rfcsr_write_dccal(rt2x00dev, 53, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 54, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 55, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 56, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 57, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10); + rt2800_rfcsr_write_dccal(rt2x00dev, 60, 0x0A); + rt2800_rfcsr_write_dccal(rt2x00dev, 61, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 62, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 63, 0x00); + + rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x08); + rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x04); + rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x20); + + rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00); + rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C); + + rt2800_bw_filter_calibration(rt2x00dev, true); + rt2800_bw_filter_calibration(rt2x00dev, false); +} + static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) { if (rt2800_is_305x_soc(rt2x00dev)) { @@ -6884,6 +8297,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) case RT5592: rt2800_init_rfcsr_5592(rt2x00dev); break; + case RT6352: + rt2800_init_rfcsr_6352(rt2x00dev); + break; } } @@ -7250,7 +8666,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) */ if (rt2x00_rt(rt2x00dev, RT3290) || rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) + rt2x00_rt(rt2x00dev, RT5392) || + rt2x00_rt(rt2x00dev, RT6352)) rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf); else if (rt2x00_rt(rt2x00dev, RT3352)) rf = RF3322; @@ -7282,6 +8699,7 @@ static int 
rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	case RF5390:
 	case RF5392:
 	case RF5592:
+	case RF7620:
 		break;
 	default:
 		rt2x00_err(rt2x00dev, "Invalid RF chipset 0x%04x detected\n",
@@ -7382,13 +8800,13 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
 
 	if (rt2x00_rt(rt2x00dev, RT3352)) {
-		if (!rt2x00_get_field16(eeprom,
+		if (rt2x00_get_field16(eeprom,
 				EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
-			__set_bit(CAPABILITY_INTERNAL_PA_TX0,
+			__set_bit(CAPABILITY_EXTERNAL_PA_TX0,
 				  &rt2x00dev->cap_flags);
-		if (!rt2x00_get_field16(eeprom,
+		if (rt2x00_get_field16(eeprom,
 				EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352))
-			__set_bit(CAPABILITY_INTERNAL_PA_TX1,
+			__set_bit(CAPABILITY_EXTERNAL_PA_TX1,
 				  &rt2x00dev->cap_flags);
 	}
@@ -7689,6 +9107,23 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
 	{196, 83, 0, 12, 1},
 };
 
+static const struct rf_channel rf_vals_7620[] = {
+	{1, 0x50, 0x99, 0x99, 1},
+	{2, 0x50, 0x44, 0x44, 2},
+	{3, 0x50, 0xEE, 0xEE, 2},
+	{4, 0x50, 0x99, 0x99, 3},
+	{5, 0x51, 0x44, 0x44, 0},
+	{6, 0x51, 0xEE, 0xEE, 0},
+	{7, 0x51, 0x99, 0x99, 1},
+	{8, 0x51, 0x44, 0x44, 2},
+	{9, 0x51, 0xEE, 0xEE, 2},
+	{10, 0x51, 0x99, 0x99, 3},
+	{11, 0x52, 0x44, 0x44, 0},
+	{12, 0x52, 0xEE, 0xEE, 0},
+	{13, 0x52, 0x99, 0x99, 1},
+	{14, 0x52, 0x33, 0x33, 3},
+};
+
 static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7792,6 +9227,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 		spec->channels = rf_vals_3x;
 		break;
 
+	case RF7620:
+		spec->num_channels = ARRAY_SIZE(rf_vals_7620);
+		spec->channels = rf_vals_7620;
+		break;
+
 	case RF3052:
 	case RF3053:
 		spec->num_channels = ARRAY_SIZE(rf_vals_3x);
@@ -7923,6 +9363,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 	case RF5390:
 	case RF5392:
 	case RF5592:
+	case RF7620:
 		__set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
 		break;
 	}
@@ -7967,6 +9408,9 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
 		return -ENODEV;
 	}
 
+	if (rt == RT5390 && rt2x00_is_soc(rt2x00dev))
+		rt = RT6352;
+
 	rt2x00_set_rt(rt2x00dev, rt, rev);
 
 	return 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 0a8b4df665fe36bde6be9fd1e20fd232797c8d19..f357531d9488d99d97033ac049d4ebbe99b70be2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -20,6 +20,34 @@
 #ifndef RT2800LIB_H
 #define RT2800LIB_H
 
+/*
+ * Hardware has 255 WCID table entries. The first 32 entries are reserved
+ * for shared keys. Since parts of the pairwise key table might be shared
+ * with the beacon frame buffers 6 & 7, we can only use the first 222
+ * entries.
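+ *
+ * That leaves WCIDs 33 through 222 (190 entries) for stations. The
+ * WCID_START/WCID_END limits below, the sta_ids bitmap and the
+ * wcid_to_sta[] array in struct rt2800_drv_data are all sized from
+ * this range so the driver can hand out per-station WCIDs.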
+ */ +#define WCID_START 33 +#define WCID_END 222 +#define STA_IDS_SIZE (WCID_END - WCID_START + 2) + +/* RT2800 driver data structure */ +struct rt2800_drv_data { + u8 calibration_bw20; + u8 calibration_bw40; + char rx_calibration_bw20; + char rx_calibration_bw40; + char tx_calibration_bw20; + char tx_calibration_bw40; + u8 bbp25; + u8 bbp26; + u8 txmixer_gain_24g; + u8 txmixer_gain_5g; + u8 max_psdu; + unsigned int tbtt_tick; + unsigned int ampdu_factor_cnt[4]; + DECLARE_BITMAP(sta_ids, STA_IDS_SIZE); + struct ieee80211_sta *wcid_to_sta[STA_IDS_SIZE]; +}; + struct rt2800_ops { void (*register_read)(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 *value); @@ -167,7 +195,8 @@ void rt2800_write_tx_data(struct queue_entry *entry, struct txentry_desc *txdesc); void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc); -void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32* txwi); +void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi, + bool match); void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); void rt2800_clear_beacon(struct queue_entry *entry); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index de4790b41be7d64a7c3a9651e27451c3f76183d6..3ab3b53238974a72c5d2170215666e4890110d4e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -239,7 +239,7 @@ static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry, { if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) { rt2800_txdone_entry(entry, entry->status, - rt2800mmio_get_txwi(entry)); + rt2800mmio_get_txwi(entry), true); return false; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 205a7b8ac8a7a252845f8e8c5866f2ed4cd1e14d..f11e3f532a84e48e17e9d530030c3f066099fe7f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -501,8 +501,7 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry) /* * TX control handlers */ -static enum txdone_entry_desc_flags -rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) +static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) { __le32 *txwi; u32 word; @@ -515,7 +514,7 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) * frame. 
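+ *
+ * Return true when the TX status in @reg could be attributed to this
+ * entry, false when it could not (I/O failure or a missed status
+ * report); either way the caller now passes the result on to
+ * rt2800_txdone_entry() as the new "match" argument.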
*/ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) - return TXDONE_FAILURE; + return false; wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); @@ -537,10 +536,10 @@ rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) rt2x00_dbg(entry->queue->rt2x00dev, "TX status report missed for queue %d entry %d\n", entry->queue->qid, entry->entry_idx); - return TXDONE_UNKNOWN; + return false; } - return TXDONE_SUCCESS; + return true; } static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) @@ -549,7 +548,7 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) struct queue_entry *entry; u32 reg; u8 qid; - enum txdone_entry_desc_flags done_status; + bool match; while (kfifo_get(&rt2x00dev->txstatus_fifo, ®)) { /* @@ -574,11 +573,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) break; } - done_status = rt2800usb_txdone_entry_check(entry, reg); - if (likely(done_status == TXDONE_SUCCESS)) - rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry)); - else - rt2x00lib_txdone_noinfo(entry, done_status); + match = rt2800usb_txdone_entry_check(entry, reg); + rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry), match); } } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 340787894c694aaa45c94cff33f66bead02af3ce..1bc353eafe37dea8408428ffc0b710ddd7ffadc9 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -174,6 +174,7 @@ struct rt2x00_chip { #define RT5390 0x5390 /* 2.4GHz */ #define RT5392 0x5392 /* 2.4GHz */ #define RT5592 0x5592 +#define RT6352 0x6352 /* WSOC 2.4GHz */ u16 rf; u16 rev; @@ -718,8 +719,8 @@ enum rt2x00_capability_flags { CAPABILITY_DOUBLE_ANTENNA, CAPABILITY_BT_COEXIST, CAPABILITY_VCO_RECALIBRATION, - CAPABILITY_INTERNAL_PA_TX0, - CAPABILITY_INTERNAL_PA_TX1, + CAPABILITY_EXTERNAL_PA_TX0, + CAPABILITY_EXTERNAL_PA_TX1, }; /* @@ -1396,7 +1397,7 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop); * rt2x00debug_dump_frame - Dump a frame to userspace through debugfs. * @rt2x00dev: Pointer to &struct rt2x00_dev. * @type: The type of frame that is being dumped. - * @skb: The skb containing the frame to be dumped. + * @entry: The queue entry containing the frame to be dumped. 
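+ *	Passing the whole queue entry (instead of the bare skb) also
+ *	gives the dump code access to the queue the frame belongs to.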
*/ #ifdef CONFIG_RT2X00_LIB_DEBUGFS void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, @@ -1425,6 +1426,8 @@ void rt2x00lib_dmastart(struct queue_entry *entry); void rt2x00lib_dmadone(struct queue_entry *entry); void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc); +void rt2x00lib_txdone_nomatch(struct queue_entry *entry, + struct txdone_entry_desc *txdesc); void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status); void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index dd6678109b7e4609533c296f774dea5864df5370..357c0941aaad4748fd5b69871359f5cb0475216f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -313,73 +313,14 @@ static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry) return ret; } -void rt2x00lib_txdone(struct queue_entry *entry, - struct txdone_entry_desc *txdesc) +static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev, + struct ieee80211_tx_info *tx_info, + struct skb_frame_desc *skbdesc, + struct txdone_entry_desc *txdesc, + bool success) { - struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); - struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); - unsigned int header_length, i; u8 rate_idx, rate_flags, retry_rates; - u8 skbdesc_flags = skbdesc->flags; - bool success; - - /* - * Unmap the skb. - */ - rt2x00queue_unmap_skb(entry); - - /* - * Remove the extra tx headroom from the skb. - */ - skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); - - /* - * Signal that the TX descriptor is no longer in the skb. - */ - skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; - - /* - * Determine the length of 802.11 header. - */ - header_length = ieee80211_get_hdrlen_from_skb(entry->skb); - - /* - * Remove L2 padding which was added during - */ - if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD)) - rt2x00queue_remove_l2pad(entry->skb, header_length); - - /* - * If the IV/EIV data was stripped from the frame before it was - * passed to the hardware, we should now reinsert it again because - * mac80211 will expect the same data to be present it the - * frame as it was passed to us. - */ - if (rt2x00_has_cap_hw_crypto(rt2x00dev)) - rt2x00crypto_tx_insert_iv(entry->skb, header_length); - - /* - * Send frame to debugfs immediately, after this call is completed - * we are going to overwrite the skb->cb array. - */ - rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); - - /* - * Determine if the frame has been successfully transmitted and - * remove BARs from our check list while checking for their - * TX status. - */ - success = - rt2x00lib_txdone_bar_status(entry) || - test_bit(TXDONE_SUCCESS, &txdesc->flags) || - test_bit(TXDONE_UNKNOWN, &txdesc->flags); - - /* - * Update TX statistics. 
- */ - rt2x00dev->link.qual.tx_success += success; - rt2x00dev->link.qual.tx_failed += !success; + int i; rate_idx = skbdesc->tx_rate_idx; rate_flags = skbdesc->tx_rate_flags; @@ -416,6 +357,9 @@ void rt2x00lib_txdone(struct queue_entry *entry, if (i < (IEEE80211_TX_MAX_RATES - 1)) tx_info->status.rates[i].idx = -1; /* terminate */ + if (test_bit(TXDONE_NO_ACK_REQ, &txdesc->flags)) + tx_info->flags |= IEEE80211_TX_CTL_NO_ACK; + if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { if (success) tx_info->flags |= IEEE80211_TX_STAT_ACK; @@ -434,7 +378,8 @@ void rt2x00lib_txdone(struct queue_entry *entry, */ if (test_bit(TXDONE_AMPDU, &txdesc->flags) || tx_info->flags & IEEE80211_TX_CTL_AMPDU) { - tx_info->flags |= IEEE80211_TX_STAT_AMPDU; + tx_info->flags |= IEEE80211_TX_STAT_AMPDU | + IEEE80211_TX_CTL_AMPDU; tx_info->status.ampdu_len = 1; tx_info->status.ampdu_ack_len = success ? 1 : 0; @@ -448,21 +393,11 @@ void rt2x00lib_txdone(struct queue_entry *entry, else rt2x00dev->low_level_stats.dot11RTSFailureCount++; } +} - /* - * Only send the status report to mac80211 when it's a frame - * that originated in mac80211. If this was a extra frame coming - * through a mac80211 library call (RTS/CTS) then we should not - * send the status report back. - */ - if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { - if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT)) - ieee80211_tx_status(rt2x00dev->hw, entry->skb); - else - ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); - } else - dev_kfree_skb_any(entry->skb); - +static void rt2x00lib_clear_entry(struct rt2x00_dev *rt2x00dev, + struct queue_entry *entry) +{ /* * Make this entry available for reuse. */ @@ -485,6 +420,143 @@ void rt2x00lib_txdone(struct queue_entry *entry, rt2x00queue_unpause_queue(entry->queue); spin_unlock_bh(&entry->queue->tx_lock); } + +void rt2x00lib_txdone_nomatch(struct queue_entry *entry, + struct txdone_entry_desc *txdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + struct ieee80211_tx_info txinfo = {}; + bool success; + + /* + * Unmap the skb. + */ + rt2x00queue_unmap_skb(entry); + + /* + * Signal that the TX descriptor is no longer in the skb. + */ + skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; + + /* + * Send frame to debugfs immediately, after this call is completed + * we are going to overwrite the skb->cb array. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); + + /* + * Determine if the frame has been successfully transmitted and + * remove BARs from our check list while checking for their + * TX status. + */ + success = + rt2x00lib_txdone_bar_status(entry) || + test_bit(TXDONE_SUCCESS, &txdesc->flags); + + if (!test_bit(TXDONE_UNKNOWN, &txdesc->flags)) { + /* + * Update TX statistics. 
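+	 * Unlike the matched path in rt2x00lib_txdone() below, both the
+	 * statistics update and the status report are skipped entirely
+	 * for TXDONE_UNKNOWN entries, and the report itself has to go
+	 * through ieee80211_tx_status_noskb() using the station pointer
+	 * stashed in the skb descriptor at TX time.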
+	 */
+		rt2x00dev->link.qual.tx_success += success;
+		rt2x00dev->link.qual.tx_failed += !success;
+
+		rt2x00lib_fill_tx_status(rt2x00dev, &txinfo, skbdesc, txdesc,
+					 success);
+		ieee80211_tx_status_noskb(rt2x00dev->hw, skbdesc->sta, &txinfo);
+	}
+
+	dev_kfree_skb_any(entry->skb);
+	rt2x00lib_clear_entry(rt2x00dev, entry);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_txdone_nomatch);
+
+void rt2x00lib_txdone(struct queue_entry *entry,
+		      struct txdone_entry_desc *txdesc)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	u8 skbdesc_flags = skbdesc->flags;
+	unsigned int header_length;
+	bool success;
+
+	/*
+	 * Unmap the skb.
+	 */
+	rt2x00queue_unmap_skb(entry);
+
+	/*
+	 * Remove the extra tx headroom from the skb.
+	 */
+	skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
+
+	/*
+	 * Signal that the TX descriptor is no longer in the skb.
+	 */
+	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+	/*
+	 * Determine the length of the 802.11 header.
+	 */
+	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+
+	/*
+	 * Remove the L2 padding which was added during TX.
+	 */
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
+		rt2x00queue_remove_l2pad(entry->skb, header_length);
+
+	/*
+	 * If the IV/EIV data was stripped from the frame before it was
+	 * passed to the hardware, we should now reinsert it again because
+	 * mac80211 will expect the same data to be present in the
+	 * frame as it was passed to us.
+	 */
+	if (rt2x00_has_cap_hw_crypto(rt2x00dev))
+		rt2x00crypto_tx_insert_iv(entry->skb, header_length);
+
+	/*
+	 * Send frame to debugfs immediately; after this call is completed
+	 * we are going to overwrite the skb->cb array.
+	 */
+	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
+
+	/*
+	 * Determine if the frame has been successfully transmitted and
+	 * remove BARs from our check list while checking for their
+	 * TX status.
+	 */
+	success =
+	    rt2x00lib_txdone_bar_status(entry) ||
+	    test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
+	    test_bit(TXDONE_UNKNOWN, &txdesc->flags);
+
+	/*
+	 * Update TX statistics.
+	 */
+	rt2x00dev->link.qual.tx_success += success;
+	rt2x00dev->link.qual.tx_failed += !success;
+
+	rt2x00lib_fill_tx_status(rt2x00dev, tx_info, skbdesc, txdesc, success);
+
+	/*
+	 * Only send the status report to mac80211 when it's a frame
+	 * that originated in mac80211. If this was an extra frame coming
+	 * through a mac80211 library call (RTS/CTS) then we should not
+	 * send the status report back.
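+	 * Depending on the driver's context requirements the report goes
+	 * through ieee80211_tx_status() (tasklet context) or its _ni()
+	 * variant; driver-generated frames are simply freed instead.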
+ */ + if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { + if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT)) + ieee80211_tx_status(rt2x00dev->hw, entry->skb); + else + ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); + } else { + dev_kfree_skb_any(entry->skb); + } + + rt2x00lib_clear_entry(rt2x00dev, entry); +} EXPORT_SYMBOL_GPL(rt2x00lib_txdone); void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status) @@ -753,7 +825,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc); if (rxdesc.rate_mode == RATE_MODE_HT_MIX || rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD) - rxdesc.flags |= RX_FLAG_HT; + rxdesc.encoding = RX_ENC_HT; /* * Check if this is a beacon, and more frames have been @@ -793,6 +865,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) rx_status->rate_idx = rate_idx; rx_status->signal = rxdesc.rssi; rx_status->flag = rxdesc.flags; + rx_status->enc_flags = rxdesc.enc_flags; + rx_status->encoding = rxdesc.encoding; + rx_status->bw = rxdesc.bw; rx_status->antenna = rt2x00dev->link.ant.active.rx; ieee80211_rx_ni(rt2x00dev->hw, entry->skb); @@ -1384,6 +1459,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + wiphy_ext_feature_set(rt2x00dev->hw->wiphy, + NL80211_EXT_FEATURE_CQM_RSSI_LIST); + /* * Initialize ieee80211 structure. */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index e1660b92b20c7793c88ea5d470e1d793f495bbf6..a2c1ca5c76d1c7d5b9191d8ad0f35aba949dfcc7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -372,15 +372,16 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, /* * Determine IFS values - * - Use TXOP_BACKOFF for management frames except beacons + * - Use TXOP_BACKOFF for probe and management frames except beacons * - Use TXOP_SIFS for fragment bursts * - Use TXOP_HTTXOP for everything else * * Note: rt2800 devices won't use CTS protection (if used) * for frames not transmitted with TXOP_HTTXOP */ - if (ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_beacon(hdr->frame_control)) + if ((ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_beacon(hdr->frame_control)) || + (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) txdesc->u.ht.txop = TXOP_BACKOFF; else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) txdesc->u.ht.txop = TXOP_SIFS; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h index 22d18818e85004261bbc6c8e6c63d6c8208058ac..6055f36211b95fcd9dde9d5bb3fb8d981f8cd7a8 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h @@ -102,7 +102,7 @@ enum skb_frame_desc_flags { * of the scope of the skb->data pointer. * @iv: IV/EIV data used during encryption/decryption. * @skb_dma: (PCI-only) the DMA address associated with the sk buffer. - * @entry: The entry to which this sk buffer belongs. + * @sta: The station where sk buffer was sent. 
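+ *	That is, the peer this frame was transmitted to; it is recorded
+ *	here so a TX status that could not be matched back to its frame
+ *	can still be reported via ieee80211_tx_status_noskb().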
*/ struct skb_frame_desc { u8 flags; @@ -116,6 +116,7 @@ struct skb_frame_desc { __le32 iv[2]; dma_addr_t skb_dma; + struct ieee80211_sta *sta; }; /** @@ -183,6 +184,9 @@ struct rxdone_entry_desc { int flags; int dev_flags; u16 rate_mode; + u16 enc_flags; + enum mac80211_rx_encoding encoding; + enum rate_info_bw bw; u8 cipher; u8 cipher_status; @@ -214,6 +218,7 @@ enum txdone_entry_desc_flags { TXDONE_FAILURE, TXDONE_EXCESSIVE_RETRY, TXDONE_AMPDU, + TXDONE_NO_ACK_REQ, }; /** diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index e895a84481da0c68adf6642fc9614e3469e5ba90..225c1c8851cc4630eee8b857aba2bd9bd056c3ff 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -315,7 +315,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) rx_status.mactime = tsft; rx_status.flag |= RX_FLAG_MACTIME_START; if (flags & RTL818X_RX_DESC_FLAG_SPLCP) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; @@ -1877,6 +1877,8 @@ static int rtl8180_probe(struct pci_dev *pdev, else ieee80211_hw_set(dev, SIGNAL_UNSPEC); + wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + rtl8180_eeprom_read(priv); switch (priv->rf_type) { diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 231f84db9ab0691e46eee60c41f2b1ce3ccf9312..35fe991dcc569f21ac52e7afd03eee3798cd477e 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -389,7 +389,7 @@ static void rtl8187_rx_cb(struct urb *urb) rx_status.band = dev->conf.chandef.chan->band; rx_status.flag |= RX_FLAG_MACTIME_START; if (flags & RTL818X_RX_DESC_FLAG_SPLCP) - rx_status.flag |= RX_FLAG_SHORTPRE; + rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE; if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); @@ -946,8 +946,7 @@ static int rtl8187_start(struct ieee80211_hw *dev) (7 << 13 /* RX FIFO threshold NONE */) | (7 << 10 /* MAX RX DMA */) | RTL818X_RX_CONF_RX_AUTORESETPHY | - RTL818X_RX_CONF_ONLYERLPKT | - RTL818X_RX_CONF_MULTICAST; + RTL818X_RX_CONF_ONLYERLPKT; priv->rx_conf = reg; rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); @@ -1319,12 +1318,11 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev, priv->rx_conf ^= RTL818X_RX_CONF_FCS; if (changed_flags & FIF_CONTROL) priv->rx_conf ^= RTL818X_RX_CONF_CTRL; - if (changed_flags & FIF_OTHER_BSS) - priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; - if (*total_flags & FIF_ALLMULTI || multicast > 0) - priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_ALLMULTI || multicast > 0) + priv->rx_conf |= RTL818X_RX_CONF_MONITOR; else - priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST; + priv->rx_conf &= ~RTL818X_RX_CONF_MONITOR; *total_flags = 0; @@ -1332,10 +1330,10 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev, *total_flags |= FIF_FCSFAIL; if (priv->rx_conf & RTL818X_RX_CONF_CTRL) *total_flags |= FIF_CONTROL; - if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) + if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) { *total_flags |= FIF_OTHER_BSS; - if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) *total_flags |= FIF_ALLMULTI; + } rtl818x_iowrite32_async(priv, 
&priv->map->RX_CONF, priv->rx_conf); } @@ -1609,6 +1607,8 @@ static int rtl8187_probe(struct usb_interface *intf, dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) ; + wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b) printk(KERN_INFO "rtl8187: inconsistency between id with OEM" " info!\n"); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index e544dd1d618c3cea36d1a5d11068295e008c13c5..39d56313bc94509c4ec60a23f14829bf53c0a009 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5041,7 +5041,7 @@ static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv, u32 rxmcs) { if (phy_stats->sgi_en) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rxmcs < DESC_RATE_6M) { /* @@ -5267,10 +5267,10 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (rx_desc->bw) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (rx_desc->rxht) { - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; } else { rx_status->rate_idx = rx_desc->rxmcs; @@ -5337,10 +5337,10 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (rx_desc->bw) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (rx_desc->rxmcs >= DESC_RATE_MCS0) { - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; } else { rx_status->rate_idx = rx_desc->rxmcs; @@ -6135,6 +6135,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface, ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, AMPDU_AGGREGATION); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ret = ieee80211_register_hw(priv->hw); if (ret) { dev_err(&udev->dev, "%s: Failed to register: %i\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index ffa1f438424d44c67a9f848b1e43b2969be71737..57e633dbf9a9ab76bdf31e4ec9c3c5094eab180e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -44,7 +44,7 @@ static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant; static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant; static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant; -static const char *const GLBtInfoSrc8192e2Ant[] = { +static const char *const glbt_info_src_8192e_2ant[] = { "BT Info[wifi fw]", "BT Info[bt rsp]", "BT Info[bt auto report]", @@ -57,31 +57,31 @@ static u32 glcoex_ver_8192e_2ant = 0x34; * local function proto type if needed **************************************************************/ /************************************************************** - * local function start with halbtc8192e2ant_ + * local function start with btc8192e2ant_ **************************************************************/ -static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist, - u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) +static u8 
btc8192e2ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) { struct rtl_priv *rtlpriv = btcoexist->adapter; - int btrssi = 0; - u8 btrssi_state = coex_sta->pre_bt_rssi_state; + int bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; - btrssi = coex_sta->bt_rssi; + bt_rssi = coex_sta->bt_rssi; if (level_num == 2) { if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - if (btrssi >= + if (bt_rssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) - btrssi_state = BTC_RSSI_STATE_HIGH; + bt_rssi_state = BTC_RSSI_STATE_HIGH; else - btrssi_state = BTC_RSSI_STATE_STAY_LOW; + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; } else { - if (btrssi < rssi_thresh) - btrssi_state = BTC_RSSI_STATE_LOW; + if (bt_rssi < rssi_thresh) + bt_rssi_state = BTC_RSSI_STATE_LOW; else - btrssi_state = BTC_RSSI_STATE_STAY_HIGH; + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { @@ -89,62 +89,63 @@ static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist, "[BTCoex], BT Rssi thresh error!!\n"); return coex_sta->pre_bt_rssi_state; } + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - if (btrssi >= + if (bt_rssi >= (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) - btrssi_state = BTC_RSSI_STATE_MEDIUM; + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; else - btrssi_state = BTC_RSSI_STATE_STAY_LOW; + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; } else if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - if (btrssi >= (rssi_thresh1 + + if (bt_rssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) - btrssi_state = BTC_RSSI_STATE_HIGH; - else if (btrssi < rssi_thresh) - btrssi_state = BTC_RSSI_STATE_LOW; + bt_rssi_state = BTC_RSSI_STATE_HIGH; + else if (bt_rssi < rssi_thresh) + bt_rssi_state = BTC_RSSI_STATE_LOW; else - btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; } else { - if (btrssi < rssi_thresh1) - btrssi_state = BTC_RSSI_STATE_MEDIUM; + if (bt_rssi < rssi_thresh1) + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; else - btrssi_state = BTC_RSSI_STATE_STAY_HIGH; + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; } } - coex_sta->pre_bt_rssi_state = btrssi_state; + coex_sta->pre_bt_rssi_state = bt_rssi_state; - return btrssi_state; + return bt_rssi_state; } -static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, - u8 index, u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) +static u8 btc8192e2ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) { struct rtl_priv *rtlpriv = btcoexist->adapter; - int wifirssi = 0; - u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index]; + int wifi_rssi = 0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; - btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); if (level_num == 2) { if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { - if (wifirssi >= (rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) - wifirssi_state = BTC_RSSI_STATE_HIGH; + if (wifi_rssi >= + (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) + wifi_rssi_state = BTC_RSSI_STATE_HIGH; else - wifirssi_state = 
BTC_RSSI_STATE_STAY_LOW; + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; } else { - if (wifirssi < rssi_thresh) - wifirssi_state = BTC_RSSI_STATE_LOW; + if (wifi_rssi < rssi_thresh) + wifi_rssi_state = BTC_RSSI_STATE_LOW; else - wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; } } else if (level_num == 3) { if (rssi_thresh > rssi_thresh1) { @@ -157,36 +158,37 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, BTC_RSSI_STATE_LOW) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { - if (wifirssi >= (rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) - wifirssi_state = BTC_RSSI_STATE_MEDIUM; + if (wifi_rssi >= + (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; else - wifirssi_state = BTC_RSSI_STATE_STAY_LOW; + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; } else if ((coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM)) { - if (wifirssi >= (rssi_thresh1 + + if (wifi_rssi >= (rssi_thresh1 + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) - wifirssi_state = BTC_RSSI_STATE_HIGH; - else if (wifirssi < rssi_thresh) - wifirssi_state = BTC_RSSI_STATE_LOW; + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + else if (wifi_rssi < rssi_thresh) + wifi_rssi_state = BTC_RSSI_STATE_LOW; else - wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; } else { - if (wifirssi < rssi_thresh1) - wifirssi_state = BTC_RSSI_STATE_MEDIUM; + if (wifi_rssi < rssi_thresh1) + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; else - wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; } } - coex_sta->pre_wifi_rssi_state[index] = wifirssi_state; + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; - return wifirssi_state; + return wifi_rssi_state; } -static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist) +static void btc8192e2ant_monitor_bt_enable_disable(struct btc_coexist + *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; static bool pre_bt_disabled; @@ -236,57 +238,57 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist) } } -static u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist, - u8 sstype, u32 ra_masktype) +static u32 btc8192e2ant_decide_ra_mask(struct btc_coexist *btcoexist, + u8 ss_type, u32 ra_mask_type) { - u32 disra_mask = 0x0; + u32 dis_ra_mask = 0x0; - switch (ra_masktype) { + switch (ra_mask_type) { case 0: /* normal mode */ - if (sstype == 2) - disra_mask = 0x0; /* enable 2ss */ + if (ss_type == 2) + dis_ra_mask = 0x0; /* enable 2ss */ else - disra_mask = 0xfff00000;/* disable 2ss */ + dis_ra_mask = 0xfff00000; /* disable 2ss */ break; case 1: /* disable cck 1/2 */ - if (sstype == 2) - disra_mask = 0x00000003;/* enable 2ss */ + if (ss_type == 2) + dis_ra_mask = 0x00000003; /* enable 2ss */ else - disra_mask = 0xfff00003;/* disable 2ss */ + dis_ra_mask = 0xfff00003; /* disable 2ss */ break; case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */ - if (sstype == 2) - disra_mask = 0x0001f1f7;/* enable 2ss */ + if (ss_type == 2) + dis_ra_mask = 0x0001f1f7; /* enable 2ss */ else - disra_mask = 0xfff1f1f7;/* disable 2ss */ + dis_ra_mask = 0xfff1f1f7; /* disable 2ss */ break; default: break; } - return disra_mask; + return dis_ra_mask; } -static void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist, - bool force_exec, u32 dis_ratemask) +static void 
btc8192e2ant_update_ra_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_rate_mask) { - coex_dm->curra_mask = dis_ratemask; + coex_dm->cur_ra_mask = dis_rate_mask; - if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask)) - btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, - &coex_dm->curra_mask); - coex_dm->prera_mask = coex_dm->curra_mask; + if (force_exec || (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask)) + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK, + &coex_dm->cur_ra_mask); + coex_dm->pre_ra_mask = coex_dm->cur_ra_mask; } -static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8192e2ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { - bool wifi_under_bmode = false; + bool wifi_under_b_mode = false; - coex_dm->cur_arfrtype = type; + coex_dm->cur_arfr_type = type; - if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) { - switch (coex_dm->cur_arfrtype) { + if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) { + switch (coex_dm->cur_arfr_type) { case 0: /* normal mode */ btcoexist->btc_write_4byte(btcoexist, 0x430, coex_dm->backup_arfr_cnt1); @@ -296,8 +298,8 @@ static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist, case 1: btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, - &wifi_under_bmode); - if (wifi_under_bmode) { + &wifi_under_b_mode); + if (wifi_under_b_mode) { btcoexist->btc_write_4byte(btcoexist, 0x430, 0x0); btcoexist->btc_write_4byte(btcoexist, 0x434, @@ -314,46 +316,45 @@ static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist, } } - coex_dm->pre_arfrtype = coex_dm->cur_arfrtype; + coex_dm->pre_arfr_type = coex_dm->cur_arfr_type; } -static void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8192e2ant_retry_limit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { - coex_dm->cur_retrylimit_type = type; + coex_dm->cur_retry_limit_type = type; - if (force_exec || (coex_dm->pre_retrylimit_type != - coex_dm->cur_retrylimit_type)) { - switch (coex_dm->cur_retrylimit_type) { + if (force_exec || (coex_dm->pre_retry_limit_type != + coex_dm->cur_retry_limit_type)) { + switch (coex_dm->cur_retry_limit_type) { case 0: /* normal mode */ - btcoexist->btc_write_2byte(btcoexist, 0x42a, - coex_dm->backup_retrylimit); - break; + btcoexist->btc_write_2byte(btcoexist, 0x42a, + coex_dm->backup_retry_limit); + break; case 1: /* retry limit = 8 */ - btcoexist->btc_write_2byte(btcoexist, 0x42a, - 0x0808); - break; + btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808); + break; default: - break; + break; } } - coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type; + coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type; } -static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { - coex_dm->cur_ampdutime_type = type; + coex_dm->cur_ampdu_time_type = type; - if (force_exec || (coex_dm->pre_ampdutime_type != - coex_dm->cur_ampdutime_type)) { - switch (coex_dm->cur_ampdutime_type) { + if (force_exec || (coex_dm->pre_ampdu_time_type != + coex_dm->cur_ampdu_time_type)) { + switch (coex_dm->cur_ampdu_time_type) { case 0: /* normal mode */ btcoexist->btc_write_1byte(btcoexist, 0x456, coex_dm->backup_ampdu_maxtime); break; - case 1: /* AMPDU timw = 
0x38 * 32us */ + case 1: /* AMPDU time = 0x38 * 32us */ btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); break; default: @@ -361,30 +362,30 @@ static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist, } } - coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type; + coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type; } -static void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist, - bool force_exec, u8 ra_masktype, - u8 arfr_type, u8 retrylimit_type, - u8 ampdutime_type) +static void btc8192e2ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_mask_type, + u8 arfr_type, u8 retry_limit_type, + u8 ampdu_time_type) { - u32 disra_mask = 0x0; + u32 dis_ra_mask = 0x0; - coex_dm->curra_masktype = ra_masktype; - disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, - coex_dm->cur_sstype, - ra_masktype); - halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask); -btc8192e2ant_autorate_fallback_retry(btcoexist, force_exec, arfr_type); - halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type); - halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type); + coex_dm->cur_ra_mask_type = ra_mask_type; + dis_ra_mask = + btc8192e2ant_decide_ra_mask(btcoexist, coex_dm->cur_ss_type, + ra_mask_type); + btc8192e2ant_update_ra_mask(btcoexist, force_exec, dis_ra_mask); + btc8192e2ant_auto_rate_fallback_retry(btcoexist, force_exec, arfr_type); + btc8192e2ant_retry_limit(btcoexist, force_exec, retry_limit_type); + btc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type); } -static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist, - bool force_exec, bool rej_ap_agg_pkt, - bool bt_ctrl_agg_buf_size, - u8 agg_buf_size) +static void btc8192e2ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool bt_ctrl_agg_buf_size, + u8 agg_buf_size) { bool reject_rx_agg = rej_ap_agg_pkt; bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; @@ -406,7 +407,7 @@ static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist, btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); } -static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +static void btc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u32 reg_hp_txrx, reg_lp_txrx, u32tmp; @@ -417,11 +418,11 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); reg_hp_tx = u32tmp & MASKLWORD; - reg_hp_rx = (u32tmp & MASKHWORD)>>16; + reg_hp_rx = (u32tmp & MASKHWORD) >> 16; u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); reg_lp_tx = u32tmp & MASKLWORD; - reg_lp_rx = (u32tmp & MASKHWORD)>>16; + reg_lp_rx = (u32tmp & MASKHWORD) >> 16; coex_sta->high_priority_tx = reg_hp_tx; coex_sta->high_priority_rx = reg_hp_rx; @@ -439,14 +440,14 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); } -static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist) +static void btc8192e2ant_query_bt_info(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; - h2c_parameter[0] |= BIT0; /* trigger */ + h2c_parameter[0] |= BIT0; /* trigger */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", @@ -455,12 +456,12 @@ static void halbtc8192e2ant_querybt_info(struct 
btc_coexist *btcoexist) btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } -static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist) +static void btc8192e2ant_update_bt_link_info(struct btc_coexist *btcoexist) { struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bool bt_hson = false; + bool bt_hs_on = false; - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); bt_link_info->bt_link_exist = coex_sta->bt_link_exist; bt_link_info->sco_exist = coex_sta->sco_exist; @@ -469,7 +470,7 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist) bt_link_info->hid_exist = coex_sta->hid_exist; /* work around for HS mode. */ - if (bt_hson) { + if (bt_hs_on) { bt_link_info->pan_exist = true; bt_link_info->bt_link_exist = true; } @@ -511,16 +512,16 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist) bt_link_info->hid_only = false; } -static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) +static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; - bool bt_hson = false; + bool bt_hs_on = false; u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED; - u8 numdiffprofile = 0; + u8 num_of_diff_profile = 0; - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (!bt_link_info->bt_link_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -529,15 +530,15 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } if (bt_link_info->sco_exist) - numdiffprofile++; + num_of_diff_profile++; if (bt_link_info->hid_exist) - numdiffprofile++; + num_of_diff_profile++; if (bt_link_info->pan_exist) - numdiffprofile++; + num_of_diff_profile++; if (bt_link_info->a2dp_exist) - numdiffprofile++; + num_of_diff_profile++; - if (numdiffprofile == 1) { + if (num_of_diff_profile == 1) { if (bt_link_info->sco_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "SCO only\n"); @@ -552,7 +553,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) "A2DP only\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP; } else if (bt_link_info->pan_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "PAN(HS) only\n"); @@ -567,7 +568,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } } } - } else if (numdiffprofile == 2) { + } else if (num_of_diff_profile == 2) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -578,7 +579,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) "SCO + A2DP ==> SCO\n"); algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->pan_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "SCO + PAN(HS)\n"); @@ -609,7 +610,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "HID + PAN(HS)\n"); @@ -623,7 +624,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) 
{ - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "A2DP + PAN(HS)\n"); @@ -638,7 +639,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } } } - } else if (numdiffprofile == 3) { + } else if (num_of_diff_profile == 3) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { @@ -647,7 +648,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; } else if (bt_link_info->hid_exist && bt_link_info->pan_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "SCO + HID + PAN(HS)\n"); @@ -661,7 +662,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } } else if (bt_link_info->pan_exist && bt_link_info->a2dp_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "SCO + A2DP + PAN(HS)\n"); @@ -678,7 +679,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) if (bt_link_info->hid_exist && bt_link_info->pan_exist && bt_link_info->a2dp_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "HID + A2DP + PAN(HS)\n"); @@ -693,12 +694,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) } } } - } else if (numdiffprofile >= 3) { + } else if (num_of_diff_profile >= 3) { if (bt_link_info->sco_exist) { if (bt_link_info->hid_exist && bt_link_info->pan_exist && bt_link_info->a2dp_exist) { - if (bt_hson) { + if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "ErrorSCO+HID+A2DP+PAN(HS)\n"); @@ -717,8 +718,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) return algorithm; } -static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, - u8 dac_swinglvl) +static void btc8192e2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, + u8 dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; @@ -726,81 +727,81 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, /* There are several type of dacswing * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */ - h2c_parameter[0] = dac_swinglvl; + h2c_parameter[0] = dac_swing_lvl; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } -static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, - u8 dec_btpwr_lvl) +static void btc8192e2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, + u8 dec_bt_pwr_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; - h2c_parameter[0] = dec_btpwr_lvl; + h2c_parameter[0] = dec_bt_pwr_lvl; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", - dec_btpwr_lvl, h2c_parameter[0]); + dec_bt_pwr_lvl, h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } -static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, - bool force_exec, u8 dec_btpwr_lvl) +static void btc8192e2ant_dec_bt_pwr(struct btc_coexist *btcoexist, + bool force_exec, u8 dec_bt_pwr_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s Dec BT power level = %d\n", - 
force_exec ? "force to" : "", dec_btpwr_lvl); - coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; + force_exec ? "force to" : "", dec_bt_pwr_lvl); + coex_dm->cur_dec_bt_pwr = dec_bt_pwr_lvl; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); } - halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); + btc8192e2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; } -static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist, - bool enable_autoreport) +static void btc8192e2ant_set_bt_auto_report(struct btc_coexist *btcoexist, + bool enable_auto_report) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; h2c_parameter[0] = 0; - if (enable_autoreport) + if (enable_auto_report) h2c_parameter[0] |= BIT0; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_autoreport ? "Enabled!!" : "Disabled!!"), + (enable_auto_report ? "Enabled!!" : "Disabled!!"), h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); } -static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, - bool force_exec, - bool enable_autoreport) +static void btc8192e2ant_bt_auto_report(struct btc_coexist *btcoexist, + bool force_exec, + bool enable_auto_report) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s BT Auto report = %s\n", (force_exec ? "force to" : ""), - ((enable_autoreport) ? "Enabled" : "Disabled")); - coex_dm->cur_bt_auto_report = enable_autoreport; + ((enable_auto_report) ? "Enabled" : "Disabled")); + coex_dm->cur_bt_auto_report = enable_auto_report; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -811,21 +812,21 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) return; } - halbtc8192e2ant_set_bt_autoreport(btcoexist, - coex_dm->cur_bt_auto_report); + btc8192e2ant_set_bt_auto_report(btcoexist, + coex_dm->cur_bt_auto_report); coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; } -static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, - bool force_exec, u8 fw_dac_swinglvl) +static void btc8192e2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s set FW Dac Swing level = %d\n", - (force_exec ? "force to" : ""), fw_dac_swinglvl); - coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl; + (force_exec ? 
"force to" : ""), fw_dac_swing_lvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -838,8 +839,8 @@ static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, return; } - halbtc8192e2ant_setfw_dac_swinglevel(btcoexist, - coex_dm->cur_fw_dac_swing_lvl); + btc8192e2ant_set_fw_dac_swing_level(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; } @@ -869,8 +870,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, } } -static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, - bool force_exec, bool rx_rf_shrink_on) +static void btc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -896,8 +897,8 @@ static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; } -static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist, - u32 level) +static void btc8192e2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, + u32 level) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 val = (u8)level; @@ -907,28 +908,28 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); } -static void btc8192e2ant_setsw_full_swing(struct btc_coexist *btcoexist, - bool sw_dac_swingon, - u32 sw_dac_swinglvl) +static void btc8192e2ant_set_sw_full_swing(struct btc_coexist *btcoexist, + bool sw_dac_swing_on, + u32 sw_dac_swing_lvl) { - if (sw_dac_swingon) - halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl); + if (sw_dac_swing_on) + btc8192e2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl); else - halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18); + btc8192e2ant_set_dac_swing_reg(btcoexist, 0x18); } -static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, - bool force_exec, bool dac_swingon, - u32 dac_swinglvl) +static void btc8192e2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", + "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n", (force_exec ? "force to" : ""), - ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); - coex_dm->cur_dac_swing_on = dac_swingon; - coex_dm->cur_dac_swing_lvl = dac_swinglvl; + ((dac_swing_on) ? 
"ON" : "OFF"), dac_swing_lvl); + coex_dm->cur_dac_swing_on = dac_swing_on; + coex_dm->cur_dac_swing_lvl = dac_swing_lvl; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -945,14 +946,14 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, return; } mdelay(30); - btc8192e2ant_setsw_full_swing(btcoexist, dac_swingon, dac_swinglvl); + btc8192e2ant_set_sw_full_swing(btcoexist, dac_swing_on, dac_swing_lvl); coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; } -static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, - bool agc_table_en) +static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, + bool agc_table_en) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -978,8 +979,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, } } -static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, - bool force_exec, bool agc_table_en) +static void btc8192e2ant_agc_table(struct btc_coexist *btcoexist, + bool force_exec, bool agc_table_en) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -998,14 +999,14 @@ static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) return; } - halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en); + btc8192e2ant_set_agc_table(btcoexist, agc_table_en); coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; } -static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, - u32 val0x6c0, u32 val0x6c4, - u32 val0x6c8, u8 val0x6cc) +static void btc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1026,10 +1027,9 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } -static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, - bool force_exec, - u32 val0x6c0, u32 val0x6c4, - u32 val0x6c8, u8 val0x6cc) +static void btc8192e2ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1064,8 +1064,8 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) return; } - halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, - val0x6c8, val0x6cc); + btc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8, + val0x6cc); coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; @@ -1073,37 +1073,37 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; } -static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { switch (type) { case 0: - halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, - 0x5a5a5a5a, 0xffffff, 0x3); + btc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); break; case 1: - halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, - 0x5a5a5a5a, 0xffffff, 0x3); + btc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); break; case 2: - halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, - 
0x5ffb5ffb, 0xffffff, 0x3); + btc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5ffb5ffb, 0xffffff, 0x3); break; case 3: - halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, - 0x5fdb5fdb, 0xffffff, 0x3); + btc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, + 0x5fdb5fdb, 0xffffff, 0x3); break; case 4: - halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, - 0x5ffb5ffb, 0xffffff, 0x3); + btc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, + 0x5ffb5ffb, 0xffffff, 0x3); break; default: break; } } -static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, - bool enable) +static void btc8192e2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, + bool enable) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; @@ -1118,8 +1118,8 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } -static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, - bool force_exec, bool enable) +static void btc8192e2ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1140,12 +1140,12 @@ static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, coex_dm->cur_ignore_wlan_act) return; } - halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable); + btc8192e2ant_set_fw_ignore_wlan_act(btcoexist, enable); coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; } -static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, +static void btc8192e2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1173,24 +1173,24 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } -static void btc8192e2ant_sw_mec1(struct btc_coexist *btcoexist, - bool shrink_rx_lpf, bool low_penalty_ra, - bool limited_dig, bool btlan_constrain) +static void btc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool btlan_constrain) { - halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); + btc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); } -static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist, - bool agc_table_shift, bool adc_backoff, - bool sw_dac_swing, u32 dac_swinglvl) +static void btc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_backoff, + bool sw_dac_swing, u32 dac_swing_lvl) { - halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift); - halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, - dac_swinglvl); + btc8192e2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift); + btc8192e2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, + dac_swing_lvl); } -static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, - bool force_exec, bool turn_on, u8 type) +static void btc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1217,91 +1217,91 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, switch (type) { case 1: default: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x90); break; case 2: - 
halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90); break; case 3: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, 0x3, 0xf1, 0x90); break; case 4: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, 0x3, 0xf1, 0x90); break; case 5: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x90); break; case 6: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, 0x12, 0x60, 0x90); break; case 7: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, 0x3, 0x70, 0x90); break; case 8: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10, 0x3, 0x70, 0x90); break; case 9: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x10); break; case 10: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, 0x12, 0xe1, 0x10); break; case 11: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, 0x3, 0xf1, 0x10); break; case 12: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, 0x3, 0xf1, 0x10); break; case 13: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, 0x1a, 0xe0, 0x10); break; case 14: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, 0x12, 0xe0, 0x10); break; case 15: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, 0x3, 0xf0, 0x10); break; case 16: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, 0x3, 0xf0, 0x10); break; case 17: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20, 0x03, 0x10, 0x10); break; case 18: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5, 0xe1, 0x90); break; case 19: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, 0x25, 0xe1, 0x90); break; case 20: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, 0x25, 0x60, 0x90); break; case 21: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, 0x03, 0x70, 0x90); break; case 71: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x90); break; } @@ -1310,12 +1310,12 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, switch (type) { default: case 0: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0, 0x0, 0x0); btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4); break; case 1: - halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, + btc8192e2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, 0x8, 0x0); mdelay(5); btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); @@ -1328,22 +1328,22 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, coex_dm->pre_ps_tdma = 
coex_dm->cur_ps_tdma; } -static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, - u8 sstype) +static void btc8192e2ant_set_switch_ss_type(struct btc_coexist *btcoexist, + u8 ss_type) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 mimops = BTC_MIMO_PS_DYNAMIC; - u32 disra_mask = 0x0; + u32 dis_ra_mask = 0x0; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], REAL set SS Type = %d\n", sstype); + "[BTCoex], REAL set SS Type = %d\n", ss_type); - disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype, - coex_dm->curra_masktype); - halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask); + dis_ra_mask = btc8192e2ant_decide_ra_mask(btcoexist, ss_type, + coex_dm->cur_ra_mask_type); + btc8192e2ant_update_ra_mask(btcoexist, FORCE_EXEC, dis_ra_mask); - if (sstype == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + if (ss_type == 1) { + btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); /* switch ofdm path */ btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11); btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1); @@ -1352,8 +1352,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1); btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81); mimops = BTC_MIMO_PS_STATIC; - } else if (sstype == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); + } else if (ss_type == 2) { + btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33); btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3); btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313); @@ -1365,89 +1365,89 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops); } -static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist, - bool force_exec, u8 new_sstype) +static void btc8192e2ant_switch_ss_type(struct btc_coexist *btcoexist, + bool force_exec, u8 new_ss_type) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s Switch SS Type = %d\n", - (force_exec ? "force to" : ""), new_sstype); - coex_dm->cur_sstype = new_sstype; + (force_exec ? 
"force to" : ""), new_ss_type); + coex_dm->cur_ss_type = new_ss_type; if (!force_exec) { - if (coex_dm->pre_sstype == coex_dm->cur_sstype) + if (coex_dm->pre_ss_type == coex_dm->cur_ss_type) return; } - halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype); + btc8192e2ant_set_switch_ss_type(btcoexist, coex_dm->cur_ss_type); - coex_dm->pre_sstype = coex_dm->cur_sstype; + coex_dm->pre_ss_type = coex_dm->cur_ss_type; } -static void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist) +static void btc8192e2ant_coex_all_off(struct btc_coexist *btcoexist) { /* fw all off */ - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); /* sw all off */ - btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); /* hw all off */ - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } -static void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) +static void btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) { /* force to reset coex mechanism */ - halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0); - btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0); - halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + btc8192e2ant_switch_ss_type(btcoexist, FORCE_EXEC, 2); - btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); } -static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist) { bool low_pwr_disable = true; btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); } -static bool 
halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) +static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool common = false, wifi_connected = false, wifi_busy = false; - bool bt_hson = false, low_pwr_disable = false; + bool bt_hs_on = false, low_pwr_disable = false; - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); if (bt_link_info->sco_exist || bt_link_info->hid_exist) - halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0); + btc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0); else - halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); if (!wifi_connected) { low_pwr_disable = false; @@ -1461,26 +1461,24 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) coex_dm->bt_status) || (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)) { - halbtc8192e2ant_switch_sstype(btcoexist, - NORMAL_EXEC, 2); - btc8192e2ant_coex_tbl_w_type(btcoexist, - NORMAL_EXEC, 1); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); } else { - halbtc8192e2ant_switch_sstype(btcoexist, - NORMAL_EXEC, 1); - btc8192e2ant_coex_tbl_w_type(btcoexist, - NORMAL_EXEC, 0); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, false, + false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); common = true; } else { @@ -1494,20 +1492,18 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Wifi connected + BT non connected-idle!!\n"); - halbtc8192e2ant_switch_sstype(btcoexist, - NORMAL_EXEC, 2); - btc8192e2ant_coex_tbl_w_type(btcoexist, - NORMAL_EXEC, 1); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, - NORMAL_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, + NORMAL_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + 
btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); common = true; } else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == @@ -1517,25 +1513,25 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - if (bt_hson) + if (bt_hs_on) return false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Wifi connected + BT connected-idle!!\n"); - halbtc8192e2ant_switch_sstype(btcoexist, - NORMAL_EXEC, 2); - btc8192e2ant_coex_tbl_w_type(btcoexist, - NORMAL_EXEC, 1); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, - NORMAL_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_switch_ss_type(btcoexist, + NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, + NORMAL_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); common = true; } else { @@ -1552,20 +1548,21 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Wifi Connected-Idle + BT Busy!!\n"); - halbtc8192e2ant_switch_sstype(btcoexist, - NORMAL_EXEC, 1); - btc8192e2ant_coex_tbl_w_type(btcoexist, - NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 21); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, - NORMAL_EXEC, 6); - halbtc8192e2ant_dec_btpwr(btcoexist, - NORMAL_EXEC, 0); - btc8192e2ant_sw_mec1(btcoexist, false, - false, false, false); - btc8192e2ant_sw_mec2(btcoexist, false, - false, false, 0x18); + btc8192e2ant_switch_ss_type(btcoexist, + NORMAL_EXEC, 1); + btc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, + 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 21); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, + NORMAL_EXEC, 6); + btc8192e2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, 0); + btc8192e2ant_sw_mechanism1(btcoexist, false, + false, false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, + false, false, 0x18); common = true; } } @@ -1573,588 +1570,9 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) return common; } -static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, - int result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - - if (coex_dm->cur_ps_tdma == 71) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type 
= 13; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type = 13; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 71); - coex_dm->tdma_adj_type = 71; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - - if (result == -1) { - if (coex_dm->cur_ps_tdma == 71) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } 
else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 71); - coex_dm->tdma_adj_type = 71; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } - } - } -} - -static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, - int result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if 
(coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if 
(coex_dm->cur_ps_tdma == 12) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } - } - } -} - -static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause, - int result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - 
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } - } - } -} - -static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, - bool sco_hid, bool tx_pause, - u8 max_interval) +static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) { struct rtl_priv *rtlpriv = btcoexist->adapter; static int up, dn, m, n, wait_cnt; @@ -2174,72 +1592,72 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, if (sco_hid) { if (tx_pause) { if (max_interval == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 13); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); coex_dm->tdma_adj_type = 13; } else if (max_interval == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 14); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); 
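The helper being renamed here, btc8192e2ant_tdma_duration_adjust(), is the hill-climbing loop that used to feed the just-deleted btc8192e_int1/int2/int3 steppers: static counters (up, dn, m, n, wait_cnt) watch the BT retry statistics (per the usual rtlwifi pattern) and collapse each polling interval into a result of -1 (widen the Bluetooth slot), 0 (hold) or +1 (hand air time back to Wi-Fi), which then walks coex_dm->cur_ps_tdma through the numbered firmware PS-TDMA cases; e.g. for max_interval == 1 with TxPause clear, result == -1 steps 1 -> 2 -> 3 -> 4 and result == 1 steps back 4 -> 3 -> 2 -> 1 -> 71. A minimal sketch of that result computation follows; the helper name tdma_adjust_result, the parameter bt_retry_cnt and the thresholds 2 and 3 are illustrative assumptions, not the driver's exact values:

    /* Sketch only: names and thresholds are assumptions; the shape
     * (clean intervals count up, retries count down) is the point. */
    static int tdma_adjust_result(unsigned int bt_retry_cnt)
    {
            static int up, dn, wait_cnt;
            int result = 0; /* -1: more BT air time, +1: more Wi-Fi */

            wait_cnt++;
            if (bt_retry_cnt == 0) {
                    /* clean interval: lean back toward Wi-Fi */
                    up++;
                    if (--dn < 0)
                            dn = 0;
                    if (up >= 3) {
                            up = 0;
                            dn = 0;
                            result = 1;
                    }
            } else {
                    /* BT retries seen: back off, widen the BT slot */
                    dn++;
                    if (--up < 0)
                            up = 0;
                    if (dn >= 2 || bt_retry_cnt > 3) {
                            up = 0;
                            dn = 0;
                            wait_cnt = 0;
                            result = -1;
                    }
            }
            return result;
    }
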
coex_dm->tdma_adj_type = 14; } else { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 15); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); coex_dm->tdma_adj_type = 15; } } else { if (max_interval == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 9); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); coex_dm->tdma_adj_type = 9; } else if (max_interval == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 10); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); coex_dm->tdma_adj_type = 10; } else { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 11); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); coex_dm->tdma_adj_type = 11; } } } else { if (tx_pause) { if (max_interval == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 5); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); coex_dm->tdma_adj_type = 5; } else if (max_interval == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 6); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); coex_dm->tdma_adj_type = 6; } else { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 7); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); coex_dm->tdma_adj_type = 7; } } else { if (max_interval == 1) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 1); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 1); coex_dm->tdma_adj_type = 1; } else if (max_interval == 2) { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 2); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); coex_dm->tdma_adj_type = 2; } else { - halbtc8192e2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 3); + btc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); coex_dm->tdma_adj_type = 3; } } @@ -2322,12 +1740,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], max Interval = %d\n", max_interval); - if (max_interval == 1) - btc8192e_int1(btcoexist, tx_pause, result); - else if (max_interval == 2) - btc8192e_int2(btcoexist, tx_pause, result); - else if (max_interval == 3) - btc8192e_int3(btcoexist, tx_pause, result); } /* if current PsTdma not match with @@ -2348,9 +1760,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); if (!scan && !link && !roam) - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, - coex_dm->tdma_adj_type); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, coex_dm->tdma_adj_type); else RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); @@ -2358,583 +1769,578 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, } /* SCO only or SCO+PAN(HS) */ -static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_sco(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + 
btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); } btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x6); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x6); } else { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x6); + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x6); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x6); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x6); } else { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x6); + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x6); } } } -static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = 
BTC_RSSI_STATE_STAY_LOW; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); } btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x6); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x6); } else { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x6); + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x6); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x6); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x6); } else { - 
btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x6); + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x6); } } } -static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_hid(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); } /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - 
(wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ -static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_a2dp(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; bool long_dist = false; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - if ((btrssi_state == BTC_RSSI_STATE_LOW || - btrssi_state == BTC_RSSI_STATE_STAY_LOW) && - (wifirssi_state == BTC_RSSI_STATE_LOW || - wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if ((bt_rssi_state == BTC_RSSI_STATE_LOW || + bt_rssi_state == BTC_RSSI_STATE_STAY_LOW) && + (wifi_rssi_state == BTC_RSSI_STATE_LOW || + wifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); long_dist = true; } if (long_dist) { - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, - 0x4); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, + 0x4); } else { - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, - 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, + 0x8); } - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); if (long_dist) - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); else - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); if (long_dist) { - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17); coex_dm->auto_tdma_adjust = false; - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); } else { - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - true, 1); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - false, 1); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - } else if 
((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - false, 1); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_tdma_duration_adjust(btcoexist, false, + true, 1); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); } } /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + 
btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - false, 2); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - false, 2); - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 2); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 2); + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); } /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - true, 0x6); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + true, 0x6); } else { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - true, 0x6); + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + true, 0x6); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - true, 0x6); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + true, 0x6); } else { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - true, 0x6); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + true, 0x6); } } } -static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = 
btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); } /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } /* PAN(HS) only */ -static void halbtc8192e2ant_action_pan_hs(struct btc_coexist 
*btcoexist) +static void btc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); } - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + 
btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } /* PAN(EDR)+A2DP */ -static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - false, 3); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, - false, 3); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 3); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_tdma_duration_adjust(btcoexist, false, false, 3); } /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { 
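Each btc8192e2ant_action_*() handler renamed in this hunk follows one template: sample the Wi-Fi and BT RSSI state machines (the STAY_* states add hysteresis at the band edges), program SS type, limited RX, the coex table and usually the FW DAC swing level, map the BT RSSI band to a (dec_bt_pwr, ps_tdma) pair, and finally pick the sw_mechanism flags from bandwidth and Wi-Fi RSSI. Condensed as a table, the PAN(EDR) handler above reads as below; the enum, struct and array names are hypothetical stand-ins, and only the three value pairs are taken from the diff:

    /* Hypothetical condensation of the action-handler template; only
     * the {dec_bt_pwr, ps_tdma_case} pairs come from the PAN(EDR)
     * handler in this patch. */
    enum rssi_band { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

    struct coex_policy {
            unsigned char dec_bt_pwr;   /* requested BT TX power decrement */
            unsigned char ps_tdma_case; /* firmware PS-TDMA case number */
    };

    static const struct coex_policy pan_edr_policy[] = {
            [RSSI_LOW]    = { 0, 5 }, /* weak BT link: leave its power alone */
            [RSSI_MEDIUM] = { 2, 1 },
            [RSSI_HIGH]   = { 4, 1 }, /* strong BT link: trade BT power for Wi-Fi */
    };

Reading the handlers this way makes the real differences between SCO, HID, A2DP and the PAN variants easy to spot: the table contents, and whether ps_tdma is driven directly or through btc8192e2ant_tdma_duration_adjust().
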
- btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, false, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); } /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + 
btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } @@ -2942,125 +2348,125 @@ static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) /* HID+A2DP+PAN(EDR) */ static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3); + } else if ((bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 
4); + btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); } /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +static void btc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist) { - u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u8 wifi_rssi_state, bt_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_bw; - wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); - btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42); + wifi_rssi_state = btc8192e2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8192e2ant_bt_rssi_state(btcoexist, 3, 34, 42); - halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); - halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 1); + btc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + btc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); - if ((btrssi_state == BTC_RSSI_STATE_LOW) || - (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2); - } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || - (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); - } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || - (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); - halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); + if ((bt_rssi_state == BTC_RSSI_STATE_LOW) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2); + } else if ((bt_rssi_state == 
BTC_RSSI_STATE_MEDIUM) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 4); + btc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); } /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, true, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || - (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, true, false, - false, 0x18); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8192e2ant_sw_mec1(btcoexist, false, true, - false, false); - btc8192e2ant_sw_mec2(btcoexist, false, false, - false, 0x18); + btc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +static void btc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 algorithm = 0; @@ -3080,12 +2486,12 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) return; } - algorithm = halbtc8192e2ant_action_algorithm(btcoexist); + algorithm = btc8192e2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT is under inquiry/page scan !!\n"); - halbtc8192e2ant_action_bt_inquiry(btcoexist); + btc8192e2ant_action_bt_inquiry(btcoexist); return; } @@ -3093,7 +2499,7 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); - if (halbtc8192e2ant_is_common_action(btcoexist)) { + if (btc8192e2ant_is_common_action(btcoexist)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action 2-Ant common\n"); coex_dm->auto_tdma_adjust = false; @@ -3109,47 +2515,47 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) case BT_8192E_2ANT_COEX_ALGO_SCO: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = SCO\n"); - halbtc8192e2ant_action_sco(btcoexist); + btc8192e2ant_action_sco(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_SCO_PAN: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = 
SCO+PAN(EDR)\n"); - halbtc8192e2ant_action_sco_pan(btcoexist); + btc8192e2ant_action_sco_pan(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = HID\n"); - halbtc8192e2ant_action_hid(btcoexist); + btc8192e2ant_action_hid(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_A2DP: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = A2DP\n"); - halbtc8192e2ant_action_a2dp(btcoexist); + btc8192e2ant_action_a2dp(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = A2DP+PAN(HS)\n"); - halbtc8192e2ant_action_a2dp_pan_hs(btcoexist); + btc8192e2ant_action_a2dp_pan_hs(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = PAN(EDR)\n"); - halbtc8192e2ant_action_pan_edr(btcoexist); + btc8192e2ant_action_pan_edr(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANHS: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = HS mode\n"); - halbtc8192e2ant_action_pan_hs(btcoexist); + btc8192e2ant_action_pan_hs(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = PAN+A2DP\n"); - halbtc8192e2ant_action_pan_edr_a2dp(btcoexist); + btc8192e2ant_action_pan_edr_a2dp(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = PAN(EDR)+HID\n"); - halbtc8192e2ant_action_pan_edr_hid(btcoexist); + btc8192e2ant_action_pan_edr_hid(btcoexist); break; case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -3159,20 +2565,20 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) case BT_8192E_2ANT_COEX_ALGO_HID_A2DP: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = HID+A2DP\n"); - halbtc8192e2ant_action_hid_a2dp(btcoexist); + btc8192e2ant_action_hid_a2dp(btcoexist); break; default: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "Action 2-Ant, algorithm = unknown!!\n"); - /* halbtc8192e2ant_coex_alloff(btcoexist); */ + /* btc8192e2ant_coex_all_off(btcoexist); */ break; } coex_dm->pre_algorithm = coex_dm->cur_algorithm; } } -static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, - bool backup) +static void btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, + bool backup) { struct rtl_priv *rtlpriv = btcoexist->adapter; u16 u16tmp = 0; @@ -3191,7 +2597,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, 0x430); coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist, 0x434); - coex_dm->backup_retrylimit = btcoexist->btc_read_2byte( + coex_dm->backup_retry_limit = btcoexist->btc_read_2byte( btcoexist, 0x42a); coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte( @@ -3209,7 +2615,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, else btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004); - btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0); + btc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); /* antenna switch control parameter */ btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555); @@ -3232,7 +2638,7 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, u16tmp |= BIT9; btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp); - /* enable PTA I2C mailbox */ + /* enable PTA I2C mailbox */ u8tmp = 
btcoexist->btc_read_1byte(btcoexist, 0x101); u8tmp |= BIT4; btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp); @@ -3247,29 +2653,25 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp); } -/************************************************************* - * work around function start with wa_halbtc8192e2ant_ - *************************************************************/ - /************************************************************ - * extern function start with EXhalbtc8192e2ant_ + * extern function start with ex_btc8192e2ant_ ************************************************************/ -void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist) +void ex_btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist) { - halbtc8192e2ant_init_hwconfig(btcoexist, true); + btc8192e2ant_init_hwconfig(btcoexist, true); } -void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) +void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Coex Mechanism Init!!\n"); - halbtc8192e2ant_init_coex_dm(btcoexist); + btc8192e2ant_init_coex_dm(btcoexist); } -void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; @@ -3278,8 +2680,8 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) u16 u16tmp[4]; u32 u32tmp[4]; bool roam = false, scan = false, link = false, wifi_under_5g = false; - bool bt_hson = false, wifi_busy = false; - int wifirssi = 0, bt_hs_rssi = 0; + bool bt_hs_on = false, wifi_busy = false; + int wifi_rssi = 0, bt_hs_rssi = 0; u32 wifi_bw, wifi_traffic_dir; u8 wifi_dot11_chnl, wifi_hs_chnl; u32 fw_ver = 0, bt_patch_ver = 0; @@ -3316,21 +2718,21 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, fw_ver, bt_patch_ver, bt_patch_ver); - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsMode(HsChnl)", - wifi_dot11_chnl, bt_hson, wifi_hs_chnl); + wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ", "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info); - btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", - "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi); + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); @@ -3377,7 +2779,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %7ph(%d)", - GLBtInfoSrc8192e2Ant[i], + glbt_info_src_8192e_2ant[i], coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } @@ 
-3390,7 +2792,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type", - coex_dm->cur_sstype); + coex_dm->cur_ss_type); /* Sw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", @@ -3429,7 +2831,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, - coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, coex_dm->backup_ampdu_maxtime); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); @@ -3485,12 +2887,12 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) "0x774(lp rx[31:16]/tx[15:0])", coex_sta->low_priority_rx, coex_sta->low_priority_tx); #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1) - halbtc8192e2ant_monitor_bt_ctr(btcoexist); + btc8192e2ant_monitor_bt_ctr(btcoexist); #endif btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); } -void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -3498,7 +2900,7 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; - halbtc8192e2ant_coex_alloff(btcoexist); + btc8192e2ant_coex_all_off(btcoexist); } else if (BTC_IPS_LEAVE == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], IPS LEAVE notify\n"); @@ -3506,7 +2908,7 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) } } -void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -3521,7 +2923,7 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) } } -void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -3533,7 +2935,7 @@ void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) "[BTCoex], SCAN FINISH notify\n"); } -void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -3545,8 +2947,8 @@ void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) "[BTCoex], CONNECT FINISH notify\n"); } -void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, - u8 type) +void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; @@ -3591,8 +2993,8 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } -void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, - u8 type) +void ex_btc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -3601,8 +3003,8 @@ void 
ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, "[BTCoex], DHCP Packet notify\n"); } -void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, - u8 *tmp_buf, u8 length) +void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 bt_info = 0; @@ -3633,7 +3035,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, } if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) { - coex_sta->bt_retry_cnt = /* [3:0] */ + /* [3:0] */ + coex_sta->bt_retry_cnt = coex_sta->bt_info_c2h[rsp_source][2] & 0xf; coex_sta->bt_rssi = @@ -3651,11 +3054,11 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) - ex_halbtc8192e2ant_media_status_notify( + ex_btc8192e2ant_media_status_notify( btcoexist, BTC_MEDIA_CONNECT); else - ex_halbtc8192e2ant_media_status_notify( + ex_btc8192e2ant_media_status_notify( btcoexist, BTC_MEDIA_DISCONNECT); } @@ -3665,9 +3068,9 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, !btcoexist->stop_coex_dm) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "bit3, BT NOT ignore Wlan active!\n"); - halbtc8192e2ant_IgnoreWlanAct(btcoexist, - FORCE_EXEC, - false); + btc8192e2ant_ignore_wlan_act(btcoexist, + FORCE_EXEC, + false); } } else { /* BT already NOT ignore Wlan active, @@ -3679,8 +3082,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, if ((coex_sta->bt_info_ext & BIT4)) { /* BT auto report already enabled, do nothing */ } else { - halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC, - true); + btc8192e2ant_bt_auto_report(btcoexist, FORCE_EXEC, + true); } #endif } @@ -3718,9 +3121,9 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->sco_exist = false; } - halbtc8192e2ant_update_btlink_info(btcoexist); + btc8192e2ant_update_bt_link_info(btcoexist); - if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { + if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT Non-Connected idle!!!\n"); @@ -3728,12 +3131,12 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); - } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) || - (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) { + } else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) || + (bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); - } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) { + } else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) { coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); @@ -3758,12 +3161,7 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_dm->limited_dig = limited_dig; btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); - halbtc8192e2ant_run_coexist_mechanism(btcoexist); -} - -void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, - u8 type) -{ + btc8192e2ant_run_coexist_mechanism(btcoexist); } void 
ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
@@ -3772,11 +3170,11 @@ void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
		 "[BTCoex], Halt notify\n");
 
-	halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
-	ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+	btc8192e2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+	ex_btc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
 }
 
-void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	static u8 dis_ver_info_cnt;
@@ -3810,12 +3208,12 @@ void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist)
 	}
 
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
-	halbtc8192e2ant_querybt_info(btcoexist);
-	halbtc8192e2ant_monitor_bt_ctr(btcoexist);
-	btc8192e2ant_monitor_bt_enable_dis(btcoexist);
+	btc8192e2ant_query_bt_info(btcoexist);
+	btc8192e2ant_monitor_bt_ctr(btcoexist);
+	btc8192e2ant_monitor_bt_enable_disable(btcoexist);
 #else
-	if (halbtc8192e2ant_iswifi_status_changed(btcoexist) ||
+	if (btc8192e2ant_is_wifi_status_changed(btcoexist) ||
	    coex_dm->auto_tdma_adjust)
-		halbtc8192e2ant_run_coexist_mechanism(btcoexist);
+		btc8192e2ant_run_coexist_mechanism(btcoexist);
 #endif
 }
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index 75e1f7d0db0627ec99a64da62680aec13c61465a..fc0fa87ec404aa94a6185e87478f1f545783dbea 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -116,7 +116,7 @@ struct coex_dm_8192e_2ant {
 
 	u32 backup_arfr_cnt1;	/* Auto Rate Fallback Retry cnt */
 	u32 backup_arfr_cnt2;	/* Auto Rate Fallback Retry cnt */
-	u16 backup_retrylimit;
+	u16 backup_retry_limit;
 	u8 backup_ampdu_maxtime;
 
 	/* algorithm related */
@@ -125,18 +125,18 @@ struct coex_dm_8192e_2ant {
 	u8 bt_status;
 	u8 wifi_chnl_info[3];
 
-	u8 pre_sstype;
-	u8 cur_sstype;
+	u8 pre_ss_type;
+	u8 cur_ss_type;
 
-	u32 prera_mask;
-	u32 curra_mask;
-	u8 curra_masktype;
-	u8 pre_arfrtype;
-	u8 cur_arfrtype;
-	u8 pre_retrylimit_type;
-	u8 cur_retrylimit_type;
-	u8 pre_ampdutime_type;
-	u8 cur_ampdutime_type;
+	u32 pre_ra_mask;
+	u32 cur_ra_mask;
+	u8 cur_ra_mask_type;
+	u8 pre_arfr_type;
+	u8 cur_arfr_type;
+	u8 pre_retry_limit_type;
+	u8 cur_retry_limit_type;
+	u8 pre_ampdu_time_type;
+	u8 cur_ampdu_time_type;
 };
 
 struct coex_sta_8192e_2ant {
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index d67bbfb6ad8e61c4e1e0cef2e3a83c04a961f71f..2003c8c51dcc4c9deefcbe5f1045b010e53cc080 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -45,7 +45,7 @@ static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant;
 static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant;
 static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant;
 
-static const char *const GLBtInfoSrc8723b1Ant[] = {
+static const char *const glbt_info_src_8723b_1ant[] = {
	"BT Info[wifi fw]",
	"BT Info[bt rsp]",
	"BT Info[bt auto report]",
@@ -60,188 +60,6 @@ static u32 glcoex_ver_8723b_1ant = 0x47;
 
 /***************************************************************
  * local function start with halbtc8723b1ant_
***************************************************************/ -static u8 halbtc8723b1ant_bt_rssi_state(struct btc_coexist *btcoexist, - u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - s32 bt_rssi = 0; - u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; - - bt_rssi = coex_sta->bt_rssi; - - if (level_num == 2) { - if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || - (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - if (bt_rssi >= rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { - bt_rssi_state = BTC_RSSI_STATE_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state switch to High\n"); - } else { - bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state stay at Low\n"); - } - } else { - if (bt_rssi < rssi_thresh) { - bt_rssi_state = BTC_RSSI_STATE_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state switch to Low\n"); - } else { - bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state stay at High\n"); - } - } - } else if (level_num == 3) { - if (rssi_thresh > rssi_thresh1) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi thresh error!!\n"); - return coex_sta->pre_bt_rssi_state; - } - - if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || - (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - if (bt_rssi >= rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { - bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state switch to Medium\n"); - } else { - bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state stay at Low\n"); - } - } else if ((coex_sta->pre_bt_rssi_state == - BTC_RSSI_STATE_MEDIUM) || - (coex_sta->pre_bt_rssi_state == - BTC_RSSI_STATE_STAY_MEDIUM)) { - if (bt_rssi >= rssi_thresh1 + - BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { - bt_rssi_state = BTC_RSSI_STATE_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state switch to High\n"); - } else if (bt_rssi < rssi_thresh) { - bt_rssi_state = BTC_RSSI_STATE_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state switch to Low\n"); - } else { - bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state stay at Medium\n"); - } - } else { - if (bt_rssi < rssi_thresh1) { - bt_rssi_state = BTC_RSSI_STATE_MEDIUM; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state switch to Medium\n"); - } else { - bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Rssi state stay at High\n"); - } - } - } - - coex_sta->pre_bt_rssi_state = bt_rssi_state; - - return bt_rssi_state; -} - -static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, - u8 index, u8 level_num, - u8 rssi_thresh, u8 rssi_thresh1) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - s32 wifi_rssi = 0; - u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; - - btcoexist->btc_get(btcoexist, - BTC_GET_S4_WIFI_RSSI, &wifi_rssi); - - if (level_num == 2) { - if ((coex_sta->pre_wifi_rssi_state[index] == - BTC_RSSI_STATE_LOW) || - (coex_sta->pre_wifi_rssi_state[index] == - BTC_RSSI_STATE_STAY_LOW)) { - if (wifi_rssi >= rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { - wifi_rssi_state = 
BTC_RSSI_STATE_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state switch to High\n"); - } else { - wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state stay at Low\n"); - } - } else { - if (wifi_rssi < rssi_thresh) { - wifi_rssi_state = BTC_RSSI_STATE_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state switch to Low\n"); - } else { - wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state stay at High\n"); - } - } - } else if (level_num == 3) { - if (rssi_thresh > rssi_thresh1) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI thresh error!!\n"); - return coex_sta->pre_wifi_rssi_state[index]; - } - - if ((coex_sta->pre_wifi_rssi_state[index] == - BTC_RSSI_STATE_LOW) || - (coex_sta->pre_wifi_rssi_state[index] == - BTC_RSSI_STATE_STAY_LOW)) { - if (wifi_rssi >= rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { - wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state switch to Medium\n"); - } else { - wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state stay at Low\n"); - } - } else if ((coex_sta->pre_wifi_rssi_state[index] == - BTC_RSSI_STATE_MEDIUM) || - (coex_sta->pre_wifi_rssi_state[index] == - BTC_RSSI_STATE_STAY_MEDIUM)) { - if (wifi_rssi >= rssi_thresh1 + - BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { - wifi_rssi_state = BTC_RSSI_STATE_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state switch to High\n"); - } else if (wifi_rssi < rssi_thresh) { - wifi_rssi_state = BTC_RSSI_STATE_LOW; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state switch to Low\n"); - } else { - wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state stay at Medium\n"); - } - } else { - if (wifi_rssi < rssi_thresh1) { - wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state switch to Medium\n"); - } else { - wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], wifi RSSI state stay at High\n"); - } - } - } - - coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; - - return wifi_rssi_state; -} static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist, bool force_exec, u32 dis_rate_mask) @@ -249,7 +67,7 @@ static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist, coex_dm->curra_mask = dis_rate_mask; if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask)) - btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK, &coex_dm->curra_mask); coex_dm->prera_mask = coex_dm->curra_mask; @@ -326,15 +144,14 @@ static void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist, coex_dm->cur_ampdu_time_type)) { switch (coex_dm->cur_ampdu_time_type) { case 0: /* normal mode */ - btcoexist->btc_write_1byte(btcoexist, 0x456, - coex_dm->backup_ampdu_max_time); - break; + btcoexist->btc_write_1byte(btcoexist, 0x456, + coex_dm->backup_ampdu_max_time); + break; case 1: /* AMPDU timw = 0x38 * 32us */ - btcoexist->btc_write_1byte(btcoexist, - 0x456, 0x38); - break; + btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); + break; default: - break; + break; } } @@ -354,7 +171,7 
@@ static void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist, halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x00000003); break; - /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/ + /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */ case 2: halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x0001f1f7); @@ -426,7 +243,8 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist) coex_sta->c2h_bt_info_req_sent = true; - h2c_parameter[0] |= BIT0; /* trigger*/ + /* trigger */ + h2c_parameter[0] |= BIT0; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", @@ -515,202 +333,6 @@ static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist) bt_link_info->hid_only = false; } -static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bool bt_hs_on = false; - u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED; - u8 numdiffprofile = 0; - - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); - - if (!bt_link_info->bt_link_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], No BT link exists!!!\n"); - return algorithm; - } - - if (bt_link_info->sco_exist) - numdiffprofile++; - if (bt_link_info->hid_exist) - numdiffprofile++; - if (bt_link_info->pan_exist) - numdiffprofile++; - if (bt_link_info->a2dp_exist) - numdiffprofile++; - - if (numdiffprofile == 1) { - if (bt_link_info->sco_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = SCO only\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; - } else { - if (bt_link_info->hid_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = HID only\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_HID; - } else if (bt_link_info->a2dp_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = A2DP only\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP; - } else if (bt_link_info->pan_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = PAN(HS) only\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANHS; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = PAN(EDR) only\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR; - } - } - } - } else if (numdiffprofile == 2) { - if (bt_link_info->sco_exist) { - if (bt_link_info->hid_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = SCO + HID\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_HID; - } else if (bt_link_info->a2dp_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; - } else if (bt_link_info->pan_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + PAN(HS)\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; - } - } - } else { - if (bt_link_info->hid_exist && - bt_link_info->a2dp_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = HID + A2DP\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; - } else if (bt_link_info->hid_exist && - bt_link_info->pan_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], 
BT Profile = HID + PAN(HS)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_HID_A2DP; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = HID + PAN(EDR)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; - } - } else if (bt_link_info->pan_exist && - bt_link_info->a2dp_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP; - } - } - } - } else if (numdiffprofile == 3) { - if (bt_link_info->sco_exist) { - if (bt_link_info->hid_exist && - bt_link_info->a2dp_exist) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_HID; - } else if (bt_link_info->hid_exist && - bt_link_info->pan_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_HID_A2DP; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; - } - } else if (bt_link_info->pan_exist && - bt_link_info->a2dp_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); - algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; - } - } - } else { - if (bt_link_info->hid_exist && - bt_link_info->pan_exist && - bt_link_info->a2dp_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_HID_A2DP; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR; - } - } - } - } else if (numdiffprofile >= 3) { - if (bt_link_info->sco_exist) { - if (bt_link_info->hid_exist && - bt_link_info->pan_exist && - bt_link_info->a2dp_exist) { - if (bt_hs_on) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], Error!!! 
BT Profile = SCO + HID + A2DP + PAN(HS)\n"); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, - DBG_LOUD, - "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); - algorithm = - BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; - } - } - } - } - - return algorithm; -} - static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist, bool low_penalty_ra) { @@ -721,11 +343,11 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist, if (low_penalty_ra) { h2c_parameter[1] |= BIT0; - /*normal rate except MCS7/6/5, OFDM54/48/36 */ + /* normal rate except MCS7/6/5, OFDM54/48/36 */ h2c_parameter[2] = 0x00; - h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54 */ - h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48 */ - h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */ + h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */ + h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */ + h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */ } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -846,8 +468,9 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist, } } -static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, - bool enable) +static void +halbtc8723b1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, + bool enable) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; @@ -882,7 +505,7 @@ static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist, coex_dm->cur_ignore_wlan_act) return; } - halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable); + halbtc8723b1ant_set_fw_ignore_wlan_act(btcoexist, enable); coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; } @@ -944,9 +567,9 @@ static void halbtc8723b1ant_set_lps_rpwm(struct btc_coexist *btcoexist, btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm); } -static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, - bool force_exec, - u8 lps_val, u8 rpwm_val) +static void halbtc8723b1ant_lps_rpwm(struct btc_coexist *btcoexist, + bool force_exec, + u8 lps_val, u8 rpwm_val) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -987,9 +610,9 @@ static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist, halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } -static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, - u8 ant_pos_type, bool init_hw_cfg, - bool wifi_off) +static void halbtc8723b1ant_set_ant_path(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) { struct btc_board_info *board_info = &btcoexist->board_info; u32 fw_ver = 0, u32tmp = 0; @@ -1028,7 +651,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, if (use_ext_switch) { if (init_hw_cfg) { /* 0x4c[23] = 0, 0x4c[24] = 1 - * Antenna control by WL/BT + * Antenna control by WL/BT */ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); u32tmp &= ~BIT23; @@ -1037,35 +660,36 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { - /* Main Ant to BT for IPS case 0x4c[23] = 1 */ + /* Main Ant to BT for IPS case 0x4c[23] = 1 */ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1); - /*tell firmware "no antenna inverse"*/ + /* tell firmware "no antenna inverse" */ h2c_parameter[0] = 0; h2c_parameter[1] = 1; /*ext switch type*/ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); } else { - /*Aux Ant to BT for IPS case 0x4c[23] = 1 */ + /* Aux Ant to BT for IPS case 0x4c[23] = 1 */ 
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x0); - /*tell firmware "antenna inverse"*/ + /* tell firmware "antenna inverse" */ h2c_parameter[0] = 1; - h2c_parameter[1] = 1; /*ext switch type*/ + h2c_parameter[1] = 1; /* ext switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); } } - /* fixed internal switch first*/ - /* fixed internal switch S1->WiFi, S0->BT*/ + /* fixed internal switch first + * fixed internal switch S1->WiFi, S0->BT + */ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); - else/* fixed internal switch S0->WiFi, S1->BT*/ + else /* fixed internal switch S0->WiFi, S1->BT */ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); /* ext switch setting */ @@ -1108,7 +732,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, } else { if (init_hw_cfg) { - /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64*/ + /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64 */ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); u32tmp |= BIT23; u32tmp &= ~BIT24; @@ -1116,41 +740,42 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { - /*Main Ant to WiFi for IPS case 0x4c[23] = 1*/ + /* Main Ant to WiFi for IPS case 0x4c[23] = 1 */ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x0); - /*tell firmware "no antenna inverse"*/ + /* tell firmware "no antenna inverse" */ h2c_parameter[0] = 0; - h2c_parameter[1] = 0; /*internal switch type*/ + h2c_parameter[1] = 0; /* internal switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); } else { - /*Aux Ant to BT for IPS case 0x4c[23] = 1*/ + /* Aux Ant to BT for IPS case 0x4c[23] = 1 */ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1); - /*tell firmware "antenna inverse"*/ + /* tell firmware "antenna inverse" */ h2c_parameter[0] = 1; - h2c_parameter[1] = 0; /*internal switch type*/ + h2c_parameter[1] = 0; /* internal switch type */ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); } } - /* fixed external switch first*/ - /*Main->WiFi, Aux->BT*/ + /* fixed external switch first + * Main->WiFi, Aux->BT + */ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1); - else/*Main->BT, Aux->WiFi */ + else /* Main->BT, Aux->WiFi */ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x2); - /* internal switch setting*/ + /* internal switch setting */ switch (ant_pos_type) { case BTC_ANT_PATH_WIFI: if (board_info->btdm_ant_pos == @@ -1365,7 +990,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12, 0x3, 0x14, 0x50); break; - /* SoftAP only with no sta associated,BT disable , + /* SoftAP only with no sta associated, BT disable, * TDMA mode for power saving * here softap mode screen off will cost 70-80mA for phone */ @@ -1376,24 +1001,29 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, } } else { switch (type) { - case 8: /*PTA Control */ + case 8: /* PTA Control */ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0, 0x0, 0x0); - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, - false, false); + halbtc8723b1ant_set_ant_path(btcoexist, + BTC_ANT_PATH_PTA, + false, false); break; case 0: - default: /*Software control, Antenna at BT side */ + default: + /* Software control, Antenna at BT side */ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 
0x0, 0x0, 0x0); - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, - false, false); + halbtc8723b1ant_set_ant_path(btcoexist, + BTC_ANT_PATH_BT, + false, false); break; - case 9: /*Software control, Antenna at WiFi side */ + case 9: + /* Software control, Antenna at WiFi side */ halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0); - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI, - false, false); + halbtc8723b1ant_set_ant_path(btcoexist, + BTC_ANT_PATH_WIFI, + false, false); break; } } @@ -1407,247 +1037,15 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; } -static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - bool commom = false, wifi_connected = false; - bool wifi_busy = false; - - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, - &wifi_connected); - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); - - if (!wifi_connected && - BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); - halbtc8723b1ant_sw_mechanism(btcoexist, false); - commom = true; - } else if (wifi_connected && - (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == - coex_dm->bt_status)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi connected + BT non connected-idle!!\n"); - halbtc8723b1ant_sw_mechanism(btcoexist, false); - commom = true; - } else if (!wifi_connected && - (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == - coex_dm->bt_status)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); - halbtc8723b1ant_sw_mechanism(btcoexist, false); - commom = true; - } else if (wifi_connected && - (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == - coex_dm->bt_status)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi connected + BT connected-idle!!\n"); - halbtc8723b1ant_sw_mechanism(btcoexist, false); - commom = true; - } else if (!wifi_connected && - (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != - coex_dm->bt_status)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); - halbtc8723b1ant_sw_mechanism(btcoexist, false); - commom = true; - } else { - if (wifi_busy) - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); - else - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); - - commom = false; - } - - return commom; -} - -static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist, - u8 wifi_status) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - static s32 up, dn, m, n, wait_count; - /* 0: no change, +1: increase WiFi duration, - * -1: decrease WiFi duration - */ - s32 result; - u8 retry_count = 0, bt_info_ext; - bool wifi_busy = false; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TdmaDurationAdjustForAcl()\n"); - - if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status) - wifi_busy = true; - else - wifi_busy = false; - - if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == - wifi_status) || - (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) || - (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) { - if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && - coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 
9) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - - up = 0; - dn = 0; - m = 1; - n = 3; - result = 0; - wait_count = 0; - } - return; - } - - if (!coex_dm->auto_tdma_adjust) { - coex_dm->auto_tdma_adjust = true; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); - - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); - coex_dm->tdma_adj_type = 2; - - up = 0; - dn = 0; - m = 1; - n = 3; - result = 0; - wait_count = 0; - } else { - /*accquire the BT TRx retry count from BT_Info byte2 */ - retry_count = coex_sta->bt_retry_cnt; - bt_info_ext = coex_sta->bt_info_ext; - result = 0; - wait_count++; - /* no retry in the last 2-second duration */ - if (retry_count == 0) { - up++; - dn--; - - if (dn <= 0) - dn = 0; - - if (up >= n) { - wait_count = 0; - n = 3; - up = 0; - dn = 0; - result = 1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Increase wifi duration!!\n"); - } - } else if (retry_count <= 3) { - up--; - dn++; - - if (up <= 0) - up = 0; - - if (dn == 2) { - if (wait_count <= 2) - m++; - else - m = 1; - - if (m >= 20) - m = 20; - - n = 3 * m; - up = 0; - dn = 0; - wait_count = 0; - result = -1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); - } - } else { - if (wait_count == 1) - m++; - else - m = 1; - - if (m >= 20) - m = 20; - - n = 3 * m; - up = 0; - dn = 0; - wait_count = 0; - result = -1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); - } - - if (result == -1) { - if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && - ((coex_dm->cur_ps_tdma == 1) || - (coex_dm->cur_ps_tdma == 2))) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } - } else if (result == 1) { - if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && - ((coex_dm->cur_ps_tdma == 1) || - (coex_dm->cur_ps_tdma == 2))) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } - } else { /*no change */ - /*if busy / idle change */ - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex],********* TDMA(on, %d) ********\n", - coex_dm->cur_ps_tdma); - } - - if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && - coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) { - /* recover to previous adjust type */ - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, - coex_dm->tdma_adj_type); - } - } -} - -static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist, - bool new_ps_state) +static void halbtc8723b1ant_ps_tdma_chk_pwr_save(struct btc_coexist *btcoexist, + bool new_ps_state) { u8 
lps_mode = 0x0; btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode); - if (lps_mode) { /* already under LPS state */ + if (lps_mode) { + /* already under LPS state */ if (new_ps_state) { /* keep state under LPS, do nothing. */ } else { @@ -1655,7 +1053,8 @@ static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist, halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); } - } else { /* NO PS state */ + } else { + /* NO PS state */ if (new_ps_state) { /* will enter LPS state, turn off psTdma first */ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, @@ -1681,18 +1080,18 @@ static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist, btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL); break; case BTC_PS_LPS_ON: - btc8723b1ant_pstdmachkpwrsave(btcoexist, true); - halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val, - rpwm_val); - /* when coex force to enter LPS, do not enter 32k low power. */ + halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, true); + halbtc8723b1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val, + rpwm_val); + /* when coex force to enter LPS, do not enter 32k low power */ low_pwr_disable = true; btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - /* power save must executed before psTdma. */ + /* power save must executed before psTdma */ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); break; case BTC_PS_LPS_OFF: - btc8723b1ant_pstdmachkpwrsave(btcoexist, false); + halbtc8723b1ant_ps_tdma_chk_pwr_save(btcoexist, false); btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); break; default: @@ -1700,66 +1099,6 @@ static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist, } } -/*************************************************** - * - * Software Coex Mechanism start - * - ***************************************************/ -/* SCO only or SCO+PAN(HS) */ -static void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, true); -} - -static void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, true); -} - -/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ -static void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, false); -} - -static void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, false); -} - -static void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, false); -} - -/* PAN(HS) only */ -static void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, false); -} - -/*PAN(EDR)+A2DP */ -static void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, false); -} - -static void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, true); -} - -/* HID+A2DP+PAN(EDR) */ -static void btc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, true); -} - -static void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist) -{ - halbtc8723b1ant_sw_mechanism(btcoexist, true); -} - /***************************************************** * * Non-Software Coex Mechanism start @@ -1826,11 +1165,11 @@ static void btc8723b1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, 
&wifi_connected); /* tdma and coex table */ - if (bt_link_info->sco_exist) { halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); - } else { /* HID */ + } else { + /* HID */ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5); } @@ -1840,30 +1179,21 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( struct btc_coexist *btcoexist, u8 wifi_status) { - u8 bt_rssi_state; - struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bt_rssi_state = halbtc8723b1ant_bt_rssi_state(btcoexist, 2, 28, 0); - if (bt_link_info->hid_only) { /*HID */ + if (bt_link_info->hid_only) { /* HID */ btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status); coex_dm->auto_tdma_adjust = false; return; - } else if (bt_link_info->a2dp_only) { /*A2DP */ - if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE == wifi_status) { + } else if (bt_link_info->a2dp_only) { /* A2DP */ + if (wifi_status == BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE) { halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); coex_dm->auto_tdma_adjust = false; - } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b1ant_tdma_dur_adj_for_acl(btcoexist, - wifi_status); - halbtc8723b1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 1); - } else { /*for low BT RSSI */ + } else { /* for low BT RSSI */ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); halbtc8723b1ant_coex_table_with_type(btcoexist, @@ -1871,18 +1201,18 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( coex_dm->auto_tdma_adjust = false; } } else if (bt_link_info->hid_exist && - bt_link_info->a2dp_exist) { /*HID+A2DP */ - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + bt_link_info->a2dp_exist) { /* HID + A2DP */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); coex_dm->auto_tdma_adjust = false; halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); - /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */ + /* PAN(OPP,FTP), HID + PAN(OPP,FTP) */ } else if (bt_link_info->pan_only || (bt_link_info->hid_exist && bt_link_info->pan_exist)) { halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); coex_dm->auto_tdma_adjust = false; - /*A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP)*/ + /* A2DP + PAN(OPP,FTP), HID + A2DP + PAN(OPP,FTP) */ } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) || (bt_link_info->hid_exist && bt_link_info->a2dp_exist && bt_link_info->pan_exist)) { @@ -1907,57 +1237,59 @@ static void btc8723b1ant_action_wifi_not_conn(struct btc_coexist *btcoexist) halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } -static void btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoex) +static void +btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoexist) { - struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE, + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); /* tdma and coex table */ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { - halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, + halbtc8723b1ant_ps_tdma(btcoexist, 
NORMAL_EXEC, true, 22); - halbtc8723b1ant_coex_table_with_type(btcoex, + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } else if (bt_link_info->pan_only) { - halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); - halbtc8723b1ant_coex_table_with_type(btcoex, + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); } else { - halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); - halbtc8723b1ant_coex_table_with_type(btcoex, + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)){ - btc8723b1ant_act_bt_sco_hid_only_busy(btcoex, + btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN); } else { - halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8); - halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 2); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); } } -static void btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoex) +static void +btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoexist) { - struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE, + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) || (bt_link_info->sco_exist) || (bt_link_info->hid_only) || (bt_link_info->a2dp_only) || (bt_link_info->pan_only)) { - halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8); - halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 7); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); } else { - halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, true, 20); - halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 1); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } } @@ -2109,75 +1441,6 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) } } -static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 algorithm = 0; - - algorithm = halbtc8723b1ant_action_algorithm(btcoexist); - coex_dm->cur_algorithm = algorithm; - - if (!halbtc8723b1ant_is_common_action(btcoexist)) { - switch (coex_dm->cur_algorithm) { - case BT_8723B_1ANT_COEX_ALGO_SCO: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = SCO\n"); - halbtc8723b1ant_action_sco(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_HID: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = HID\n"); - halbtc8723b1ant_action_hid(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_A2DP: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = A2DP\n"); - halbtc8723b1ant_action_a2dp(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); - halbtc8723b1ant_action_a2dp_pan_hs(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_PANEDR: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, 
DBG_LOUD, - "[BTCoex], Action algorithm = PAN(EDR)\n"); - halbtc8723b1ant_action_pan_edr(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_PANHS: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = HS mode\n"); - halbtc8723b1ant_action_pan_hs(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = PAN+A2DP\n"); - halbtc8723b1ant_action_pan_edr_a2dp(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); - halbtc8723b1ant_action_pan_edr_hid(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = HID+A2DP+PAN\n"); - btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist); - break; - case BT_8723B_1ANT_COEX_ALGO_HID_A2DP: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = HID+A2DP\n"); - halbtc8723b1ant_action_hid_a2dp(btcoexist); - break; - default: - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Action algorithm = coexist All Off!!\n"); - break; - } - coex_dm->pre_algorithm = coex_dm->cur_algorithm; - } -} - static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -2186,7 +1449,6 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) bool increase_scan_dev_num = false; bool bt_ctrl_agg_buf_size = false; u8 agg_buf_size = 5; - u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; @@ -2238,16 +1500,12 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) { halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); } else { - if (wifi_connected) { - wifi_rssi_state = - halbtc8723b1ant_wifi_rssi_state(btcoexist, - 1, 2, 30, 0); + if (wifi_connected) halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 1, 1, 1); - } else { + else halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); - } } if (bt_link_info->sco_exist) { @@ -2263,8 +1521,6 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, bt_ctrl_agg_buf_size, agg_buf_size); - btc8723b1ant_run_sw_coex_mech(btcoexist); - btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (coex_sta->c2h_bt_inquiry_page) { @@ -2364,28 +1620,19 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); /* Enable counter statistics */ - /*0x76e[3] =1, WLAN_Act control by PTA */ + /*0x76e[3] = 1, WLAN_Act control by PTA */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); - /*Antenna config */ - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false); + /* Antenna config */ + halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, true, false); /* PTA parameter */ halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); } -static void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist) -{ - /* set wlan_act to low */ - btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); -} - /************************************************************** - * work around function 
start with wa_halbtc8723b1ant_ - **************************************************************/ -/************************************************************** - * extern function start with EXhalbtc8723b1ant_ + * extern function start with ex_halbtc8723b1ant_ **************************************************************/ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist) @@ -2539,7 +1786,7 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) if (coex_sta->bt_info_c2h_cnt[i]) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %7ph(%d)", - GLBtInfoSrc8723b1Ant[i], + glbt_info_src_8723b_1ant[i], coex_sta->bt_info_c2h[i], coex_sta->bt_info_c2h_cnt[i]); } @@ -2697,13 +1944,12 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, - false, true); + halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + false, true); /* set PTA control */ halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); - halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); } else if (BTC_IPS_LEAVE == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], IPS LEAVE notify\n"); @@ -2774,14 +2020,17 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) if (BTC_SCAN_START == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCAN START notify\n"); - if (!wifi_connected) /* non-connected scan */ + if (!wifi_connected) + /* non-connected scan */ btc8723b1ant_action_wifi_not_conn_scan(btcoexist); - else /* wifi is connected */ + else + /* wifi is connected */ btc8723b1ant_action_wifi_conn_scan(btcoexist); } else if (BTC_SCAN_FINISH == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCAN FINISH notify\n"); - if (!wifi_connected) /* non-connected scan */ + if (!wifi_connected) + /* non-connected scan */ btc8723b1ant_action_wifi_not_conn(btcoexist); else halbtc8723b1ant_action_wifi_connected(btcoexist); @@ -2831,7 +2080,8 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); - if (!wifi_connected) /* non-connected scan */ + if (!wifi_connected) + /* non-connected scan */ btc8723b1ant_action_wifi_not_conn(btcoexist); else halbtc8723b1ant_action_wifi_connected(btcoexist); @@ -3020,7 +2270,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->a2dp_exist = false; coex_sta->hid_exist = false; coex_sta->sco_exist = false; - } else { /* connection exists */ + } else { + /* connection exists */ coex_sta->bt_link_exist = true; if (bt_info & BT_INFO_8723B_1ANT_B_FTP) coex_sta->pan_exist = true; @@ -3089,9 +2340,8 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) btcoexist->stop_coex_dm = true; - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true); + halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true); - halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, @@ -3111,13 +2361,12 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify to SLEEP\n"); btcoexist->stop_coex_dm = true; - halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, 
- true); + halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, + true); halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); - halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify to WAKE UP\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 12125966a911140fd0b32a7d153cf47b0dc75a68..2f3946be4ce29bdb581753b515faa78bf7750be8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -240,9 +240,33 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, return wifi_rssi_state; } +static +void btc8723b2ant_limited_rx(struct btc_coexist *btcoexist, bool force_exec, + bool rej_ap_agg_pkt, bool bt_ctrl_agg_buf_size, + u8 agg_buf_size) +{ + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; + u8 rx_agg_size = agg_buf_size; + + /* ============================================ */ + /* Rx Aggregation related setting */ + /* ============================================ */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, + &reject_rx_agg); + /* decide BT control aggregation buf size or not */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregate buf size, only work when BT control Rx aggregate size */ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size); + /* real update aggregation setting */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); +} + static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; u32 reg_hp_txrx, reg_lp_txrx, u32tmp; u32 reg_hp_tx = 0, reg_hp_rx = 0; u32 reg_lp_tx = 0, reg_lp_rx = 0; @@ -263,6 +287,17 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; + if ((coex_sta->low_priority_tx > 1050) && + (!coex_sta->c2h_bt_inquiry_page)) + coex_sta->pop_event_cnt++; + + if ((coex_sta->low_priority_rx >= 950) && + (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) && + (!coex_sta->under_ips)) + bt_link_info->slave_role = true; + else + bt_link_info->slave_role = false; + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); @@ -274,6 +309,43 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); } +static void btc8723b2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist) +{ + if (coex_sta->under_ips) { + coex_sta->crc_ok_cck = 0; + coex_sta->crc_ok_11g = 0; + coex_sta->crc_ok_11n = 0; + coex_sta->crc_ok_11n_agg = 0; + + coex_sta->crc_err_cck = 0; + coex_sta->crc_err_11g = 0; + coex_sta->crc_err_11n = 0; + coex_sta->crc_err_11n_agg = 0; + } else { + coex_sta->crc_ok_cck = + btcoexist->btc_read_4byte(btcoexist, 0xf88); + coex_sta->crc_ok_11g = + btcoexist->btc_read_2byte(btcoexist, 0xf94); + coex_sta->crc_ok_11n = + btcoexist->btc_read_2byte(btcoexist, 0xf90); + 
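The monitor_bt_ctr() hunk above derives two new pieces of state purely from the BT low-priority TX/RX counters: a pop-event (audio glitch) count and whether BT is acting as slave. The thresholds (1050 TX for a pop event, 950 RX with RX >= TX for slave role) are taken from the patch; the stripped-down struct is an illustrative stand-in for the driver's coex_sta/bt_link_info fields:

struct coex_counters {
        unsigned int low_priority_tx;
        unsigned int low_priority_rx;
        int c2h_bt_inquiry_page;
        int under_ips;
        unsigned int pop_event_cnt;
};

static int bt_is_slave(struct coex_counters *c)
{
        /* heavy low-priority TX outside inquiry/page indicates audio
         * glitches ("pop events") worth counting
         */
        if (c->low_priority_tx > 1050 && !c->c2h_bt_inquiry_page)
                c->pop_event_cnt++;

        /* a slave-role BT link shows RX-dominated low-priority traffic */
        return c->low_priority_rx >= 950 &&
               c->low_priority_rx >= c->low_priority_tx &&
               !c->under_ips;
}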
coex_sta->crc_ok_11n_agg = + btcoexist->btc_read_2byte(btcoexist, 0xfb8); + + coex_sta->crc_err_cck = + btcoexist->btc_read_4byte(btcoexist, 0xf84); + coex_sta->crc_err_11g = + btcoexist->btc_read_2byte(btcoexist, 0xf96); + coex_sta->crc_err_11n = + btcoexist->btc_read_2byte(btcoexist, 0xf92); + coex_sta->crc_err_11n_agg = + btcoexist->btc_read_2byte(btcoexist, 0xfba); + } + + /* reset counter */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0); +} + static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -297,6 +369,8 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) static bool pre_bt_hs_on; bool wifi_busy = false, under_4way = false, bt_hs_on = false; bool wifi_connected = false; + u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; + u8 tmp; btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -320,6 +394,15 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) pre_bt_hs_on = bt_hs_on; return true; } + + tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + wifi_rssi_state = + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, tmp, 0); + + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_LOW)) + return true; } return false; @@ -327,11 +410,9 @@ static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist) { - /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool bt_hs_on = false; -#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); bt_link_info->bt_link_exist = coex_sta->bt_link_exist; @@ -345,21 +426,7 @@ static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist) bt_link_info->pan_exist = true; bt_link_info->bt_link_exist = true; } -#else /* profile from bt stack */ - bt_link_info->bt_link_exist = stack_info->bt_link_exist; - bt_link_info->sco_exist = stack_info->sco_exist; - bt_link_info->a2dp_exist = stack_info->a2dp_exist; - bt_link_info->pan_exist = stack_info->pan_exist; - bt_link_info->hid_exist = stack_info->hid_exist; - - /*for win-8 stack HID report error*/ - if (!stack_info->hid_exist) - stack_info->hid_exist = coex_sta->hid_exist; - /*sync BTInfo with BT firmware and stack*/ - /* when stack HID report error, here we use the info from bt fw.*/ - if (!stack_info->bt_link_exist) - stack_info->bt_link_exist = coex_sta->bt_link_exist; -#endif + /* check if Sco only */ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && !bt_link_info->pan_exist && !bt_link_info->hid_exist) @@ -584,44 +651,6 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) return algorithm; } -static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - bool ret = false; - bool bt_hs_on = false, wifi_connected = false; - s32 bt_hs_rssi = 0; - u8 bt_rssi_state; - - if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on)) - return false; - if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, - &wifi_connected)) - return false; - if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) - return false; - - bt_rssi_state = 
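is_wifi_status_changed() now also reports a change whenever the WiFi RSSI state machine lands exactly on HIGH or LOW, i.e. on the polling round where a threshold was crossed (STAY_HIGH/STAY_LOW rounds stay quiet), so the coex mechanism re-runs once per RSSI transition. A rough sketch of that hysteresis with invented names; the real helper also supports a three-level variant:

enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

static enum rssi_state rssi_update(enum rssi_state prev, int rssi,
                                   int thresh, int hyst)
{
        int was_high = (prev == RSSI_HIGH || prev == RSSI_STAY_HIGH);

        if (!was_high)
                /* rising edge must clear threshold plus hysteresis */
                return (rssi >= thresh + hyst) ? RSSI_HIGH : RSSI_STAY_LOW;

        /* falling edge uses the bare threshold */
        return (rssi < thresh) ? RSSI_LOW : RSSI_STAY_HIGH;
}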
btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); - - if (wifi_connected) { - if (bt_hs_on) { - if (bt_hs_rssi > 37) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Need to decrease bt power for HS mode!!\n"); - ret = true; - } - } else { - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); - ret = true; - } - } - } - - return ret; -} - static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, u8 dac_swing_lvl) { @@ -642,44 +671,40 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, } static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, - bool dec_bt_pwr) + u8 dec_bt_pwr_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; - h2c_parameter[0] = 0; - - if (dec_bt_pwr) - h2c_parameter[0] |= BIT1; + h2c_parameter[0] = dec_bt_pwr_lvl; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", - (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, - bool force_exec, bool dec_bt_pwr) + bool force_exec, u8 dec_bt_pwr_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s Dec BT power = %s\n", - force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF"); - coex_dm->cur_dec_bt_pwr = dec_bt_pwr; + "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl); + coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", - coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n", + coex_dm->pre_dec_bt_pwr_lvl, + coex_dm->cur_dec_bt_pwr_lvl); - if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) + if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl) return; } - btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); + btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr_lvl); - coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; + coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl; } static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, @@ -708,72 +733,21 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; } -static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, - bool rx_rf_shrink_on) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (rx_rf_shrink_on) { - /* Shrink RF Rx LPF corner */ - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, - 0xfffff, 0xffffc); - } else { - /* Resume RF Rx LPF corner */ - /* After initialized, we can use coex_dm->btRf0x1eBackup */ - if (btcoexist->initilized) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Resume RF Rx LPF corner!!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, - 0xfffff, - coex_dm->bt_rf0x1e_backup); - } - } -} - -static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist, - bool force_exec, bool rx_rf_shrink_on) -{ - struct rtl_priv *rtlpriv = 
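The dec_bt_pwr() rework keeps the pre/cur write-caching idiom used by nearly every setter in this file: record the requested value, then skip the commit when it equals the previous one unless force_exec is set. The generic form, for reference (names are illustrative):

struct cached_u8 {
        unsigned char pre, cur;
};

static void apply_cached(struct cached_u8 *c, unsigned char val,
                         int force_exec, void (*commit)(unsigned char))
{
        c->cur = val;

        if (!force_exec && c->pre == c->cur)
                return;         /* unchanged: suppress the H2C/register write */

        commit(c->cur);         /* e.g. fill an H2C command */
        c->pre = c->cur;
}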
btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s turn Rx RF Shrink = %s\n", - (force_exec ? "force to" : ""), (rx_rf_shrink_on ? - "ON" : "OFF")); - coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); - - if (coex_dm->pre_rf_rx_lpf_shrink == - coex_dm->cur_rf_rx_lpf_shrink) - return; - } - btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist, - coex_dm->cur_rf_rx_lpf_shrink); - - coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; -} - static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist, bool low_penalty_ra) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; - h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/ + h2c_parameter[0] = 0x6; /* op_code, 0x6 = Retry_Penalty */ if (low_penalty_ra) { h2c_parameter[1] |= BIT0; - /*normal rate except MCS7/6/5, OFDM54/48/36*/ + /* normal rate except MCS7/6/5, OFDM54/48/36 */ h2c_parameter[2] = 0x00; - h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/ - h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/ - h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ + h2c_parameter[3] = 0xf4; /* MCS7 or OFDM54 */ + h2c_parameter[4] = 0xf5; /* MCS6 or OFDM48 */ + h2c_parameter[5] = 0xf6; /* MCS5 or OFDM36 */ } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -788,7 +762,6 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist, { struct rtl_priv *rtlpriv = btcoexist->adapter; - /*return; */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s turn LowPenaltyRA = %s\n", (force_exec ? "force to" : ""), (low_penalty_ra ? @@ -830,9 +803,9 @@ static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex, btc8723b2ant_set_dac_swing_reg(btcoex, 0x18); } -static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, - bool force_exec, bool dac_swing_on, - u32 dac_swing_lvl) +void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -863,105 +836,6 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; } -static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, - bool agc_table_en) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 rssi_adjust_val = 0; - - /* BB AGC Gain Table */ - if (agc_table_en) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BB Agc Table On!\n"); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BB Agc Table Off!\n"); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001); - btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001); - 
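btc8723b_set_penalty_txrate() packs a 6-byte Retry_Penalty H2C; the patch lowers the MCS7/6/5 penalty indexes from 0xf7/0xf8/0xf9 to 0xf4/0xf5/0xf6. A sketch of the buffer layout; fill_h2c() stands in for btcoexist->btc_fill_h2c(), and the 0x69 command id is taken from the rest of this driver rather than the visible hunk:

static void set_low_penalty_ra(int low_penalty_ra,
                               void (*fill_h2c)(unsigned char cmd,
                                                unsigned int len,
                                                unsigned char *buf))
{
        unsigned char h2c[6] = { 0x6 }; /* [0] op_code: Retry_Penalty */

        if (low_penalty_ra) {
                h2c[1] |= 0x01; /* BIT0: enable low-penalty RA */
                h2c[2] = 0x00;  /* normal rates except MCS7/6/5, OFDM54/48/36 */
                h2c[3] = 0xf4;  /* penalty index for MCS7 / OFDM54 */
                h2c[4] = 0xf5;  /* penalty index for MCS6 / OFDM48 */
                h2c[5] = 0xf6;  /* penalty index for MCS5 / OFDM36 */
        }

        fill_h2c(0x69, 6, h2c); /* command id assumed from the driver */
}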
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001); - } - - /* RF Gain */ - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); - if (agc_table_en) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Agc Table On!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, - 0xfffff, 0x38fff); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, - 0xfffff, 0x38ffe); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Agc Table Off!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, - 0xfffff, 0x380c3); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, - 0xfffff, 0x28ce6); - } - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0); - - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1); - - if (agc_table_en) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Agc Table On!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, - 0xfffff, 0x38fff); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, - 0xfffff, 0x38ffe); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Agc Table Off!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, - 0xfffff, 0x380c3); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, - 0xfffff, 0x28ce6); - } - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0); - - /* set rssiAdjustVal for wifi module. */ - if (agc_table_en) - rssi_adjust_val = 8; - btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, - &rssi_adjust_val); -} - -static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist, - bool force_exec, bool agc_table_en) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s %s Agc Table\n", - (force_exec ? "force to" : ""), - (agc_table_en ? 
"Enable" : "Disable")); - coex_dm->cur_agc_table_en = agc_table_en; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", - coex_dm->pre_agc_table_en, - coex_dm->cur_agc_table_en); - - if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) - return; - } - btc8723b2ant_set_agc_table(btcoexist, agc_table_en); - - coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; -} - static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist, u32 val0x6c0, u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) @@ -1026,61 +900,73 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist, coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; } -static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { switch (type) { case 0: btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555, - 0x55555555, 0xffff, 0x3); + 0x55555555, 0xffffff, 0x3); break; case 1: btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555, - 0x5afa5afa, 0xffff, 0x3); + 0x5afa5afa, 0xffffff, 0x3); break; case 2: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, - 0x5a5a5a5a, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x5ada5ada, + 0x5ada5ada, 0xffffff, 0x3); break; case 3: btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa, - 0xaaaaaaaa, 0xffff, 0x3); + 0xaaaaaaaa, 0xffffff, 0x3); break; case 4: btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff, - 0xffffffff, 0xffff, 0x3); + 0xffffffff, 0xffffff, 0x3); break; case 5: btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, - 0x5fff5fff, 0xffff, 0x3); + 0x5fff5fff, 0xffffff, 0x3); break; case 6: btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); + 0x5a5a5a5a, 0xffffff, 0x3); break; case 7: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); break; case 8: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea, - 0x5aea5aea, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); break; case 9: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5aea5aea, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); break; case 10: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5aff5aff, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); break; case 11: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5a5f5a5f, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); break; case 12: - btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5f5f5f5f, 0xffff, 0x3); + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 13: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 14: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 15: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0xaaaaaaaa, 0xffffff, 0x3); break; default: break; @@ -1094,7 +980,7 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, u8 
h2c_parameter[1] = {0}; if (enable) - h2c_parameter[0] |= BIT0;/* function enable*/ + h2c_parameter[0] |= BIT0; /* function enable */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", @@ -1103,6 +989,33 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } +static void btc8723b2ant_set_lps_rpwm(struct btc_coexist *btcoexist, + u8 lps_val, u8 rpwm_val) +{ + u8 lps = lps_val; + u8 rpwm = rpwm_val; + + btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps); + btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm); +} + +static void btc8723b2ant_lps_rpwm(struct btc_coexist *btcoexist, + bool force_exec, u8 lps_val, u8 rpwm_val) +{ + coex_dm->cur_lps = lps_val; + coex_dm->cur_rpwm = rpwm_val; + + if (!force_exec) { + if ((coex_dm->pre_lps == coex_dm->cur_lps) && + (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) + return; + } + btc8723b2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val); + + coex_dm->pre_lps = coex_dm->cur_lps; + coex_dm->pre_rpwm = coex_dm->cur_rpwm; +} + static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, bool force_exec, bool enable) { @@ -1133,6 +1046,8 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5]; + if ((coex_sta->a2dp_exist) && (coex_sta->hid_exist)) + byte5 = byte5 | 0x1; h2c_parameter[0] = byte1; h2c_parameter[1] = byte2; @@ -1155,23 +1070,13 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } -static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist, - bool shrink_rx_lpf, bool low_penalty_ra, - bool limited_dig, bool bt_lna_constrain) +static void btc8723b2ant_sw_mechanism(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool bt_lna_constrain) { - btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } -static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist, - bool agc_table_shift, bool adc_backoff, - bool sw_dac_swing, u32 dac_swing_lvl) -{ - btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift); - btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, - dac_swing_lvl); -} - static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, u8 antpos_type, bool init_hwcfg, bool wifi_off) @@ -1189,44 +1094,66 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, use_ext_switch = true; if (init_hwcfg) { - /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT */ - u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); - u32tmp &= ~BIT23; - u32tmp |= BIT24; - btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); - + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x39, 0x8, 0x1); btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); - /* Force GNT_BT to low */ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); + if (fw_ver >= 0x180000) { + /* Use H2C to set GNT_BT to High to avoid A2DP click */ + h2c_parameter[0] = 1; + btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1, + h2c_parameter); + } else { + btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18); + 
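set_fw_ps_tdma() programs a 5-byte H2C (command 0x60); the new hunk ORs bit0 into the last byte when A2DP and HID are active together. A condensed sketch; the per-byte comments are a best-effort reading of how the callers use the fields, not an authoritative layout:

static void fill_ps_tdma_h2c(unsigned char b1, unsigned char b2,
                             unsigned char b3, unsigned char b4,
                             unsigned char b5, int a2dp_exist, int hid_exist,
                             void (*fill_h2c)(unsigned char cmd,
                                              unsigned int len,
                                              unsigned char *buf))
{
        unsigned char h2c[5];

        /* mirror of the hunk above: A2DP + HID together sets bit0 of
         * the last byte
         */
        if (a2dp_exist && hid_exist)
                b5 |= 0x1;

        h2c[0] = b1;    /* TDMA enable/mode byte */
        h2c[1] = b2;    /* WiFi slot length (see duration adjust below) */
        h2c[2] = b3;
        h2c[3] = b4;
        h2c[4] = b5;    /* extra slot-policy bits */

        fill_h2c(0x60, 5, h2c); /* H2C command 0x60, as in the driver */
}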
} + + btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0); + + /* WiFi TRx Mask off */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, + 0x1, 0xfffff, 0x0); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { /* tell firmware "no antenna inverse" */ h2c_parameter[0] = 0; - h2c_parameter[1] = 1; /* ext switch type */ - btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, - h2c_parameter); - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); } else { /* tell firmware "antenna inverse" */ h2c_parameter[0] = 1; - h2c_parameter[1] = 1; /* ext switch type */ - btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + } + + if (use_ext_switch) { + /* ext switch type */ + h2c_parameter[1] = 1; + } else { + /* int switch type */ + h2c_parameter[1] = 0; + } + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); + } else { + if (fw_ver >= 0x180000) { + /* Use H2C to set GNT_BT to "Control by PTA"*/ + h2c_parameter[0] = 0; + btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1, h2c_parameter); - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + } else { + btcoexist->btc_write_1byte(btcoexist, 0x765, 0x0); } } /* ext switch setting */ if (use_ext_switch) { + if (init_hwcfg) { + /* 0x4c[23] = 0, 0x4c[24] = 1 Ant controlled by WL/BT */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + } + /* fixed internal switch S1->WiFi, S0->BT */ - if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); - else - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0); switch (antpos_type) { case BTC_ANT_WIFI_AT_MAIN: @@ -1240,9 +1167,18 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, 0x92c, 0x3, 0x2); break; } - } else { /* internal switch */ - /* fixed ext switch */ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1); + } else { + /* internal switch */ + if (init_hwcfg) { + /* 0x4c[23] = 0, 0x4c[24] = 1 Ant controlled by WL/BT */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp |= BIT23; + u32tmp &= ~BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + } + + /* fixed ext switch, S1->Main, S0->Aux */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x0); switch (antpos_type) { case BTC_ANT_WIFI_AT_MAIN: /* fixed internal switch S1->WiFi, S0->BT */ @@ -1260,6 +1196,17 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, bool turn_on, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + u8 wifi_rssi_state, bt_rssi_state; + s8 wifi_duration_adjust = 0x0; + u8 tdma_byte4_modify = 0x0; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s turn %s PS TDMA, type=%d\n", @@ -1268,6 +1215,15 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, coex_dm->cur_ps_tdma_on = turn_on; coex_dm->cur_ps_tdma = type; + if (!(BTC_RSSI_HIGH(wifi_rssi_state) && + BTC_RSSI_HIGH(bt_rssi_state)) && turn_on) { + /* for WiFi RSSI low or BT RSSI low */ + type = type + 100; + coex_dm->is_switch_to_1dot5_ant = true; + } else { + 
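The RSSI gate added to ps_tdma() remaps a requested TDMA case N onto its 100+N variant (the new cases 101-123 in the table below) whenever either radio's RSSI is not high, flagging the 1.5-antenna mode at the same time. Condensed:

static unsigned char remap_tdma_type(unsigned char type, int turn_on,
                                     int wifi_rssi_high, int bt_rssi_high,
                                     int *is_switch_to_1dot5_ant)
{
        if (turn_on && !(wifi_rssi_high && bt_rssi_high)) {
                /* WiFi RSSI low or BT RSSI low: use the 1.5-ant variant */
                *is_switch_to_1dot5_ant = 1;
                return type + 100;      /* e.g. 3 -> 103, 22 -> 122 */
        }

        *is_switch_to_1dot5_ant = 0;
        return type;
}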
coex_dm->is_switch_to_1dot5_ant = false; + } + if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", @@ -1280,83 +1236,131 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) return; } + + if (coex_sta->scan_ap_num <= 5) { + if (coex_sta->a2dp_bit_pool >= 45) + wifi_duration_adjust = -15; + else if (coex_sta->a2dp_bit_pool >= 35) + wifi_duration_adjust = -10; + else + wifi_duration_adjust = 5; + } else if (coex_sta->scan_ap_num <= 20) { + if (coex_sta->a2dp_bit_pool >= 45) + wifi_duration_adjust = -15; + else if (coex_sta->a2dp_bit_pool >= 35) + wifi_duration_adjust = -10; + else + wifi_duration_adjust = 0; + } else if (coex_sta->scan_ap_num <= 40) { + if (coex_sta->a2dp_bit_pool >= 45) + wifi_duration_adjust = -15; + else if (coex_sta->a2dp_bit_pool >= 35) + wifi_duration_adjust = -10; + else + wifi_duration_adjust = -5; + } else { + if (coex_sta->a2dp_bit_pool >= 45) + wifi_duration_adjust = -15; + else if (coex_sta->a2dp_bit_pool >= 35) + wifi_duration_adjust = -10; + else + wifi_duration_adjust = -10; + } + + if ((bt_link_info->slave_role) && (bt_link_info->a2dp_exist)) + /* 0x778 = 0x1 at wifi slot (no blocking BT Low-Pri pkts) */ + tdma_byte4_modify = 0x1; + if (turn_on) { switch (type) { case 1: default: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x3c, + 0x03, 0xf1, 0x90 | tdma_byte4_modify); break; case 2: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, - 0x12, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x2d, + 0x03, 0xf1, 0x90 | tdma_byte4_modify); break; case 3: - /* This call breaks BT when wireless is active - - * comment it out for now until a better fix is found: - * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, - * 0x3, 0xf1, 0x90); - */ + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, + 0x90 | tdma_byte4_modify); break; case 4: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, - 0x03, 0xf1, 0x90); + 0x03, 0xf1, + 0x90 | tdma_byte4_modify); break; case 5: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0x60, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x3c, + 0x3, 0x70, 0x90 | tdma_byte4_modify); break; case 6: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, - 0x12, 0x60, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x2d, + 0x3, 0x70, 0x90 | tdma_byte4_modify); break; case 7: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, - 0x3, 0x70, 0x90); + 0x3, 0x70, + 0x90 | tdma_byte4_modify); break; case 8: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10, - 0x3, 0x70, 0x90); + 0x3, 0x70, + 0x90 | tdma_byte4_modify); break; case 9: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x3c + wifi_duration_adjust, + 0x03, 0xf1, 0x90 | tdma_byte4_modify); break; case 10: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, - 0x12, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x2d, + 0x03, 0xf1, 0x90 | tdma_byte4_modify); break; case 11: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, - 0xa, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, + 0x90 | tdma_byte4_modify); break; case 12: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, - 0x5, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x3, 0xf1, + 0x90 | tdma_byte4_modify); 
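The wifi_duration_adjust ladder above condenses to a simple rule: a heavy A2DP bit pool always shrinks the WiFi TDMA slot (audio needs the air time), otherwise the adjustment degrades with the number of scanned APs. An equivalent form, values taken from the hunk:

static int wifi_duration_adjust(unsigned char scan_ap_num,
                                unsigned char a2dp_bit_pool)
{
        int base;

        if (scan_ap_num <= 5)
                base = 5;       /* quiet air: give WiFi a little more */
        else if (scan_ap_num <= 20)
                base = 0;
        else if (scan_ap_num <= 40)
                base = -5;
        else
                base = -10;     /* crowded air: shorten the WiFi slot */

        if (a2dp_bit_pool >= 45)
                return -15;     /* heavy A2DP always wins */
        if (a2dp_bit_pool >= 35)
                return -10;
        return base;
}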
break; case 13: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0x60, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x3c, + 0x3, 0x70, 0x90 | tdma_byte4_modify); break; case 14: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, - 0x12, 0x60, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x2d, + 0x3, 0x70, 0x90 | tdma_byte4_modify); break; case 15: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, - 0xa, 0x60, 0x90); + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, + 0x90 | tdma_byte4_modify); break; case 16: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, - 0x5, 0x60, 0x90); + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x3, 0x70, + 0x90 | tdma_byte4_modify); break; case 17: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f, 0x2f, 0x60, 0x90); break; case 18: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, - 0x5, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5, + 0xe1, 0x90); break; case 19: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, @@ -1370,9 +1374,63 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, 0x03, 0x70, 0x90); break; + + case 23: + case 123: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35, + 0x03, 0x71, 0x10); + break; case 71: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0xe1, 0x90); + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xe3, 0x3c + wifi_duration_adjust, + 0x03, 0xf1, 0x90); + break; + case 101: + case 105: + case 113: + case 171: + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xd3, 0x3a + wifi_duration_adjust, + 0x03, 0x70, 0x50 | tdma_byte4_modify); + break; + case 102: + case 106: + case 110: + case 114: + btc8723b2ant_set_fw_ps_tdma( + btcoexist, 0xd3, 0x2d + wifi_duration_adjust, + 0x03, 0x70, 0x50 | tdma_byte4_modify); + break; + case 103: + case 107: + case 111: + case 115: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c, + 0x03, 0x70, + 0x50 | tdma_byte4_modify); + break; + case 104: + case 108: + case 112: + case 116: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x10, + 0x03, 0x70, + 0x50 | tdma_byte4_modify); + break; + case 109: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x03, 0xf1, + 0x90 | tdma_byte4_modify); + break; + case 121: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, + 0x90 | tdma_byte4_modify); + break; + case 22: + case 122: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35, + 0x03, 0x71, 0x11); break; } } else { @@ -1398,62 +1456,202 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; } +static void btc8723b2ant_ps_tdma_check_for_power_save_state( + struct btc_coexist *btcoexist, bool new_ps_state) +{ + u8 lps_mode = 0x0; + + btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode); + + if (lps_mode) { + /* already under LPS state */ + if (new_ps_state) { + /* keep state under LPS, do nothing. */ + } else { + /* will leave LPS state, turn off psTdma first */ + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } + } else { + /* NO PS state */ + if (new_ps_state) { + /* will enter LPS state, turn off psTdma first */ + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } else { + /* keep state under NO PS state, do nothing. 
*/ + } + } +} + +static void btc8723b2ant_power_save_state(struct btc_coexist *btcoexist, + u8 ps_type, u8 lps_val, u8 rpwm_val) +{ + bool low_pwr_disable = false; + + switch (ps_type) { + case BTC_PS_WIFI_NATIVE: + /* recover to original 32k low power setting */ + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + coex_sta->force_lps_on = false; + break; + case BTC_PS_LPS_ON: + btc8723b2ant_ps_tdma_check_for_power_save_state(btcoexist, + true); + btc8723b2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val, + rpwm_val); + /* when coex force to enter LPS, do not enter 32k low power */ + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + /* power save must executed before psTdma */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + coex_sta->force_lps_on = true; + break; + case BTC_PS_LPS_OFF: + btc8723b2ant_ps_tdma_check_for_power_save_state(btcoexist, + false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + coex_sta->force_lps_on = false; + break; + default: + break; + } +} + static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist) { /* fw all off */ + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); /* sw all off */ - btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false); /* hw all off */ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) { /* force to reset coex mechanism*/ + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); - btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0); + + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false); - btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + coex_sta->pop_event_cnt = 0; } static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false; bool low_pwr_disable = true; + bool scan = false, link = false, roam = false; btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); - if (wifi_connected) { - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + if (coex_sta->bt_abnormal_scan) { + 
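btc8723b2ant_ps_tdma_check_for_power_save_state() above reduces to a single rule: PS-TDMA must be switched off on either edge of the LPS state, and left alone when the state does not change. Condensed, with tdma_off() standing in for the ps_tdma(..., false, 1) call:

static void check_ps_transition(int lps_active, int new_ps_state,
                                void (*tdma_off)(void))
{
        /* only an actual LPS edge requires stopping PS-TDMA first */
        if (!!lps_active != !!new_ps_state)
                tdma_off();
}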
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); + } else if (scan || link || roam) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi link process + BT Inq/Page!!\n"); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + } else if (wifi_connected) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT Inq/Page!!\n"); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); } else { - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); } btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false); +} + +static void btc8723b2ant_action_wifi_link_process(struct btc_coexist + *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + u32 u32tmp; + u8 u8tmpa, u8tmpb; + + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false); + + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948); + u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765); + u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e); + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n", + u32tmp, u8tmpa, u8tmpb); +} + +static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 ap_num = 0; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset - coex_dm->switch_thres_offset; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset - coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); + + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); - btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + /* office environment */ + if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) && + (coex_sta->a2dp_exist)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n"); + + btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + /* sw all off */ + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, + false); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - coex_dm->need_recover_0x948 = true; - coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948); + return true; + } - btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX, - false, false); + btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x18); + return false; } static bool 
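Note the call order that coex_alloff() and the new action handlers follow: power_save_state() runs before ps_tdma(), so the firmware never applies a TDMA pattern against a stale LPS setting. A hypothetical wrapper showing that contract, using the driver's own functions:

static void enter_native_ps_then_stop_tdma(struct btc_coexist *btcoexist)
{
        /* 1. restore native power save (also clears force_lps_on) */
        btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
                                      0x0, 0x0);
        /* 2. only then tear down the TDMA pattern */
        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
}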
btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) @@ -1472,21 +1670,21 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) low_pwr_disable = false; btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, + false, false, 0x8); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi non-connected idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, - false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, - 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, + false); common = true; } else { @@ -1496,23 +1694,23 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, + false, false, 0x8); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi connected + BT non connected-idle!!\n"); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xb); - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, - false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); common = true; } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE == @@ -1526,20 +1724,20 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) return false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi connected + BT connected-idle!!\n"); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, + false, false, 0x8); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0xb); - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, - false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); common = true; } else { @@ -1553,36 +1751,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); common = false; } else { - if (bt_hs_on) - return false; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, - 0x1, 0xfffff, 0x0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, - 7); - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 21); - 
btc8723b2ant_fw_dac_swing_lvl(btcoexist, - NORMAL_EXEC, - 0xb); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, - NORMAL_EXEC, - true); - else - btc8723b2ant_dec_bt_pwr(btcoexist, - NORMAL_EXEC, - false); - btc8723b2ant_sw_mechanism1(btcoexist, false, - false, false, - false); - btc8723b2ant_sw_mechanism2(btcoexist, false, - false, false, - 0x18); - common = true; + common = + btc8723b2ant_action_wifi_idle_process( + btcoexist); } } } @@ -1590,550 +1764,6 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) return common; } -static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, - s32 result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - /* Set PS TDMA for max interval == 1 */ - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - - if (coex_dm->cur_ps_tdma == 71) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - - if (coex_dm->cur_ps_tdma == 9) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type = 13; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 16) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } 
else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type = 13; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); - coex_dm->tdma_adj_type = 71; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); - coex_dm->tdma_adj_type = 4; - } - - if (coex_dm->cur_ps_tdma == 13) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); - coex_dm->tdma_adj_type = 12; - } - - if (result == -1) { - if (coex_dm->cur_ps_tdma == 71) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } else if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } else if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 71); - coex_dm->tdma_adj_type = 71; - } else if (coex_dm->cur_ps_tdma == 12) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } - } - } -} - -static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, - s32 result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - /* Set PS TDMA for max interval == 2 */ - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); - 
coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16); - coex_dm->tdma_adj_type = 16; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 16) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - 
coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); - coex_dm->tdma_adj_type = 12; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 12) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } - } - } -} - -static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, - s32 result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - /* Set PS TDMA for max interval == 3 */ - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16); - coex_dm->tdma_adj_type = 16; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if 
(coex_dm->cur_ps_tdma == 13) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 16) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 6) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 7) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 14) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 15) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); - coex_dm->tdma_adj_type = 12; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 1) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 2) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - 
coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 12) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 10) { - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } - } - } -} - static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool sco_hid, bool tx_pause, u8 max_interval) @@ -2157,34 +1787,44 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); - coex_dm->tdma_adj_type = 13; + coex_dm->ps_tdma_du_adj_type = 13; } else if (max_interval == 2) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); - coex_dm->tdma_adj_type = 14; + coex_dm->ps_tdma_du_adj_type = 14; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); - coex_dm->tdma_adj_type = 15; + coex_dm->ps_tdma_du_adj_type = 15; } } else { if (max_interval == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); - coex_dm->tdma_adj_type = 9; + coex_dm->ps_tdma_du_adj_type = 9; } else if (max_interval == 2) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); - coex_dm->tdma_adj_type = 10; + coex_dm->ps_tdma_du_adj_type = 10; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - coex_dm->tdma_adj_type = 11; + coex_dm->ps_tdma_du_adj_type = 11; } } } else { @@ -2193,34 +1833,44 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - coex_dm->tdma_adj_type = 5; + coex_dm->ps_tdma_du_adj_type = 5; } else if (max_interval == 2) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); - coex_dm->tdma_adj_type = 6; + coex_dm->ps_tdma_du_adj_type = 6; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); - coex_dm->tdma_adj_type = 7; + coex_dm->ps_tdma_du_adj_type = 7; } } else { if (max_interval == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); - coex_dm->tdma_adj_type = 1; + coex_dm->ps_tdma_du_adj_type = 1; } else if (max_interval == 2) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); - coex_dm->tdma_adj_type = 2; + coex_dm->ps_tdma_du_adj_type = 2; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; } else { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - coex_dm->tdma_adj_type = 3; + coex_dm->ps_tdma_du_adj_type = 3; } } } @@ -2234,6 +1884,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, } else { /*accquire the BT TRx retry count from BT_Info byte2*/ retry_count = coex_sta->bt_retry_cnt; + + if ((coex_sta->low_priority_tx) > 1050 || + (coex_sta->low_priority_rx) > 1250) + retry_count++; + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], retry_count = %d\n", retry_count); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -2250,6 +1905,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist 
*btcoexist,
             dn = 0;
 
             if (up >= n) {
+                /* if the retry count is 0 for n consecutive 2-second
+                 * sample periods, enlarge the WiFi duration
+                 */
                 wait_count = 0;
                 n = 3;
                 up = 0;
@@ -2266,12 +1924,20 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                 up = 0;
 
                 if (dn == 2) {
+                    /* if the retry count is > 0 and < 3 for two
+                     * consecutive 2-second samples, reduce the
+                     * WiFi duration
+                     */
                     if (wait_count <= 2)
+                        /* avoid looping between the two levels */
                        m++;
                    else
                        m = 1;
 
                    if (m >= 20)
+                        /* m is capped at 20, so the wifi duration is
+                         * rechecked within at most 120 seconds
+                         */
                        m = 20;
 
                    n = 3 * m;
@@ -2282,42 +1948,793 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                    RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
                }
-            } else {
-                if (wait_count == 1)
-                    m++;
-                else
-                    m = 1;
-
-                if (m >= 20)
-                    m = 20;
-
-                n = 3 * m;
-                up = 0;
-                dn = 0;
-                wait_count = 0;
-                result = -1;
-                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
-                     "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+            } else {
+                /* once the retry count exceeds 3, reduce the WiFi
+                 * duration
+                 */
+                if (wait_count == 1)
+                    /* avoid looping between the two levels */
+                    m++;
+                else
+                    m = 1;
+
+                if (m >= 20)
+                    /* m is capped at 20, so the wifi duration is
+                     * rechecked within at most 120 seconds
+                     */
+                    m = 20;
+
+                n = 3 * m;
+                up = 0;
+                dn = 0;
+                wait_count = 0;
+                result = -1;
+                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                     "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+            }
+
+            RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                 "[BTCoex], max Interval = %d\n", max_interval);
+            if (max_interval == 1) {
+                if (tx_pause) {
+                    if (coex_dm->cur_ps_tdma == 71) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+                        coex_dm->ps_tdma_du_adj_type = 5;
+                    } else if (coex_dm->cur_ps_tdma == 1) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+                        coex_dm->ps_tdma_du_adj_type = 5;
+                    } else if (coex_dm->cur_ps_tdma == 2) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+                        coex_dm->ps_tdma_du_adj_type = 6;
+                    } else if (coex_dm->cur_ps_tdma == 3) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+                        coex_dm->ps_tdma_du_adj_type = 7;
+                    } else if (coex_dm->cur_ps_tdma == 4) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+                        coex_dm->ps_tdma_du_adj_type = 8;
+                    }
+                    if (coex_dm->cur_ps_tdma == 9) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+                        coex_dm->ps_tdma_du_adj_type = 13;
+                    } else if (coex_dm->cur_ps_tdma == 10) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+                        coex_dm->ps_tdma_du_adj_type = 14;
+                    } else if (coex_dm->cur_ps_tdma == 11) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+                        coex_dm->ps_tdma_du_adj_type = 15;
+                    } else if (coex_dm->cur_ps_tdma == 12) {
+                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+                        coex_dm->ps_tdma_du_adj_type = 16;
+                    }
+
+                    if (result == -1) {
+                        if (coex_dm->cur_ps_tdma == 5) {
+                            btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+                            coex_dm->ps_tdma_du_adj_type = 6;
+                        } else if (coex_dm->cur_ps_tdma == 6) {
+                            btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+                            coex_dm->ps_tdma_du_adj_type = 7;
+                        } else if (coex_dm->cur_ps_tdma == 7) {
+                            btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+                            coex_dm->ps_tdma_du_adj_type = 8;
+                        } else if (coex_dm->cur_ps_tdma == 13) {
+                            btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+
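/*
 * [Editor's aside -- not part of the patch above or below.]
 * The up/dn/m/n bookkeeping rewritten in the hunk above is easier to
 * follow in isolation. Below is a minimal, self-contained C sketch of
 * the same controller, assuming one sample per 2-second tick; the names
 * tdma_adjust_state and tdma_adjust_step are hypothetical and do not
 * exist in the driver.
 */
struct tdma_adjust_state {
    int up;         /* consecutive clean (retry-free) samples */
    int dn;         /* recent dirty samples */
    int m;          /* back-off multiplier, capped at 20 */
    int n;          /* clean samples required before widening */
    int wait_count; /* samples since the last adjustment */
};

/* One 2-second sample: returns +1 to widen the WiFi slot, -1 to narrow
 * it, 0 to leave it alone (this mirrors `result` in the patch).
 */
static int tdma_adjust_step(struct tdma_adjust_state *s, int retry_count)
{
    s->wait_count++;

    if (retry_count == 0) {
        s->up++;
        if (s->dn > 0)
            s->dn--;
        if (s->up >= s->n) {
            /* n consecutive clean samples: widen the WiFi duration */
            s->wait_count = 0;
            s->n = 3;
            s->up = 0;
            s->dn = 0;
            return 1;
        }
        return 0;
    }

    if (retry_count <= 3) {
        if (s->up > 0)
            s->up--;
        s->dn++;
        if (s->dn != 2)
            return 0;
        /* two dirty samples in a row: narrow and back off */
        s->m = (s->wait_count <= 2) ? s->m + 1 : 1;
    } else {
        /* heavy retries: narrow immediately with the same back-off */
        s->m = (s->wait_count == 1) ? s->m + 1 : 1;
    }

    if (s->m > 20)
        s->m = 20; /* n = 3 * 20 samples, i.e. recheck within ~120 s */
    s->n = 3 * s->m;
    s->up = 0;
    s->dn = 0;
    s->wait_count = 0;
    return -1;
}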
coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = + 5; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = + 13; + } + } + } else { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if 
(coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = + 71; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = + 9; + } + } + } + } else if (max_interval == 2) { + if (tx_pause) { + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + 
btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } + } + } else { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } + } + } + } else if (max_interval == 3) { + if (tx_pause) { + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, 
+ NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } + } + } else { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if 
(coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } + } + } } - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], max Interval = %d\n", max_interval); - if (max_interval == 1) - set_tdma_int1(btcoexist, tx_pause, result); - else if (max_interval == 2) - set_tdma_int2(btcoexist, tx_pause, result); - else if (max_interval == 3) - set_tdma_int3(btcoexist, tx_pause, result); } - /*if current PsTdma not match with the recorded one (when scan, dhcp..), - *then we have to adjust it back to the previous recorded one. + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], max Interval = %d\n", max_interval); + + /* if current PsTdma not match with the recorded one (scan, dhcp, ...), + * then we have to adjust it back to the previous recorded one. 
 */
-    if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
+    if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
        bool scan = false, link = false, roam = false;
 
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
             "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
-             coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+             coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2325,7 +2742,7 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
 
        if (!scan && !link && !roam)
            btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
-                         coex_dm->tdma_adj_type);
+                         coex_dm->ps_tdma_du_adj_type);
        else
            RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                 "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
@@ -2335,58 +2752,55 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
 /* SCO only or SCO+PAN(HS) */
 static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
 {
-    u8 wifi_rssi_state;
+    u8 wifi_rssi_state, bt_rssi_state;
    u32 wifi_bw;
 
-    wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
-                               0, 2, 15, 0);
+    wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0);
+    bt_rssi_state = btc8723b2ant_bt_rssi_state(
+        btcoexist, 2, BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES -
+        coex_dm->switch_thres_offset,
+        0);
 
    btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
+    btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
    btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
 
-    if (btc8723b_need_dec_pwr(btcoexist))
-        btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+    if (BTC_RSSI_HIGH(bt_rssi_state))
+        btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2);
    else
-        btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+        btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
 
    btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-    /*for SCO quality at 11b/g mode*/
    if (BTC_WIFI_BW_LEGACY == wifi_bw)
-        btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
-    else /*for SCO quality & wifi performance balance at 11n mode*/
-        btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
+        /* for SCO quality at 11b/g mode */
+        btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+    else
+        /* for SCO quality & wifi performance balance at 11n mode */
+        btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8);
 
-    /*for voice quality */
+    /* for voice quality */
    btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
 
    /* sw mechanism */
    if (BTC_WIFI_BW_HT40 == wifi_bw) {
        if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-            btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                           false, false);
-            btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                           true, 0x4);
+            btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                          false, false);
        } else {
-            btc8723b2ant_sw_mechanism1(btcoexist, true, true,
-                           false, false);
-            btc8723b2ant_sw_mechanism2(btcoexist, false, false,
-                           true, 0x4);
+            btc8723b2ant_sw_mechanism(btcoexist, true, true,
+                          false, false);
        }
    } else {
        if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
            (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-            btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                           false, false);
-            btc8723b2ant_sw_mechanism2(btcoexist, true, false,
-                           true, 0x4);
+            btc8723b2ant_sw_mechanism(btcoexist, false, true,
+                          false, false);
        } else {
-            btc8723b2ant_sw_mechanism1(btcoexist, false, true,
-                           false, false);
-
btc8723b2ant_sw_mechanism2(btcoexist, false, false, - true, 0x4); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } } @@ -2395,26 +2809,32 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) { u8 wifi_rssi_state, bt_rssi_state; u32 wifi_bw; + u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) /*/for HID at 11b/g mode*/ - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); - else /*for HID quality & wifi performance balance at 11n mode*/ - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9); + if (wifi_bw == BTC_WIFI_BW_LEGACY) + /* for HID at 11b/g mode */ + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + else + /* for HID quality & wifi performance balance at 11n mode */ + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9); + + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) @@ -2426,44 +2846,36 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } } -/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/ +/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) { u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 ap_num = 0; + u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, - 1, 2, 40, 0); - 
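/*
 * [Editor's aside -- not part of the patch.] Every action routine in
 * this series starts by sampling wifi_rssi_state/bt_rssi_state. Those
 * helpers apply hysteresis: entering the HIGH state requires the
 * threshold plus a guard band, while staying HIGH only requires the
 * bare threshold, so an RSSI hovering at the boundary cannot flip-flop
 * the coex policy on every poll. A minimal sketch of that idea follows;
 * the names and the 3 dB guard band are illustrative, not the driver's
 * exact constants.
 */
enum rssi_state { RSSI_LOW, RSSI_HIGH };

#define RSSI_GUARD_BAND_DB 3 /* assumed tolerance */

static enum rssi_state rssi_state_update(enum rssi_state prev,
                                         int rssi, int thresh)
{
    if (prev == RSSI_LOW)
        return (rssi >= thresh + RSSI_GUARD_BAND_DB) ? RSSI_HIGH
                                                     : RSSI_LOW;
    return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}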
bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, 40, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); @@ -2474,35 +2886,40 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) 0x0); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); /* sw mechanism */ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - true, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - true, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } return; } btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) @@ -2516,104 +2933,116 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } } static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist 
*btcoexist) { - u8 wifi_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2); /* sw mechanism */ - btcoexist->btc_get(btcoexist, - BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } } static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 
0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) @@ -2626,109 +3055,109 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } } -/*PAN(HS) only*/ +/* PAN(HS) only */ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); - - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); 
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } } -/*PAN(EDR)+A2DP*/ +/* PAN(EDR) + A2DP */ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + else + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12); if (BTC_WIFI_BW_HT40 == wifi_bw) btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3); @@ -2736,74 +3165,80 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) btc8723b2ant_tdma_duration_adjust(btcoexist, false, false, 3); } else { - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); - btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3); + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); } /* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, 
false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, false, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, false, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, false, + false, false); } } } static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; - - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { if (BTC_WIFI_BW_HT40 == wifi_bw) { btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780); } else { btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); } btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2); } else { btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2); @@ -2813,54 +3248,61 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - 
btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } } -/* HID+A2DP+PAN(EDR) */ +/* HID + A2DP + PAN(EDR) */ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { @@ -2878,94 +3320,148 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, 
- false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } } static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 ap_num = 0; + u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0); + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + tmp, 0); + tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - + coex_dm->switch_thres_offset; + bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 3, tmp, 37); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, 0x5); btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (btc8723b_need_dec_pwr(btcoexist)) - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); - else - btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + if (wifi_bw == BTC_WIFI_BW_LEGACY) { + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + else if (BTC_RSSI_MEDIUM(bt_rssi_state)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + } else { + /* only 802.11N mode we have to dec bt power to 4 degree */ + if (BTC_RSSI_HIGH(bt_rssi_state)) { + /* need to check ap Number of Not */ + if (ap_num < 10) + btc8723b2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, 4); + else + btc8723b2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, 2); + } else if (BTC_RSSI_MEDIUM(bt_rssi_state)) { + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + } else { + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + } + } - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) - btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2); - else - btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14); + btc8723b2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } + + if (BTC_RSSI_HIGH(bt_rssi_state)) { + if (ap_num < 10) + btc8723b2ant_tdma_duration_adjust(btcoexist, true, + false, 1); + else + btc8723b2ant_tdma_duration_adjust(btcoexist, true, + false, 3); + } else { + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 18); + btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); + btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808); + btcoexist->btc_write_4byte(btcoexist, 0x430, 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, 0x01010000); + + if (ap_num < 10) + btc8723b2ant_tdma_duration_adjust(btcoexist, true, + true, 1); + else + btc8723b2ant_tdma_duration_adjust(btcoexist, 
/* sw mechanism */ if (BTC_WIFI_BW_HT40 == wifi_bw) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, true, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, true, true, + false, false); } } else { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, true, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } else { - btc8723b2ant_sw_mechanism1(btcoexist, false, true, - false, false); - btc8723b2ant_sw_mechanism2(btcoexist, false, false, - false, 0x18); + btc8723b2ant_sw_mechanism(btcoexist, false, true, + false, false); } } } +static void btc8723b2ant_action_wifi_multi_port(struct btc_coexist *btcoexist) +{ + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + /* sw all off */ + btc8723b2ant_sw_mechanism(btcoexist, false, false, false, false); + + /* hw all off */ + btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + + btc8723b2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); +} + static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 algorithm = 0; + u32 num_of_wifi_link = 0; + u32 wifi_link_status = 0; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool miracast_plus_bt = false; + bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], RunCoexistMechanism()===>\n"); @@ -2989,14 +3485,46 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) "[BTCoex], BT is under inquiry/page scan !!\n"); btc8723b2ant_action_bt_inquiry(btcoexist); return; - } else { - if (coex_dm->need_recover_0x948) { - coex_dm->need_recover_0x948 = false; - btcoexist->btc_write_2byte(btcoexist, 0x948, - coex_dm->backup_0x948); - } } + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], WiFi is under Link Process !!\n"); + btc8723b2ant_action_wifi_link_process(btcoexist); + return; + } + + /* for P2P */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + + if ((num_of_wifi_link >= 2) || + (wifi_link_status & WIFI_P2P_GO_CONNECTED)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n", + num_of_wifi_link, wifi_link_status); + + if (bt_link_info->bt_link_exist) + miracast_plus_bt = true; + else + miracast_plus_bt = false; + + btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT, + &miracast_plus_bt); + btc8723b2ant_action_wifi_multi_port(btcoexist); + + return; + } + + miracast_plus_bt = false; + btcoexist->btc_set(btcoexist,
BTC_SET_BL_MIRACAST_PLUS_BT, + &miracast_plus_bt); + coex_dm->cur_algorithm = algorithm; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Algorithm = %d\n", @@ -3077,19 +3605,37 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist) { + bool is_in_mp_mode = false; + u8 h2c_parameter[2] = {0}; + u32 fw_ver = 0; + /* set wlan_act to low */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); - /* Force GNT_BT to High */ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3); - /* BT select s0/s1 is controlled by BT */ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0); + + /* WiFi standby while GNT_BT 0 -> 1 */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + if (fw_ver >= 0x180000) { + /* Use H2C to set GNT_BT to HIGH */ + h2c_parameter[0] = 1; + btcoexist->btc_fill_h2c(btcoexist, 0x6E, 1, h2c_parameter); + } else { + btcoexist->btc_write_1byte(btcoexist, 0x765, 0x18); + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_IS_IN_MP_MODE, + &is_in_mp_mode); + if (!is_in_mp_mode) + /* BT select s0/s1 is controlled by BT */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0); + else + /* BT select s0/s1 is controlled by WiFi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); } /********************************************************************* - * work around function start with wa_btc8723b2ant_ - *********************************************************************/ -/********************************************************************* - * extern function start with EXbtc8723b2ant_ + * extern function start with ex_btc8723b2ant_ *********************************************************************/ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) { @@ -3107,19 +3653,90 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) u8tmp |= 0x5; btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); - /*Antenna config */ + /* Antenna config */ btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, true, false); + coex_sta->dis_ver_info_cnt = 0; + /* PTA parameter */ - btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0); + btc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); /* Enable counter statistics */ - /*0x76e[3] =1, WLAN_Act control by PTA*/ - btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + /* 0x76e[3] = 1, WLAN_ACT controlled by PTA */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); } +void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u16 u16tmp = 0x0; + u32 value = 0; + + btcoexist->btc_write_1byte(btcoexist, 0x67, 0x20); + + /* enable BB, REG_SYS_FUNC_EN such that we can write 0x948 correctly */ + u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x2); + btcoexist->btc_write_2byte(btcoexist, 0x2, u16tmp | BIT0 | BIT1); + + btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0); + + if (btcoexist->chip_interface == BTC_INTF_USB) { + /* fixed at S0 for USB interface */ + board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT; + } else { + /* for PCIE and SDIO interface, we check efuse 0xc3[6] */ + if (board_info->single_ant_path == 0) { + /* set to S1 */ + board_info->btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT; + } 
else if (board_info->single_ant_path == 1) { + /* set to S0 */ + board_info->btdm_ant_pos = BTC_ANTENNA_AT_AUX_PORT; + } + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ANTPOSREGRISTRY_CTRL, + &value); + } +} + +void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u8 u8tmp = 0x4; /* Set BIT2 by default since it's 2ant case */ + + /** + * S0 or S1 setting and Local register setting(By this fw can get + * ant number, S0/S1, ... info) + * + * Local setting bit define + * BIT0: "0" : no antenna inverse; "1" : antenna inverse + * BIT1: "0" : internal switch; "1" : external switch + * BIT2: "0" : one antenna; "1" : two antennas + * + * NOTE: here default all internal switch and 1-antenna ==> BIT1=0 and + * BIT2 = 0 + */ + if (btcoexist->chip_interface == BTC_INTF_USB) { + /* fixed at S0 for USB interface */ + u8tmp |= 0x1; /* antenna inverse */ + btcoexist->btc_write_local_reg_1byte(btcoexist, 0xfe08, u8tmp); + } else { + /* for PCIE and SDIO interface, we check efuse 0xc3[6] */ + if (board_info->single_ant_path == 0) { + } else if (board_info->single_ant_path == 1) { + /* set to S0 */ + u8tmp |= 0x1; /* antenna inverse */ + } + + if (btcoexist->chip_interface == BTC_INTF_PCI) + btcoexist->btc_write_local_reg_1byte(btcoexist, 0x384, + u8tmp); + else if (btcoexist->chip_interface == BTC_INTF_SDIO) + btcoexist->btc_write_local_reg_1byte(btcoexist, 0x60, + u8tmp); + } +} + void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -3215,7 +3832,6 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ? "uplink" : "downlink"))); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, bt_link_info->hid_exist, @@ -3265,7 +3881,7 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) ps_tdma_case, coex_dm->auto_tdma_adjust); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", - "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, + "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl, coex_dm->cur_ignore_wlan_act); /* Hw setting */ @@ -3396,6 +4012,12 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; + u32 u32tmp; + u8 u8tmpa, u8tmpb; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x948); + u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765); + u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e); if (BTC_SCAN_START == type) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -3403,6 +4025,12 @@ void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) else if (BTC_SCAN_FINISH == type) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCAN FINISH notify\n"); + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, + &coex_sta->scan_ap_num); + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n", + u32tmp, u8tmpa, u8tmpb); } void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) @@ -3424,6 +4052,7 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifi_central_chnl; + u8 ap_num = 0; if (BTC_MEDIA_CONNECT == type) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -3441,10 +4070,16 @@ void 
ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, h2c_parameter[1] = wifi_central_chnl; btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_HT40 == wifi_bw) + if (wifi_bw == BTC_WIFI_BW_HT40) { h2c_parameter[2] = 0x30; - else - h2c_parameter[2] = 0x20; + } else { + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, + &ap_num); + if (ap_num < 10) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } } coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; @@ -3492,7 +4127,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i]; if (i == 1) bt_info = tmpbuf[i]; - if (i == length-1) + if (i == length - 1) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "0x%02x]\n", tmpbuf[i]); else @@ -3507,17 +4142,30 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, } if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) { - coex_sta->bt_retry_cnt = /* [3:0]*/ + coex_sta->bt_retry_cnt = coex_sta->bt_info_c2h[rsp_source][2] & 0xf; + if (coex_sta->bt_retry_cnt >= 1) + coex_sta->pop_event_cnt++; + coex_sta->bt_rssi = coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10; - coex_sta->bt_info_ext = - coex_sta->bt_info_c2h[rsp_source][4]; + coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4]; + + if (coex_sta->bt_info_c2h[rsp_source][2] & 0x20) + coex_sta->c2h_bt_remote_name_req = true; + else + coex_sta->c2h_bt_remote_name_req = false; + + if (coex_sta->bt_info_c2h[rsp_source][1] == 0x49) + coex_sta->a2dp_bit_pool = + coex_sta->bt_info_c2h[rsp_source][6]; + else + coex_sta->a2dp_bit_pool = 0; /* Here we need to resend some wifi info to BT - because bt is reset and loss of the info. + * because BT was reset and lost the info. */ if ((coex_sta->bt_info_ext & BIT1)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -3552,20 +4200,21 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, #endif } - /* check BIT2 first ==> check if bt is under inquiry or page scan*/ + /* check BIT2 first ==> check if bt is under inquiry or page scan */ if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE) coex_sta->c2h_bt_inquiry_page = true; else coex_sta->c2h_bt_inquiry_page = false; - /* set link exist status*/ if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) { + /* set link exist status */ coex_sta->bt_link_exist = false; coex_sta->pan_exist = false; coex_sta->a2dp_exist = false; coex_sta->hid_exist = false; coex_sta->sco_exist = false; - } else { /* connection exists */ + } else { + /* connection exists */ coex_sta->bt_link_exist = true; if (bt_info & BT_INFO_8723B_2ANT_B_FTP) coex_sta->pan_exist = true; @@ -3583,6 +4232,16 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->sco_exist = true; else coex_sta->sco_exist = false; + + if ((!coex_sta->hid_exist) && + (!coex_sta->c2h_bt_inquiry_page) && + (!coex_sta->sco_exist)) { + if (coex_sta->high_priority_tx + + coex_sta->high_priority_rx >= 160) { + coex_sta->hid_exist = true; + bt_info = bt_info | 0x28; + } + } } btc8723b2ant_update_bt_link_info(btcoexist); @@ -3640,46 +4299,67 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist) ex_btc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); }
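The bt_info_notify hunk above decodes the BT info C2H payload by raw byte index. As a reading aid, here is the layout it assumes, collected in one place; the struct and function names are hypothetical, but the offsets and masks are exactly the ones used in ex_btc8723b2ant_bt_info_notify():

struct bt_info_c2h_view {
        u8 retry_cnt;         /* byte 2, bits [3:0]; >= 1 bumps pop_event_cnt */
        bool remote_name_req; /* byte 2, bit 0x20 */
        u8 rssi;              /* byte 3, scaled as raw * 2 + 10 */
        u8 info_ext;          /* byte 4 */
        u8 a2dp_bit_pool;     /* byte 6, valid only when byte 1 == 0x49 */
};

static void bt_info_c2h_decode(const u8 *c2h, struct bt_info_c2h_view *v)
{
        v->retry_cnt = c2h[2] & 0xf;
        v->remote_name_req = !!(c2h[2] & 0x20);
        v->rssi = c2h[3] * 2 + 10;
        v->info_ext = c2h[4];
        v->a2dp_bit_pool = (c2h[1] == 0x49) ? c2h[6] : 0;
}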
+void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n"); + + if (pnp_state == BTC_WIFI_PNP_SLEEP) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify to SLEEP\n"); + + /* The driver does not leave IPS/LPS when it is going to sleep, so + * BT coexistence thinks wifi is still under IPS/LPS + * + * BT should clear the UnderIPS/UnderLPS state to avoid a mismatched + * state after wakeup. + */ + coex_sta->under_ips = false; + coex_sta->under_lps = false; + } else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Pnp notify to WAKE UP\n"); + ex_btc8723b2ant_init_hwconfig(btcoexist); + btc8723b2ant_init_coex_dm(btcoexist); + btc8723b2ant_query_bt_info(btcoexist); + } +} + void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - struct btc_board_info *board_info = &btcoexist->board_info; - struct btc_stack_info *stack_info = &btcoexist->stack_info; - static u8 dis_ver_info_cnt; - u32 fw_ver = 0, bt_patch_ver = 0; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], ==========================Periodical===========================\n"); - if (dis_ver_info_cnt <= 5) { - dis_ver_info_cnt += 1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], ****************************************************************\n"); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", - board_info->pg_ant_num, - board_info->btdm_ant_num, - board_info->btdm_ant_pos); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT stack/ hci ext ver = %s / %d\n", - stack_info->profile_notified ? "Yes" : "No", - stack_info->hci_version); - btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, - &bt_patch_ver); - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", - glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, - fw_ver, bt_patch_ver, bt_patch_ver); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], ****************************************************************\n"); + if (coex_sta->dis_ver_info_cnt <= 5) { + coex_sta->dis_ver_info_cnt += 1; + if (coex_sta->dis_ver_info_cnt == 3) { + /* Antenna config to set 0x765 = 0x0 (GNT_BT control by + * PTA) after initial + */ + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Set GNT_BT control by PTA\n"); + btc8723b2ant_set_ant_path( + btcoexist, BTC_ANT_WIFI_AT_MAIN, false, false); + } } #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) btc8723b2ant_query_bt_info(btcoexist); - btc8723b2ant_monitor_bt_ctr(btcoexist); - btc8723b2ant_monitor_bt_enable_disable(btcoexist); #else + btc8723b2ant_monitor_bt_ctr(btcoexist); + btc8723b2ant_monitor_wifi_ctr(btcoexist); + + /* for some BT speakers, high-priority pkts appear before + * playing starts; this would make HID appear to exist + */ + if ((coex_sta->high_priority_tx + coex_sta->high_priority_rx < 50) && + (bt_link_info->hid_exist)) + bt_link_info->hid_exist = false; + if (btc8723b2ant_is_wifi_status_changed(btcoexist) || coex_dm->auto_tdma_adjust) btc8723b2ant_run_coexist_mechanism(btcoexist); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h index 567f354caf95c5e6f1449c1f9296ec9877646fd0..18a35c7faba92a1e8a0d0f858960329fc4a30462 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h @@ -41,6 +41,11 @@ #define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2 +/* WiFi RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */ +#define BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES 42 +/* BT RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */ +#define BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES 46 + enum BT_INFO_SRC_8723B_2ANT { BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0, BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1, @@ -75,8 +80,8 @@ enum BT_8723B_2ANT_COEX_ALGO { struct coex_dm_8723b_2ant { /* fw mechanism */ - bool pre_dec_bt_pwr; - bool cur_dec_bt_pwr; + bool pre_dec_bt_pwr_lvl; + bool cur_dec_bt_pwr_lvl; u8 pre_fw_dac_swing_lvl; u8 cur_fw_dac_swing_lvl; bool cur_ignore_wlan_act; @@ -84,7 +89,7 @@ struct coex_dm_8723b_2ant { u8 pre_ps_tdma; u8 cur_ps_tdma; u8 ps_tdma_para[5]; - u8 tdma_adj_type; + u8 ps_tdma_du_adj_type; bool reset_tdma_adjust; bool auto_tdma_adjust; bool pre_ps_tdma_on; @@ -122,8 +127,13 @@ struct coex_dm_8723b_2ant { u8 bt_status; u8 wifi_chnl_info[3]; - bool need_recover_0x948; - u16 backup_0x948; + u8 pre_lps; + u8 cur_lps; + u8 pre_rpwm; + u8 cur_rpwm; + + bool is_switch_to_1dot5_ant; + u8 switch_thres_offset; }; struct coex_sta_8723b_2ant { @@ -132,6 +142,7 @@ struct coex_sta_8723b_2ant { bool a2dp_exist; bool hid_exist; bool pan_exist; + bool bt_abnormal_scan; bool under_lps; bool under_ips; @@ -140,14 +151,33 @@ struct coex_sta_8723b_2ant { u32 low_priority_tx; u32 low_priority_rx; u8 bt_rssi; + bool bt_tx_rx_mask; u8 pre_bt_rssi_state; u8 pre_wifi_rssi_state[4]; bool c2h_bt_info_req_sent; u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10]; u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX]; bool c2h_bt_inquiry_page; + bool c2h_bt_remote_name_req; u8 bt_retry_cnt; u8 bt_info_ext; + u32 pop_event_cnt; + u8 scan_ap_num; + + u32 crc_ok_cck; + u32 crc_ok_11g; + u32 crc_ok_11n; + u32 crc_ok_11n_agg; + + u32 crc_err_cck; + u32 crc_err_11g; + u32 crc_err_11n; + u32 crc_err_11n_agg; + bool force_lps_on; + + u8 dis_ver_info_cnt; + + u8 a2dp_bit_pool; }; /*********************************************************************
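The two *_COEXSWITCH_THRES values added above feed the RSSI state helpers, which debounce the comparison: climbing out of the LOW state requires an extra BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT (2) of margin, while dropping out of HIGH uses the raw threshold, as the 8821A helpers later in this patch show. A minimal two-state sketch of that hysteresis, with hypothetical names:

enum rssi_state { RSSI_LOW, RSSI_HIGH };

static enum rssi_state rssi_debounce(enum rssi_state prev, long rssi,
                                     long thresh, long tol)
{
        if (prev == RSSI_LOW)
                /* climbing out of LOW needs thresh + tol */
                return (rssi >= thresh + tol) ? RSSI_HIGH : RSSI_LOW;
        /* falling out of HIGH uses the raw threshold */
        return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}

The asymmetry keeps the coex policy from flapping when the measured RSSI hovers around the switch point.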
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 8b689ed9a629bce30472d2b61adb562d24c071af..5e9f3b0f7a2583e36bc5c747325a52fbfb73141a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -23,7 +23,7 @@ * *****************************************************************************/ -/*============================================================ +/************************************************************** * Description: * * This file is for RTL8821A Co-exist mechanism @@ -31,21 +31,21 @@ * History * 2012/11/15 Cosa first check in.
* - *============================================================ -*/ -/*============================================================ + **************************************************************/ + +/************************************************************** * include files - *============================================================ - */ + **************************************************************/ #include "halbt_precomp.h" -/*============================================================ +/************************************************************** * Global variables, these are static variables - *============================================================ - */ + **************************************************************/ static struct coex_dm_8821a_1ant glcoex_dm_8821a_1ant; static struct coex_dm_8821a_1ant *coex_dm = &glcoex_dm_8821a_1ant; static struct coex_sta_8821a_1ant glcoex_sta_8821a_1ant; static struct coex_sta_8821a_1ant *coex_sta = &glcoex_sta_8821a_1ant; +static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, + u8 wifi_status); static const char *const glbt_info_src_8821a_1ant[] = { "BT Info[wifi fw]", @@ -53,22 +53,21 @@ static const char *const glbt_info_src_8821a_1ant[] = { "BT Info[bt auto report]", }; -static u32 glcoex_ver_date_8821a_1ant = 20130816; -static u32 glcoex_ver_8821a_1ant = 0x41; +static u32 glcoex_ver_date_8821a_1ant = 20130816; +static u32 glcoex_ver_8821a_1ant = 0x41; -/*============================================================ +/************************************************************** * local function proto type if needed * - * local function start with halbtc8821a1ant_ - *============================================================ - */ -static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist, - u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) + * local function start with btc8821a1ant_ + **************************************************************/ +static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) { struct rtl_priv *rtlpriv = btcoexist->adapter; - long bt_rssi = 0; - u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + long bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; bt_rssi = coex_sta->bt_rssi; @@ -150,9 +149,9 @@ static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist, return bt_rssi_state; } -static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, - u8 index, u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) +static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) { struct rtl_priv *rtlpriv = btcoexist->adapter; long wifi_rssi = 0; @@ -165,8 +164,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, BTC_RSSI_STATE_LOW) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { - if (wifi_rssi >= - (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + if (wifi_rssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], wifi RSSI state switch to High\n"); @@ -197,8 +196,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, BTC_RSSI_STATE_LOW) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { - if (wifi_rssi >= - (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + if (wifi_rssi >= (rssi_thresh + + 
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], wifi RSSI state switch to Medium\n"); @@ -211,9 +210,8 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, BTC_RSSI_STATE_MEDIUM) || (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM)) { - if (wifi_rssi >= - (rssi_thresh1 + - BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + if (wifi_rssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { wifi_rssi_state = BTC_RSSI_STATE_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], wifi RSSI state switch to High\n"); @@ -243,14 +241,14 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, return wifi_rssi_state; } -static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist, - bool force_exec, u32 dis_rate_mask) +static void btc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_rate_mask) { coex_dm->cur_ra_mask = dis_rate_mask; if (force_exec || (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask)) { - btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_RAMASK, &coex_dm->cur_ra_mask); } coex_dm->pre_ra_mask = coex_dm->cur_ra_mask; @@ -259,14 +257,14 @@ static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist, static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist, bool force_exec, u8 type) { - bool wifi_under_b_mode = false; + bool wifi_under_b_mode = false; coex_dm->cur_arfr_type = type; if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) { switch (coex_dm->cur_arfr_type) { - case 0: /* normal mode*/ + case 0: /* normal mode */ btcoexist->btc_write_4byte(btcoexist, 0x430, coex_dm->backup_arfr_cnt1); btcoexist->btc_write_4byte(btcoexist, 0x434, @@ -296,19 +294,19 @@ static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist, coex_dm->pre_arfr_type = coex_dm->cur_arfr_type; } -static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8821a1ant_retry_limit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { coex_dm->cur_retry_limit_type = type; if (force_exec || (coex_dm->pre_retry_limit_type != coex_dm->cur_retry_limit_type)) { switch (coex_dm->cur_retry_limit_type) { - case 0: /* normal mode*/ + case 0: /* normal mode */ btcoexist->btc_write_2byte(btcoexist, 0x42a, coex_dm->backup_retry_limit); break; - case 1: /* retry limit = 8*/ + case 1: /* retry limit = 8 */ btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808); break; default: @@ -318,19 +316,19 @@ static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist, coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type; } -static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { coex_dm->cur_ampdu_time_type = type; if (force_exec || (coex_dm->pre_ampdu_time_type != coex_dm->cur_ampdu_time_type)) { switch (coex_dm->cur_ampdu_time_type) { - case 0: /* normal mode*/ + case 0: /* normal mode */ btcoexist->btc_write_1byte(btcoexist, 0x456, coex_dm->backup_ampdu_max_time); break; - case 1: /* AMPDU timw = 0x38 * 32us*/ + case 1: /* AMPDU time = 0x38 * 32us */ btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); break; default: @@ -341,88 +339,85 @@ static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist 
*btcoexist, coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type; } -static void halbtc8821a1ant_limited_tx(struct btc_coexist *btcoexist, - bool force_exec, u8 ra_mask_type, - u8 arfr_type, u8 retry_limit_type, - u8 ampdu_time_type) +static void btc8821a1ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_mask_type, + u8 arfr_type, u8 retry_limit_type, + u8 ampdu_time_type) { switch (ra_mask_type) { - case 0: /* normal mode*/ - halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0); + case 0: /* normal mode */ + btc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0); break; - case 1: /* disable cck 1/2*/ - halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, - 0x00000003); + case 1: /* disable cck 1/2 */ + btc8821a1ant_update_ra_mask(btcoexist, force_exec, + 0x00000003); break; - case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/ - halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, - 0x0001f1f7); + case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */ + btc8821a1ant_update_ra_mask(btcoexist, force_exec, + 0x0001f1f7); break; default: break; } btc8821a1ant_auto_rate_fb_retry(btcoexist, force_exec, arfr_type); - halbtc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type); - halbtc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type); + btc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type); + btc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type); } -static void halbtc8821a1ant_limited_rx(struct btc_coexist *btcoexist, - bool force_exec, bool rej_ap_agg_pkt, - bool bt_ctrl_agg_buf_size, - u8 agg_buf_size) +static void btc8821a1ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool bt_ctrl_agg_buf_size, u8 agg_buf_size) { bool reject_rx_agg = rej_ap_agg_pkt; bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; u8 rx_agg_size = agg_buf_size; - /*============================================*/ - /* Rx Aggregation related setting*/ - /*============================================*/ + /* Rx Aggregation related setting */ btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &reject_rx_agg); - /* decide BT control aggregation buf size or not*/ + /* decide BT control aggregation buf size or not */ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &bt_ctrl_rx_agg_size); - /* aggregation buf size, only work when BT control Rx agg size.*/ + /* aggregation buf size, only work when BT control Rx agg size */ btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size); - /* real update aggregation setting*/ + /* real update aggregation setting */ btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); } -static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +static void btc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { - u32 reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp; - u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + u32 reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; reg_hp_tx_rx = 0x770; reg_lp_tx_rx = 0x774; u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx); reg_hp_tx = u4_tmp & MASKLWORD; - reg_hp_rx = (u4_tmp & MASKHWORD)>>16; + reg_hp_rx = (u4_tmp & MASKHWORD) >> 16; u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx); reg_lp_tx = u4_tmp & MASKLWORD; - reg_lp_rx = (u4_tmp & MASKHWORD)>>16; + reg_lp_rx = (u4_tmp & MASKHWORD) >> 16; coex_sta->high_priority_tx = reg_hp_tx; coex_sta->high_priority_rx = reg_hp_rx; 
coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; - /* reset counter*/ + /* reset counter */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); } -static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) +static void btc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; - h2c_parameter[0] |= BIT0; /* trigger*/ + h2c_parameter[0] |= BIT0; /* trigger */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", @@ -431,10 +426,43 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } -static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) +bool btc8821a1ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy = true; + static bool pre_under_4way = true; + static bool pre_bt_hs_on = true; + bool wifi_busy = false, under_4way = false, bt_hs_on = false; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + if (bt_hs_on != pre_bt_hs_on) { + pre_bt_hs_on = bt_hs_on; + return true; + } + } + + return false; +} + +static void btc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) { struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bool bt_hs_on = false; + bool bt_hs_on = false; btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); @@ -444,13 +472,13 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) bt_link_info->pan_exist = coex_sta->pan_exist; bt_link_info->hid_exist = coex_sta->hid_exist; - /* work around for HS mode.*/ + /* work around for HS mode */ if (bt_hs_on) { bt_link_info->pan_exist = true; bt_link_info->bt_link_exist = true; } - /* check if Sco only*/ + /* check if Sco only */ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && !bt_link_info->pan_exist && @@ -459,7 +487,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) else bt_link_info->sco_only = false; - /* check if A2dp only*/ + /* check if A2dp only */ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist && !bt_link_info->pan_exist && @@ -468,7 +496,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) else bt_link_info->a2dp_only = false; - /* check if Pan only*/ + /* check if Pan only */ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && bt_link_info->pan_exist && @@ -477,7 +505,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) else bt_link_info->pan_only = false; - /* check if Hid only*/ + /* check if Hid only */ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && !bt_link_info->pan_exist && @@ -487,13 +515,13 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) bt_link_info->hid_only = false; } -static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) +static u8 
btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bool bt_hs_on = false; - u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED; - u8 num_of_diff_profile = 0; + bool bt_hs_on = false; + u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED; + u8 num_of_diff_profile = 0; btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); @@ -605,7 +633,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); algorithm = BT_8821A_1ANT_COEX_ALGO_HID; } else if (bt_link_info->hid_exist && - bt_link_info->pan_exist) { + bt_link_info->pan_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -618,7 +646,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; } } else if (bt_link_info->pan_exist && - bt_link_info->a2dp_exist) { + bt_link_info->a2dp_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -670,53 +698,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) return algorithm; } -static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist, - bool enable_auto_report) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 h2c_parameter[1] = {0}; - - h2c_parameter[0] = 0; - - if (enable_auto_report) - h2c_parameter[0] |= BIT0; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_auto_report ? "Enabled!!" : "Disabled!!"), - h2c_parameter[0]); - - btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); -} - -static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist, - bool force_exec, - bool enable_auto_report) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), ((enable_auto_report) ? 
- "Enabled" : "Disabled")); - coex_dm->cur_bt_auto_report = enable_auto_report; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", - coex_dm->pre_bt_auto_report, - coex_dm->cur_bt_auto_report); - - if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) - return; - } - halbtc8821a1ant_set_bt_auto_report(btcoexist, coex_dm->cur_bt_auto_report); - - coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; -} - -static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, - bool low_penalty_ra) +static void btc8821a1ant_set_sw_penalty_tx_rate(struct btc_coexist *btcoexist, + bool low_penalty_ra) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; @@ -725,11 +708,11 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, if (low_penalty_ra) { h2c_parameter[1] |= BIT0; - /*normal rate except MCS7/6/5, OFDM54/48/36*/ + /* normal rate except MCS7/6/5, OFDM54/48/36 */ h2c_parameter[2] = 0x00; - h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/ - h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/ - h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ + h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */ + h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */ + h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */ } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -739,8 +722,8 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } -static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist, - bool force_exec, bool low_penalty_ra) +static void btc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) { coex_dm->cur_low_penalty_ra = low_penalty_ra; @@ -748,14 +731,15 @@ static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist, if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) return; } - btc8821a1ant_set_sw_pen_tx_rate(btcoexist, coex_dm->cur_low_penalty_ra); + btc8821a1ant_set_sw_penalty_tx_rate(btcoexist, + coex_dm->cur_low_penalty_ra); coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; } -static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist, - u32 val0x6c0, u32 val0x6c4, - u32 val0x6c8, u8 val0x6cc) +static void btc8821a1ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -776,9 +760,9 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } -static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist, - bool force_exec, u32 val0x6c0, - u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) +static void btc8821a1ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -798,8 +782,8 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist, (coex_dm->pre_val_0x6cc == coex_dm->cur_val_0x6cc)) return; } - halbtc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, - val0x6c8, val0x6cc); + btc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); coex_dm->pre_val_0x6c0 = coex_dm->cur_val_0x6c0; coex_dm->pre_val_0x6c4 = coex_dm->cur_val_0x6c4; @@ -807,42 +791,41 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist, coex_dm->pre_val_0x6cc = 
coex_dm->cur_val_0x6cc; } -static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist, - bool force_exec, u8 type) +static void btc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) { switch (type) { case 0: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, - 0x55555555, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffffff, 0x3); break; case 1: - halbtc8821a1ant_coex_table(btcoexist, force_exec, - 0x55555555, 0x5a5a5a5a, - 0xffffff, 0x3); - break; + btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); + break; case 2: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, - 0x5a5a5a5a, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); break; case 3: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, - 0xaaaaaaaa, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0xaaaaaaaa, 0xffffff, 0x3); break; case 4: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0xffffffff, - 0xffffffff, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); break; case 5: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5fff5fff, - 0x5fff5fff, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0xaaaa5a5a, 0xffffff, 0x3); break; case 6: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55ff55ff, - 0x5a5a5a5a, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0xaaaa5a5a, 0xffffff, 0x3); break; case 7: - halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5afa5afa, - 0x5afa5afa, 0xffffff, 0x3); + btc8821a1ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa, + 0xaaaaaaaa, 0xffffff, 0x3); break; default: break; @@ -853,10 +836,10 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, bool enable) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 h2c_parameter[1] = {0}; + u8 h2c_parameter[1] = {0}; if (enable) - h2c_parameter[0] |= BIT0; /* function enable*/ + h2c_parameter[0] |= BIT0; /* function enable */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", @@ -865,8 +848,8 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); } -static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, - bool force_exec, bool enable) +static void btc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -890,24 +873,40 @@ static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; } -static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist, - u8 byte1, u8 byte2, u8 byte3, - u8 byte4, u8 byte5) +static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, + u8 byte2, u8 byte3, u8 byte4, u8 byte5) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5] = {0}; + u8 real_byte1 = byte1, real_byte5 = byte5; + bool ap_enable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + if (byte1 & BIT4 && !(byte1 & BIT5)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], FW for 1Ant AP mode\n"); + real_byte1 
&= ~BIT4; + real_byte1 |= BIT5; + + real_byte5 |= BIT5; + real_byte5 &= ~BIT6; + } + } - h2c_parameter[0] = byte1; + h2c_parameter[0] = real_byte1; h2c_parameter[1] = byte2; h2c_parameter[2] = byte3; h2c_parameter[3] = byte4; - h2c_parameter[4] = byte5; + h2c_parameter[4] = real_byte5; - coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[0] = real_byte1; coex_dm->ps_tdma_para[1] = byte2; coex_dm->ps_tdma_para[2] = byte3; coex_dm->ps_tdma_para[3] = byte4; - coex_dm->ps_tdma_para[4] = byte5; + coex_dm->ps_tdma_para[4] = real_byte5; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", @@ -919,18 +918,18 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } -static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist, - u8 lps_val, u8 rpwm_val) +static void btc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist, + u8 lps_val, u8 rpwm_val) { - u8 lps = lps_val; - u8 rpwm = rpwm_val; + u8 lps = lps_val; + u8 rpwm = rpwm_val; btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps); btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm); } -static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, - bool force_exec, u8 lps_val, u8 rpwm_val) +static void btc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, + bool force_exec, u8 lps_val, u8 rpwm_val) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -954,33 +953,33 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, return; } } - halbtc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val); + btc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val); coex_dm->pre_lps = coex_dm->cur_lps; coex_dm->pre_rpwm = coex_dm->cur_rpwm; } -static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist, - bool low_penalty_ra) +static void btc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist, + bool low_penalty_ra) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); - halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); + btc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } -static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, - u8 ant_pos_type, bool init_hw_cfg, - bool wifi_off) +static void btc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) { struct btc_board_info *board_info = &btcoexist->board_info; u32 u4_tmp = 0; u8 h2c_parameter[2] = {0}; if (init_hw_cfg) { - /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT*/ + /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT */ u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); u4_tmp &= ~BIT23; u4_tmp |= BIT24; @@ -990,41 +989,42 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { - /*tell firmware "antenna inverse" ==> - * WRONG firmware antenna control code.==>need fw to fix + /* tell firmware "antenna inverse" + * WRONG firmware antenna control code, need fw to fix */ h2c_parameter[0] = 1; h2c_parameter[1] = 1; btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); - /*Main Ant to BT for IPS case 0x4c[23] = 1*/ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, - 0x1, 0x1); } else { - /*tell firmware "no antenna inverse" ==> - * WRONG firmware antenna control code.==>need fw to fix + /* 
tell firmware "no antenna inverse" + * WRONG firmware antenna control code, need fw to fix */ h2c_parameter[0] = 0; h2c_parameter[1] = 1; btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); - /*Aux Ant to BT for IPS case 0x4c[23] = 1*/ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, - 0x1, 0x0); } } else if (wifi_off) { /* 0x4c[24:23] = 00, Set Antenna control - * by BT_RFE_CTRL BT Vendor 0xac = 0xf002 + * by BT_RFE_CTRL BT Vendor 0xac = 0xf002 */ u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); u4_tmp &= ~BIT23; u4_tmp &= ~BIT24; btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp); + + /* 0x765 = 0x18 */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3); + } else { + /* 0x765 = 0x0 */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); } - /* ext switch setting*/ + /* ext switch setting */ switch (ant_pos_type) { case BTC_ANT_PATH_WIFI: + btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, 0x30, 0x1); @@ -1033,6 +1033,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, 0x30, 0x2); break; case BTC_ANT_PATH_BT: + btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, 0x30, 0x2); @@ -1042,6 +1043,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, break; default: case BTC_ANT_PATH_PTA: + btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x66); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, 0x30, 0x1); @@ -1052,8 +1054,8 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, } } -static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, - bool force_exec, bool turn_on, u8 type) +static void btc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 rssi_adjust_val = 0; @@ -1078,185 +1080,189 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, if (turn_on) { switch (type) { default: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1a, - 0x1a, 0x0, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a, + 0x1a, 0x0, 0x50); break; case 1: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x3a, - 0x03, 0x10, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x3a, + 0x03, 0x10, 0x50); rssi_adjust_val = 11; break; case 2: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x2b, - 0x03, 0x10, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x2b, + 0x03, 0x10, 0x50); rssi_adjust_val = 14; break; case 3: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1d, - 0x1d, 0x0, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d, + 0x1d, 0x0, 0x10); break; case 4: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15, - 0x3, 0x14, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15, + 0x3, 0x14, 0x0); rssi_adjust_val = 17; break; case 5: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15, - 0x3, 0x11, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15, + 0x3, 0x11, 0x10); break; case 6: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa, - 0x3, 0x0, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0x3, 0x0, 0x0); break; case 7: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xc, - 0x5, 0x0, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc, + 0x5, 
0x0, 0x0); break; case 8: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25, - 0x3, 0x10, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); break; case 9: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21, - 0x3, 0x10, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21, + 0x3, 0x10, 0x50); rssi_adjust_val = 18; break; case 10: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa, - 0xa, 0x0, 0x40); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0xa, 0x0, 0x40); break; case 11: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14, - 0x03, 0x10, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14, + 0x03, 0x10, 0x10); rssi_adjust_val = 20; break; case 12: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x0a, - 0x0a, 0x0, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a, + 0x0a, 0x0, 0x50); break; case 13: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x18, - 0x18, 0x0, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x18, + 0x18, 0x0, 0x10); break; case 14: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21, - 0x3, 0x10, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1e, + 0x3, 0x10, 0x14); break; case 15: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa, - 0x3, 0x8, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0x3, 0x8, 0x0); break; case 16: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15, - 0x3, 0x10, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15, + 0x3, 0x10, 0x0); rssi_adjust_val = 18; break; case 18: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25, - 0x3, 0x10, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); rssi_adjust_val = 14; break; case 20: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x35, - 0x03, 0x11, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35, + 0x03, 0x11, 0x10); break; case 21: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15, - 0x03, 0x11, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15, + 0x03, 0x11, 0x10); break; case 22: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x25, - 0x03, 0x11, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25, + 0x03, 0x11, 0x10); break; case 23: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, - 0x3, 0x31, 0x18); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x18); rssi_adjust_val = 22; break; case 24: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x15, - 0x3, 0x31, 0x18); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x3, 0x31, 0x18); rssi_adjust_val = 22; break; case 25: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, - 0x3, 0x31, 0x18); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); rssi_adjust_val = 22; break; case 26: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, - 0x3, 0x31, 0x18); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); rssi_adjust_val = 22; break; case 27: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, - 0x3, 0x31, 0x98); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x98); rssi_adjust_val = 22; break; case 28: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x69, 0x25, - 0x3, 0x31, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25, + 0x3, 0x31, 0x0); break; case 29: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xab, 0x1a, - 0x1a, 0x1, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a, + 0x1a, 0x1, 0x10); break; case 30: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14, - 0x3, 
0x10, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14, + 0x3, 0x10, 0x50); break; case 31: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x1a, - 0x1a, 0, 0x58); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a, + 0x1a, 0, 0x58); break; case 32: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0xa, - 0x3, 0x10, 0x0); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa, + 0x3, 0x10, 0x0); break; case 33: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xa3, 0x25, - 0x3, 0x30, 0x90); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25, + 0x3, 0x30, 0x90); break; case 34: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x53, 0x1a, - 0x1a, 0x0, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a, + 0x1a, 0x0, 0x10); break; case 35: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x63, 0x1a, - 0x1a, 0x0, 0x10); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a, + 0x1a, 0x0, 0x10); break; case 36: - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x12, - 0x3, 0x14, 0x50); + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12, + 0x3, 0x14, 0x50); break; } } else { - /* disable PS tdma*/ + /* disable PS tdma */ switch (type) { - case 8: /*PTA Control*/ - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x8, 0x0, 0x0, - 0x0, 0x0); - halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, - false, false); + case 8: + /* PTA Control */ + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, 0x0, + 0x0, 0x0); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, + false, false); break; case 0: - default: /*Software control, Antenna at BT side*/ - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, - 0x0, 0x0); - halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, - false, false); + default: + /* Software control, Antenna at BT side */ + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x0, 0x0); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + false, false); break; - case 9: /*Software control, Antenna at WiFi side*/ - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, - 0x0, 0x0); - halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI, - false, false); + case 9: + /* Software control, Antenna at WiFi side */ + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x0, 0x0); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI, + false, false); break; - case 10: /* under 5G*/ - halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, - 0x8, 0x0); - halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, - false, false); + case 10: + /* under 5G */ + btc8821a1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x8, 0x0); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + false, false); break; } } @@ -1264,15 +1270,15 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val); - /* update pre state*/ + /* update pre state */ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; } -static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) +static bool btc8821a1ant_is_common_action(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - bool common = false, wifi_connected = false, wifi_busy = false; + bool common = false, wifi_connected = false, wifi_busy = false; btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); @@ -1283,7 +1289,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) coex_dm->bt_status) { RT_TRACE(rtlpriv, 
COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (wifi_connected && @@ -1291,7 +1297,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) coex_dm->bt_status)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi connected + BT non connected-idle!!\n"); - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (!wifi_connected && @@ -1299,15 +1305,15 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) coex_dm->bt_status)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (wifi_connected && (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == - coex_dm->bt_status)) { + coex_dm->bt_status)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi connected + BT connected-idle!!\n"); - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else if (!wifi_connected && @@ -1315,7 +1321,7 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) coex_dm->bt_status)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); common = true; } else { @@ -1333,231 +1339,40 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) return common; } -static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, - u8 wifi_status) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - static long up, dn, m, n, wait_count; - /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/ - long result; - u8 retry_count = 0, bt_info_ext; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TdmaDurationAdjustForAcl()\n"); - - if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == - wifi_status) || - (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN == - wifi_status) || - (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == - wifi_status)) { - if (coex_dm->cur_ps_tdma != 1 && - coex_dm->cur_ps_tdma != 2 && - coex_dm->cur_ps_tdma != 3 && - coex_dm->cur_ps_tdma != 9) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - - up = 0; - dn = 0; - m = 1; - n = 3; - result = 0; - wait_count = 0; - } - return; - } - - if (!coex_dm->auto_tdma_adjust) { - coex_dm->auto_tdma_adjust = true; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], first run TdmaDurationAdjust()!!\n"); - - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); - coex_dm->tdma_adj_type = 2; - /*============*/ - up = 0; - dn = 0; - m = 1; - n = 3; - result = 0; - wait_count = 0; - } else { - /*accquire the BT TRx retry count from BT_Info byte2*/ - retry_count = coex_sta->bt_retry_cnt; - bt_info_ext = coex_sta->bt_info_ext; - result = 0; - wait_count++; - - if (retry_count == 0) { - /* no retry in the last 2-second duration*/ - up++; - dn--; - - if (dn <= 0) - dn = 0; - - if (up >= n) { - /* if (retry count == 0) for 2*n seconds , - * make WiFi duration wider - */ - wait_count = 0; - n = 3; - up = 0; - dn = 0; - result = 1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - 
"[BTCoex], Increase wifi duration!!\n"); - } - } else if (retry_count <= 3) { - /* <=3 retry in the last 2-second duration*/ - up--; - dn++; - - if (up <= 0) - up = 0; - - if (dn == 2) { - /* if retry count< 3 for 2*2 seconds, - * shrink wifi duration - */ - if (wait_count <= 2) - m++; /* avoid bounce in two levels */ - else - m = 1; - - if (m >= 20) { - /* m max value is 20, max time is 120 s, - * recheck if adjust WiFi duration. - */ - m = 20; - } - n = 3*m; - up = 0; - dn = 0; - wait_count = 0; - result = -1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); - } - } else { - /* retry count > 3, if retry count > 3 happens once, - * shrink WiFi duration - */ - if (wait_count == 1) - m++; /* avoid bounce in two levels */ - else - m = 1; - /* m max value is 20, max time is 120 second, - * recheck if adjust WiFi duration. - */ - if (m >= 20) - m = 20; - - n = 3*m; - up = 0; - dn = 0; - wait_count = 0; - result = -1; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); - } - - if (result == -1) { - if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && - ((coex_dm->cur_ps_tdma == 1) || - (coex_dm->cur_ps_tdma == 2))) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } - } else if (result == 1) { - if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && - ((coex_dm->cur_ps_tdma == 1) || - (coex_dm->cur_ps_tdma == 2))) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } - } else { - /*no change*/ - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], ********** TDMA(on, %d) **********\n", - coex_dm->cur_ps_tdma); - } - - if (coex_dm->cur_ps_tdma != 1 && - coex_dm->cur_ps_tdma != 2 && - coex_dm->cur_ps_tdma != 9 && - coex_dm->cur_ps_tdma != 11) { - /* recover to previous adjust type*/ - halbtc8821a1ant_ps_tdma(btcoexist, - NORMAL_EXEC, true, - coex_dm->tdma_adj_type); - } - } -} - static void btc8821a1ant_ps_tdma_check_for_pwr_save(struct btc_coexist *btcoex, bool new_ps_state) { - u8 lps_mode = 0x0; + u8 lps_mode = 0x0; btcoex->btc_get(btcoex, BTC_GET_U1_LPS_MODE, &lps_mode); if (lps_mode) { - /* already under LPS state*/ + /* already under LPS state */ if (new_ps_state) { - /* keep state under LPS, do nothing.*/ + /* keep state under LPS, do nothing */ } else { - /* will leave LPS state, turn off psTdma first*/ - halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0); + /* will leave LPS state, turn off psTdma first */ + btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0); } } else { /* NO PS state*/ if (new_ps_state) { - /* will enter LPS state, turn off psTdma first*/ - 
halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0); + /* will enter LPS state, turn off psTdma first */ + btc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0); } else { - /* keep state under NO PS state, do nothing.*/ + /* keep state under NO PS state, do nothing */ } } } -static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist, - u8 ps_type, u8 lps_val, - u8 rpwm_val) +static void btc8821a1ant_power_save_state(struct btc_coexist *btcoexist, + u8 ps_type, u8 lps_val, u8 rpwm_val) { bool low_pwr_disable = false; switch (ps_type) { case BTC_PS_WIFI_NATIVE: - /* recover to original 32k low power setting*/ + /* recover to original 32k low power setting */ low_pwr_disable = false; btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); @@ -1566,13 +1381,13 @@ static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist, case BTC_PS_LPS_ON: btc8821a1ant_ps_tdma_check_for_pwr_save(btcoexist, true); - halbtc8821a1ant_lps_rpwm(btcoexist, - NORMAL_EXEC, lps_val, rpwm_val); - /* when coex force to enter LPS, do not enter 32k low power.*/ + btc8821a1ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val, + rpwm_val); + /* when coex forces WiFi to enter LPS, do not enter 32k low power */ low_pwr_disable = true; btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - /* power save must executed before psTdma.*/ + /* power save must be executed before psTdma */ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); break; case BTC_PS_LPS_OFF: @@ -1584,295 +1399,332 @@ static void halbtc8821a1ant_power_save_state(struct btc_coexist *btcoexist, } } -static void halbtc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist) +static void btc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist) { - halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, - 0x0, 0x0); - halbtc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true); + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true); - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); - halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); - halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5); + btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5); } -static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist) +/*********************************************** + * + * Software Coex Mechanism start + * + ***********************************************/ + +/* SCO only or SCO+PAN(HS) */ +static void btc8821a1ant_action_sco(struct btc_coexist *btcoexist) { - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); + btc8821a1ant_sw_mechanism(btcoexist, true); } -static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_hid(struct btc_coexist *btcoexist) { - struct rtl_priv *rtlpriv = btcoexist->adapter; - static bool pre_bt_disabled; - static u32 bt_disable_cnt; - bool bt_active = true, bt_disabled = false; - - /* This function check if bt is disabled*/ - - if (coex_sta->high_priority_tx == 0 && - coex_sta->high_priority_rx == 0 && - coex_sta->low_priority_tx == 0 && -
coex_sta->low_priority_rx == 0) { - bt_active = false; - } - if (coex_sta->high_priority_tx == 0xffff && - coex_sta->high_priority_rx == 0xffff && - coex_sta->low_priority_tx == 0xffff && - coex_sta->low_priority_rx == 0xffff) { - bt_active = false; - } - if (bt_active) { - bt_disable_cnt = 0; - bt_disabled = false; - btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, - &bt_disabled); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT is enabled !!\n"); - } else { - bt_disable_cnt++; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); - if (bt_disable_cnt >= 2) { - bt_disabled = true; - btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, - &bt_disabled); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT is disabled !!\n"); - halbtc8821a1ant_action_wifi_only(btcoexist); - } - } - if (pre_bt_disabled != bt_disabled) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? "disabled" : "enabled")); - pre_bt_disabled = bt_disabled; - if (bt_disabled) { - btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, - NULL); - btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, - NULL); - } - } + btc8821a1ant_sw_mechanism(btcoexist, true); } -/*=============================================*/ -/**/ -/* Software Coex Mechanism start*/ -/**/ -/*=============================================*/ - -/* SCO only or SCO+PAN(HS)*/ -static void halbtc8821a1ant_action_sco(struct btc_coexist *btcoexist) +/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ +static void btc8821a1ant_action_a2dp(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, true); + btc8821a1ant_sw_mechanism(btcoexist, false); } -static void halbtc8821a1ant_action_hid(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, true); + btc8821a1ant_sw_mechanism(btcoexist, false); } -/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/ -static void halbtc8821a1ant_action_a2dp(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); } -static void halbtc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +/* PAN(HS) only */ +static void btc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); } -static void halbtc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist) +/* PAN(EDR)+A2DP */ +static void btc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, false); } -/*PAN(HS) only*/ -static void halbtc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, true); } -/*PAN(EDR)+A2DP*/ -static void halbtc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +/* HID+A2DP+PAN(EDR) */ +static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, false); + btc8821a1ant_sw_mechanism(btcoexist, true); } -static void halbtc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +static void 
btc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, true); + btc8821a1ant_sw_mechanism(btcoexist, true); } -/* HID+A2DP+PAN(EDR)*/ -static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +/*********************************************** + * + * Non-Software Coex Mechanism start + * + ***********************************************/ +static +void btc8821a1ant_action_wifi_multi_port(struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, true); + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + /* tdma and coex table */ + if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) { + if (bt_link_info->a2dp_exist) { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else if (bt_link_info->a2dp_exist && + bt_link_info->pan_exist) { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 4); + } else { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 4); + } + } else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) || + (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } else { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } } -static void halbtc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist) +static +void btc8821a1ant_action_wifi_not_connected_asso_auth( + struct btc_coexist *btcoexist) { - halbtc8821a1ant_sw_mechanism(btcoexist, true); + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, + 0x0); + + /* tdma and coex table */ + if ((bt_link_info->sco_exist) || (bt_link_info->hid_exist)) { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if ((bt_link_info->a2dp_exist) || (bt_link_info->pan_exist)) { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4); + } else { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } } -/*=============================================*/ -/**/ -/* Non-Software Coex Mechanism start*/ -/**/ -/*=============================================*/ -static void halbtc8821a1ant_action_hs(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_hs(struct btc_coexist *btcoexist) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2); } -static void halbtc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist) { struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool wifi_connected = false; + bool ap_enable = false; + bool wifi_busy = false, bt_busy = false; - btcoexist->btc_get(btcoexist, - BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); - - if (!wifi_connected) { - 
halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, 0x0, 0x0); - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); - } else if ((bt_link_info->sco_exist) || + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + if (!wifi_connected && !coex_sta->wifi_is_high_pri_task) { + btc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + } else if ((bt_link_info->sco_exist) || (bt_link_info->a2dp_exist) || (bt_link_info->hid_only)) { - /* SCO/HID-only busy*/ - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, 0x0, 0x0); - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + /* SCO/HID-only busy */ + btc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4); + } else if ((bt_link_info->a2dp_exist) && (bt_link_info->hid_exist)) { + /* A2DP+HID busy */ + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if ((bt_link_info->pan_exist) || (wifi_busy)) { + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4); } else { - halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON, - 0x50, 0x4); - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); } } static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, - u8 wifi_status) { - /* tdma and coex table*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + u8 wifi_status) +{ + /* tdma and coex table */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == + wifi_status) + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + else + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, u8 wifi_status) { - u8 bt_rssi_state; + u8 bt_rssi_state; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bt_rssi_state = halbtc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0); + bt_rssi_state = btc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0); if (bt_link_info->hid_only) { - /*HID*/ + /* HID */ btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status); coex_dm->auto_tdma_adjust = false; return; } else if (bt_link_info->a2dp_only) { - /*A2DP*/ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - 
btc8821a1ant_tdma_dur_adj(btcoexist, wifi_status); - } else { - /*for low BT RSSI*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); + /* A2DP */ + if ((bt_rssi_state != BTC_RSSI_STATE_HIGH) && + (bt_rssi_state != BTC_RSSI_STATE_STAY_HIGH)) { + /* for low BT RSSI */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); coex_dm->auto_tdma_adjust = false; } - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - /*HID+A2DP*/ + /* HID+A2DP */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); coex_dm->auto_tdma_adjust = false; } else { /*for low BT RSSI*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); coex_dm->auto_tdma_adjust = false; } - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } else if ((bt_link_info->pan_only) || (bt_link_info->hid_exist && bt_link_info->pan_exist)) { - /*PAN(OPP, FTP), HID+PAN(OPP, FTP)*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + /* PAN(OPP, FTP), HID+PAN(OPP, FTP) */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); coex_dm->auto_tdma_adjust = false; } else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) || (bt_link_info->hid_exist && bt_link_info->a2dp_exist && bt_link_info->pan_exist)) { - /*A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP)*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + /* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); coex_dm->auto_tdma_adjust = false; } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); coex_dm->auto_tdma_adjust = false; } } -static void halbtc8821a1ant_action_wifi_not_connected( - struct btc_coexist *btcoexist) +static +void btc8821a1ant_action_wifi_not_connected(struct btc_coexist *btcoexist) { - /* power save state*/ - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, 0x0, 0x0); + /* power save state */ + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); - /* tdma and coex table*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + /* tdma and coex table */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } static void btc8821a1ant_act_wifi_not_conn_scan(struct btc_coexist *btcoexist) { - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, 0x0, 0x0); + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + 
btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + /* tdma and coex table */ + if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) { + if (bt_link_info->a2dp_exist) { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else if (bt_link_info->a2dp_exist && + bt_link_info->pan_exist) { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 4); + } else { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 4); + } + } else if ((coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_SCO_BUSY) || + (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } else { + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } } -static void halbtc8821a1ant_action_wifi_connected_scan( - struct btc_coexist *btcoexist) { +static +void btc8821a1ant_action_wifi_connected_scan(struct btc_coexist *btcoexist) +{ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - /* power save state*/ - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, 0x0, 0x0); + /* power save state */ + btc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); - /* tdma and coex table*/ + /* tdma and coex table */ if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 22); - halbtc8821a1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } } else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || @@ -1881,52 +1733,52 @@ static void halbtc8821a1ant_action_wifi_connected_scan( btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN); } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } } static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist) { struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bool hs_connecting = false; + bool hs_connecting = false; btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting); - halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, - 0x0, 0x0); + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); - /* tdma and coex table*/ - if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + /* tdma and coex table */ + if (coex_dm->bt_status == BT_8821A_1ANT_BT_STATUS_ACL_BUSY) { if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 22); - halbtc8821a1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 22); + 
btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 20); - halbtc8821a1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); } } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } } -static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) +static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - bool wifi_busy = false; - bool scan = false, link = false, roam = false; - bool under_4way = false; + bool wifi_busy = false; + bool scan = false, link = false, roam = false; + bool under_4way = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], CoexForWifiConnect()===>\n"); - btcoexist->btc_get(btcoexist, - BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); if (under_4way) { btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -1938,7 +1790,7 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); if (scan || link || roam) { - halbtc8821a1ant_action_wifi_connected_scan(btcoexist); + btc8821a1ant_action_wifi_connected_scan(btcoexist); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); return; @@ -1947,14 +1799,14 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) /* power save state*/ if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status && !btcoexist->bt_link_info.hid_only) - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_LPS_ON, 0x50, 0x4); + btc8821a1ant_power_save_state(btcoexist, + BTC_PS_LPS_ON, 0x50, 0x4); else - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, - 0x0, 0x0); + btc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, + 0x0, 0x0); - /* tdma and coex table*/ + /* tdma and coex table */ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); if (!wifi_busy) { if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { @@ -1967,10 +1819,10 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE); } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - halbtc8821a1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 2); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); } } else { if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { @@ -1983,10 +1835,9 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY); } else { - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - halbtc8821a1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 2); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + 
btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); } } } @@ -1994,52 +1845,52 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 algorithm = 0; + u8 algorithm = 0; - algorithm = halbtc8821a1ant_action_algorithm(btcoexist); + algorithm = btc8821a1ant_action_algorithm(btcoexist); coex_dm->cur_algorithm = algorithm; - if (!halbtc8821a1ant_is_common_action(btcoexist)) { + if (!btc8821a1ant_is_common_action(btcoexist)) { switch (coex_dm->cur_algorithm) { case BT_8821A_1ANT_COEX_ALGO_SCO: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = SCO\n"); - halbtc8821a1ant_action_sco(btcoexist); + btc8821a1ant_action_sco(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = HID\n"); - halbtc8821a1ant_action_hid(btcoexist); + btc8821a1ant_action_hid(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_A2DP: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = A2DP\n"); - halbtc8821a1ant_action_a2dp(btcoexist); + btc8821a1ant_action_a2dp(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = A2DP+PAN(HS)\n"); - halbtc8821a1ant_action_a2dp_pan_hs(btcoexist); + btc8821a1ant_action_a2dp_pan_hs(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = PAN(EDR)\n"); - halbtc8821a1ant_action_pan_edr(btcoexist); + btc8821a1ant_action_pan_edr(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANHS: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = HS mode\n"); - halbtc8821a1ant_action_pan_hs(btcoexist); + btc8821a1ant_action_pan_hs(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = PAN+A2DP\n"); - halbtc8821a1ant_action_pan_edr_a2dp(btcoexist); + btc8821a1ant_action_pan_edr_a2dp(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = PAN(EDR)+HID\n"); - halbtc8821a1ant_action_pan_edr_hid(btcoexist); + btc8821a1ant_action_pan_edr_hid(btcoexist); break; case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -2049,28 +1900,30 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) case BT_8821A_1ANT_COEX_ALGO_HID_A2DP: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = HID+A2DP\n"); - halbtc8821a1ant_action_hid_a2dp(btcoexist); + btc8821a1ant_action_hid_a2dp(btcoexist); break; default: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Action algorithm = coexist All Off!!\n"); - /*halbtc8821a1ant_coex_all_off(btcoexist);*/ + /*btc8821a1ant_coex_all_off(btcoexist);*/ break; } coex_dm->pre_algorithm = coex_dm->cur_algorithm; } } -static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - bool wifi_connected = false, bt_hs_on = false; - bool increase_scan_dev_num = false; - bool bt_ctrl_agg_buf_size = false; - u8 agg_buf_size = 5; - u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; - bool wifi_under_5g = false; 
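[Reviewer note, not part of the patch: the locals added just below feed a new multi-port check. The value returned for BTC_GET_U4_WIFI_LINK_STATUS packs the number of active WiFi links into its upper 16 bits, with role flags such as WIFI_P2P_GO_CONNECTED in the lower half. A minimal restatement of the decoding, using only calls and symbols that appear in this diff:

	u32 wifi_link_status = 0;
	u32 num_of_wifi_link;

	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
			   &wifi_link_status);
	num_of_wifi_link = wifi_link_status >> 16;	/* high half = link count */
	if (num_of_wifi_link >= 2 ||
	    (wifi_link_status & WIFI_P2P_GO_CONNECTED))
		btc8821a1ant_action_wifi_multi_port(btcoexist);

With two or more links (or a P2P GO) up, per-profile tuning is abandoned in favor of the conservative multi-port policy.]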
+ bool wifi_connected = false, bt_hs_on = false; + bool increase_scan_dev_num = false; + bool bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + bool wifi_under_5g = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], RunCoexistMechanism()===>\n"); @@ -2097,7 +1950,7 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) if (wifi_under_5g) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); - halbtc8821a1ant_coex_under_5g(btcoexist); + btc8821a1ant_coex_under_5g(btcoexist); return; } @@ -2109,21 +1962,41 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM, &increase_scan_dev_num); - btcoexist->btc_get(btcoexist, - BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if ((num_of_wifi_link >= 2) || + (wifi_link_status & WIFI_P2P_GO_CONNECTED)) { + btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + btc8821a1ant_action_wifi_multi_port(btcoexist); + return; + } if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) { - halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); } else { if (wifi_connected) { wifi_rssi_state = - halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2, - 30, 0); - halbtc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, 1, 1); + btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2, + 30, 0); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, + 1, 1); + } else { + btc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, + 1, 1); + } } else { - halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, - 0, 0, 0, 0); + btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, + 0, 0, 0, 0); } } @@ -2137,22 +2010,22 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) bt_ctrl_agg_buf_size = true; agg_buf_size = 0x8; } - halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, - bt_ctrl_agg_buf_size, agg_buf_size); + btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); btc8821a1ant_run_sw_coex_mech(btcoexist); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (coex_sta->c2h_bt_inquiry_page) { - halbtc8821a1ant_action_bt_inquiry(btcoexist); + btc8821a1ant_action_bt_inquiry(btcoexist); return; } else if (bt_hs_on) { - halbtc8821a1ant_action_hs(btcoexist); + btc8821a1ant_action_hs(btcoexist); return; } if (!wifi_connected) { - bool scan = false, link = false, roam = false; + bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], wifi is non connected-idle !!!\n"); @@ -2161,48 +2034,57 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - if (scan || link || roam) - btc8821a1ant_act_wifi_not_conn_scan(btcoexist); - else - 
halbtc8821a1ant_action_wifi_not_connected(btcoexist); + if (scan || link || roam) { + if (scan) + btc8821a1ant_act_wifi_not_conn_scan(btcoexist); + else + btc8821a1ant_action_wifi_not_connected_asso_auth( + btcoexist); + } else { + btc8821a1ant_action_wifi_not_connected(btcoexist); + } } else { - /* wifi LPS/Busy*/ - halbtc8821a1ant_action_wifi_connected(btcoexist); + /* wifi LPS/Busy */ + btc8821a1ant_action_wifi_connected(btcoexist); } } -static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) +static void btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) { - /* force to reset coex mechanism*/ - /* sw all off*/ - halbtc8821a1ant_sw_mechanism(btcoexist, false); + /* force to reset coex mechanism + * sw all off + */ + btc8821a1ant_sw_mechanism(btcoexist, false); - halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8); - halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); } -static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist, - bool back_up) +static void btc8821a1ant_init_hw_config(struct btc_coexist *btcoexist, + bool back_up, bool wifi_only) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 u1_tmp = 0; - bool wifi_under_5g = false; + u8 u1_tmp = 0; + bool wifi_under_5g = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], 1Ant Init HW Config!!\n"); + if (wifi_only) + return; + if (back_up) { coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, 0x430); coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist, 0x434); coex_dm->backup_retry_limit = - btcoexist->btc_read_2byte(btcoexist, 0x42a); + btcoexist->btc_read_2byte(btcoexist, 0x42a); coex_dm->backup_ampdu_max_time = - btcoexist->btc_read_1byte(btcoexist, 0x456); + btcoexist->btc_read_1byte(btcoexist, 0x456); } - /* 0x790[5:0] = 0x5*/ + /* 0x790[5:0] = 0x5 */ u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); u1_tmp &= 0xc0; u1_tmp |= 0x5; @@ -2210,35 +2092,33 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist, btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); - /*Antenna config*/ + /* Antenna config */ if (wifi_under_5g) - halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, - true, false); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + true, false); else - halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, - true, false); - /* PTA parameter*/ - halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); - - /* Enable counter statistics*/ - /*0x76e[3] =1, WLAN_Act control by PTA*/ + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, + true, false); + /* PTA parameter */ + btc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + + /* Enable counter statistics + * 0x76e[3] =1, WLAN_Act control by PTA + */ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); } -/*============================================================*/ -/* work around function start with wa_halbtc8821a1ant_*/ -/*============================================================*/ -/*============================================================*/ -/* extern function start with EXhalbtc8821a1ant_*/ -/*============================================================*/ -void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist) 
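[Reviewer note, not part of the patch: the 0x790[5:0] = 0x5 update in btc8821a1ant_init_hw_config() above is a read-modify-write through the btc_read_1byte()/btc_write_1byte() accessors (the write-back of u1_tmp falls between the two hunks shown). A generic masked-write helper, hypothetical except for the accessor signatures taken from this diff, could express the same sequence:

	static void btc_write_1byte_masked(struct btc_coexist *btc, u32 reg,
					   u8 mask, u8 val)
	{
		u8 tmp = btc->btc_read_1byte(btc, reg);

		tmp &= ~mask;		/* clear the field */
		tmp |= (val & mask);	/* write the new value */
		btc->btc_write_1byte(btc, reg, tmp);
	}

The 0x790 update above would then read btc_write_1byte_masked(btcoexist, 0x790, 0x3f, 0x5): mask 0x3f covers bits [5:0], while the open-coded &= 0xc0 in the patch preserves the top two bits, which is the same operation.]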
+/************************************************************** + * extern function start with ex_btc8821a1ant_ + **************************************************************/ +void ex_btc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist, bool wifionly) { - halbtc8821a1ant_init_hw_config(btcoexist, true); + btc8821a1ant_init_hw_config(btcoexist, true, wifionly); } -void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) +void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -2247,12 +2127,12 @@ void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) btcoexist->stop_coex_dm = false; - halbtc8821a1ant_init_coex_dm(btcoexist); + btc8821a1ant_init_coex_dm(btcoexist); - halbtc8821a1ant_query_bt_info(btcoexist); + btc8821a1ant_query_bt_info(btcoexist); } -void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; @@ -2359,7 +2239,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) "uplink" : "downlink"))); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", - ((btcoexist->bt_info.bt_disabled) ? ("disabled") : + ((coex_sta->bt_disabled) ? ("disabled") : ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : ((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ? @@ -2397,7 +2277,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), - ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")), + ((coex_sta->under_lps ? 
"LPS ON" : "LPS OFF")), btcoexist->bt_info.lps_val, btcoexist->bt_info.rpwm_val); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); @@ -2422,7 +2302,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) "\r\n %-35s = 0x%x ", "Rate Mask", btcoexist->bt_info.ra_mask); - /* Fw mechanism*/ + /* Fw mechanism */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", "============[Fw mechanism]============"); @@ -2444,7 +2324,7 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) coex_dm->cur_ignore_wlan_act); } - /* Hw setting*/ + /* Hw setting */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", "============[Hw setting]============"); @@ -2527,38 +2407,46 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, coex_sta->low_priority_tx); #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1) - halbtc8821a1ant_monitor_bt_ctr(btcoexist); + btc8821a1ant_monitor_bt_ctr(btcoexist); #endif btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); } -void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; + bool wifi_under_5g = false; if (btcoexist->manual_control || btcoexist->stop_coex_dm) return; + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); + return; + } if (BTC_IPS_ENTER == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; - halbtc8821a1ant_set_ant_path(btcoexist, - BTC_ANT_PATH_BT, false, true); - /*set PTA control*/ - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); - halbtc8821a1ant_coex_table_with_type(btcoexist, - NORMAL_EXEC, 0); + btc8821a1ant_set_ant_path(btcoexist, + BTC_ANT_PATH_BT, false, true); + /* set PTA control */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + btc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); } else if (BTC_IPS_LEAVE == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; - halbtc8821a1ant_run_coexist_mechanism(btcoexist); + btc8821a1ant_run_coexist_mechanism(btcoexist); } } -void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -2568,22 +2456,35 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) if (BTC_LPS_ENABLE == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], LPS ENABLE notify\n"); - coex_sta->under_Lps = true; + coex_sta->under_lps = true; } else if (BTC_LPS_DISABLE == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], LPS DISABLE notify\n"); - coex_sta->under_Lps = false; + coex_sta->under_lps = false; } } -void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false, bt_hs_on = false; + bool bt_ctrl_agg_buf_size = false; + bool wifi_under_5g = false; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + u8 agg_buf_size = 5; + + if 
(btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); + return; + } - if (btcoexist->manual_control || - btcoexist->stop_coex_dm || - btcoexist->bt_info.bt_disabled) + if (coex_sta->bt_disabled) return; btcoexist->btc_get(btcoexist, @@ -2591,13 +2492,24 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); - halbtc8821a1ant_query_bt_info(btcoexist); + btc8821a1ant_query_bt_info(btcoexist); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if (num_of_wifi_link >= 2) { + btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + btc8821a1ant_action_wifi_multi_port(btcoexist); + return; + } if (coex_sta->c2h_bt_inquiry_page) { - halbtc8821a1ant_action_bt_inquiry(btcoexist); + btc8821a1ant_action_bt_inquiry(btcoexist); return; } else if (bt_hs_on) { - halbtc8821a1ant_action_hs(btcoexist); + btc8821a1ant_action_hs(btcoexist); return; } @@ -2605,40 +2517,62 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCAN START notify\n"); if (!wifi_connected) { - /* non-connected scan*/ + /* non-connected scan */ btc8821a1ant_act_wifi_not_conn_scan(btcoexist); } else { - /* wifi is connected*/ - halbtc8821a1ant_action_wifi_connected_scan(btcoexist); + /* wifi is connected */ + btc8821a1ant_action_wifi_connected_scan(btcoexist); } } else if (BTC_SCAN_FINISH == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCAN FINISH notify\n"); if (!wifi_connected) { - /* non-connected scan*/ - halbtc8821a1ant_action_wifi_not_connected(btcoexist); + /* non-connected scan */ + btc8821a1ant_action_wifi_not_connected(btcoexist); } else { - halbtc8821a1ant_action_wifi_connected(btcoexist); + btc8821a1ant_action_wifi_connected(btcoexist); } } } -void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; bool wifi_connected = false, bt_hs_on = false; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + bool bt_ctrl_agg_buf_size = false; + bool wifi_under_5g = false; + u8 agg_buf_size = 5; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + coex_sta->bt_disabled) + return; + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); + return; + } - if (btcoexist->manual_control || - btcoexist->stop_coex_dm || - btcoexist->bt_info.bt_disabled) + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if (num_of_wifi_link >= 2) { + btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + btc8821a1ant_action_wifi_multi_port(btcoexist); return; + } btcoexist->btc_get(btcoexist, 
BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (coex_sta->c2h_bt_inquiry_page) { - halbtc8821a1ant_action_bt_inquiry(btcoexist); + btc8821a1ant_action_bt_inquiry(btcoexist); return; } else if (bt_hs_on) { - halbtc8821a1ant_action_hs(btcoexist); + btc8821a1ant_action_hs(btcoexist); return; } @@ -2653,26 +2587,33 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (!wifi_connected) { - /* non-connected scan*/ - halbtc8821a1ant_action_wifi_not_connected(btcoexist); + /* non-connected scan */ + btc8821a1ant_action_wifi_not_connected(btcoexist); } else { - halbtc8821a1ant_action_wifi_connected(btcoexist); + btc8821a1ant_action_wifi_connected(btcoexist); } } } -void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, - u8 type) +void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[3] = {0}; u32 wifi_bw; u8 wifi_central_chnl; + bool wifi_under_5g = false; - if (btcoexist->manual_control || - btcoexist->stop_coex_dm || - btcoexist->bt_info.bt_disabled) + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + coex_sta->bt_disabled) + return; + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); return; + } if (BTC_MEDIA_CONNECT == type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -2682,17 +2623,16 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, "[BTCoex], MEDIA disconnect notify\n"); } - /* only 2.4G we need to inform bt the chnl mask*/ + /* only in 2.4G do we need to inform BT of the channel mask */ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl); - if ((BTC_MEDIA_CONNECT == type) && + if ((type == BTC_MEDIA_CONNECT) && (wifi_central_chnl <= 14)) { - /*h2c_parameter[0] = 0x1;*/ h2c_parameter[0] = 0x0; h2c_parameter[1] = wifi_central_chnl; btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_HT40 == wifi_bw) + if (wifi_bw == BTC_WIFI_BW_HT40) h2c_parameter[2] = 0x30; else h2c_parameter[2] = 0x20; @@ -2711,25 +2651,48 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } -void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, - u8 type) +void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; bool bt_hs_on = false; + bool bt_ctrl_agg_buf_size = false; + bool wifi_under_5g = false; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + u8 agg_buf_size = 5; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + coex_sta->bt_disabled) + return; - if (btcoexist->manual_control || - btcoexist->stop_coex_dm || - btcoexist->bt_info.bt_disabled) + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); return; + } coex_sta->special_pkt_period_cnt = 0; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if (num_of_wifi_link >= 2) { + 
btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + btc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + btc8821a1ant_action_wifi_multi_port(btcoexist); + return; + } + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); if (coex_sta->c2h_bt_inquiry_page) { - halbtc8821a1ant_action_bt_inquiry(btcoexist); + btc8821a1ant_action_bt_inquiry(btcoexist); return; } else if (bt_hs_on) { - halbtc8821a1ant_action_hs(btcoexist); + btc8821a1ant_action_hs(btcoexist); return; } @@ -2741,12 +2704,13 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, } } -void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, - u8 *tmp_buf, u8 length) +void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) { struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 i; u8 bt_info = 0; - u8 i, rsp_source = 0; + u8 rsp_source = 0; bool wifi_connected = false; bool bt_busy = false; bool wifi_under_5g = false; @@ -2756,7 +2720,7 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); - rsp_source = tmp_buf[0]&0xf; + rsp_source = tmp_buf[0] & 0xf; if (rsp_source >= BT_INFO_SRC_8821A_1ANT_MAX) rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW; coex_sta->bt_info_c2h_cnt[rsp_source]++; @@ -2768,7 +2732,7 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; if (i == 1) bt_info = tmp_buf[i]; - if (i == length-1) { + if (i == length - 1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "0x%02x]\n", tmp_buf[i]); } else { @@ -2787,19 +2751,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->bt_info_ext = coex_sta->bt_info_c2h[rsp_source][4]; - /* Here we need to resend some wifi info to BT*/ - /* because bt is reset and loss of the info.*/ + /* Here we need to resend some wifi info to BT + * because bt is reset and lost the info + */ if (coex_sta->bt_info_ext & BIT1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); - btcoexist->btc_get(btcoexist, - BTC_GET_BL_WIFI_CONNECTED, + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) { - ex_halbtc8821a1ant_media_status_notify(btcoexist, + ex_btc8821a1ant_media_status_notify(btcoexist, BTC_MEDIA_CONNECT); } else { - ex_halbtc8821a1ant_media_status_notify(btcoexist, + ex_btc8821a1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); } } @@ -2809,36 +2773,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, !btcoexist->stop_coex_dm) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); - halbtc8821a1ant_ignore_wlan_act(btcoexist, - FORCE_EXEC, - false); + btc8821a1ant_ignore_wlan_act(btcoexist, + FORCE_EXEC, + false); } } -#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) - if (!(coex_sta->bt_info_ext & BIT4)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); - halbtc8821a1ant_bt_auto_report(btcoexist, - FORCE_EXEC, true); - } -#endif } - /* check BIT2 first ==> check if bt is under inquiry or page scan*/ + /* check BIT2 first ==> check if bt is under inquiry or page scan */ if (bt_info & BT_INFO_8821A_1ANT_B_INQ_PAGE) coex_sta->c2h_bt_inquiry_page = true; else coex_sta->c2h_bt_inquiry_page = false; - /* set 
link exist status*/ - if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { + /* set link exist status */ + if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) { coex_sta->bt_link_exist = false; coex_sta->pan_exist = false; coex_sta->a2dp_exist = false; coex_sta->hid_exist = false; coex_sta->sco_exist = false; } else { - /* connection exists*/ + /* connection exists */ coex_sta->bt_link_exist = true; if (bt_info & BT_INFO_8821A_1ANT_B_FTP) coex_sta->pan_exist = true; @@ -2858,14 +2814,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->sco_exist = false; } - halbtc8821a1ant_update_bt_link_info(btcoexist); + btc8821a1ant_update_bt_link_info(btcoexist); - if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { + /* mask profile bit for connect-ilde identification + * (for CSR case: A2DP idle --> 0x41) + */ + bt_info = bt_info & 0x1f; + + if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) { coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) { - /* connection exists but no busy*/ + /* connection exists but no busy */ coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); @@ -2895,33 +2856,48 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); - halbtc8821a1ant_run_coexist_mechanism(btcoexist); + btc8821a1ant_run_coexist_mechanism(btcoexist); } -void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) +void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; + bool wifi_under_5g = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n"); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); + return; + } + btcoexist->stop_coex_dm = true; - halbtc8821a1ant_set_ant_path(btcoexist, - BTC_ANT_PATH_BT, false, true); - halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, true); + btc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); - halbtc8821a1ant_power_save_state(btcoexist, - BTC_PS_WIFI_NATIVE, 0x0, 0x0); - halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); - ex_halbtc8821a1ant_media_status_notify(btcoexist, - BTC_MEDIA_DISCONNECT); + ex_btc8821a1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); } -void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) { struct rtl_priv *rtlpriv = btcoexist->adapter; + bool wifi_under_5g = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + btc8821a1ant_coex_under_5g(btcoexist); + return; + } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n"); @@ -2929,26 +2905,33 @@ void ex_halbtc8821a1ant_pnp_notify(struct 
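An aside on the bt_info handling above: the patch masks the C2H byte with 0x1f before classifying the link, so busy/report bits are dropped and a CSR controller reporting A2DP idle as 0x41 still lands in the connected-idle bucket. A minimal standalone sketch of that classification, assuming the connection flag is BIT0 of the byte; the enum names are stand-ins for the driver's BT_8821A_1ANT_BT_STATUS_* values:

#include <stdint.h>
#include <stdio.h>

#define BT_INFO_B_CONNECTION 0x01	/* assumed: connection bit of the C2H byte */

enum bt_status { BT_NON_CONNECTED_IDLE, BT_CONNECTED_IDLE, BT_BUSY };

static enum bt_status classify_bt_info(uint8_t bt_info)
{
	/* mask profile bits for connect-idle identification (0x41 -> 0x01) */
	bt_info &= 0x1f;

	if (!(bt_info & BT_INFO_B_CONNECTION))
		return BT_NON_CONNECTED_IDLE;
	if (bt_info == BT_INFO_B_CONNECTION)
		return BT_CONNECTED_IDLE;	/* link up, no profile busy */
	return BT_BUSY;				/* some profile bit is set */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_bt_info(0x00),		/* non-connected idle */
	       classify_bt_info(0x41),		/* CSR A2DP idle */
	       classify_bt_info(0x03));		/* connected and busy */
	return 0;
}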
btc_coexist *btcoexist, u8 pnp_state) if (BTC_WIFI_PNP_SLEEP == pnp_state) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify to SLEEP\n"); + /* BT should clear UnderIPS/UnderLPS state to avoid mismatch + * state after wakeup. + */ + coex_sta->under_ips = false; + coex_sta->under_lps = false; btcoexist->stop_coex_dm = true; - halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); - halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, - 0x0, 0x0); - halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); + btc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false, + true); } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify to WAKE UP\n"); btcoexist->stop_coex_dm = false; - halbtc8821a1ant_init_hw_config(btcoexist, false); - halbtc8821a1ant_init_coex_dm(btcoexist); - halbtc8821a1ant_query_bt_info(btcoexist); + btc8821a1ant_init_hw_config(btcoexist, false, false); + btc8821a1ant_init_coex_dm(btcoexist); + btc8821a1ant_query_bt_info(btcoexist); } } -void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist) +void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - static u8 dis_ver_info_cnt; - u32 fw_ver = 0, bt_patch_ver = 0; + static u8 dis_ver_info_cnt; + u32 fw_ver = 0, bt_patch_ver = 0; struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; @@ -2982,16 +2965,9 @@ void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist) } #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) - halbtc8821a1ant_query_bt_info(btcoexist); - halbtc8821a1ant_monitor_bt_ctr(btcoexist); - btc8821a1ant_mon_bt_en_dis(btcoexist); + btc8821a1ant_query_bt_info(btcoexist); + btc8821a1ant_monitor_bt_ctr(btcoexist); #else - if (halbtc8821a1ant_Is_wifi_status_changed(btcoexist) || - coex_dm->auto_tdma_adjust) { - if (coex_sta->special_pkt_period_cnt > 2) - halbtc8821a1ant_run_coexist_mechanism(btcoexist); - } - coex_sta->special_pkt_period_cnt++; #endif } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h index 20e904890fc243a175ddd31dc64ca1524e117aca..1bd1ebe3364ebe3e0ac44e17cce0507a9aba566e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h @@ -140,13 +140,14 @@ struct coex_dm_8821a_1ant { }; struct coex_sta_8821a_1ant { + bool bt_disabled; bool bt_link_exist; bool sco_exist; bool a2dp_exist; bool hid_exist; bool pan_exist; - bool under_Lps; + bool under_lps; bool under_ips; u32 special_pkt_period_cnt; u32 high_priority_tx; @@ -160,6 +161,7 @@ struct coex_sta_8821a_1ant { u8 bt_info_c2h[BT_INFO_SRC_8821A_1ANT_MAX][10]; u32 bt_info_c2h_cnt[BT_INFO_SRC_8821A_1ANT_MAX]; bool c2h_bt_inquiry_page; + bool wifi_is_high_pri_task; u8 bt_retry_cnt; u8 bt_info_ext; }; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 1717e9ce96caa311e7b6002ebac5ba49286b0aa0..841b4a83ab70c8f3c5c343702b4a9b7ad7c8e5cc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -23,7 +23,7 @@ * *****************************************************************************/ -/*============================================================ +/************************************************************ * Description: * * This file is for RTL8821A Co-exist mechanism @@ -32,22 +32,19 @@ * 2012/08/22 Cosa first check in. * 2012/11/14 Cosa Revise for 8821A 2Ant out sourcing. * - *============================================================ - */ + ************************************************************/ -/*============================================================ +/************************************************************ * include files - *============================================================ -*/ + ************************************************************/ #include "halbt_precomp.h" -/*============================================================ +/************************************************************ * Global variables, these are static variables - *============================================================ - */ -static struct coex_dm_8821a_2ant glcoex_dm_8821a_2ant; -static struct coex_dm_8821a_2ant *coex_dm = &glcoex_dm_8821a_2ant; -static struct coex_sta_8821a_2ant glcoex_sta_8821a_2ant; -static struct coex_sta_8821a_2ant *coex_sta = &glcoex_sta_8821a_2ant; + ************************************************************/ +static struct coex_dm_8821a_2ant glcoex_dm_8821a_2ant; +static struct coex_dm_8821a_2ant *coex_dm = &glcoex_dm_8821a_2ant; +static struct coex_sta_8821a_2ant glcoex_sta_8821a_2ant; +static struct coex_sta_8821a_2ant *coex_sta = &glcoex_sta_8821a_2ant; static const char *const glbt_info_src_8821a_2ant[] = { "BT Info[wifi fw]", @@ -55,32 +52,29 @@ static const char *const glbt_info_src_8821a_2ant[] = { "BT Info[bt auto report]", }; -static u32 glcoex_ver_date_8821a_2ant = 20130618; -static u32 glcoex_ver_8821a_2ant = 0x5050; +static u32 glcoex_ver_date_8821a_2ant = 20130618; +static u32 glcoex_ver_8821a_2ant = 0x5050; -/*============================================================ +/************************************************************ * local function proto type if needed - *============================================================ - *============================================================ - * local function start with halbtc8821a2ant_ - *============================================================ - */ -static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist, - u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) + * + * local function start with btc8821a2ant_ + ************************************************************/ +static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist, + u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) { struct rtl_priv *rtlpriv = btcoexist->adapter; - long bt_rssi = 0; - u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + long bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; bt_rssi = coex_sta->bt_rssi; if (level_num == 2) { if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { - long tmp = rssi_thresh + - BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT; - if (bt_rssi >= tmp) { + if (bt_rssi >= + rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT) { bt_rssi_state = BTC_RSSI_STATE_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT Rssi state switch to High\n"); @@ -110,7 +104,8 @@ static u8 
halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist, if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { if (bt_rssi >= - (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { bt_rssi_state = BTC_RSSI_STATE_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT Rssi state switch to Medium\n"); @@ -156,13 +151,13 @@ static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist, return bt_rssi_state; } -static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, - u8 index, u8 level_num, - u8 rssi_thresh, u8 rssi_thresh1) +static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, + u8 rssi_thresh, u8 rssi_thresh1) { struct rtl_priv *rtlpriv = btcoexist->adapter; - long wifi_rssi = 0; - u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + long wifi_rssi = 0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); @@ -204,7 +199,8 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) { if (wifi_rssi >= - (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], wifi RSSI state switch to Medium\n"); @@ -248,76 +244,57 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, return wifi_rssi_state; } -static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist) +static +void btc8821a2ant_limited_rx(struct btc_coexist *btcoexist, bool force_exec, + bool rej_ap_agg_pkt, bool bt_ctrl_agg_buf_size, + u8 agg_buf_size) { - struct rtl_priv *rtlpriv = btcoexist->adapter; - static bool pre_bt_disabled; - static u32 bt_disable_cnt; - bool bt_active = true, bt_disabled = false; - - /* This function check if bt is disabled*/ - - if (coex_sta->high_priority_tx == 0 && - coex_sta->high_priority_rx == 0 && - coex_sta->low_priority_tx == 0 && - coex_sta->low_priority_rx == 0) - bt_active = false; - if (coex_sta->high_priority_tx == 0xffff && - coex_sta->high_priority_rx == 0xffff && - coex_sta->low_priority_tx == 0xffff && - coex_sta->low_priority_rx == 0xffff) - bt_active = false; - if (bt_active) { - bt_disable_cnt = 0; - bt_disabled = false; - btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, - &bt_disabled); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT is enabled !!\n"); - } else { - bt_disable_cnt++; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], bt all counters = 0, %d times!!\n", - bt_disable_cnt); - if (bt_disable_cnt >= 2) { - bt_disabled = true; - btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, - &bt_disabled); - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT is disabled !!\n"); - } - } - if (pre_bt_disabled != bt_disabled) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT is from %s to %s!!\n", - (pre_bt_disabled ? "disabled" : "enabled"), - (bt_disabled ? 
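The two-level branch of the RSSI state machines above is a plain hysteresis comparator: the tolerance (BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT) is added only on the way up, so a signal hovering at the threshold cannot flip the state on every poll. A self-contained sketch with an arbitrary tolerance of 2:

#include <stdio.h>

#define RSSI_TOL 2	/* stand-in for BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT */

enum rssi_state { RSSI_LOW, RSSI_HIGH };

/* two-level hysteresis: climb at thresh + RSSI_TOL, fall at thresh */
static enum rssi_state rssi_update(enum rssi_state prev, long rssi, long thresh)
{
	if (prev == RSSI_LOW)
		return (rssi >= thresh + RSSI_TOL) ? RSSI_HIGH : RSSI_LOW;
	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}

int main(void)
{
	enum rssi_state s = RSSI_LOW;
	long samples[] = { 34, 35, 36, 37, 36, 35, 34 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		s = rssi_update(s, samples[i], 35);
		printf("rssi=%ld -> %s\n", samples[i],
		       s == RSSI_HIGH ? "HIGH" : "LOW");
	}
	return 0;
}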
"disabled" : "enabled")); - pre_bt_disabled = bt_disabled; - } + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; + u8 rx_agg_size = agg_buf_size; + + /* Rx Aggregation related setting */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, + &reject_rx_agg); + /* decide BT control aggregation buf size or not */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregation buf size, works when BT control Rx aggregation size */ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size); + /* real update aggregation setting */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); } -static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +static void btc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u32 reg_hp_txrx, reg_lp_txrx, u4tmp; - u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + u32 reg_hp_txrx, reg_lp_txrx, u4tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; reg_hp_txrx = 0x770; reg_lp_txrx = 0x774; u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); reg_hp_tx = u4tmp & MASKLWORD; - reg_hp_rx = (u4tmp & MASKHWORD)>>16; + reg_hp_rx = (u4tmp & MASKHWORD) >> 16; u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); reg_lp_tx = u4tmp & MASKLWORD; - reg_lp_rx = (u4tmp & MASKHWORD)>>16; + reg_lp_rx = (u4tmp & MASKHWORD) >> 16; coex_sta->high_priority_tx = reg_hp_tx; coex_sta->high_priority_rx = reg_hp_rx; coex_sta->low_priority_tx = reg_lp_tx; coex_sta->low_priority_rx = reg_lp_rx; + if ((coex_sta->low_priority_rx >= 950) && + (coex_sta->low_priority_rx >= coex_sta->low_priority_tx) && + (!coex_sta->under_ips)) + bt_link_info->slave_role = true; + else + bt_link_info->slave_role = false; + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); @@ -329,14 +306,51 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); } -static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) +static void btc8821a2ant_monitor_wifi_ctr(struct btc_coexist *btcoexist) +{ + if (coex_sta->under_ips) { + coex_sta->crc_ok_cck = 0; + coex_sta->crc_ok_11g = 0; + coex_sta->crc_ok_11n = 0; + coex_sta->crc_ok_11n_agg = 0; + + coex_sta->crc_err_cck = 0; + coex_sta->crc_err_11g = 0; + coex_sta->crc_err_11n = 0; + coex_sta->crc_err_11n_agg = 0; + } else { + coex_sta->crc_ok_cck = + btcoexist->btc_read_4byte(btcoexist, 0xf88); + coex_sta->crc_ok_11g = + btcoexist->btc_read_2byte(btcoexist, 0xf94); + coex_sta->crc_ok_11n = + btcoexist->btc_read_2byte(btcoexist, 0xf90); + coex_sta->crc_ok_11n_agg = + btcoexist->btc_read_2byte(btcoexist, 0xfb8); + + coex_sta->crc_err_cck = + btcoexist->btc_read_4byte(btcoexist, 0xf84); + coex_sta->crc_err_11g = + btcoexist->btc_read_2byte(btcoexist, 0xf96); + coex_sta->crc_err_11n = + btcoexist->btc_read_2byte(btcoexist, 0xf92); + coex_sta->crc_err_11n_agg = + btcoexist->btc_read_2byte(btcoexist, 0xfba); + } + + /* reset counter */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x1); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xf16, 0x1, 0x0); +} + +static void btc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 
h2c_parameter[1] = {0}; coex_sta->c2h_bt_info_req_sent = true; - h2c_parameter[0] |= BIT0; /* trigger */ + h2c_parameter[0] |= BIT0; /* trigger */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", @@ -345,54 +359,135 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); } -static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) +bool btc8821a2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy = true; + static bool pre_under_4way = true; + static bool pre_bt_hs_on = true; + bool wifi_busy = false, under_4way = false, bt_hs_on = false; + bool wifi_connected = false; + u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + if (bt_hs_on != pre_bt_hs_on) { + pre_bt_hs_on = bt_hs_on; + return true; + } + + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 3, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_LOW)) + return true; + } + + return false; +} + +static void btc8821a2ant_update_bt_link_info(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. 
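btc8821a2ant_is_wifi_status_changed above is edge detection over a few booleans: each poll compares the live value against a static pre_* copy, updates the copy, and reports a transition. A condensed sketch of the same idiom (a production driver would keep the state per adapter rather than in function-local statics):

#include <stdbool.h>
#include <stdio.h>

/* report true once per transition, like the static pre_* copies above */
static bool changed(bool *prev, bool cur)
{
	if (cur == *prev)
		return false;
	*prev = cur;
	return true;
}

int main(void)
{
	bool pre_wifi_busy = false;
	bool samples[] = { false, true, true, false };

	for (unsigned i = 0; i < 4; i++)
		printf("busy=%d changed=%d\n", samples[i],
		       changed(&pre_wifi_busy, samples[i]));
	return 0;
}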
*/ + if (bt_hs_on) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } + + /* check if Sco only */ + if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; bool bt_hs_on = false; u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED; u8 num_of_diff_profile = 0; btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); - /*for win-8 stack HID report error*/ - /* sync BTInfo with BT firmware and stack */ - if (!stack_info->hid_exist) - stack_info->hid_exist = coex_sta->hid_exist; - /* when stack HID report error, here we use the info from bt fw. */ - if (!stack_info->bt_link_exist) - stack_info->bt_link_exist = coex_sta->bt_link_exist; - - if (!coex_sta->bt_link_exist) { + if (!bt_link_info->bt_link_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], No profile exists!!!\n"); + "[BTCoex], No BT link exists!!!\n"); return algorithm; } - if (coex_sta->sco_exist) + if (bt_link_info->sco_exist) num_of_diff_profile++; - if (coex_sta->hid_exist) + if (bt_link_info->hid_exist) num_of_diff_profile++; - if (coex_sta->pan_exist) + if (bt_link_info->pan_exist) num_of_diff_profile++; - if (coex_sta->a2dp_exist) + if (bt_link_info->a2dp_exist) num_of_diff_profile++; if (num_of_diff_profile == 1) { - if (coex_sta->sco_exist) { + if (bt_link_info->sco_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { - if (coex_sta->hid_exist) { + if (bt_link_info->hid_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], HID only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID; - } else if (coex_sta->a2dp_exist) { + } else if (bt_link_info->a2dp_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], A2DP only\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP; - } else if (coex_sta->pan_exist) { + } else if (bt_link_info->pan_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -407,16 +502,16 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) } } } else if (num_of_diff_profile == 2) { - if (coex_sta->sco_exist) { - if (coex_sta->hid_exist) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + HID\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; - } else if (coex_sta->a2dp_exist) { + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; + } else if (bt_link_info->a2dp_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + 
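The four *_only flags computed above are mutually exclusive "exactly this one profile" tests. If the profile booleans were packed into a bitmask (a hypothetical encoding, not the driver's), each test becomes a single comparison:

#include <stdbool.h>
#include <stdio.h>

enum {
	PROF_SCO  = 1 << 0,
	PROF_A2DP = 1 << 1,
	PROF_PAN  = 1 << 2,
	PROF_HID  = 1 << 3,
};

int main(void)
{
	unsigned profiles = PROF_A2DP;	/* only A2DP is up */

	bool sco_only  = (profiles == PROF_SCO);
	bool a2dp_only = (profiles == PROF_A2DP);
	bool pan_only  = (profiles == PROF_PAN);
	bool hid_only  = (profiles == PROF_HID);

	printf("sco=%d a2dp=%d pan=%d hid=%d\n",
	       sco_only, a2dp_only, pan_only, hid_only);
	return 0;
}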
A2DP ==> SCO\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; - } else if (coex_sta->pan_exist) { + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; + } else if (bt_link_info->pan_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -426,99 +521,104 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + PAN(EDR)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } } } else { - if (coex_sta->hid_exist && - coex_sta->a2dp_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], HID + A2DP\n"); algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; - } else if (coex_sta->hid_exist && - coex_sta->pan_exist) { + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], HID + PAN(HS)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_HID; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], HID + PAN(EDR)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = + BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; } - } else if (coex_sta->pan_exist && - coex_sta->a2dp_exist) { + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], A2DP + PAN(HS)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS; + algorithm = + BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], A2DP + PAN(EDR)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP; + algorithm = + BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP; } } } } else if (num_of_diff_profile == 3) { - if (coex_sta->sco_exist) { - if (coex_sta->hid_exist && - coex_sta->a2dp_exist) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + HID + A2DP ==> HID\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; - } else if (coex_sta->hid_exist && - coex_sta->pan_exist) { + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + HID + PAN(HS)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + HID + PAN(EDR)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } - } else if (coex_sta->pan_exist && - coex_sta->a2dp_exist) { + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + A2DP + PAN(HS)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } } } else { - if (coex_sta->hid_exist && - coex_sta->pan_exist && - coex_sta->a2dp_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], HID + A2DP + PAN(HS)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; + algorithm = + 
BT_8821A_2ANT_COEX_ALGO_HID_A2DP; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], HID + A2DP + PAN(EDR)\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + algorithm = + BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; } } } } else if (num_of_diff_profile >= 3) { - if (coex_sta->sco_exist) { - if (coex_sta->hid_exist && - coex_sta->pan_exist && - coex_sta->a2dp_exist) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { if (bt_hs_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -528,7 +628,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); - algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; } } } @@ -536,44 +636,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) return algorithm; } -static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - bool ret = false; - bool bt_hs_on = false, wifi_connected = false; - long bt_hs_rssi = 0; - u8 bt_rssi_state; - - if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on)) - return false; - if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, - &wifi_connected)) - return false; - if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) - return false; - - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); - - if (wifi_connected) { - if (bt_hs_on) { - if (bt_hs_rssi > 37) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Need to decrease bt power for HS mode!!\n"); - ret = true; - } - } else { - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); - ret = true; - } - } - } - return ret; -} - -static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist, +static void btc8821a2ant_set_fw_dac_swing_lvl(struct btc_coexist *btcoexist, u8 dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -592,185 +655,47 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); } -static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, - bool dec_bt_pwr) +static void btc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, + u8 dec_bt_pwr_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[1] = {0}; - h2c_parameter[0] = 0; - - if (dec_bt_pwr) - h2c_parameter[0] |= BIT1; + h2c_parameter[0] = dec_bt_pwr_lvl; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", - (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n", + dec_bt_pwr_lvl, h2c_parameter[0]); btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); } -static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, - bool force_exec, bool dec_bt_pwr) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s Dec BT power = %s\n", - (force_exec ? "force to" : ""), - ((dec_bt_pwr) ? 
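Stripped of the tracing, the selector above counts the active profiles and dispatches on the combination; note that this patch now routes every SCO-containing mix to the SCO algorithm. A compressed sketch of that control flow, with local enum names standing in for BT_8821A_2ANT_COEX_ALGO_* and the bt_hs_on special cases elided:

#include <stdbool.h>
#include <stdio.h>

enum algo { ALGO_UNDEFINED, ALGO_SCO, ALGO_HID, ALGO_A2DP, ALGO_PANEDR,
	    ALGO_HID_A2DP, ALGO_PANEDR_HID, ALGO_PANEDR_A2DP,
	    ALGO_HID_A2DP_PANEDR };

static enum algo pick_algo(bool sco, bool hid, bool a2dp, bool pan)
{
	int n = sco + hid + a2dp + pan;

	if (n == 0)
		return ALGO_UNDEFINED;		/* "No BT link exists" path */
	if (sco)
		return ALGO_SCO;		/* any SCO-containing mix */
	if (n == 1)
		return hid ? ALGO_HID : a2dp ? ALGO_A2DP : ALGO_PANEDR;
	if (n == 2) {
		if (hid && a2dp)
			return ALGO_HID_A2DP;
		if (hid && pan)
			return ALGO_PANEDR_HID;
		return ALGO_PANEDR_A2DP;	/* pan + a2dp */
	}
	return ALGO_HID_A2DP_PANEDR;		/* hid + a2dp + pan */
}

int main(void)
{
	printf("%d %d\n",
	       pick_algo(true, true, false, false),	/* SCO + HID -> ALGO_SCO */
	       pick_algo(false, true, true, false));	/* HID + A2DP */
	return 0;
}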
"ON" : "OFF")); - coex_dm->cur_dec_bt_pwr = dec_bt_pwr; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", - coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); - - if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) - return; - } - halbtc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); - - coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; -} - -static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, - bool bt_lna_cons_on) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 h2c_parameter[2] = {0}; - - h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */ - - if (bt_lna_cons_on) - h2c_parameter[1] |= BIT0; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", - bt_lna_cons_on ? "ON!!" : "OFF!!", - h2c_parameter[0] << 8 | h2c_parameter[1]); - - btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); -} - -static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist, - bool force_exec, bool bt_lna_cons_on) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s BT Constrain = %s\n", - (force_exec ? "force" : ""), - ((bt_lna_cons_on) ? "ON" : "OFF")); - coex_dm->cur_bt_lna_constrain = bt_lna_cons_on; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", - coex_dm->pre_bt_lna_constrain, - coex_dm->cur_bt_lna_constrain); - - if (coex_dm->pre_bt_lna_constrain == - coex_dm->cur_bt_lna_constrain) - return; - } - btc8821a2ant_set_fw_bt_lna_constr(btcoexist, - coex_dm->cur_bt_lna_constrain); - - coex_dm->pre_bt_lna_constrain = coex_dm->cur_bt_lna_constrain; -} - -static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist, - u8 bt_psd_mode) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 h2c_parameter[2] = {0}; - - h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */ - - h2c_parameter[1] = bt_psd_mode; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", - h2c_parameter[1], - h2c_parameter[0] << 8 | h2c_parameter[1]); - - btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); -} - -static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist, - bool force_exec, u8 bt_psd_mode) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s BT PSD mode = 0x%x\n", - (force_exec ? "force" : ""), bt_psd_mode); - coex_dm->cur_bt_psd_mode = bt_psd_mode; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", - coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); - - if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode) - return; - } - halbtc8821a2ant_set_fw_bt_psd_mode(btcoexist, - coex_dm->cur_bt_psd_mode); - - coex_dm->pre_bt_psd_mode = coex_dm->cur_bt_psd_mode; -} - -static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist, - bool enable_auto_report) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 h2c_parameter[1] = {0}; - - h2c_parameter[0] = 0; - - if (enable_auto_report) - h2c_parameter[0] |= BIT0; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", - (enable_auto_report ? "Enabled!!" 
: "Disabled!!"), - h2c_parameter[0]); - - btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); -} - -static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist, - bool force_exec, - bool enable_auto_report) +static void btc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, + bool force_exec, u8 dec_bt_pwr_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s BT Auto report = %s\n", - (force_exec ? "force to" : ""), - ((enable_auto_report) ? "Enabled" : "Disabled")); - coex_dm->cur_bt_auto_report = enable_auto_report; + "[BTCoex], %s Dec BT power level = %u\n", + (force_exec ? "force to" : ""), dec_bt_pwr_lvl); + coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl; if (!force_exec) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", - coex_dm->pre_bt_auto_report, - coex_dm->cur_bt_auto_report); + "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n", + coex_dm->pre_dec_bt_pwr_lvl, + coex_dm->cur_dec_bt_pwr_lvl); - if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl) return; } - halbtc8821a2ant_set_bt_auto_report(btcoexist, - coex_dm->cur_bt_auto_report); + btc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr_lvl); - coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; + coex_dm->pre_dec_bt_pwr_lvl = coex_dm->cur_dec_bt_pwr_lvl; } -static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, - bool force_exec, - u8 fw_dac_swing_lvl) +static void btc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -790,66 +715,14 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, return; } - btc8821a2ant_set_fw_dac_swing_lev(btcoexist, + btc8821a2ant_set_fw_dac_swing_lvl(btcoexist, coex_dm->cur_fw_dac_swing_lvl); coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; } -static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, - bool rx_rf_shrink_on) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (rx_rf_shrink_on) { - /* Shrink RF Rx LPF corner */ - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Shrink RF Rx LPF corner!!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, - 0xfffff, 0xffffc); - } else { - /* Resume RF Rx LPF corner - * After initialized, we can use coex_dm->bt_rf0x1e_backup - */ - if (btcoexist->initilized) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Resume RF Rx LPF corner!!\n"); - btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, - 0x1e, 0xfffff, - coex_dm->bt_rf0x1e_backup); - } - } -} - -static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist, - bool force_exec, bool rx_rf_shrink_on) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s turn Rx RF Shrink = %s\n", - (force_exec ? "force to" : ""), - ((rx_rf_shrink_on) ? 
"ON" : "OFF")); - coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", - coex_dm->pre_rf_rx_lpf_shrink, - coex_dm->cur_rf_rx_lpf_shrink); - - if (coex_dm->pre_rf_rx_lpf_shrink == - coex_dm->cur_rf_rx_lpf_shrink) - return; - } - btc8821a2ant_set_sw_rf_rx_lpf_corner(btcoexist, - coex_dm->cur_rf_rx_lpf_shrink); - - coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; -} - -static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, - bool low_penalty_ra) +static void btc8821a2ant_set_sw_penalty_tx_rate_adaptive( + struct btc_coexist *btcoexist, bool low_penalty_ra) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[6] = {0}; @@ -858,14 +731,14 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, if (low_penalty_ra) { h2c_parameter[1] |= BIT0; - /*normal rate except MCS7/6/5, OFDM54/48/36 */ + /* normal rate except MCS7/6/5, OFDM54/48/36 */ h2c_parameter[2] = 0x00; - /*MCS7 or OFDM54 */ - h2c_parameter[3] = 0xf7; - /*MCS6 or OFDM48 */ - h2c_parameter[4] = 0xf8; - /*MCS5 or OFDM36 */ - h2c_parameter[5] = 0xf9; + /* MCS7 or OFDM54 */ + h2c_parameter[3] = 0xf5; + /* MCS6 or OFDM48 */ + h2c_parameter[4] = 0xa0; + /* MCS5 or OFDM36 */ + h2c_parameter[5] = 0xa0; } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -875,12 +748,11 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); } -static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, - bool force_exec, bool low_penalty_ra) +static void btc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) { struct rtl_priv *rtlpriv = btcoexist->adapter; - /*return;*/ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s turn LowPenaltyRA = %s\n", (force_exec ? 
"force to" : ""), @@ -891,19 +763,19 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", coex_dm->pre_low_penalty_ra, - coex_dm->cur_low_penalty_ra); + coex_dm->cur_low_penalty_ra); if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) return; } - btc8821a2ant_SetSwPenTxRateAdapt(btcoexist, + btc8821a2ant_set_sw_penalty_tx_rate_adaptive(btcoexist, coex_dm->cur_low_penalty_ra); coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; } -static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, - u32 level) +static void btc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, + u32 level) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 val = (u8)level; @@ -918,14 +790,14 @@ static void btc8821a2ant_set_sw_full_dac_swing(struct btc_coexist *btcoexist, u32 sw_dac_swing_lvl) { if (sw_dac_swing_on) - halbtc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl); + btc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl); else - halbtc8821a2ant_set_dac_swing_reg(btcoexist, 0x18); + btc8821a2ant_set_dac_swing_reg(btcoexist, 0x18); } -static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist, - bool force_exec, bool dac_swing_on, - u32 dac_swing_lvl) +static void btc8821a2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -958,50 +830,9 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist, coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; } -static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist, - bool adc_back_off) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (adc_back_off) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BB BackOff Level On!\n"); - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], BB BackOff Level Off!\n"); - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); - } -} - -static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist, - bool force_exec, bool adc_back_off) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], %s turn AdcBackOff = %s\n", - (force_exec ? "force to" : ""), - ((adc_back_off) ? 
"ON" : "OFF")); - coex_dm->cur_adc_back_off = adc_back_off; - - if (!force_exec) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", - coex_dm->pre_adc_back_off, - coex_dm->cur_adc_back_off); - - if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) - return; - } - halbtc8821a2ant_set_adc_back_off(btcoexist, coex_dm->cur_adc_back_off); - - coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off; -} - -static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist, - u32 val0x6c0, u32 val0x6c4, - u32 val0x6c8, u8 val0x6cc) +static void btc8821a2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1022,9 +853,9 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist, btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); } -static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, - bool force_exec, u32 val0x6c0, - u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) +static void btc8821a2ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1057,8 +888,8 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) return; } - halbtc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8, - val0x6cc); + btc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8, + val0x6cc); coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; @@ -1066,14 +897,97 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; } -static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, - bool enable) +static void btc8821a2ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_sta->coex_table_type = type; + + switch (type) { + case 0: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffffff, 0x3); + break; + case 1: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5afa5afa, 0xffffff, 0x3); + break; + case 2: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x5ada5ada, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 3: + btc8821a2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 4: + btc8821a2ant_coex_table(btcoexist, force_exec, 0xffffffff, + 0xffffffff, 0xffffff, 0x3); + break; + case 5: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0x5fff5fff, 0xffffff, 0x3); + break; + case 6: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 7: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 8: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 9: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 10: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 11: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 12: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 13: + 
btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 14: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0x5ada5ada, 0xffffff, 0x3); + break; + case 15: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x55dd55dd, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 16: + btc8821a2ant_coex_table(btcoexist, force_exec, 0x5fdf5fdf, + 0x5fdb5fdb, 0xffffff, 0x3); + break; + case 17: + btc8821a2ant_coex_table(btcoexist, force_exec, 0xfafafafa, + 0xfafafafa, 0xffffff, 0x3); + break; + default: + break; + } +} + +static void btc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, + bool enable) { struct rtl_priv *rtlpriv = btcoex->adapter; u8 h2c_parameter[1] = {0}; if (enable) - h2c_parameter[0] |= BIT0;/* function enable */ + h2c_parameter[0] |= BIT0; /* function enable */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", @@ -1082,8 +996,35 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter); } -static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, - bool force_exec, bool enable) +static void btc8821a2ant_set_lps_rpwm(struct btc_coexist *btcoexist, u8 lps_val, + u8 rpwm_val) +{ + u8 lps = lps_val; + u8 rpwm = rpwm_val; + + btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps); + btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm); +} + +static void btc8821a2ant_lps_rpwm(struct btc_coexist *btcoexist, + bool force_exec, u8 lps_val, u8 rpwm_val) +{ + coex_dm->cur_lps = lps_val; + coex_dm->cur_rpwm = rpwm_val; + + if (!force_exec) { + if ((coex_dm->pre_lps == coex_dm->cur_lps) && + (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) + return; + } + btc8821a2ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val); + + coex_dm->pre_lps = coex_dm->cur_lps; + coex_dm->pre_rpwm = coex_dm->cur_rpwm; +} + +static void btc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) { struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -1102,14 +1043,14 @@ static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, coex_dm->cur_ignore_wlan_act) return; } - halbtc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable); + btc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable); coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; } -static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist, - u8 byte1, u8 byte2, u8 byte3, - u8 byte4, u8 byte5) +static void btc8821a2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, + u8 byte1, u8 byte2, u8 byte3, + u8 byte4, u8 byte5) { struct rtl_priv *rtlpriv = btcoexist->adapter; u8 h2c_parameter[5]; @@ -1137,45 +1078,24 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); } -static void btc8821a2ant_sw_mech1(struct btc_coexist *btcoexist, - bool shrink_rx_lpf, - bool low_penalty_ra, bool limited_dig, - bool bt_lna_constrain) +static void btc8821a2ant_sw_mechanism1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool bt_lna_constrain) { - u32 wifi_bw; - - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - - if (BTC_WIFI_BW_HT40 != wifi_bw) { - /*only shrink RF Rx LPF for HT40*/ - if (shrink_rx_lpf) - shrink_rx_lpf = false; - } - - halbtc8821a2ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); - halbtc8821a2ant_low_penalty_ra(btcoexist, - NORMAL_EXEC, 
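Most arms of the coex-table switch above differ only in the two 32-bit values written to 0x6c0 and 0x6c4; the 0x6c8 mask and the 0x6cc byte stay at 0xffffff and 0x3 throughout. The same mapping can be phrased as a lookup table; a sketch with the first six entries transcribed from the switch:

#include <stdint.h>
#include <stdio.h>

struct coex_tbl { uint32_t val0x6c0, val0x6c4; };

/* a subset of the type -> register-value mapping from the switch above */
static const struct coex_tbl coex_tables[] = {
	[0] = { 0x55555555, 0x55555555 },
	[1] = { 0x55555555, 0x5afa5afa },
	[2] = { 0x5ada5ada, 0x5ada5ada },
	[3] = { 0xaaaaaaaa, 0xaaaaaaaa },
	[4] = { 0xffffffff, 0xffffffff },
	[5] = { 0x5fff5fff, 0x5fff5fff },
};

int main(void)
{
	unsigned n = sizeof(coex_tables) / sizeof(coex_tables[0]);

	for (unsigned t = 0; t < n; t++)
		printf("type %u: 0x6c0=%08x 0x6c4=%08x (0x6c8=0xffffff, 0x6cc=0x3)\n",
		       t, (unsigned)coex_tables[t].val0x6c0,
		       (unsigned)coex_tables[t].val0x6c4);
	return 0;
}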
low_penalty_ra); - - /* no limited DIG - * btc8821a2_set_bt_lna_const(btcoexist, - NORMAL_EXEC, bBTLNAConstrain); - */ + btc8821a2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); } -static void btc8821a2ant_sw_mech2(struct btc_coexist *btcoexist, - bool agc_table_shift, - bool adc_back_off, bool sw_dac_swing, - u32 dac_swing_lvl) +static void btc8821a2ant_sw_mechanism2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_back_off, + bool sw_dac_swing, u32 dac_swing_lvl) { - /* halbtc8821a2ant_AgcTable(btcoexist, NORMAL_EXEC, bAGCTableShift); */ - halbtc8821a2ant_adc_back_off(btcoexist, NORMAL_EXEC, adc_back_off); - halbtc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, - sw_dac_swing); + btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, + dac_swing_lvl); } -static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, - u8 ant_pos_type, bool init_hw_cfg, - bool wifi_off) +static void btc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) { struct btc_board_info *board_info = &btcoexist->board_info; u32 u4tmp = 0; @@ -1189,21 +1109,18 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, btcoexist->btc_write_4byte(btcoexist, 0x4c, u4tmp); btcoexist->btc_write_4byte(btcoexist, 0x974, 0x3ff); - btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77); if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { - /* tell firmware "antenna inverse" ==> - * WRONG firmware antenna control code. - * ==>need fw to fix + /* tell firmware "antenna inverse" ==> WRONG firmware + * antenna control code ==>need fw to fix */ h2c_parameter[0] = 1; h2c_parameter[1] = 1; btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, h2c_parameter); } else { - /* tell firmware "no antenna inverse" - * ==> WRONG firmware antenna control code. - * ==>need fw to fix + /* tell firmware "no antenna inverse" ==> WRONG firmware + * antenna control code ==>need fw to fix */ h2c_parameter[0] = 0; h2c_parameter[1] = 1; @@ -1223,11 +1140,25 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, } } -static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, - bool force_exec, bool turn_on, u8 type) +static void btc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) { struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 wifi_rssi_state, bt_rssi_state; + + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, + BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); + + if (!(BTC_RSSI_HIGH(wifi_rssi_state) && + BTC_RSSI_HIGH(bt_rssi_state)) && + turn_on) { + /* for WiFi RSSI low or BT RSSI low */ + type = type + 100; + } + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], %s turn %s PS TDMA, type = %d\n", (force_exec ? "force to" : ""), (turn_on ? 
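The new RSSI check at the head of btc8821a2ant_ps_tdma above bumps the requested TDMA case into the 1xx range whenever either radio reports weak RSSI, which is why the case list now carries derated twins such as 24/124 and 22/122. The selection step in isolation:

#include <stdbool.h>
#include <stdio.h>

enum rssi { RSSI_LOW, RSSI_HIGH };

static int effective_tdma_type(int type, bool turn_on,
			       enum rssi wifi, enum rssi bt)
{
	/* weak WiFi or BT RSSI: use the derated 1xx parameter set */
	if (turn_on && !(wifi == RSSI_HIGH && bt == RSSI_HIGH))
		return type + 100;
	return type;
}

int main(void)
{
	printf("%d\n", effective_tdma_type(24, true, RSSI_HIGH, RSSI_HIGH)); /* 24 */
	printf("%d\n", effective_tdma_type(24, true, RSSI_HIGH, RSSI_LOW));  /* 124 */
	printf("%d\n", effective_tdma_type(24, false, RSSI_LOW, RSSI_LOW));  /* 24, off path */
	return 0;
}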
"ON" : "OFF"), @@ -1251,108 +1182,181 @@ static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, switch (type) { case 1: default: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x03, 0xf1, 0x90); break; case 2: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12, - 0x12, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d, + 0x03, 0xf1, 0x90); break; case 3: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c, - 0x3, 0xf1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); break; case 4: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x10, - 0x03, 0xf1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x03, 0xf1, 0x90); break; case 5: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x3, 0x70, 0x90); break; case 6: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12, - 0x12, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d, + 0x3, 0x70, 0x90); break; case 7: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c, - 0x3, 0x70, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); break; case 8: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x10, - 0x3, 0x70, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10, + 0x3, 0x70, 0x90); break; case 9: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x03, 0xf1, 0x90); break; case 10: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12, - 0x12, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d, + 0x03, 0xf1, 0x90); break; case 11: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, - 0xa, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); break; case 12: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5, - 0x5, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x3, 0xf1, 0x90); break; case 13: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x3, 0x70, 0x90); break; case 14: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, - 0x12, 0x12, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x2d, + 0x3, 0x70, 0x90); break; case 15: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, - 0xa, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); break; case 16: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5, - 0x5, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x3, 0x70, 0x90); break; case 17: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x2f, - 0x2f, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f, + 0x2f, 0x60, 0x90); break; case 18: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5, - 0x5, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, 0x5, + 0xe1, 0x90); break; case 19: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, - 0x25, 0xe1, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x25, 0xe1, 0x90); break; case 20: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, - 0x25, 0x60, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x25, 0x60, 0x90); break; case 21: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x15, - 0x03, 0x70, 0x90); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 
0x15, + 0x03, 0x70, 0x90); break; - case 71: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, - 0x1a, 0xe1, 0x90); + case 23: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1e, + 0x03, 0xf0, 0x14); break; - } - } else { - /* disable PS tdma */ - switch (type) { - case 0: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, - 0x40, 0x0); + case 24: + case 124: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x3c, + 0x03, 0x70, 0x50); + break; + case 25: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x14, + 0x03, 0xf1, 0x90); + break; + case 26: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x30, + 0x03, 0xf1, 0x90); + break; + case 71: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x03, 0xf1, 0x90); + break; + case 101: + case 105: + case 171: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x3a, + 0x03, 0x70, 0x50); + break; + case 102: + case 106: + case 110: + case 114: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x2d, + 0x03, 0x70, 0x50); + break; + case 103: + case 107: + case 111: + case 115: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c, + 0x03, 0x70, 0x50); + break; + case 104: + case 108: + case 112: + case 116: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x10, + 0x03, 0x70, 0x50); + break; + case 109: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x03, 0xf1, 0x90); + break; + case 113: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x3c, + 0x03, 0x70, 0x90); + break; + case 121: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, 0x90); + break; + case 22: + case 122: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x35, + 0x03, 0x71, 0x11); + break; + case 123: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1c, + 0x03, 0x70, 0x54); + break; + case 125: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x14, + 0x03, 0x70, 0x50); + break; + case 126: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x30, + 0x03, 0x70, 0x50); + break; + } + } else { + /* disable PS tdma */ + switch (type) { + case 0: + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); break; case 1: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, - 0x48, 0x0); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x48, 0x0); break; default: - halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, - 0x40, 0x0); + btc8821a2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); break; } } @@ -1362,867 +1366,450 @@ static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; } -static void halbtc8821a2ant_coex_all_off(struct btc_coexist *btcoexist) +static void +btc8821a2ant_ps_tdma_check_for_power_save_state(struct btc_coexist *btcoexist, + bool new_ps_state) +{ + u8 lps_mode = 0x0; + + btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode); + + if (lps_mode) { + /* already under LPS state */ + if (new_ps_state) { + /* keep state under LPS, do nothing */ + } else { + /* will leave LPS state, turn off psTdma first */ + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } + } else { + /* NO PS state */ + if (new_ps_state) { + /* will enter LPS state, turn off psTdma first */ + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } else { + /* keep state under NO PS state, do nothing */ + } + } +} + +static void btc8821a2ant_power_save_state(struct btc_coexist *btcoexist, + u8 ps_type, u8 lps_val, u8 rpwm_val) +{ + bool low_pwr_disable = false; + + switch (ps_type) { + case BTC_PS_WIFI_NATIVE: + /* recover to original 32k low power setting */ + low_pwr_disable = 
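btc8821a2ant_ps_tdma_check_for_power_save_state above encodes an ordering rule: psTdma is switched off before the power-save state flips in either direction, so the firmware never runs a TDMA schedule across an LPS transition. Its four branches collapse to a single inequality test, sketched here with stubs in place of the btc_set hooks:

#include <stdbool.h>
#include <stdio.h>

static bool lps_on;		/* stand-in for BTC_GET_U1_LPS_MODE */

static void ps_tdma_off(void)	{ printf("psTdma off\n"); }
static void enter_lps(void)	{ printf("enter LPS\n"); lps_on = true; }
static void leave_lps(void)	{ printf("leave LPS\n"); lps_on = false; }

/* turn psTdma off only when the PS state is about to flip */
static void check_ps_tdma(bool new_ps_state)
{
	if (lps_on != new_ps_state)
		ps_tdma_off();
}

int main(void)
{
	check_ps_tdma(true);	/* NO PS -> LPS: psTdma off first */
	enter_lps();
	check_ps_tdma(false);	/* LPS -> NO PS: psTdma off first */
	leave_lps();
	return 0;
}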
false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + coex_sta->force_lps_on = false; + break; + case BTC_PS_LPS_ON: + btc8821a2ant_ps_tdma_check_for_power_save_state(btcoexist, + true); + btc8821a2ant_lps_rpwm(btcoexist, NORMAL_EXEC, lps_val, + rpwm_val); + /* when coex forces LPS, do not enter 32k low power */ + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + /* power save must be executed before psTdma */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + coex_sta->force_lps_on = true; + break; + case BTC_PS_LPS_OFF: + btc8821a2ant_ps_tdma_check_for_power_save_state(btcoexist, + false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + coex_sta->force_lps_on = false; + break; + default: + break; + } +} + +static void btc8821a2ant_coex_all_off(struct btc_coexist *btcoexist) { /* fw all off */ - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); /* sw all off */ - btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); /* hw all off */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, - 0x55555555, 0x55555555, 0xffff, 0x3); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); } -static void halbtc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist) +static void btc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist) { - halbtc8821a2ant_coex_all_off(btcoexist); + btc8821a2ant_coex_all_off(btcoexist); + btc8821a2ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true); } -static void halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist) +static void btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist) { /* force to reset coex mechanism */ - halbtc8821a2ant_coex_table(btcoexist, FORCE_EXEC, 0x55555555, - 0x55555555, 0xffff, 0x3); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); - halbtc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + btc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, 0); - btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); } -static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist) { + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + bool wifi_connected = false; bool low_pwr_disable = true; + bool scan = false, link
= false, roam = false; + + wifi_rssi_state = + btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); - - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); -} - -static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - bool common = false, wifi_connected = false, wifi_busy = false; - bool low_pwr_disable = false; - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); - btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - if (!wifi_connected && - BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status) { - low_pwr_disable = false; - btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, - &low_pwr_disable); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + if (scan || link || roam) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi link process + BT Inq/Page!!\n"); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + } else if (wifi_connected) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT Inq/Page!!\n"); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi IPS + BT IPS!!\n"); + "[BTCoex], Wifi no-link + BT Inq/Page!!\n"); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); +} - common = true; - } else if (wifi_connected && - (BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)) { - low_pwr_disable = false; - btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, - &low_pwr_disable); +void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 u8tmpa, u8tmpb; - if (wifi_busy) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi Busy + BT IPS!!\n"); - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi LPS + BT IPS!!\n"); - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } + btc8821a2ant_coex_table_with_type(btcoexist, 
NORMAL_EXEC, 15); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); - btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765); + u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e); - common = true; - } else if (!wifi_connected && - (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) { - low_pwr_disable = true; - btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, - &low_pwr_disable); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb); +} - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi IPS + BT LPS!!\n"); +static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 ap_num = 0; - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + wifi_rssi_state = + btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES - 20, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); - common = true; - } else if (wifi_connected && - (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) { - low_pwr_disable = true; - btcoexist->btc_set(btcoexist, - BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); - if (wifi_busy) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi Busy + BT LPS!!\n"); - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi LPS + BT LPS!!\n"); - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } + /* define the office environment */ + if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) && + (coex_sta->a2dp_exist)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n"); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btc8821a2ant_sw_mech1(btcoexist, true, true, true, true); - btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + /* sw all off */ + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, + false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); - common = true; - } else if (!wifi_connected && - (BT_8821A_2ANT_BT_STATUS_NON_IDLE == - coex_dm->bt_status)) { - low_pwr_disable = false; - btcoexist->btc_set(btcoexist, - BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + 
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + return true; + } else if (coex_sta->pan_exist) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi IPS + BT Busy!!\n"); - - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); - - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + "[BTCoex], Wifi idle process for BT PAN exist!!\n"); - common = true; - } else { - low_pwr_disable = true; - btcoexist->btc_set(btcoexist, - BTC_SET_ACT_DISABLE_LOW_POWER, - &low_pwr_disable); + btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - if (wifi_busy) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi Busy + BT Busy!!\n"); - common = false; - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], Wifi LPS + BT Busy!!\n"); - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, true, 21); + /* sw all off */ + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, + false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, - NORMAL_EXEC, true); - else - halbtc8821a2ant_dec_bt_pwr(btcoexist, - NORMAL_EXEC, false); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - common = true; - } - btc8821a2ant_sw_mech1(btcoexist, true, true, true, true); + return true; } - return common; + btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x18); + return false; } -static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, - int result) +static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; + bool common = false, wifi_connected = false, wifi_busy = false; + bool low_pwr_disable = false; + bool bt_hs_on = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!wifi_connected) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, + 0x8); - if (tx_pause) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - - if (coex_dm->cur_ps_tdma == 71) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type = 13; - } else if (coex_dm->cur_ps_tdma == 10) { - 
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } + "[BTCoex], Wifi non-connected idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, + false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type = 13; - } - } + common = true; } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 71); - coex_dm->tdma_adj_type = 71; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma 
== 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } + if (BT_8821A_2ANT_BT_STATUS_IDLE == + coex_dm->bt_status) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, + false, false, 0x8); - if (result == -1) { - if (coex_dm->cur_ps_tdma == 71) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; - } else if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 71); - coex_dm->tdma_adj_type = 71; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; - } - } - } -} + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); -static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, - int result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + btc8821a2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - 
coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 13) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - 
coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; - } - } - } -} + btc8821a2ant_power_save_state( + btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 0xb); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); -static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause, - int result) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); - if (tx_pause) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 1\n"); - if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 4) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } - if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 5) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 8); - coex_dm->tdma_adj_type = 8; - } 
else if (coex_dm->cur_ps_tdma == 13) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 16); - coex_dm->tdma_adj_type = 16; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 8) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 15); - coex_dm->tdma_adj_type = 15; - } - } - } else { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], TxPause = 0\n"); - if (coex_dm->cur_ps_tdma == 5) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 6) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 7) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 8) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } - if (coex_dm->cur_ps_tdma == 13) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 14) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 15) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 16) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - if (result == -1) { - if (coex_dm->cur_ps_tdma == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 4); - coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 9) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 12); - coex_dm->tdma_adj_type = 12; - } - } else if (result == 1) { - if (coex_dm->cur_ps_tdma == 4) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 3) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if 
(coex_dm->cur_ps_tdma == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; - } else if (coex_dm->cur_ps_tdma == 12) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 11) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; - } else if (coex_dm->cur_ps_tdma == 10) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; + common = true; + } else if (BT_8821A_2ANT_BT_STATUS_CON_IDLE == + coex_dm->bt_status) { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (bt_hs_on) + return false; + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, + false, false, 0x8); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + btc8821a2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + + btc8821a2ant_power_save_state( + btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 0xb); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + common = true; + } else { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + common = false; + } else { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + common = + btc8821a2ant_action_wifi_idle_process( + btcoexist); } } } + return common; } -static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, - bool sco_hid, bool tx_pause, - u8 max_interval) +static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) { struct rtl_priv *rtlpriv = btcoexist->adapter; - static long up, dn, m, n, wait_count; - /* 0: no change, +1: increase WiFi duration, + static long up, dn, m, n, wait_count; + /* 0 : no change + * +1: increase WiFi duration * -1: decrease WiFi duration */ - int result; - u8 retry_count = 0; + int result; + u8 retry_count = 0; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], TdmaDurationAdjust()\n"); - if (coex_dm->reset_tdma_adjust) { - coex_dm->reset_tdma_adjust = false; + if (coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], first run TdmaDurationAdjust()!!\n"); if (sco_hid) { if (tx_pause) { if (max_interval == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 13); - coex_dm->tdma_adj_type = 13; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 13); + coex_dm->ps_tdma_du_adj_type = 13; } else if (max_interval == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 14); - coex_dm->tdma_adj_type = 14; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (max_interval == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; } else { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 15); - 
coex_dm->tdma_adj_type = 15; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; } } else { if (max_interval == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 9); - coex_dm->tdma_adj_type = 9; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 9); + coex_dm->ps_tdma_du_adj_type = 9; } else if (max_interval == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 10); - coex_dm->tdma_adj_type = 10; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (max_interval == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; } else { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 11); - coex_dm->tdma_adj_type = 11; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; } } } else { if (tx_pause) { if (max_interval == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 5); - coex_dm->tdma_adj_type = 5; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 5); + coex_dm->ps_tdma_du_adj_type = 5; } else if (max_interval == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 6); - coex_dm->tdma_adj_type = 6; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (max_interval == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; } else { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 7); - coex_dm->tdma_adj_type = 7; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; } } else { if (max_interval == 1) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 1); - coex_dm->tdma_adj_type = 1; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 1); + coex_dm->ps_tdma_du_adj_type = 1; } else if (max_interval == 2) { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 2); - coex_dm->tdma_adj_type = 2; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (max_interval == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; } else { - halbtc8821a2ant_ps_tdma(btcoexist, - NORMAL_EXEC, - true, 3); - coex_dm->tdma_adj_type = 3; + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; } } } @@ -2273,7 +1860,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, up = 0; if (dn == 2) { - /* if retry count< 3 for 2*2 seconds, + /* if retry count < 3 for 2*2 seconds, * shrink wifi duration */ if (wait_count <= 2) @@ -2286,7 +1873,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, if (m >= 20) m = 20; - n = 3*m; + n = 3 * m; up = 0; dn = 0; wait_count = 0; @@ -2308,7 +1895,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, if (m >= 20) m = 20; - n = 3*m; + n = 3 * m; up = 0; dn = 0; wait_count = 0; @@ -2316,627 +1903,1316 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); } - - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], max Interval = %d\n", max_interval); - if (max_interval == 1) - btc8821a2_int1(btcoexist, tx_pause, result); - else if (max_interval == 2) - btc8821a2_int2(btcoexist, tx_pause, result); - else if (max_interval == 3) - 
btc8821a2_int3(btcoexist, tx_pause, result); + + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], max Interval = %d\n", max_interval); + + if (max_interval == 1) { + if (tx_pause) { + if (coex_dm->cur_ps_tdma == 71) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 13); + coex_dm->ps_tdma_du_adj_type = 13; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = + 5; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = + 13; + } + } + } else { + if (coex_dm->cur_ps_tdma == 5) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 71); + coex_dm->ps_tdma_du_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + 
btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = + 71; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = + 9; + } + } + } + } else if (max_interval == 2) { + if (tx_pause) { + if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 14); + 
coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } + } + } else { + if (coex_dm->cur_ps_tdma == 5) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + 
btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } + } + } + } else if (max_interval == 3) { + if (tx_pause) { + if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + 
true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } + } + } else { + if (coex_dm->cur_ps_tdma == 5) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if 
(coex_dm->cur_ps_tdma == 11) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8821a2ant_ps_tdma( + btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } + } + } + } } /* if current PsTdma not match with the recorded one * (when scan, dhcp...), then we have to adjust it back to * the previous recorded one. */ - if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { - bool scan = false, link = false, roam = false; + if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) { + bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", - coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); if (!scan && !link && !roam) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, - coex_dm->tdma_adj_type); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, + coex_dm->ps_tdma_du_adj_type); } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); } } - - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0x6); } /* SCO only or SCO+PAN(HS)*/ -static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_sco(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + u8 wifi_rssi_state, bt_rssi_state; u32 wifi_bw; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { + if (wifi_bw == BTC_WIFI_BW_LEGACY) { /* for SCO quality at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, - 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); } else { /* for SCO quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, - 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3); + if (wifi_bw == BTC_WIFI_BW_HT40) { + btc8821a2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 8); + } else { + if (bt_link_info->sco_only) + btc8821a2ant_coex_table_with_type( + btcoexist, NORMAL_EXEC, 17); + else + btc8821a2ant_coex_table_with_type( + btcoexist, NORMAL_EXEC, 12); + } } - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism - * 
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - */ - - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + /* for voice quality */ + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); - /* sw mechanism */ + /* sw mechanism */ + if (wifi_bw == BTC_WIFI_BW_HT40) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + true, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + true, 0x18); } } else { - /* fw mechanism - * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ - } - - /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + true, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + true, 0x18); } } } -static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_hid(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; - u32 wifi_bw; + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { + if (wifi_bw == BTC_WIFI_BW_LEGACY) { /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); + btc8821a2ant_coex_table_with_type(btcoexist, 
NORMAL_EXEC, 7); } else { /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5aea5aea, 0xffff, 0x3); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); } - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - } + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 24); + if (wifi_bw == BTC_WIFI_BW_HT40) { /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 9); - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 13); - } - /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ -static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; - u32 wifi_bw; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 ap_num = 0; + u32 wifi_bw; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - /* fw dac swing is called in btc8821a2ant_tdma_dur_adj() - * halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - */ + if ((ap_num >= 10) && BTC_RSSI_HIGH(wifi_rssi_state1) && + BTC_RSSI_HIGH(bt_rssi_state)) { + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); - else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 
false); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, + 0x8); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (wifi_bw == BTC_WIFI_BW_HT40) { + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + true, 0x6); } else { - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + true, 0x6); } + return; + } - /* sw mechanism */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + else + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23); + } else { + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23); + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (wifi_bw == BTC_WIFI_BW_HT40) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1); - } else { - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); - } - - /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + 
btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; - u32 wifi_bw; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u32 wifi_bw; - bt_info_ext = coex_sta->bt_info_ext; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - /*fw dac swing is called in btc8821a2ant_tdma_dur_adj() - *halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism */ - if (bt_info_ext&BIT0) { - /*a2dp basic rate*/ - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2); - } else { - /*a2dp edr rate*/ - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); - } + btc8821a2ant_tdma_duration_adjust(btcoexist, false, true, 2); - /* sw mechanism */ + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (wifi_bw == BTC_WIFI_BW_HT40) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - /* fw mechanism */ - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); - } - - /* sw 
mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; - u32 wifi_bw; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u32 wifi_bw; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); - else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5aff5aff, 0xffff, 0x3); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); } else { - /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5aff5aff, 0xffff, 0x3); + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); } - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - } + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26); + else + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26); - /* sw mechanism */ + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (wifi_bw == BTC_WIFI_BW_HT40) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - 
btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 1); - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 5); - } - - /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } /* PAN(HS) only */ -static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; - u32 wifi_bw; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u32 wifi_bw; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism */ - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, - true); - } else { - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, - false); - } - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + else + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - /* sw mechanism */ + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (wifi_bw == BTC_WIFI_BW_HT40) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + 
btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - /* fw mechanism */ - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_dec_bt_pwr(btcoexist, - NORMAL_EXEC, true); - } else { - halbtc8821a2ant_dec_bt_pwr(btcoexist, - NORMAL_EXEC, false); - } - - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); - - /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } /* PAN(EDR)+A2DP */ -static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; - u32 wifi_bw; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u32 wifi_bw; - bt_info_ext = coex_sta->bt_info_ext; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); + else + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(wifi_rssi_state1) && BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 12); - if (BTC_WIFI_BW_HT40 == wifi_bw) { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) - btc8821a2ant_tdma_dur_adj(btcoexist, 
false, - false, 3); + if (wifi_bw == BTC_WIFI_BW_HT40) + btc8821a2ant_tdma_duration_adjust(btcoexist, false, + true, 3); else - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); + btc8821a2ant_tdma_duration_adjust(btcoexist, false, + false, 3); + } else { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 13); + btc8821a2ant_tdma_duration_adjust(btcoexist, false, true, 3); + } - /* sw mechanism */ + /* sw mechanism */ + if (wifi_bw == BTC_WIFI_BW_HT40) { if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) - btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3); - else - btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3); - - /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, false, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; - u32 wifi_bw; + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5f5a5f, 0xffff, 0x3); + if (wifi_bw == BTC_WIFI_BW_LEGACY) { + /* for HID at 11b/g mode */ + btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 
0x3); + } - if (BTC_WIFI_BW_HT40 == wifi_bw) { - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3); + if (wifi_bw == BTC_WIFI_BW_HT40) { + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3); /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); } /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 10); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); } /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } @@ -2944,42 +3220,70 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) /* HID+A2DP+PAN(EDR) */ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; - u32 wifi_bw; + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; bt_info_ext = coex_sta->bt_info_ext; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); - halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 2); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + 
btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); + if (wifi_bw == BTC_WIFI_BW_LEGACY) { + /* for HID at 11b/g mode */ + btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + } if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3); + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, 3); + } + } else { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, 3); + } + } /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { /* fw mechanism */ @@ -2987,103 +3291,183 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { if (bt_info_ext&BIT0) { /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - false, 3); + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, false, 3); } else { /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - false, 3); + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, false, 3); } } else { if (bt_info_ext&BIT0) { /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 3); } else { /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 3); } } /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state, 
bt_info_ext; - u32 wifi_bw; + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; bt_info_ext = coex_sta->bt_info_ext; - wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, - 15, 0); - bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); + wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0); - if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + if (BTC_RSSI_HIGH(bt_rssi_state)) + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); else - halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5f5b5f5b, 0xffffff, 0x3); + if (wifi_bw == BTC_WIFI_BW_LEGACY) { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } else { + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 14); + btc8821a2ant_power_save_state(btcoexist, BTC_PS_LPS_ON, 0x50, + 0x4); + } if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2); + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext & BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } + } else { + if (bt_info_ext & BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } + } /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, true, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } else { /* fw mechanism */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2); + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext & BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } + } else { + if (bt_info_ext & BIT0) { + /*a2dp basic rate*/ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } else { + /*a2dp edr rate*/ + btc8821a2ant_tdma_duration_adjust(btcoexist, + true, true, + 2); + } + } /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, true, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + 
btc8821a2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); } else { - btc8821a2ant_sw_mech1(btcoexist, false, true, - false, false); - btc8821a2ant_sw_mech2(btcoexist, false, false, - false, 0x18); + btc8821a2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); } } } -static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +static void btc8821a2ant_action_wifi_multi_port(struct btc_coexist *btcoexist) +{ + btc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0); + + /* sw all off */ + btc8821a2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + + /* hw all off */ + btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + + btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); +} + +static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - bool wifi_under_5g = false; - u8 algorithm = 0; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_under_5g = false; + u8 algorithm = 0; + u32 num_of_wifi_link = 0; + u32 wifi_link_status = 0; + bool miracast_plus_bt = false; + bool scan = false, link = false, roam = false; if (btcoexist->manual_control) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, @@ -3091,30 +3475,73 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) return; } - btcoexist->btc_get(btcoexist, - BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); if (wifi_under_5g) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); - halbtc8821a2ant_coex_under_5g(btcoexist); + btc8821a2ant_coex_under_5g(btcoexist); + return; + } + + if (coex_sta->under_ips) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], wifi is under IPS !!!\n"); return; } - algorithm = halbtc8821a2ant_action_algorithm(btcoexist); + algorithm = btc8821a2ant_action_algorithm(btcoexist); if (coex_sta->c2h_bt_inquiry_page && (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], BT is under inquiry/page scan !!\n"); - halbtc8821a2ant_bt_inquiry_page(btcoexist); + btc8821a2ant_action_bt_inquiry(btcoexist); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "[BTCoex], WiFi is under Link Process !!\n"); + btc8821a2ant_action_wifi_link_process(btcoexist); + return; + } + + /* for P2P */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + + if ((num_of_wifi_link >= 2) || + (wifi_link_status & WIFI_P2P_GO_CONNECTED)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n", + num_of_wifi_link, wifi_link_status); + + if (bt_link_info->bt_link_exist) + miracast_plus_bt = true; + else + miracast_plus_bt = false; + + btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT, + &miracast_plus_bt); + 
btc8821a2ant_action_wifi_multi_port(btcoexist);
+		return;
+	}
+
+	miracast_plus_bt = false;
+	btcoexist->btc_set(btcoexist, BTC_SET_BL_MIRACAST_PLUS_BT,
+			   &miracast_plus_bt);
+
 	coex_dm->cur_algorithm = algorithm;
 	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 		 "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);

-	if (halbtc8821a2ant_is_common_action(btcoexist)) {
+	if (btc8821a2ant_is_common_action(btcoexist)) {
 		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 			 "[BTCoex], Action 2-Ant common\n");
 		coex_dm->reset_tdma_adjust = true;
@@ -3130,42 +3557,42 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 		case BT_8821A_2ANT_COEX_ALGO_SCO:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = SCO\n");
-			halbtc8821a2ant_action_sco(btcoexist);
+			btc8821a2ant_action_sco(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_HID:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = HID\n");
-			halbtc8821a2ant_action_hid(btcoexist);
+			btc8821a2ant_action_hid(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_A2DP:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
-			halbtc8821a2ant_action_a2dp(btcoexist);
+			btc8821a2ant_action_a2dp(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
-			halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
+			btc8821a2ant_action_a2dp_pan_hs(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANEDR:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
-			halbtc8821a2ant_action_pan_edr(btcoexist);
+			btc8821a2ant_action_pan_edr(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANHS:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
-			halbtc8821a2ant_action_pan_hs(btcoexist);
+			btc8821a2ant_action_pan_hs(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
-			halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
+			btc8821a2ant_action_pan_edr_a2dp(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
-			halbtc8821a2ant_action_pan_edr_hid(btcoexist);
+			btc8821a2ant_action_pan_edr_hid(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3175,26 +3602,22 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 		case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
-			halbtc8821a2ant_action_hid_a2dp(btcoexist);
+			btc8821a2ant_action_hid_a2dp(btcoexist);
 			break;
 		default:
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
-			halbtc8821a2ant_coex_all_off(btcoexist);
+			btc8821a2ant_coex_all_off(btcoexist);
 			break;
 		}
 		coex_dm->pre_algorithm = coex_dm->cur_algorithm;
 	}
 }
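The multi-port test in the function above packs two facts into one u32: the upper half-word of wifi_link_status carries the number of active links, and the low bits are per-role flags such as WIFI_P2P_GO_CONNECTED. A minimal standalone sketch of that decision follows; the exact bit position of the GO-connected flag is an assumption here, for illustration only.

    /* sketch of the multi-port check; WIFI_P2P_GO_CONNECTED bit is assumed */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WIFI_P2P_GO_CONNECTED (1U << 3)	/* assumed bit position */

    static bool is_multi_port(uint32_t wifi_link_status)
    {
        uint32_t num_of_wifi_link = wifi_link_status >> 16;	/* link count */

        return num_of_wifi_link >= 2 ||
               (wifi_link_status & WIFI_P2P_GO_CONNECTED);
    }

    int main(void)
    {
        /* two links reported in the high half-word -> multi-port path */
        printf("%d\n", is_multi_port(0x00020000));	/* 1 */
        /* one link, but acting as P2P GO -> still multi-port */
        printf("%d\n", is_multi_port(0x00010008));	/* 1 */
        /* plain single STA link -> normal coex algorithm selection */
        printf("%d\n", is_multi_port(0x00010000));	/* 0 */
        return 0;
    }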
-/*============================================================
- *work around function start with wa_halbtc8821a2ant_
- *============================================================
- *============================================================
- * extern function start with EXhalbtc8821a2ant_
- *============================================================
- */
-void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
+/**************************************************************
+ * extern function start with ex_btc8821a2ant_
+ **************************************************************/
+void ex_btc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
 	u8 u1tmp = 0;
@@ -3212,36 +3635,30 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
 	u1tmp |= 0x5;
 	btcoexist->btc_write_1byte(btcoexist, 0x790, u1tmp);

-	/*Antenna config */
-	halbtc8821a2ant_set_ant_path(btcoexist,
-				     BTC_ANT_WIFI_AT_MAIN, true, false);
+	/* Antenna config */
+	btc8821a2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, true, false);

 	/* PTA parameter */
-	halbtc8821a2ant_coex_table(btcoexist,
-				   FORCE_EXEC, 0x55555555, 0x55555555,
-				   0xffff, 0x3);
+	btc8821a2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);

 	/* Enable counter statistics */
-	/*0x76e[3] = 1, WLAN_Act control by PTA*/
-	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+	/* 0x76e[3] = 1, WLAN_Act control by PTA */
+	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4);
 	btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
 	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
 }

-void ex_halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

 	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 		 "[BTCoex], Coex Mechanism Init!!\n");
-	halbtc8821a2ant_init_coex_dm(btcoexist);
+	btc8821a2ant_init_coex_dm(btcoexist);
 }
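init_hwconfig now programs the PTA through btc8821a2ant_coex_table_with_type() instead of spelling out the four coex-table register values at every call site. A sketch of that indirection is below: row 0 mirrors the 0x55555555/0x55555555 pair the old init call used, row 2 the 0x5a5a5a5a pair the SCO path selects, and the table is otherwise an illustrative placeholder, not the driver's real mapping.

    /* sketch of the type -> coex-table-values indirection */
    #include <stdint.h>
    #include <stdio.h>

    struct coex_table_para {
        uint32_t val0x6c0;
        uint32_t val0x6c4;
        uint32_t val0x6c8;
        uint8_t val0x6cc;
    };

    /* row 0 and row 2 are suggested by the call sites above; the rest of
     * the real table is not reproduced here */
    static const struct coex_table_para coex_table[] = {
        [0] = { 0x55555555, 0x55555555, 0xffff, 0x3 },	/* PTA init */
        [2] = { 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3 },	/* SCO at 11b/g */
    };

    static void coex_table_with_type(uint8_t type)
    {
        const struct coex_table_para *p = &coex_table[type];

        /* in the driver these become writes to registers 0x6c0..0x6cc */
        printf("0x6c0=0x%08x 0x6c4=0x%08x 0x6c8=0x%08x 0x6cc=0x%x\n",
               (unsigned int)p->val0x6c0, (unsigned int)p->val0x6c4,
               (unsigned int)p->val0x6c8, p->val0x6cc);
    }

    int main(void)
    {
        coex_table_with_type(0);
        coex_table_with_type(2);
        return 0;
    }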
-void
-ex_halbtc8821a2ant_display_coex_info(
-	struct btc_coexist *btcoexist
-	)
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
 {
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
@@ -3397,7 +3814,7 @@ ex_halbtc8821a2ant_display_coex_info(
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
 		 "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
-		 coex_dm->cur_dec_bt_pwr,
+		 coex_dm->cur_dec_bt_pwr_lvl,
 		 coex_dm->cur_ignore_wlan_act);
 }
@@ -3475,7 +3892,7 @@ ex_halbtc8821a2ant_display_coex_info(
 	btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
 }

-void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

@@ -3483,16 +3900,15 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 			 "[BTCoex], IPS ENTER notify\n");
 		coex_sta->under_ips = true;
-		halbtc8821a2ant_coex_all_off(btcoexist);
+		btc8821a2ant_coex_all_off(btcoexist);
 	} else if (BTC_IPS_LEAVE == type) {
 		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 			 "[BTCoex], IPS LEAVE notify\n");
 		coex_sta->under_ips = false;
-		/*halbtc8821a2ant_init_coex_dm(btcoexist);*/
 	}
 }

-void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

@@ -3507,7 +3923,7 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 	}
 }

-void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

@@ -3520,7 +3936,7 @@ void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 	}
 }

-void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+void ex_btc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

@@ -3533,13 +3949,14 @@ void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 	}
 }

-void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
-					    u8 type)
+void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
+					 u8 type)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
-	u8 h2c_parameter[3] = {0};
-	u32 wifi_bw;
-	u8 wifi_central_chnl;
+	u8 h2c_parameter[3] = {0};
+	u32 wifi_bw;
+	u8 wifi_central_chnl;
+	u8 ap_num = 0;

 	if (BTC_MEDIA_CONNECT == type) {
 		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
@@ -3549,7 +3966,7 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
 			 "[BTCoex], MEDIA disconnect notify\n");
 	}

-	/* only 2.4G we need to inform bt the chnl mask*/
+	/* only 2.4G we need to inform bt the chnl mask */
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
 			   &wifi_central_chnl);
 	if ((BTC_MEDIA_CONNECT == type) &&
@@ -3557,10 +3974,15 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
 		h2c_parameter[0] = 0x1;
 		h2c_parameter[1] = wifi_central_chnl;
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
-		if (BTC_WIFI_BW_HT40 == wifi_bw)
+		if (wifi_bw == BTC_WIFI_BW_HT40) {
 			h2c_parameter[2] = 0x30;
-		else
+		} else {
 			h2c_parameter[2] = 0x20;
+			if (ap_num < 10)
+				h2c_parameter[2] = 0x30;
+			else
+				h2c_parameter[2] = 0x20;
+		}
 	}

 	coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
@@ -3576,8 +3998,9 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
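The notify path above ends by handing the firmware a three-byte H2C command with element ID 0x66: a 2.4 GHz connect flag, the centre channel, and a bandwidth code (0x30 for 40 MHz, 0x20 for 20 MHz). A user-space sketch of the payload construction, with fill_h2c() standing in for the driver's btcoexist->btc_fill_h2c() callback:

    /* sketch of the 3-byte H2C 0x66 channel-mask payload */
    #include <stdint.h>
    #include <stdio.h>

    enum wifi_bw { BW_LEGACY, BW_HT20, BW_HT40 };

    static void fill_h2c(uint8_t element_id, const uint8_t *buf, int len)
    {
        printf("H2C 0x%02x:", element_id);
        for (int i = 0; i < len; i++)
            printf(" %02x", buf[i]);
        printf("\n");
    }

    static void notify_bt_channel(uint8_t chnl, enum wifi_bw bw)
    {
        uint8_t h2c_parameter[3] = {0};

        h2c_parameter[0] = 0x1;		/* connected on 2.4G */
        h2c_parameter[1] = chnl;	/* centre channel */
        h2c_parameter[2] = (bw == BW_HT40) ? 0x30 : 0x20;	/* bandwidth */

        fill_h2c(0x66, h2c_parameter, 3);
    }

    int main(void)
    {
        notify_bt_channel(6, BW_HT40);	/* -> 01 06 30 */
        notify_bt_channel(1, BW_HT20);	/* -> 01 01 20 */
        return 0;
    }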
-void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
-					      u8 type) {
+void ex_btc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
+					   u8 type)
+{
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

 	if (type == BTC_PACKET_DHCP) {
@@ -3586,19 +4009,18 @@ void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
 	}
 }

-void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
-				       u8 *tmp_buf, u8 length)
+void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
+				    u8 *tmp_buf, u8 length)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
-	u8 bt_info = 0;
-	u8 i, rsp_source = 0;
-	static u32 set_bt_lna_cnt, set_bt_psd_mode;
-	bool bt_busy = false, limited_dig = false;
-	bool wifi_connected = false, bt_hs_on = false;
+	u8 bt_info = 0;
+	u8 i, rsp_source = 0;
+	bool bt_busy = false, limited_dig = false;
+	bool wifi_connected = false, bt_hs_on = false;

 	coex_sta->c2h_bt_info_req_sent = false;

-	rsp_source = tmp_buf[0]&0xf;
+	rsp_source = tmp_buf[0] & 0xf;
 	if (rsp_source >= BT_INFO_SRC_8821A_2ANT_MAX)
 		rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
@@ -3610,7 +4032,7 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
 		if (i == 1)
 			bt_info = tmp_buf[i];
-		if (i == length-1) {
+		if (i == length - 1) {
 			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 				 "0x%02x]\n", tmp_buf[i]);
 		} else {
@@ -3620,7 +4042,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 	}

 	if (BT_INFO_SRC_8821A_2ANT_WIFI_FW != rsp_source) {
-		coex_sta->bt_retry_cnt =	/* [3:0]*/
+		/* [3:0] */
+		coex_sta->bt_retry_cnt =
 			coex_sta->bt_info_c2h[rsp_source][2]&0xf;

 		coex_sta->bt_rssi =
@@ -3629,53 +4052,28 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 		coex_sta->bt_info_ext =
 			coex_sta->bt_info_c2h[rsp_source][4];

-		/* Here we need to resend some wifi info to BT*/
-		/* because bt is reset and loss of the info.*/
+		/* Here we need to resend some wifi info to BT
+		 * because BT was reset and lost the info
+		 */
 		if ((coex_sta->bt_info_ext & BIT1)) {
 			btcoexist->btc_get(btcoexist,
 					   BTC_GET_BL_WIFI_CONNECTED,
 					   &wifi_connected);
 			if (wifi_connected) {
-				ex_halbtc8821a2ant_media_status_notify(btcoexist,
+				ex_btc8821a2ant_media_status_notify(btcoexist,
 					BTC_MEDIA_CONNECT);
 			} else {
-				ex_halbtc8821a2ant_media_status_notify(btcoexist,
+				ex_btc8821a2ant_media_status_notify(btcoexist,
 					BTC_MEDIA_DISCONNECT);
 			}
-			set_bt_psd_mode = 0;
-		}
-		if (set_bt_psd_mode <= 3) {
-			halbtc8821a2ant_set_bt_psd_mode(btcoexist, FORCE_EXEC,
-							0x0); /*fix CH-BW mode*/
-			set_bt_psd_mode++;
-		}
-
-		if (coex_dm->cur_bt_lna_constrain) {
-			if (!(coex_sta->bt_info_ext & BIT2)) {
-				if (set_bt_lna_cnt <= 3) {
-					btc8821a2_set_bt_lna_const(btcoexist,
-								   FORCE_EXEC,
-								   true);
-					set_bt_lna_cnt++;
-				}
-			}
-		} else {
-			set_bt_lna_cnt = 0;
 		}

 		if ((coex_sta->bt_info_ext & BIT3)) {
-			halbtc8821a2ant_ignore_wlan_act(btcoexist,
-							FORCE_EXEC, false);
+			btc8821a2ant_ignore_wlan_act(btcoexist,
+						     FORCE_EXEC, false);
 		} else {
 			/* BT already NOT ignore Wlan active, do nothing here.*/
 		}
-
-		if ((coex_sta->bt_info_ext & BIT4)) {
-			/* BT auto report already enabled, do nothing*/
-		} else {
-			halbtc8821a2ant_bt_auto_report(btcoexist,
-						       FORCE_EXEC, true);
-		}
 	}

 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
@@ -3718,8 +4116,7 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 		coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
 	}

-	if (bt_hs_on)
-		coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE;
+	btc8821a2ant_update_bt_link_info(btcoexist);
 	}

 	if (BT_8821A_2ANT_BT_STATUS_NON_IDLE == coex_dm->bt_status)
@@ -3736,27 +4133,27 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 	btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);

-	halbtc8821a2ant_run_coexist_mechanism(btcoexist);
+	btc8821a2ant_run_coexist_mechanism(btcoexist);
 }
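For reference, the C2H buffer unpacked above has a fixed layout: the low nibble of byte 0 selects the response source, the low nibble of byte 2 is the BT retry count, byte 3 carries the raw BT RSSI (its scaling is elided by the hunk boundary and is not reproduced here), and byte 4 is the extended-info bitmap whose BIT1 means BT rebooted and needs the WiFi channel info resent. A standalone sketch of that parsing:

    /* sketch of the BT-info C2H byte layout used above */
    #include <stdint.h>
    #include <stdio.h>

    struct bt_c2h_info {
        uint8_t rsp_source;
        uint8_t retry_cnt;
        uint8_t raw_rssi;
        uint8_t info_ext;
    };

    static struct bt_c2h_info parse_bt_info(const uint8_t *buf, uint8_t len)
    {
        struct bt_c2h_info info = {0};

        if (len < 5)
            return info;	/* too short to carry the fields we need */

        info.rsp_source = buf[0] & 0xf;	/* [3:0] of byte 0 */
        info.retry_cnt = buf[2] & 0xf;	/* [3:0] of byte 2 */
        info.raw_rssi = buf[3];
        info.info_ext = buf[4];
        return info;
    }

    int main(void)
    {
        const uint8_t c2h[] = { 0x01, 0x00, 0x23, 0x32, 0x02 };
        struct bt_c2h_info info = parse_bt_info(c2h, sizeof(c2h));

        printf("src=%u retries=%u ext=0x%02x reset=%d\n",
               info.rsp_source, info.retry_cnt, info.info_ext,
               !!(info.info_ext & 0x02));	/* BIT1: BT lost its info */
        return 0;
    }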
-void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;

 	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");

-	halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
-	ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+	btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+	ex_btc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
 }

-void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
 {
 	struct rtl_priv *rtlpriv = btcoexist->adapter;
-	static u8 dis_ver_info_cnt;
-	u32 fw_ver = 0, bt_patch_ver = 0;
+	static u8 dis_ver_info_cnt;
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
+	u32 fw_ver = 0, bt_patch_ver = 0;

 	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
 		 "[BTCoex], ==========================Periodical===========================\n");
@@ -3785,7 +4182,7 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
 			  "[BTCoex], ****************************************************************\n");
 	}

-	halbtc8821a2ant_query_bt_info(btcoexist);
-	halbtc8821a2ant_monitor_bt_ctr(btcoexist);
-	btc8821a2ant_mon_bt_en_dis(btcoexist);
+	btc8821a2ant_query_bt_info(btcoexist);
+	btc8821a2ant_monitor_bt_ctr(btcoexist);
+	btc8821a2ant_monitor_wifi_ctr(btcoexist);
 }
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index b4cf1f53d5105845b875f3f5ebb9e408dcb7151d..535ca10e910b34e03cd66276f1102b87c668f96f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -38,6 +38,11 @@

 #define BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT	2

+/* WiFi RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES	42
+/* BT RSSI Threshold for 2-Ant TDMA/1-Ant PS-TDMA translation */
+#define BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES	46
+
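These two thresholds feed the driver's two-level RSSI state machine: a signal only switches to the high state after clearing the threshold plus the 2 dB tolerance (BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT above), but drops back as soon as it falls below the bare threshold, so a link hovering at the boundary cannot flap the coex policy. A sketch of that hysteresis pattern, with state names mirroring the BTC_RSSI_STATE_* values used throughout the .c file:

    /* sketch of the two-level RSSI hysteresis behind BTC_RSSI_HIGH() */
    #include <stdio.h>

    enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

    #define RSSI_THRESH_TOL	2	/* BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT */

    static enum rssi_state rssi_state_update(enum rssi_state prev,
                                             int rssi, int thresh)
    {
        if (prev == RSSI_LOW || prev == RSSI_STAY_LOW)
            return (rssi >= thresh + RSSI_THRESH_TOL) ?
                    RSSI_HIGH : RSSI_STAY_LOW;
        return (rssi < thresh) ? RSSI_LOW : RSSI_STAY_HIGH;
    }

    #define RSSI_IS_HIGH(s)	((s) == RSSI_HIGH || (s) == RSSI_STAY_HIGH)

    int main(void)
    {
        enum rssi_state s = RSSI_LOW;
        const int bt_thresh = 46;	/* BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES */
        const int samples[] = { 44, 47, 49, 46, 45, 48 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            s = rssi_state_update(s, samples[i], bt_thresh);
            /* 47 stays low on the way up; 46 stays high on the way down */
            printf("rssi=%d high=%d\n", samples[i], RSSI_IS_HIGH(s));
        }
        return 0;
    }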
for 8723b only, 1=>s0, 0=>s1 */ bool bt_exist; }; @@ -205,6 +215,7 @@ enum btc_get_type { BTC_GET_BL_WIFI_ENABLE_ENCRYPTION, BTC_GET_BL_WIFI_UNDER_B_MODE, BTC_GET_BL_EXT_SWITCH, + BTC_GET_BL_WIFI_IS_IN_MP_MODE, /* type s4Byte */ BTC_GET_S4_WIFI_RSSI, @@ -249,6 +260,8 @@ enum btc_set_type { BTC_SET_BL_TO_REJ_AP_AGG_PKT, BTC_SET_BL_BT_CTRL_AGG_SIZE, BTC_SET_BL_INC_SCAN_DEV_NUM, + BTC_SET_BL_BT_TX_RX_MASK, + BTC_SET_BL_MIRACAST_PLUS_BT, /* type u1Byte */ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, @@ -275,7 +288,7 @@ enum btc_set_type { BTC_SET_ACT_NORMAL_LPS, BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, BTC_SET_ACT_DISABLE_LOW_POWER, - BTC_SET_ACT_UPDATE_ra_mask, + BTC_SET_ACT_UPDATE_RAMASK, BTC_SET_ACT_SEND_MIMO_PS, /* BT Coex related */ BTC_SET_ACT_CTRL_BT_INFO, @@ -366,6 +379,7 @@ typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data); typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data); +typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data); typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr, u8 bit_mask, u8 data); @@ -388,6 +402,9 @@ typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf); typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf); +typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset, + u32 value); + typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type); struct btc_bt_info { @@ -459,6 +476,7 @@ struct btc_bt_link_info { bool hid_only; bool pan_exist; bool pan_only; + bool slave_role; }; enum btc_antenna_pos { @@ -492,6 +510,7 @@ struct btc_coexist { bfp_btc_w2 btc_write_2byte; bfp_btc_r4 btc_read_4byte; bfp_btc_w4 btc_write_4byte; + bfp_btc_local_reg_w1 btc_write_local_reg_1byte; bfp_btc_set_bb_reg btc_set_bb_reg; bfp_btc_get_bb_reg btc_get_bb_reg; @@ -505,6 +524,8 @@ struct btc_coexist { bfp_btc_get btc_get; bfp_btc_set btc_set; + + bfp_btc_set_bt_reg btc_set_bt_reg; }; bool halbtc_is_wifi_uplink(struct rtl_priv *adapter); diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 558c31bf5c807dda6b56539b39f3bdc3e29292f3..1bf3eb25c1da85a805451e885e51665b85478781 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -435,7 +435,7 @@ int rtl_regd_init(struct ieee80211_hw *hw, channel_plan_to_country_code(rtlpriv->efuse.channel_plan); RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, - "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n", + "rtl: EEPROM regdomain: 0x%0x country code: %d\n", rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code); if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 09c908d4cf91f710e7f6c4c22c04d52baed30d5f..dd3e12b7444703f803afcc1b6ced1236f6fec968 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -444,10 +444,10 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 
3616ba21959d1edeca7df314ba2b7a648dab255d..94a4b39437cd1eed6c6c795f1975584e75f3954e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -369,10 +369,10 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (stats->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (stats->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 1611e42479d97eb9b5f8322f36af119c2747cac5..41422e4da8b70473a64b25e8267e7ccc9a55983a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -329,9 +329,9 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, if (!GET_RX_DESC_SWDEC(pdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(pdesc)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (GET_RX_DESC_RX_HT(pdesc)) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; if (stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; @@ -398,9 +398,9 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) if (!GET_RX_DESC_SWDEC(rxdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(rxdesc)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (GET_RX_DESC_RX_HT(rxdesc)) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; /* Data rate */ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht, false, stats.rate); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 5c9c8741134fb344898008c0cad7b432b7b9f547..86019f654428eda28395defb275c243a33becd23 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -503,9 +503,9 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, if (!GET_RX_DESC_SWDEC(pdesc)) rx_status->flag |= RX_FLAG_DECRYPTED; if (GET_RX_DESC_BW(pdesc)) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (GET_RX_DESC_RXHT(pdesc)) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; if (stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 9fec345a42a01c1a03fd74f6b4b9f14cfd148598..1f42ce5f8f27fb8f46c69b91e0c7d636b5f5ce98 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -468,8 +468,10 @@ void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus) #define PSPOLL_PG 2 #define NULL_PG 3 #define PROBERSP_PG 4 /* ->5 */ +#define QOS_NULL_PG 6 +#define BT_QOS_NULL_PG 7 -#define TOTAL_RESERVED_PKT_LEN 768 +#define TOTAL_RESERVED_PKT_LEN 1024 static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { /* page 0 beacon */ @@ -570,6 +572,42 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 6 qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 7 BT-qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -595,6 +633,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u8 *p_pspoll; u8 *nullfunc; u8 *p_probersp; + u8 *qosnull; + u8 *btqosnull; /*--------------------------------------------------------- * (1) beacon *--------------------------------------------------------- @@ -636,6 +676,28 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); + /*--------------------------------------------------------- + * (5) QoS null data + *---------------------------------------------------------- + */ + qosnull = &reserved_page_packet[QOS_NULL_PG * 128]; + SET_80211_HDR_ADDRESS1(qosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(qosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG); + + /*--------------------------------------------------------- + * (6) BT QoS null data + *---------------------------------------------------------- + */ + btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128]; + SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG); + totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD , diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index 72da3f92f02c57911ee307e5c74e22d8750e97fb..af8271967a880bab1c2f9f5c050c0478c6777d3c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -165,6 +165,10 @@ enum rtl8192e_c2h_evt 
{ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val) /* _MEDIA_STATUS_RPT_PARM_CMD1 */ #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val) \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 56ca7f5351eaba043a792764fe80c5860a0a9aae..6f5098a18655536e6c210a5cd70e0cef55c4b3a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -699,9 +699,9 @@ static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw) u8 txpktbuf_bndy; u8 u8tmp, testcnt = 0; - txpktbuf_bndy = 0xFA; + txpktbuf_bndy = 0xF7; - rtl_write_dword(rtlpriv, REG_RQPN, 0x80E90808); + rtl_write_dword(rtlpriv, REG_RQPN, 0x80E60808); rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x3d00 - 1); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 07440e9a8ca26ae0b259cf46760c188cc7422cc5..b1864bb07c2cb35cbcbdd0b9b048323bded65d0d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -394,10 +394,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index 12cef01e593bb2f2550db876817b98c2107f2467..a01dbd31d1b4c43793bcffa5b54e01aefda18497 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -289,10 +289,10 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (stats->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (stats->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index c9838f52a7ea8a5c30802a4c2824df41b4fe48f1..f713c7249feda16f1b00799c7617bd809787dff1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -317,10 +317,10 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index c7ee9ba5e26ea15e4152e2ed4110e946f4d5b78a..4fc839b1d60165321f302006fdc813edb5826613 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -284,8 +284,10 @@ void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus) #define PSPOLL_PG 2 #define NULL_PG 3 #define PROBERSP_PG 4 /* ->5 */ +#define QOS_NULL_PG 6 +#define BT_QOS_NULL_PG 7 -#define TOTAL_RESERVED_PKT_LEN 768 +#define TOTAL_RESERVED_PKT_LEN 1024 /* can be up to 1280 (tx_bndy=245) */ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { /* page 0 beacon */ @@ -390,11 +392,48 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 6 qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 7 BT-qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, @@ -413,6 +452,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u8 *p_pspoll; u8 *nullfunc; u8 *p_probersp; + u8 *qosnull; + u8 *btqosnull; /*--------------------------------------------------------- * (1) beacon *--------------------------------------------------------- @@ -454,6 +495,28 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); + /*--------------------------------------------------------- + * (5) QoS Null + *--------------------------------------------------------- + */ + qosnull = &reserved_page_packet[QOS_NULL_PG * 128]; + SET_80211_HDR_ADDRESS1(qosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(qosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOS_NULL_PG); + + 
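[Reviewer sketch, not part of the patch] The reserved-page plumbing being added here follows one pattern on all chips: each template packet lives in a fixed-size page of reserved_page_packet, and the H2C_RSVDPAGE payload carries one page index per byte through the SET_H2CCMD_RSVDPAGE_LOC_* macros. A minimal standalone illustration of that arithmetic, using the page numbers from this patch; RSVD_PAGE_SIZE and page_offset are illustrative names, not driver API:

#include <stdio.h>
#include <stdint.h>

/* Page indices as #defined for rtl8192ee/rtl8723be in this patch. */
#define QOS_NULL_PG	6
#define BT_QOS_NULL_PG	7
/* 128-byte pages on these chips; 8821AE uses 256, 8812AE uses 512. */
#define RSVD_PAGE_SIZE	128

/* Illustrative helper: byte offset of a reserved page in the template. */
static unsigned int page_offset(unsigned int page)
{
	return page * RSVD_PAGE_SIZE;
}

int main(void)
{
	/* One location byte per packet, mirroring the new
	 * SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA() (byte 3) and
	 * SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA() (byte 4) macros in fw.h.
	 */
	uint8_t u1rsvdpageloc[5] = { 0 };

	u1rsvdpageloc[3] = QOS_NULL_PG;
	u1rsvdpageloc[4] = BT_QOS_NULL_PG;

	printf("H2C loc bytes: [3]=%u [4]=%u\n",
	       u1rsvdpageloc[3], u1rsvdpageloc[4]);
	printf("qosnull template at offset %u, btqosnull at offset %u\n",
	       page_offset(QOS_NULL_PG), page_offset(BT_QOS_NULL_PG));
	return 0;
}

With 128-byte pages this places the QoS-null template at offset 768 and the BT-QoS-null template at offset 896, which is why TOTAL_RESERVED_PKT_LEN grows to 1024 (eight pages).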
/*--------------------------------------------------------- + * (6) BT QoS Null + *--------------------------------------------------------- + */ + btqosnull = &reserved_page_packet[BT_QOS_NULL_PG * 128]; + SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOS_NULL_PG); + totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, @@ -461,7 +524,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl8723be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", - u1rsvdpageloc, 3); + u1rsvdpageloc, sizeof(u1rsvdpageloc)); skb = dev_alloc_skb(totalpacketlen); memcpy((u8 *)skb_put(skb, totalpacketlen), @@ -476,7 +539,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw.\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n", - u1rsvdpageloc, 3); + u1rsvdpageloc, sizeof(u1rsvdpageloc)); rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RSVDPAGE, sizeof(u1rsvdpageloc), u1rsvdpageloc); } else diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index c652fa1339a760642a8c021c8200ba4777de49ed..2482b3bc2bfaa890a0711402029ed285e5093929 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h @@ -139,6 +139,10 @@ enum rtl8723b_c2h_evt { SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 3, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val) void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 92dbfa8f297f318a766506e50be2d102c312f470..8c0ac96b543005efe8348b7f09dfa4f2be725e90 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -91,7 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - char *fw_name = "rtlwifi/rtl8723befw.bin"; + char *fw_name = "rtlwifi/rtl8723befw_36.bin"; rtl8723be_bt_reg_init(hw); rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); @@ -187,8 +187,16 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - pr_err("Failed to request firmware!\n"); - return 1; + /* Failed to get firmware.
Check if old version available */ + fw_name = "rtlwifi/rtl8723befw.bin"; + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, + rtlpriv->io.dev, GFP_KERNEL, hw, + rtl_fw_cb); + if (err) { + pr_err("Failed to request firmware!\n"); + return 1; + } } return 0; } @@ -384,6 +392,7 @@ MODULE_AUTHOR("Realtek WlanFAE "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723befw_36.bin"); module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444); module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 6f65003a895a572448446c21a22725191f3cb24d..3c6ce994c6aacb6c9e7015ea4ee70ed5b8842ce6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -373,10 +373,10 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_is40Mhzpacket) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; rx_status->flag |= RX_FLAG_MACTIME_START; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index a504dfae4ed37486ea90451bd000022c4eac56f4..73350103b736c181b040e722112861ce107831e4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -678,12 +678,13 @@ void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw) #define PSPOLL_PG 1 #define NULL_PG 2 #define QOSNULL_PG 3 -#define ARPRESP_PG 4 -#define REMOTE_PG 5 -#define GTKEXT_PG 6 +#define BT_QOSNULL_PG 4 +#define ARPRESP_PG 5 +#define REMOTE_PG 6 +#define GTKEXT_PG 7 -#define TOTAL_RESERVED_PKT_LEN_8812 3584 -#define TOTAL_RESERVED_PKT_LEN_8821 1792 +#define TOTAL_RESERVED_PKT_LEN_8812 4096 +#define TOTAL_RESERVED_PKT_LEN_8821 2048 static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { /* page 0: beacon */ @@ -813,13 +814,46 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 4: BT qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* page 4~6 is for wowlan */ - /* page 4: ARP resp */ + /* page 5~7 is for wowlan */ + /* page 5: ARP resp */ 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, @@ -852,7 +886,7 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */ + /* page 6: H2C_REMOTE_WAKE_CTRL_INFO */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -885,7 +919,7 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* page 6: Rsvd GTK extend memory (zero memory) */ + /* page 7: Rsvd GTK extend memory (zero memory) */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1176,13 +1210,78 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 4: BT Qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* page 4~6 is for wowlan */ - /* page 4: ARP resp */ + /* page 5~7 is for wowlan */ + /* page 5: ARP resp */ 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, @@ -1247,7 +1346,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */ + /* page 6: H2C_REMOTE_WAKE_CTRL_INFO */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1312,7 +1411,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /* page 6: Rsvd GTK extend memory (zero memory) */ + /* page 7: Rsvd GTK extend memory (zero memory) */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1394,6 +1493,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u8 *p_pspoll; u8 *nullfunc; u8 *qosnull; + u8 *btqosnull; u8 *arpresp; 
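[Reviewer sketch, not part of the patch] In the hunks that follow, the partial-download length moves from covering pages 0..QOSNULL_PG to pages 0..BT_QOSNULL_PG, so the new BT QoS null template is always downloaded even when the wowlan pages are skipped. A quick check of the arithmetic under the page sizes used here (512 bytes on 8812AE, 256 on 8821AE); the trailing 40-byte trim is carried over unchanged from the pre-patch code:

#include <stdio.h>

#define BT_QOSNULL_PG	4	/* as #defined above for 8812AE/8821AE */

int main(void)
{
	/* Mirrors "totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40" and the
	 * 256-byte variant in the hunks below.
	 */
	printf("8812AE partial download: %d bytes\n",
	       512 * (BT_QOSNULL_PG + 1) - 40);	/* 2520 */
	printf("8821AE partial download: %d bytes\n",
	       256 * (BT_QOSNULL_PG + 1) - 40);	/* 1240 */
	return 0;
}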
/*--------------------------------------------------------- @@ -1441,12 +1541,23 @@ SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG); + /*--------------------------------------------------------- + * (5) BT Qos null data + *---------------------------------------------------------- + */ + btqosnull = &reserved_page_packet_8812[BT_QOSNULL_PG * 512]; + SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG); + if (!dl_whole_packets) { - totalpacketlen = 512 * (QOSNULL_PG + 1) - 40; + totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40; goto out; } /*--------------------------------------------------------- - * (5) ARP Resp + * (6) ARP Resp *---------------------------------------------------------- */ arpresp = &reserved_page_packet_8812[ARPRESP_PG * 512]; @@ -1457,14 +1568,14 @@ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG); /*--------------------------------------------------------- - * (6) Remote Wake Ctrl + * (7) Remote Wake Ctrl *---------------------------------------------------------- */ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2, REMOTE_PG); /*--------------------------------------------------------- - * (7) GTK Ext Memory + * (8) GTK Ext Memory *---------------------------------------------------------- */ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG); @@ -1518,6 +1629,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u8 *p_pspoll; u8 *nullfunc; u8 *qosnull; + u8 *btqosnull; u8 *arpresp; /*--------------------------------------------------------- @@ -1565,12 +1677,23 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG); + /*--------------------------------------------------------- + * (5) BT Qos null data + *---------------------------------------------------------- + */ + btqosnull = &reserved_page_packet_8821[BT_QOSNULL_PG * 256]; + SET_80211_HDR_ADDRESS1(btqosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG); + if (!dl_whole_packets) { - totalpacketlen = 256 * (QOSNULL_PG + 1) - 40; + totalpacketlen = 256 * (BT_QOSNULL_PG + 1) - 40; goto out; } /*--------------------------------------------------------- - * (5) ARP Resp + * (6) ARP Resp *---------------------------------------------------------- */ arpresp = &reserved_page_packet_8821[ARPRESP_PG * 256]; @@ -1581,14 +1704,14 @@ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG); /*--------------------------------------------------------- - * (6) Remote Wake Ctrl + * (7) Remote Wake Ctrl *---------------------------------------------------------- */ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2, REMOTE_PG); /*--------------------------------------------------------- - * (7) GTK Ext Memory + * (8) GTK Ext Memory *---------------------------------------------------------- */ SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 90a98ed879f7e839b6f85a12240a673f4a18c5b6..98d871afd92ab4c0c1555f3bf4ac5612ae001b35 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -229,6 +229,8 @@ enum rtl8821a_h2c_cmd { SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) #define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 4, 0, 8, __val) /* _MEDIA_STATUS_RPT_PARM_CMD1 */ #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 363d2f28da1fccca8ef986eea90a1d607b87a4d2..3571ce4bd276584618680c1242445c27296e7212 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -842,12 +842,8 @@ static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw) bool status; maxpage = 255; - txpktbuf_bndy = 0xF8; - rqpn = 0x80e70808; - if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) { - txpktbuf_bndy = 0xFA; - rqpn = 0x80e90808; - } + txpktbuf_bndy = 0xF7; + rqpn = 0x80e60808; rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 8da874cbec1a19353877f5cdf4855050f333bbc6..aa3ccc740521d07755c387760f476ae71d27f7c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -358,6 +358,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw) return rtl8821ae_phy_rf6052_config(hw); } +static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + + switch (rtlhal->rfe_type) { + case 3: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); + break; + case 4: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001); + break; + case 5: + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + break; + case 1: + if (rtlpriv->btcoexist.bt_coexistence) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + break; + } + case 0: + case 2: + default: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + break; + } +} + +static void _rtl8812ae_phy_set_rfe_reg_5g(struct 
ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + + switch (rtlhal->rfe_type) { + case 0: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + break; + case 1: + if (rtlpriv->btcoexist.bt_coexistence) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77337717); + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + } else { + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, + 0x77337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77337717); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); + } + break; + case 3: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); + break; + case 5: + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + break; + case 2: + case 4: + default: + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); + break; + } +} + u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, u8 rf_path) { @@ -552,14 +653,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) /* 0x82C[1:0] = 2b'00 */ rtl_set_bbreg(hw, 0x82c, 0x3, 0); } - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, - 0x77777777); - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, - 0x77777777); - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000); - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000); - } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + _rtl8812ae_phy_set_rfe_reg_24g(hw); rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1); rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1); @@ -614,14 +710,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) /* 0x82C[1:0] = 2'b00 */ rtl_set_bbreg(hw, 0x82c, 0x3, 1); - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, - 0x77337777); - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, - 0x77337777); - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010); - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010); - } + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + _rtl8812ae_phy_set_rfe_reg_5g(hw); rtl_set_bbreg(hw, RTXPATH, 0xf0, 0); rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf); @@ -660,6 +750,88 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) return; } +static bool _rtl8821ae_check_positive(struct ieee80211_hw *hw, + const u32 condition1, + const u32 condition2) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 cut_ver = ((rtlhal->version & CHIP_VER_RTL_MASK) + >> CHIP_VER_RTL_SHIFT); + u32 intf = (rtlhal->interface == INTF_USB ? 
BIT(1) : BIT(0)); + + u8 board_type = ((rtlhal->board_type & BIT(4)) >> 4) << 0 | /* _GLNA */ + ((rtlhal->board_type & BIT(3)) >> 3) << 1 | /* _GPA */ + ((rtlhal->board_type & BIT(7)) >> 7) << 2 | /* _ALNA */ + ((rtlhal->board_type & BIT(6)) >> 6) << 3 | /* _APA */ + ((rtlhal->board_type & BIT(2)) >> 2) << 4; /* _BT */ + + u32 cond1 = condition1, cond2 = condition2; + u32 driver1 = cut_ver << 24 | /* CUT ver */ + 0 << 20 | /* interface 2/2 */ + 0x04 << 16 | /* platform */ + rtlhal->package_type << 12 | + intf << 8 | /* interface 1/2 */ + board_type; + + u32 driver2 = rtlhal->type_glna << 0 | + rtlhal->type_gpa << 8 | + rtlhal->type_alna << 16 | + rtlhal->type_apa << 24; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n", + cond1, cond2); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n", + driver1, driver2); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Board, Package) = (0x%X, 0x%X)\n", + rtlhal->board_type, rtlhal->package_type); + + /*============== Value Defined Check ===============*/ + /*QFN Type [15:12] and Cut Version [27:24] need to do value check*/ + + if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != + (driver1 & 0x0000F000))) + return false; + if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != + (driver1 & 0x0F000000))) + return false; + + /*=============== Bit Defined Check ================*/ + /* We don't care [31:28] */ + + cond1 &= 0x00FF0FFF; + driver1 &= 0x00FF0FFF; + + if ((cond1 & driver1) == cond1) { + u32 mask = 0; + + if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE*/ + return true; + + if ((cond1 & BIT(0)) != 0) /*GLNA*/ + mask |= 0x000000FF; + if ((cond1 & BIT(1)) != 0) /*GPA*/ + mask |= 0x0000FF00; + if ((cond1 & BIT(2)) != 0) /*ALNA*/ + mask |= 0x00FF0000; + if ((cond1 & BIT(3)) != 0) /*APA*/ + mask |= 0xFF000000; + + /* BoardType of each RF path is matched*/ + if ((cond2 & mask) == (driver2 & mask)) + return true; + else + return false; + } else + return false; +} + static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw, const u32 condition) { @@ -1695,55 +1867,78 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) return true; } +static bool +__rtl8821ae_phy_config_with_headerfile(struct ieee80211_hw *hw, + u32 *array_table, u16 arraylen, + void (*set_reg)(struct ieee80211_hw *hw, + u32 regaddr, u32 data)) +{ + #define COND_ELSE 2 + #define COND_ENDIF 3 + + int i = 0; + u8 cond; + bool matched = true, skipped = false; + + while ((i + 1) < arraylen) { + u32 v1 = array_table[i]; + u32 v2 = array_table[i + 1]; + + if (v1 & (BIT(31) | BIT(30))) {/*positive & negative condition*/ + if (v1 & BIT(31)) {/* positive condition*/ + cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28); + if (cond == COND_ENDIF) {/*end*/ + matched = true; + skipped = false; + } else if (cond == COND_ELSE) /*else*/ + matched = skipped ? 
false : true; + else {/*if , else if*/ + if (skipped) { + matched = false; + } else { + if (_rtl8821ae_check_positive( + hw, v1, v2)) { + matched = true; + skipped = true; + } else { + matched = false; + skipped = false; + } + } + } + } else if (v1 & BIT(30)) { /*negative condition*/ + /*do nothing*/ + } + } else { + if (matched) + set_reg(hw, v1, v2); + } + i = i + 2; + } + + return true; +} + static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - u32 i, v1, v2; u32 arraylength; u32 *ptrarray; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n"); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { - arraylength = RTL8821AEMAC_1T_ARRAYLEN; + arraylength = RTL8821AE_MAC_1T_ARRAYLEN; ptrarray = RTL8821AE_MAC_REG_ARRAY; } else { - arraylength = RTL8812AEMAC_1T_ARRAYLEN; + arraylength = RTL8812AE_MAC_1T_ARRAYLEN; ptrarray = RTL8812AE_MAC_REG_ARRAY; } RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Img: MAC_REG_ARRAY LEN %d\n", arraylength); - for (i = 0; i < arraylength; i += 2) { - v1 = ptrarray[i]; - v2 = (u8)ptrarray[i + 1]; - if (v1 < 0xCDCDCDCD) { - rtl_write_byte(rtlpriv, v1, (u8)v2); - continue; - } else { - if (!_rtl8821ae_check_condition(hw, v1)) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_PAIR(ptrarray, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < arraylength - 2) { - READ_NEXT_PAIR(ptrarray, v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - } else {/*Configure matched pairs and skip to end of if-else.*/ - READ_NEXT_PAIR(ptrarray, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < arraylength - 2) { - rtl_write_byte(rtlpriv, v1, v2); - READ_NEXT_PAIR(ptrarray, v1, v2, i); - } - while (v2 != 0xDEAD && i < arraylength - 2) - READ_NEXT_PAIR(ptrarray, v1, v2, i); - } - } - } - return true; + return __rtl8821ae_phy_config_with_headerfile(hw, + ptrarray, arraylength, rtl_write_byte_with_val32); } static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, @@ -1751,111 +1946,33 @@ static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - int i; u32 *array_table; u16 arraylen; - u32 v1 = 0, v2 = 0; if (configtype == BASEBAND_CONFIG_PHY_REG) { if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - arraylen = RTL8812AEPHY_REG_1TARRAYLEN; + arraylen = RTL8812AE_PHY_REG_1TARRAYLEN; array_table = RTL8812AE_PHY_REG_ARRAY; } else { - arraylen = RTL8821AEPHY_REG_1TARRAYLEN; + arraylen = RTL8821AE_PHY_REG_1TARRAYLEN; array_table = RTL8821AE_PHY_REG_ARRAY; } - for (i = 0; i < arraylen; i += 2) { - v1 = array_table[i]; - v2 = array_table[i + 1]; - if (v1 < 0xCDCDCDCD) { - _rtl8821ae_config_bb_reg(hw, v1, v2); - continue; - } else {/*This line is the start line of branch.*/ - if (!_rtl8821ae_check_condition(hw, v1)) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_PAIR(array_table, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(array_table, v1, - v2, i); - } - - i -= 2; /* prevent from for-loop += 2*/ - } else {/*Configure matched pairs and skip to end of if-else.*/ - READ_NEXT_PAIR(array_table, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - _rtl8821ae_config_bb_reg(hw, v1, - v2); - READ_NEXT_PAIR(array_table, v1, - v2, i); - } - - while (v2 != 0xDEAD && - i < 
arraylen - 2) { - READ_NEXT_PAIR(array_table, v1, - v2, i); - } - } - } - } + return __rtl8821ae_phy_config_with_headerfile(hw, + array_table, arraylen, + _rtl8821ae_config_bb_reg); } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - arraylen = RTL8812AEAGCTAB_1TARRAYLEN; + arraylen = RTL8812AE_AGC_TAB_1TARRAYLEN; array_table = RTL8812AE_AGC_TAB_ARRAY; } else { - arraylen = RTL8821AEAGCTAB_1TARRAYLEN; + arraylen = RTL8821AE_AGC_TAB_1TARRAYLEN; array_table = RTL8821AE_AGC_TAB_ARRAY; } - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xCDCDCDCD) { - rtl_set_bbreg(hw, v1, MASKDWORD, v2); - udelay(1); - continue; - } else {/*This line is the start line of branch.*/ - if (!_rtl8821ae_check_condition(hw, v1)) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_PAIR(array_table, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(array_table, v1, - v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - } else {/*Configure matched pairs and skip to end of if-else.*/ - READ_NEXT_PAIR(array_table, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - rtl_set_bbreg(hw, v1, MASKDWORD, - v2); - udelay(1); - READ_NEXT_PAIR(array_table, v1, - v2, i); - } - - while (v2 != 0xDEAD && - i < arraylen - 2) { - READ_NEXT_PAIR(array_table, v1, - v2, i); - } - } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", - array_table[i], array_table[i + 1]); - } - } + return __rtl8821ae_phy_config_with_headerfile(hw, + array_table, arraylen, + rtl_set_bbreg_with_dwmask); } return true; } @@ -1913,10 +2030,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, u32 v1, v2, v3, v4, v5, v6; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - arraylen = RTL8812AEPHY_REG_ARRAY_PGLEN; + arraylen = RTL8812AE_PHY_REG_ARRAY_PGLEN; array = RTL8812AE_PHY_REG_ARRAY_PG; } else { - arraylen = RTL8821AEPHY_REG_ARRAY_PGLEN; + arraylen = RTL8821AE_PHY_REG_ARRAY_PGLEN; array = RTL8821AE_PHY_REG_ARRAY_PG; } @@ -1980,12 +2097,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { - int i; bool rtstatus = true; u32 *radioa_array_table_a, *radioa_array_table_b; u16 radioa_arraylen_a, radioa_arraylen_b; struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 v1 = 0, v2 = 0; radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN; radioa_array_table_a = RTL8812AE_RADIOA_ARRAY; @@ -1997,69 +2112,14 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, rtstatus = true; switch (rfpath) { case RF90_PATH_A: - for (i = 0; i < radioa_arraylen_a; i = i + 2) { - v1 = radioa_array_table_a[i]; - v2 = radioa_array_table_a[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8821ae_config_rf_radio_a(hw, v1, v2); - continue; - } else{/*This line is the start line of branch.*/ - if (!_rtl8821ae_check_condition(hw, v1)) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < radioa_arraylen_a-2) - READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); - - i -= 2; /* prevent from for-loop += 2*/ - } else {/*Configure matched pairs and skip to end of if-else.*/ - READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 
0xCDEF && - v2 != 0xCDCD && i < radioa_arraylen_a - 2) { - _rtl8821ae_config_rf_radio_a(hw, v1, v2); - READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); - } - - while (v2 != 0xDEAD && i < radioa_arraylen_a-2) - READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); - - } - } - } + return __rtl8821ae_phy_config_with_headerfile(hw, + radioa_array_table_a, radioa_arraylen_a, + _rtl8821ae_config_rf_radio_a); break; case RF90_PATH_B: - for (i = 0; i < radioa_arraylen_b; i = i + 2) { - v1 = radioa_array_table_b[i]; - v2 = radioa_array_table_b[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8821ae_config_rf_radio_b(hw, v1, v2); - continue; - } else{/*This line is the start line of branch.*/ - if (!_rtl8821ae_check_condition(hw, v1)) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < radioa_arraylen_b-2) - READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); - - i -= 2; /* prevent from for-loop += 2*/ - } else {/*Configure matched pairs and skip to end of if-else.*/ - READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < radioa_arraylen_b-2) { - _rtl8821ae_config_rf_radio_b(hw, v1, v2); - READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); - } - - while (v2 != 0xDEAD && i < radioa_arraylen_b-2) - READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); - } - } - } + return __rtl8821ae_phy_config_with_headerfile(hw, + radioa_array_table_b, radioa_arraylen_b, + _rtl8821ae_config_rf_radio_b); break; case RF90_PATH_C: case RF90_PATH_D: @@ -2072,21 +2132,10 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { - #define READ_NEXT_RF_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = radioa_array_table[i]; \ - v2 = radioa_array_table[i+1]; \ - } \ - while (0) - - int i; bool rtstatus = true; u32 *radioa_array_table; u16 radioa_arraylen; struct rtl_priv *rtlpriv = rtl_priv(hw); - /* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */ - u32 v1 = 0, v2 = 0; radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN; radioa_array_table = RTL8821AE_RADIOA_ARRAY; @@ -2096,35 +2145,9 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, rtstatus = true; switch (rfpath) { case RF90_PATH_A: - for (i = 0; i < radioa_arraylen; i = i + 2) { - v1 = radioa_array_table[i]; - v2 = radioa_array_table[i+1]; - if (v1 < 0xcdcdcdcd) - _rtl8821ae_config_rf_radio_a(hw, v1, v2); - else{/*This line is the start line of branch.*/ - if (!_rtl8821ae_check_condition(hw, v1)) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < radioa_arraylen - 2) - READ_NEXT_RF_PAIR(v1, v2, i); - - i -= 2; /* prevent from for-loop += 2*/ - } else {/*Configure matched pairs and skip to end of if-else.*/ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < radioa_arraylen - 2) { - _rtl8821ae_config_rf_radio_a(hw, v1, v2); - READ_NEXT_RF_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && i < radioa_arraylen - 2) - READ_NEXT_RF_PAIR(v1, v2, i); - } - } - } + return __rtl8821ae_phy_config_with_headerfile(hw, + radioa_array_table, radioa_arraylen, + _rtl8821ae_config_rf_radio_a); break; case RF90_PATH_B: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h index 
1d6110f9c1fb6e29e75e8bde9c1533b026a8b757..ed69dbe178ffc2eb64475161b02e57eddadf1540 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h @@ -2424,6 +2424,7 @@ #define BMASKH4BITS 0xf0000000 #define BMASKOFDM_D 0xffc00000 #define BMASKCCK 0x3f3f3f3f +#define BMASKRFEINV 0x3ff00000 #define BRFREGOFFSETMASK 0xfffff diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 77cf3b2cd3f1f4c7ba93437996efe55ad3d94fb5..abaf34cb14331487d8bd3ad789ea5450148d74f4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -203,7 +203,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) fw_name = "rtlwifi/rtl8812aefw.bin"; wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; } else { - fw_name = "rtlwifi/rtl8821aefw.bin"; + fw_name = "rtlwifi/rtl8821aefw_29.bin"; wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; } @@ -214,8 +214,16 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - pr_err("Failed to request normal firmware!\n"); - return 1; + /* Failed to get firmware. Check if old version available */ + fw_name = "rtlwifi/rtl8821aefw.bin"; + pr_info("Using firmware %s\n", fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, + rtlpriv->io.dev, GFP_KERNEL, hw, + rtl_fw_cb); + if (err) { + pr_err("Failed to request normal firmware!\n"); + return 1; + } } /*load wowlan firmware*/ pr_info("Using firmware %s\n", wowlan_fw_name); @@ -428,6 +436,7 @@ MODULE_AUTHOR("Realtek WlanFAE "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8821aefw_29.bin"); module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 62a0fb76f080d6c756d73a1302eeabc891330c60..408c4611e5dee5798911d67d1915e6afd06f3e82 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -38,7 +38,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0x824, 0x00030FE0, 0x828, 0x00000000, 0x82C, 0x002083DD, - 0x830, 0x2AAA6C86, + 0x830, 0x2EAAEEB8, 0x834, 0x0037A706, 0x838, 0x06C89B44, 0x83C, 0x0000095B, @@ -68,7 +68,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0x8BC, 0x4CA520A3, 0x8C0, 0x27F00020, 0x8C4, 0x00000000, - 0x8C8, 0x00013169, + 0x8C8, 0x00012D69, 0x8CC, 0x08248492, 0x8D0, 0x0000B800, 0x8DC, 0x00000000, @@ -76,13 +76,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0x8D8, 0x290B5612, 0x8F8, 0x400002C0, 0x8FC, 0x00000000, - 0xFF0F07D8, 0xABCD, 0x900, 0x00000701, - 0xFF0F07D0, 0xCDEF, - 0x900, 0x00000701, - 0xCDCDCDCD, 0xCDCD, - 0x900, 0x00000700, - 0xFF0F07D8, 0xDEAD, 0x90C, 0x00000000, 0x910, 0x0000FC00, 0x914, 0x00000404, @@ -120,7 +114,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0x9D4, 0x00000000, 0x9D8, 0x00000000, 0x9DC, 0x00000000, - 0x9E4, 0x00000002, + 0x9E4, 0x00000003, 0x9E8, 0x000002D5, 0xA00, 0x00D047C8, 0xA04, 0x01FF000C, @@ -189,7 +183,21 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0xC5C, 0x00000058, 0xC60, 0x34344443, 0xC64, 0x07003333, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000002, 
0x00000000, 0x40000000, 0x00000000, 0xC68, 0x59791979, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000001, 0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0xA0000000, 0x00000000, + 0xC68, 0x59799979, + 0xB0000000, 0x00000000, 0xC6C, 0x59795979, 0xC70, 0x19795979, 0xC74, 0x19795979, @@ -203,19 +211,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0xCA0, 0x00000029, 0xCA4, 0x08040201, 0xCA8, 0x80402010, - 0xFF0F0740, 0xABCD, - 0xCB0, 0x77547717, - 0xFF0F01C0, 0xCDEF, - 0xCB0, 0x77547717, - 0xFF0F02C0, 0xCDEF, - 0xCB0, 0x77547717, - 0xFF0F07D8, 0xCDEF, - 0xCB0, 0x54547710, - 0xFF0F07D0, 0xCDEF, - 0xCB0, 0x54547710, - 0xCDCDCDCD, 0xCDCD, 0xCB0, 0x77547777, - 0xFF0F0740, 0xDEAD, 0xCB4, 0x00000077, 0xCB8, 0x00508242, 0xE00, 0x00000007, @@ -257,23 +253,14 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0xEA0, 0x00000029, 0xEA4, 0x08040201, 0xEA8, 0x80402010, - 0xFF0F0740, 0xABCD, - 0xEB0, 0x77547717, - 0xFF0F01C0, 0xCDEF, - 0xEB0, 0x77547717, - 0xFF0F02C0, 0xCDEF, - 0xEB0, 0x77547717, - 0xFF0F07D8, 0xCDEF, - 0xEB0, 0x54547710, - 0xFF0F07D0, 0xCDEF, - 0xEB0, 0x54547710, - 0xCDCDCDCD, 0xCDCD, 0xEB0, 0x77547777, - 0xFF0F0740, 0xDEAD, 0xEB4, 0x00000077, 0xEB8, 0x00508242, }; +u32 RTL8812AE_PHY_REG_1TARRAYLEN = + sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32); + u32 RTL8821AE_PHY_REG_ARRAY[] = { 0x800, 0x0020D090, 0x804, 0x080112E0, @@ -449,6 +436,9 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0xCB8, 0x00508240, }; +u32 RTL8821AE_PHY_REG_1TARRAYLEN = + sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32); + u32 RTL8812AE_PHY_REG_ARRAY_PG[] = { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840, 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, @@ -498,6 +488,9 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628 }; +u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = + sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32); + u32 RTL8821AE_PHY_REG_ARRAY_PG[] = { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838, @@ -516,6 +509,9 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = { 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022 }; +u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = + sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32); + u32 RTL8812AE_RADIOA_ARRAY[] = { 0x000, 0x00010000, 0x018, 0x0001712A, @@ -523,26 +519,25 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x066, 0x00040000, 0x01E, 0x00080000, 0x089, 0x00000080, - 0xFF0F0740, 0xABCD, - 0x086, 0x00014B38, - 0xFF0F02C0, 0xCDEF, - 0x086, 0x00014B38, - 0xFF0F01C0, 0xCDEF, - 0x086, 0x00014B38, - 0xFF0F07D8, 0xCDEF, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, 0x086, 0x00014B3A, - 0xFF0F07D0, 0xCDEF, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, 0x086, 0x00014B3A, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x086, 0x00014B38, - 0xFF0F0740, 0xDEAD, + 0xB0000000, 0x00000000, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x08B, 0x00080180, + 0xA0000000, 0x00000000, + 0x08B, 0x00087180, + 0xB0000000, 0x00000000, 0x0B1, 0x0001FC1A, 0x0B3, 0x000F0810, 0x0B4, 0x0001A78D, 0x0BA, 0x00086180, 0x018, 0x00000006, 0x0EF, 0x00002000, - 0xFF0F07D8, 0xABCD, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, 0x03B, 0x0003F218, 0x03B, 0x00030A58, 0x03B, 0x0002FA58, @@ -550,7 +545,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03B, 0x0001FA50, 0x03B, 0x00010248, 0x03B, 0x00008240, - 0xFF0F07D0, 0xCDEF, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, 0x03B, 0x0003F218, 0x03B, 0x00030A58, 0x03B, 0x0002FA58, @@ -558,7 +553,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03B, 0x0001FA50, 
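From this point on, the table.c hunks swap the old condition markers (0xFF0Fxxxx with 0xABCD/0xCDEF/0xCDCD/0xDEAD sentinels) for four-word headers of the form 0x8000000N, ..., 0x40000000, 0x00000000, closed by 0xA0000000/0xB0000000 rows. The bit semantics live in the phy.c helpers rather than in this file; inferring from the layout alone, the top nibble appears to act as if/else-if/else/endif, with the condition value matched against board parameters (RFE type and the like) elsewhere. A hedged decoder sketch of that assumption:

/* Assumption inferred from the table layout above, not from code in
 * these hunks: bits 31..28 of the first header word carry the branch
 * kind; the low bits carry the condition to test. */
#define COND_HDR_KIND(v1)	(((v1) & 0xF0000000) >> 28)

enum cond_kind {
	COND_IF      = 0x8,	/* 0x8000000N, ... */
	COND_ELSE_IF = 0x9,	/* 0x9000000N, ... */
	COND_ELSE    = 0xA,	/* 0xA0000000, 0x00000000 */
	COND_ENDIF   = 0xB,	/* 0xB0000000, 0x00000000 */
};

The practical win is visible in the diff itself: the old format forced a full copy of each register run per board variant, while the new else-if chain keeps one copy per branch and deletes hundreds of duplicated rows.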
0x03B, 0x00010248, 0x03B, 0x00008240, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x03B, 0x00038A58, 0x03B, 0x00037A58, 0x03B, 0x0002A590, @@ -566,9 +561,9 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03B, 0x00018248, 0x03B, 0x00010240, 0x03B, 0x00008240, - 0xFF0F07D8, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000100, - 0xFF0F07D8, 0xABCD, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A4EE, 0x034, 0x00009076, 0x034, 0x00008073, @@ -580,7 +575,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x00002028, 0x034, 0x00001025, 0x034, 0x00000022, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x034, 0x0000ADF4, 0x034, 0x00009DF1, 0x034, 0x00008DEE, @@ -592,7 +587,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x000024E7, 0x034, 0x0000146B, 0x034, 0x0000006D, - 0xFF0F07D8, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x000020A2, 0x0DF, 0x00000080, @@ -646,7 +641,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03B, 0x0006B064, 0x03C, 0x00004000, 0x03A, 0x000000D8, - 0x03B, 0x00023070, + 0x03B, 0x00063070, 0x03C, 0x00004000, 0x03A, 0x00000468, 0x03B, 0x0005B870, @@ -685,31 +680,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03B, 0x00082080, 0x03C, 0x00010000, 0x0EF, 0x00001100, - 0xFF0F0740, 0xABCD, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F02C0, 0xCDEF, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F01C0, 0xCDEF, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004A0B2, 0x034, 0x000490AF, 0x034, 0x00048070, @@ -721,92 +692,32 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x0004200A, 0x034, 0x00041007, 0x034, 0x00040004, - 0xFF0F07D8, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x034, 0x0004A0B2, 0x034, 0x000490AF, 0x034, 0x00048070, 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F07D0, 0xCDEF, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004604D, + 0x034, 0x0004504A, + 0x034, 0x00044047, + 0x034, 0x00043044, + 0x034, 0x00042007, + 0x034, 0x00041004, + 0x034, 0x00040001, + 0xA0000000, 0x00000000, 0x034, 0x0004ADF5, 0x034, 0x00049DF2, 0x034, 0x00048DEF, 0x034, 0x00047DEC, 0x034, 0x00046DE9, - 0x034, 0x00045DC9, - 0x034, 0x00044CE8, - 0x034, 0x000438CA, - 0x034, 0x00042889, - 0x034, 0x0004184A, - 0x034, 0x0004044A, - 0xFF0F0740, 0xDEAD, - 0xFF0F0740, 0xABCD, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F02C0, 0xCDEF, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F01C0, 0xCDEF, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 
0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F07D8, 0xCDEF, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F07D0, 0xCDEF, + 0x034, 0x00045DE6, + 0x034, 0x00044DE3, + 0x034, 0x000438C8, + 0x034, 0x000428C5, + 0x034, 0x000418C2, + 0x034, 0x000408C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0B2, 0x034, 0x000290AF, 0x034, 0x00028070, @@ -818,32 +729,32 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x0002200A, 0x034, 0x00021007, 0x034, 0x00020004, - 0xCDCDCDCD, 0xCDCD, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0B4, + 0x034, 0x000290B1, + 0x034, 0x00028072, + 0x034, 0x0002706F, + 0x034, 0x0002604F, + 0x034, 0x0002504C, + 0x034, 0x00024049, + 0x034, 0x00023046, + 0x034, 0x00022009, + 0x034, 0x00021006, + 0x034, 0x00020003, + 0xA0000000, 0x00000000, 0x034, 0x0002ADF5, 0x034, 0x00029DF2, 0x034, 0x00028DEF, 0x034, 0x00027DEC, 0x034, 0x00026DE9, - 0x034, 0x00025DC9, - 0x034, 0x00024CE8, - 0x034, 0x000238CA, - 0x034, 0x00022889, - 0x034, 0x0002184A, - 0x034, 0x0002044A, - 0xFF0F0740, 0xDEAD, - 0xFF0F0740, 0xABCD, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, - 0x034, 0x00008070, - 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xFF0F02C0, 0xCDEF, + 0x034, 0x00025DE6, + 0x034, 0x00024DE3, + 0x034, 0x000238C8, + 0x034, 0x000228C5, + 0x034, 0x000218C2, + 0x034, 0x000208C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0B2, 0x034, 0x000090AF, 0x034, 0x00008070, @@ -855,93 +766,33 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x0000200A, 0x034, 0x00001007, 0x034, 0x00000004, - 0xFF0F01C0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x034, 0x0000A0B2, 0x034, 0x000090AF, 0x034, 0x00008070, 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xFF0F07D8, 0xCDEF, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, - 0x034, 0x00008070, - 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xFF0F07D0, 0xCDEF, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, - 0x034, 0x00008070, - 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000604D, + 0x034, 0x0000504A, + 0x034, 0x00004047, + 0x034, 0x00003044, + 0x034, 0x00002007, + 0x034, 0x00001004, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, 0x034, 0x0000AFF7, 0x034, 0x00009DF7, 0x034, 0x00008DF4, 0x034, 0x00007DF1, 0x034, 0x00006DEE, - 0x034, 0x00005DCD, - 0x034, 0x00004CEB, + 0x034, 0x00005DEB, + 0x034, 0x00004DE8, 0x034, 0x000038CC, - 0x034, 0x0000288B, - 0x034, 0x0000184C, - 0x034, 0x0000044C, - 0xFF0F0740, 0xDEAD, + 0x034, 0x000028C9, + 0x034, 0x000018C6, + 0x034, 0x000008C3, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, - 0xFF0F0740, 0xABCD, - 0x018, 0x0001712A, - 0x0EF, 0x00000040, - 0x035, 0x000001D4, - 0x035, 0x000081D4, - 0x035, 0x000101D4, - 0x035, 0x000201B4, - 0x035, 
0x000281B4, - 0x035, 0x000301B4, - 0x035, 0x000401B4, - 0x035, 0x000481B4, - 0x035, 0x000501B4, - 0xFF0F02C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000040, - 0x035, 0x000001D4, - 0x035, 0x000081D4, - 0x035, 0x000101D4, - 0x035, 0x000201B4, - 0x035, 0x000281B4, - 0x035, 0x000301B4, - 0x035, 0x000401B4, - 0x035, 0x000481B4, - 0x035, 0x000501B4, - 0xFF0F01C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000040, - 0x035, 0x000001D4, - 0x035, 0x000081D4, - 0x035, 0x000101D4, - 0x035, 0x000201B4, - 0x035, 0x000281B4, - 0x035, 0x000301B4, - 0x035, 0x000401B4, - 0x035, 0x000481B4, - 0x035, 0x000501B4, - 0xFF0F07D8, 0xCDEF, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, 0x035, 0x000001D4, @@ -953,7 +804,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x035, 0x000401B4, 0x035, 0x000481B4, 0x035, 0x000501B4, - 0xFF0F07D0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, 0x035, 0x000001D4, @@ -965,7 +816,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x035, 0x000401B4, 0x035, 0x000481B4, 0x035, 0x000501B4, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, 0x035, 0x00000188, @@ -977,54 +828,9 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x035, 0x000401D8, 0x035, 0x000481D8, 0x035, 0x000501D8, - 0xFF0F0740, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, - 0xFF0F0740, 0xABCD, - 0x018, 0x0001712A, - 0x0EF, 0x00000010, - 0x036, 0x00004BFB, - 0x036, 0x0000CBFB, - 0x036, 0x00014BFB, - 0x036, 0x0001CBFB, - 0x036, 0x00024F4B, - 0x036, 0x0002CF4B, - 0x036, 0x00034F4B, - 0x036, 0x0003CF4B, - 0x036, 0x00044F4B, - 0x036, 0x0004CF4B, - 0x036, 0x00054F4B, - 0x036, 0x0005CF4B, - 0xFF0F02C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000010, - 0x036, 0x00004BFB, - 0x036, 0x0000CBFB, - 0x036, 0x00014BFB, - 0x036, 0x0001CBFB, - 0x036, 0x00024F4B, - 0x036, 0x0002CF4B, - 0x036, 0x00034F4B, - 0x036, 0x0003CF4B, - 0x036, 0x00044F4B, - 0x036, 0x0004CF4B, - 0x036, 0x00054F4B, - 0x036, 0x0005CF4B, - 0xFF0F01C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000010, - 0x036, 0x00004BFB, - 0x036, 0x0000CBFB, - 0x036, 0x00014BFB, - 0x036, 0x0001CBFB, - 0x036, 0x00024F4B, - 0x036, 0x0002CF4B, - 0x036, 0x00034F4B, - 0x036, 0x0003CF4B, - 0x036, 0x00044F4B, - 0x036, 0x0004CF4B, - 0x036, 0x00054F4B, - 0x036, 0x0005CF4B, - 0xFF0F07D8, 0xCDEF, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, 0x036, 0x00004BFB, @@ -1039,7 +845,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x036, 0x0004CF4B, 0x036, 0x00054F4B, 0x036, 0x0005CF4B, - 0xFF0F07D0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, 0x036, 0x00004BFB, @@ -1054,91 +860,61 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x036, 0x0004CF4B, 0x036, 0x00054F4B, 0x036, 0x0005CF4B, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, 0x036, 0x00084EB4, 0x036, 0x0008CC35, 0x036, 0x00094C35, 0x036, 0x0009CC35, - 0x036, 0x000A4935, + 0x036, 0x000A4C35, 0x036, 0x000ACC35, 0x036, 0x000B4C35, 0x036, 0x000BCC35, - 0x036, 0x000C4EB4, - 0x036, 0x000CCEB5, - 0x036, 0x000D4EB5, - 0x036, 0x000DCEB5, - 0xFF0F0740, 0xDEAD, + 0x036, 0x000C4C34, + 0x036, 0x000CCC35, + 0x036, 0x000D4C35, + 0x036, 0x000DCC35, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000008, - 0xFF0F0740, 0xABCD, - 0x03C, 0x000002CC, - 0x03C, 0x00000522, - 0x03C, 0x00000902, - 0xFF0F02C0, 0xCDEF, - 0x03C, 0x000002CC, - 0x03C, 0x00000522, - 0x03C, 0x00000902, - 0xFF0F01C0, 0xCDEF, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 
0x03C, 0x000002CC, 0x03C, 0x00000522, 0x03C, 0x00000902, - 0xFF0F07D8, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x03C, 0x000002CC, 0x03C, 0x00000522, 0x03C, 0x00000902, - 0xFF0F07D0, 0xCDEF, - 0x03C, 0x000002CC, - 0x03C, 0x00000522, - 0x03C, 0x00000902, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x03C, 0x000002A8, 0x03C, 0x000005A2, 0x03C, 0x00000880, - 0xFF0F0740, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, 0x0DF, 0x00000080, - 0x01F, 0x00040064, - 0xFF0F0740, 0xABCD, - 0x061, 0x000FDD43, - 0x062, 0x00038F4B, - 0x063, 0x00032117, - 0x064, 0x000194AC, - 0x065, 0x000931D1, - 0xFF0F02C0, 0xCDEF, + 0x01F, 0x00000064, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000FDD43, 0x062, 0x00038F4B, 0x063, 0x00032117, 0x064, 0x000194AC, 0x065, 0x000931D1, - 0xFF0F01C0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x061, 0x000FDD43, 0x062, 0x00038F4B, 0x063, 0x00032117, 0x064, 0x000194AC, - 0x065, 0x000931D1, - 0xFF0F07D8, 0xCDEF, - 0x061, 0x000FDD43, - 0x062, 0x00038F4B, - 0x063, 0x00032117, - 0x064, 0x000194AC, - 0x065, 0x000931D1, - 0xFF0F07D0, 0xCDEF, - 0x061, 0x000FDD43, - 0x062, 0x00038F4B, - 0x063, 0x00032117, - 0x064, 0x000194AC, - 0x065, 0x000931D1, - 0xCDCDCDCD, 0xCDCD, + 0x065, 0x000931D2, + 0xA0000000, 0x00000000, 0x061, 0x000E5D53, 0x062, 0x00038FCD, - 0x063, 0x000314EB, + 0x063, 0x000114EB, 0x064, 0x000196AC, 0x065, 0x000911D7, - 0xFF0F0740, 0xDEAD, + 0xB0000000, 0x00000000, 0x008, 0x00008400, 0x01C, 0x000739D2, 0x0B4, 0x0001E78D, @@ -1149,29 +925,29 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x0FE, 0x00000000, 0x0B4, 0x0001A78D, 0x018, 0x0001712A, - }; +u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32); + u32 RTL8812AE_RADIOB_ARRAY[] = { 0x056, 0x00051CF2, 0x066, 0x00040000, 0x089, 0x00000080, - 0xFF0F0740, 0xABCD, - 0x086, 0x00014B38, - 0xFF0F01C0, 0xCDEF, - 0x086, 0x00014B38, - 0xFF0F02C0, 0xCDEF, - 0x086, 0x00014B38, - 0xFF0F07D8, 0xCDEF, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, 0x086, 0x00014B3A, - 0xFF0F07D0, 0xCDEF, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, 0x086, 0x00014B3A, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x086, 0x00014B38, - 0xFF0F0740, 0xDEAD, + 0xB0000000, 0x00000000, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x08B, 0x00080180, + 0xA0000000, 0x00000000, + 0x08B, 0x00087180, + 0xB0000000, 0x00000000, 0x018, 0x00000006, 0x0EF, 0x00002000, - 0xFF0F07D8, 0xABCD, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, 0x03B, 0x0003F218, 0x03B, 0x00030A58, 0x03B, 0x0002FA58, @@ -1179,7 +955,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x03B, 0x0001FA50, 0x03B, 0x00010248, 0x03B, 0x00008240, - 0xFF0F07D0, 0xCDEF, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, 0x03B, 0x0003F218, 0x03B, 0x00030A58, 0x03B, 0x0002FA58, @@ -1187,7 +963,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x03B, 0x0001FA50, 0x03B, 0x00010248, 0x03B, 0x00008240, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x03B, 0x00038A58, 0x03B, 0x00037A58, 0x03B, 0x0002A590, @@ -1195,9 +971,9 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x03B, 0x00018248, 0x03B, 0x00010240, 0x03B, 0x00008240, - 0xFF0F07D8, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000100, - 0xFF0F07D8, 0xABCD, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A4EE, 0x034, 0x00009076, 0x034, 0x00008073, @@ -1209,7 +985,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x00002028, 0x034, 0x00001025, 0x034, 0x00000022, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x034, 0x0000ADF4, 0x034, 0x00009DF1, 0x034, 
0x00008DEE, @@ -1221,7 +997,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x000024E7, 0x034, 0x0000146B, 0x034, 0x0000006D, - 0xFF0F07D8, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x000020A2, 0x0DF, 0x00000080, @@ -1314,55 +1090,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x03B, 0x00082080, 0x03C, 0x00010000, 0x0EF, 0x00001100, - 0xFF0F0740, 0xABCD, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F01C0, 0xCDEF, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F02C0, 0xCDEF, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F07D8, 0xCDEF, - 0x034, 0x0004A0B2, - 0x034, 0x000490AF, - 0x034, 0x00048070, - 0x034, 0x0004706D, - 0x034, 0x00046050, - 0x034, 0x0004504D, - 0x034, 0x0004404A, - 0x034, 0x00043047, - 0x034, 0x0004200A, - 0x034, 0x00041007, - 0x034, 0x00040004, - 0xFF0F07D0, 0xCDEF, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0004A0B2, 0x034, 0x000490AF, 0x034, 0x00048070, @@ -1374,68 +1102,32 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x0004200A, 0x034, 0x00041007, 0x034, 0x00040004, - 0xCDCDCDCD, 0xCDCD, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0B1, + 0x034, 0x000490AE, + 0x034, 0x0004806F, + 0x034, 0x0004706C, + 0x034, 0x0004604C, + 0x034, 0x00045049, + 0x034, 0x00044046, + 0x034, 0x00043043, + 0x034, 0x00042006, + 0x034, 0x00041003, + 0x034, 0x00040000, + 0xA0000000, 0x00000000, 0x034, 0x0004ADF5, 0x034, 0x00049DF2, 0x034, 0x00048DEF, 0x034, 0x00047DEC, 0x034, 0x00046DE9, - 0x034, 0x00045DC9, - 0x034, 0x00044CE8, - 0x034, 0x000438CA, - 0x034, 0x00042889, - 0x034, 0x0004184A, - 0x034, 0x0004044A, - 0xFF0F0740, 0xDEAD, - 0xFF0F0740, 0xABCD, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F01C0, 0xCDEF, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F02C0, 0xCDEF, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F07D8, 0xCDEF, - 0x034, 0x0002A0B2, - 0x034, 0x000290AF, - 0x034, 0x00028070, - 0x034, 0x0002706D, - 0x034, 0x00026050, - 0x034, 0x0002504D, - 0x034, 0x0002404A, - 0x034, 0x00023047, - 0x034, 0x0002200A, - 0x034, 0x00021007, - 0x034, 0x00020004, - 0xFF0F07D0, 0xCDEF, + 0x034, 0x00045DE6, + 0x034, 0x00044DE3, + 0x034, 0x000438C8, + 0x034, 0x000428C5, + 0x034, 0x000418C2, + 0x034, 0x000408C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0B2, 0x034, 0x000290AF, 0x034, 0x00028070, @@ -1447,56 +1139,32 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x0002200A, 0x034, 
0x00021007, 0x034, 0x00020004, - 0xCDCDCDCD, 0xCDCD, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0B3, + 0x034, 0x000290B0, + 0x034, 0x00028071, + 0x034, 0x0002706E, + 0x034, 0x0002604E, + 0x034, 0x0002504B, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022008, + 0x034, 0x00021005, + 0x034, 0x00020002, + 0xA0000000, 0x00000000, 0x034, 0x0002ADF5, 0x034, 0x00029DF2, 0x034, 0x00028DEF, 0x034, 0x00027DEC, 0x034, 0x00026DE9, - 0x034, 0x00025DC9, - 0x034, 0x00024CE8, - 0x034, 0x000238CA, - 0x034, 0x00022889, - 0x034, 0x0002184A, - 0x034, 0x0002044A, - 0xFF0F0740, 0xDEAD, - 0xFF0F0740, 0xABCD, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, - 0x034, 0x00008070, - 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xFF0F01C0, 0xCDEF, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, - 0x034, 0x00008070, - 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xFF0F02C0, 0xCDEF, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, - 0x034, 0x00008070, - 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xFF0F07D8, 0xCDEF, + 0x034, 0x00025DE6, + 0x034, 0x00024DE3, + 0x034, 0x000238C8, + 0x034, 0x000228C5, + 0x034, 0x000218C2, + 0x034, 0x000208C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0B2, 0x034, 0x000090AF, 0x034, 0x00008070, @@ -1508,72 +1176,33 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x0000200A, 0x034, 0x00001007, 0x034, 0x00000004, - 0xFF0F07D0, 0xCDEF, - 0x034, 0x0000A0B2, - 0x034, 0x000090AF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0B3, + 0x034, 0x000090B0, 0x034, 0x00008070, 0x034, 0x0000706D, - 0x034, 0x00006050, - 0x034, 0x0000504D, - 0x034, 0x0000404A, - 0x034, 0x00003047, - 0x034, 0x0000200A, - 0x034, 0x00001007, - 0x034, 0x00000004, - 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000604D, + 0x034, 0x0000504A, + 0x034, 0x00004047, + 0x034, 0x00003044, + 0x034, 0x00002007, + 0x034, 0x00001004, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, 0x034, 0x0000AFF7, 0x034, 0x00009DF7, 0x034, 0x00008DF4, 0x034, 0x00007DF1, 0x034, 0x00006DEE, - 0x034, 0x00005DCD, - 0x034, 0x00004CEB, + 0x034, 0x00005DEB, + 0x034, 0x00004DE8, 0x034, 0x000038CC, - 0x034, 0x0000288B, - 0x034, 0x0000184C, - 0x034, 0x0000044C, - 0xFF0F0740, 0xDEAD, - 0x0EF, 0x00000000, - 0xFF0F0740, 0xABCD, - 0x018, 0x0001712A, - 0x0EF, 0x00000040, - 0x035, 0x000001C5, - 0x035, 0x000081C5, - 0x035, 0x000101C5, - 0x035, 0x00020174, - 0x035, 0x00028174, - 0x035, 0x00030174, - 0x035, 0x00040185, - 0x035, 0x00048185, - 0x035, 0x00050185, - 0x0EF, 0x00000000, - 0xFF0F01C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000040, - 0x035, 0x000001C5, - 0x035, 0x000081C5, - 0x035, 0x000101C5, - 0x035, 0x00020174, - 0x035, 0x00028174, - 0x035, 0x00030174, - 0x035, 0x00040185, - 0x035, 0x00048185, - 0x035, 0x00050185, - 0x0EF, 0x00000000, - 0xFF0F02C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000040, - 0x035, 0x000001C5, - 0x035, 0x000081C5, - 0x035, 0x000101C5, - 0x035, 0x00020174, - 0x035, 0x00028174, - 0x035, 0x00030174, - 0x035, 0x00040185, - 0x035, 0x00048185, - 0x035, 0x00050185, + 0x034, 0x000028C9, + 0x034, 0x000018C6, + 0x034, 0x000008C3, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, - 0xFF0F07D8, 0xCDEF, + 0x80000008, 0x00000000, 
0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, 0x035, 0x000001C5, @@ -1586,7 +1215,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x035, 0x00048185, 0x035, 0x00050185, 0x0EF, 0x00000000, - 0xFF0F07D0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, 0x035, 0x000001C5, @@ -1599,36 +1228,21 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x035, 0x00048185, 0x035, 0x00050185, 0x0EF, 0x00000000, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000040, - 0x035, 0x00000186, - 0x035, 0x00008186, - 0x035, 0x00010185, - 0x035, 0x000201D5, - 0x035, 0x000281D5, - 0x035, 0x000301D5, - 0x035, 0x000401D5, - 0x035, 0x000481D5, - 0x035, 0x000501D5, + 0x035, 0x00000188, + 0x035, 0x00008147, + 0x035, 0x00010147, + 0x035, 0x000201D7, + 0x035, 0x000281D7, + 0x035, 0x000301D7, + 0x035, 0x000401D8, + 0x035, 0x000481D8, + 0x035, 0x000501D8, 0x0EF, 0x00000000, - 0xFF0F0740, 0xDEAD, - 0xFF0F0740, 0xABCD, - 0x018, 0x0001712A, - 0x0EF, 0x00000010, - 0x036, 0x00005B8B, - 0x036, 0x0000DB8B, - 0x036, 0x00015B8B, - 0x036, 0x0001DB8B, - 0x036, 0x000262DB, - 0x036, 0x0002E2DB, - 0x036, 0x000362DB, - 0x036, 0x0003E2DB, - 0x036, 0x0004553B, - 0x036, 0x0004D53B, - 0x036, 0x0005553B, - 0x036, 0x0005D53B, - 0xFF0F01C0, 0xCDEF, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, 0x036, 0x00005B8B, @@ -1643,37 +1257,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x036, 0x0004D53B, 0x036, 0x0005553B, 0x036, 0x0005D53B, - 0xFF0F02C0, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000010, - 0x036, 0x00005B8B, - 0x036, 0x0000DB8B, - 0x036, 0x00015B8B, - 0x036, 0x0001DB8B, - 0x036, 0x000262DB, - 0x036, 0x0002E2DB, - 0x036, 0x000362DB, - 0x036, 0x0003E2DB, - 0x036, 0x0004553B, - 0x036, 0x0004D53B, - 0x036, 0x0005553B, - 0x036, 0x0005D53B, - 0xFF0F07D8, 0xCDEF, - 0x018, 0x0001712A, - 0x0EF, 0x00000010, - 0x036, 0x00005B8B, - 0x036, 0x0000DB8B, - 0x036, 0x00015B8B, - 0x036, 0x0001DB8B, - 0x036, 0x000262DB, - 0x036, 0x0002E2DB, - 0x036, 0x000362DB, - 0x036, 0x0003E2DB, - 0x036, 0x0004553B, - 0x036, 0x0004D53B, - 0x036, 0x0005553B, - 0x036, 0x0005D53B, - 0xFF0F07D0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, 0x036, 0x00005B8B, @@ -1688,94 +1272,71 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x036, 0x0004D53B, 0x036, 0x0005553B, 0x036, 0x0005D53B, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, 0x036, 0x00084EB4, - 0x036, 0x0008C9B4, - 0x036, 0x000949B4, - 0x036, 0x0009C9B4, - 0x036, 0x000A4935, - 0x036, 0x000AC935, - 0x036, 0x000B4935, - 0x036, 0x000BC935, - 0x036, 0x000C4EB4, - 0x036, 0x000CCEB4, - 0x036, 0x000D4EB4, - 0x036, 0x000DCEB4, - 0xFF0F0740, 0xDEAD, - 0x0EF, 0x00000000, - 0x0EF, 0x00000008, - 0xFF0F0740, 0xABCD, - 0x03C, 0x000002DC, - 0x03C, 0x00000524, - 0x03C, 0x00000902, - 0xFF0F01C0, 0xCDEF, - 0x03C, 0x000002DC, - 0x03C, 0x00000524, - 0x03C, 0x00000902, - 0xFF0F02C0, 0xCDEF, - 0x03C, 0x000002DC, - 0x03C, 0x00000524, - 0x03C, 0x00000902, - 0xFF0F07D8, 0xCDEF, + 0x036, 0x0008CC35, + 0x036, 0x00094C35, + 0x036, 0x0009CC35, + 0x036, 0x000A4C35, + 0x036, 0x000ACC35, + 0x036, 0x000B4C35, + 0x036, 0x000BCC35, + 0x036, 0x000C4C34, + 0x036, 0x000CCC35, + 0x036, 0x000D4C35, + 0x036, 0x000DCC35, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x03C, 0x000002DC, 0x03C, 0x00000524, 0x03C, 0x00000902, - 0xFF0F07D0, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 
0x03C, 0x000002DC, 0x03C, 0x00000524, 0x03C, 0x00000902, - 0xCDCDCDCD, 0xCDCD, - 0x03C, 0x000002AA, + 0xA0000000, 0x00000000, + 0x03C, 0x000002A8, 0x03C, 0x000005A2, 0x03C, 0x00000880, - 0xFF0F0740, 0xDEAD, + 0xB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, 0x0DF, 0x00000080, - 0xFF0F0740, 0xABCD, - 0x061, 0x000EAC43, - 0x062, 0x00038F47, - 0x063, 0x00031157, - 0x064, 0x0001C4AC, - 0x065, 0x000931D1, - 0xFF0F01C0, 0xCDEF, - 0x061, 0x000EAC43, - 0x062, 0x00038F47, - 0x063, 0x00031157, - 0x064, 0x0001C4AC, - 0x065, 0x000931D1, - 0xFF0F02C0, 0xCDEF, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000EAC43, 0x062, 0x00038F47, 0x063, 0x00031157, 0x064, 0x0001C4AC, 0x065, 0x000931D1, - 0xFF0F07D8, 0xCDEF, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, 0x061, 0x000EAC43, 0x062, 0x00038F47, 0x063, 0x00031157, 0x064, 0x0001C4AC, - 0x065, 0x000931D1, - 0xFF0F07D0, 0xCDEF, + 0x065, 0x000931D2, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, 0x061, 0x000EAC43, 0x062, 0x00038F47, 0x063, 0x00031157, 0x064, 0x0001C4AC, 0x065, 0x000931D1, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x061, 0x000E5D53, 0x062, 0x00038FCD, - 0x063, 0x000314EB, + 0x063, 0x000114EB, 0x064, 0x000196AC, - 0x065, 0x000931D7, - 0xFF0F0740, 0xDEAD, + 0x065, 0x000911D7, + 0xB0000000, 0x00000000, 0x008, 0x00008400, - }; +u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32); + u32 RTL8821AE_RADIOA_ARRAY[] = { 0x018, 0x0001712A, 0x056, 0x00051CF2, @@ -2285,16 +1846,16 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x0EF, 0x00000000, 0x0EF, 0x00000100, 0x034, 0x0000ADF3, - 0x034, 0x00009DEF, - 0x034, 0x00008DEC, - 0x034, 0x00007DE9, - 0x034, 0x00006CED, - 0x034, 0x00005CE9, - 0x034, 0x000044E9, - 0x034, 0x000034E6, - 0x034, 0x0000246A, - 0x034, 0x00001467, - 0x034, 0x00000068, + 0x034, 0x00009DF0, + 0x034, 0x00008D70, + 0x034, 0x00007D6D, + 0x034, 0x00006CEE, + 0x034, 0x00005CCC, + 0x034, 0x000044EC, + 0x034, 0x000034AC, + 0x034, 0x0000246D, + 0x034, 0x0000106F, + 0x034, 0x0000006C, 0x0EF, 0x00000000, 0x0ED, 0x00000010, 0x044, 0x0000ADF2, @@ -2365,18 +1926,21 @@ u32 RTL8821AE_RADIOA_ARRAY[] = { 0x0FE, 0x00000000, 0x0FE, 0x00000000, 0x018, 0x0001712A, + }; +u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32); + u32 RTL8812AE_MAC_REG_ARRAY[] = { 0x010, 0x0000000C, - 0xFF0F0180, 0xABCD, + 0x80000200, 0x00000000, 0x40000000, 0x00000000, + 0x011, 0x00000066, + 0xA0000000, 0x00000000, + 0x011, 0x0000005A, + 0xB0000000, 0x00000000, 0x025, 0x0000000F, - 0xFF0F01C0, 0xCDEF, - 0x025, 0x0000000F, - 0xCDCDCDCD, 0xCDCD, - 0x025, 0x0000006F, - 0xFF0F0180, 0xDEAD, 0x072, 0x00000000, + 0x420, 0x00000080, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -2443,7 +2007,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { 0x559, 0x00000002, 0x55C, 0x00000050, 0x55D, 0x000000FF, - 0x604, 0x00000001, + 0x604, 0x00000009, 0x605, 0x00000030, 0x607, 0x00000003, 0x608, 0x0000000E, @@ -2475,9 +2039,10 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { 0x70A, 0x00000065, 0x70B, 0x00000087, 0x718, 0x00000040, - }; +u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32); + u32 RTL8821AE_MAC_REG_ARRAY[] = { 0x428, 0x0000000A, 0x429, 0x00000010, @@ -2523,584 +2088,261 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = { 0x500, 0x00000026, 0x501, 0x000000A2, 0x502, 0x0000002F, - 0x503, 0x00000000, - 0x504, 0x00000028, - 0x505, 0x000000A3, - 0x506, 0x0000005E, - 0x507, 0x00000000, - 0x508, 0x0000002B, - 0x509, 0x000000A4, - 0x50A, 0x0000005E, - 0x50B, 0x00000000, - 0x50C, 0x0000004F, - 
0x50D, 0x000000A4, - 0x50E, 0x00000000, - 0x50F, 0x00000000, - 0x512, 0x0000001C, - 0x514, 0x0000000A, - 0x516, 0x0000000A, - 0x525, 0x0000004F, - 0x550, 0x00000010, - 0x551, 0x00000010, - 0x559, 0x00000002, - 0x55C, 0x00000050, - 0x55D, 0x000000FF, - 0x605, 0x00000030, - 0x607, 0x00000007, - 0x608, 0x0000000E, - 0x609, 0x0000002A, - 0x620, 0x000000FF, - 0x621, 0x000000FF, - 0x622, 0x000000FF, - 0x623, 0x000000FF, - 0x624, 0x000000FF, - 0x625, 0x000000FF, - 0x626, 0x000000FF, - 0x627, 0x000000FF, - 0x638, 0x00000050, - 0x63C, 0x0000000A, - 0x63D, 0x0000000A, - 0x63E, 0x0000000E, - 0x63F, 0x0000000E, - 0x640, 0x00000040, - 0x642, 0x00000040, - 0x643, 0x00000000, - 0x652, 0x000000C8, - 0x66E, 0x00000005, - 0x700, 0x00000021, - 0x701, 0x00000043, - 0x702, 0x00000065, - 0x703, 0x00000087, - 0x708, 0x00000021, - 0x709, 0x00000043, - 0x70A, 0x00000065, - 0x70B, 0x00000087, - 0x718, 0x00000040, -}; - -u32 RTL8812AE_AGC_TAB_ARRAY[] = { - 0xFF0F07D8, 0xABCD, - 0x81C, 0xFC000001, - 0x81C, 0xFB020001, - 0x81C, 0xFA040001, - 0x81C, 0xF9060001, - 0x81C, 0xF8080001, - 0x81C, 0xF70A0001, - 0x81C, 0xF60C0001, - 0x81C, 0xF50E0001, - 0x81C, 0xF4100001, - 0x81C, 0xF3120001, - 0x81C, 0xF2140001, - 0x81C, 0xF1160001, - 0x81C, 0xF0180001, - 0x81C, 0xEF1A0001, - 0x81C, 0xEE1C0001, - 0x81C, 0xED1E0001, - 0x81C, 0xEC200001, - 0x81C, 0xEB220001, - 0x81C, 0xEA240001, - 0x81C, 0xCD260001, - 0x81C, 0xCC280001, - 0x81C, 0xCB2A0001, - 0x81C, 0xCA2C0001, - 0x81C, 0xC92E0001, - 0x81C, 0xC8300001, - 0x81C, 0xA6320001, - 0x81C, 0xA5340001, - 0x81C, 0xA4360001, - 0x81C, 0xA3380001, - 0x81C, 0xA23A0001, - 0x81C, 0x883C0001, - 0x81C, 0x873E0001, - 0x81C, 0x86400001, - 0x81C, 0x85420001, - 0x81C, 0x84440001, - 0x81C, 0x83460001, - 0x81C, 0x82480001, - 0x81C, 0x814A0001, - 0x81C, 0x484C0001, - 0x81C, 0x474E0001, - 0x81C, 0x46500001, - 0x81C, 0x45520001, - 0x81C, 0x44540001, - 0x81C, 0x43560001, - 0x81C, 0x42580001, - 0x81C, 0x415A0001, - 0x81C, 0x255C0001, - 0x81C, 0x245E0001, - 0x81C, 0x23600001, - 0x81C, 0x22620001, - 0x81C, 0x21640001, - 0x81C, 0x21660001, - 0x81C, 0x21680001, - 0x81C, 0x216A0001, - 0x81C, 0x216C0001, - 0x81C, 0x216E0001, - 0x81C, 0x21700001, - 0x81C, 0x21720001, - 0x81C, 0x21740001, - 0x81C, 0x21760001, - 0x81C, 0x21780001, - 0x81C, 0x217A0001, - 0x81C, 0x217C0001, - 0x81C, 0x217E0001, - 0xFF0F07D0, 0xCDEF, - 0x81C, 0xF9000001, - 0x81C, 0xF8020001, - 0x81C, 0xF7040001, - 0x81C, 0xF6060001, - 0x81C, 0xF5080001, - 0x81C, 0xF40A0001, - 0x81C, 0xF30C0001, - 0x81C, 0xF20E0001, - 0x81C, 0xF1100001, - 0x81C, 0xF0120001, - 0x81C, 0xEF140001, - 0x81C, 0xEE160001, - 0x81C, 0xED180001, - 0x81C, 0xEC1A0001, - 0x81C, 0xEB1C0001, - 0x81C, 0xEA1E0001, - 0x81C, 0xCD200001, - 0x81C, 0xCC220001, - 0x81C, 0xCB240001, - 0x81C, 0xCA260001, - 0x81C, 0xC9280001, - 0x81C, 0xC82A0001, - 0x81C, 0xC72C0001, - 0x81C, 0xC62E0001, - 0x81C, 0xA5300001, - 0x81C, 0xA4320001, - 0x81C, 0xA3340001, - 0x81C, 0xA2360001, - 0x81C, 0x88380001, - 0x81C, 0x873A0001, - 0x81C, 0x863C0001, - 0x81C, 0x853E0001, - 0x81C, 0x84400001, - 0x81C, 0x83420001, - 0x81C, 0x82440001, - 0x81C, 0x81460001, - 0x81C, 0x48480001, - 0x81C, 0x474A0001, - 0x81C, 0x464C0001, - 0x81C, 0x454E0001, - 0x81C, 0x44500001, - 0x81C, 0x43520001, - 0x81C, 0x42540001, - 0x81C, 0x41560001, - 0x81C, 0x25580001, - 0x81C, 0x245A0001, - 0x81C, 0x235C0001, - 0x81C, 0x225E0001, - 0x81C, 0x21600001, - 0x81C, 0x21620001, - 0x81C, 0x21640001, - 0x81C, 0x21660001, - 0x81C, 0x21680001, - 0x81C, 0x216A0001, - 0x81C, 0x236C0001, - 0x81C, 0x226E0001, - 0x81C, 0x21700001, - 0x81C, 0x21720001, - 
0x81C, 0x21740001, - 0x81C, 0x21760001, - 0x81C, 0x21780001, - 0x81C, 0x217A0001, - 0x81C, 0x217C0001, - 0x81C, 0x217E0001, - 0xCDCDCDCD, 0xCDCD, - 0x81C, 0xFF000001, - 0x81C, 0xFF020001, - 0x81C, 0xFF040001, - 0x81C, 0xFF060001, - 0x81C, 0xFF080001, - 0x81C, 0xFE0A0001, - 0x81C, 0xFD0C0001, - 0x81C, 0xFC0E0001, - 0x81C, 0xFB100001, - 0x81C, 0xFA120001, - 0x81C, 0xF9140001, - 0x81C, 0xF8160001, - 0x81C, 0xF7180001, - 0x81C, 0xF61A0001, - 0x81C, 0xF51C0001, - 0x81C, 0xF41E0001, - 0x81C, 0xF3200001, - 0x81C, 0xF2220001, - 0x81C, 0xF1240001, - 0x81C, 0xF0260001, - 0x81C, 0xEF280001, - 0x81C, 0xEE2A0001, - 0x81C, 0xED2C0001, - 0x81C, 0xEC2E0001, - 0x81C, 0xEB300001, - 0x81C, 0xEA320001, - 0x81C, 0xE9340001, - 0x81C, 0xE8360001, - 0x81C, 0xE7380001, - 0x81C, 0xE63A0001, - 0x81C, 0xE53C0001, - 0x81C, 0xC73E0001, - 0x81C, 0xC6400001, - 0x81C, 0xC5420001, - 0x81C, 0xC4440001, - 0x81C, 0xC3460001, - 0x81C, 0xC2480001, - 0x81C, 0xC14A0001, - 0x81C, 0xA74C0001, - 0x81C, 0xA64E0001, - 0x81C, 0xA5500001, - 0x81C, 0xA4520001, - 0x81C, 0xA3540001, - 0x81C, 0xA2560001, - 0x81C, 0xA1580001, - 0x81C, 0x675A0001, - 0x81C, 0x665C0001, - 0x81C, 0x655E0001, - 0x81C, 0x64600001, - 0x81C, 0x63620001, - 0x81C, 0x48640001, - 0x81C, 0x47660001, - 0x81C, 0x46680001, - 0x81C, 0x456A0001, - 0x81C, 0x446C0001, - 0x81C, 0x436E0001, - 0x81C, 0x42700001, - 0x81C, 0x41720001, - 0x81C, 0x41740001, - 0x81C, 0x41760001, - 0x81C, 0x41780001, - 0x81C, 0x417A0001, - 0x81C, 0x417C0001, - 0x81C, 0x417E0001, - 0xFF0F07D8, 0xDEAD, - 0xFF0F0180, 0xABCD, - 0x81C, 0xFC800001, - 0x81C, 0xFB820001, - 0x81C, 0xFA840001, - 0x81C, 0xF9860001, - 0x81C, 0xF8880001, - 0x81C, 0xF78A0001, - 0x81C, 0xF68C0001, - 0x81C, 0xF58E0001, - 0x81C, 0xF4900001, - 0x81C, 0xF3920001, - 0x81C, 0xF2940001, - 0x81C, 0xF1960001, - 0x81C, 0xF0980001, - 0x81C, 0xEF9A0001, - 0x81C, 0xEE9C0001, - 0x81C, 0xED9E0001, - 0x81C, 0xECA00001, - 0x81C, 0xEBA20001, - 0x81C, 0xEAA40001, - 0x81C, 0xE9A60001, - 0x81C, 0xE8A80001, - 0x81C, 0xE7AA0001, - 0x81C, 0xE6AC0001, - 0x81C, 0xE5AE0001, - 0x81C, 0xE4B00001, - 0x81C, 0xE3B20001, - 0x81C, 0xA8B40001, - 0x81C, 0xA7B60001, - 0x81C, 0xA6B80001, - 0x81C, 0xA5BA0001, - 0x81C, 0xA4BC0001, - 0x81C, 0xA3BE0001, - 0x81C, 0xA2C00001, - 0x81C, 0xA1C20001, - 0x81C, 0x68C40001, - 0x81C, 0x67C60001, - 0x81C, 0x66C80001, - 0x81C, 0x65CA0001, - 0x81C, 0x64CC0001, - 0x81C, 0x47CE0001, - 0x81C, 0x46D00001, - 0x81C, 0x45D20001, - 0x81C, 0x44D40001, - 0x81C, 0x43D60001, - 0x81C, 0x42D80001, - 0x81C, 0x08DA0001, - 0x81C, 0x07DC0001, - 0x81C, 0x06DE0001, - 0x81C, 0x05E00001, - 0x81C, 0x04E20001, - 0x81C, 0x03E40001, - 0x81C, 0x02E60001, - 0x81C, 0x01E80001, - 0x81C, 0x01EA0001, - 0x81C, 0x01EC0001, - 0x81C, 0x01EE0001, - 0x81C, 0x01F00001, - 0x81C, 0x01F20001, - 0x81C, 0x01F40001, - 0x81C, 0x01F60001, - 0x81C, 0x01F80001, - 0x81C, 0x01FA0001, - 0x81C, 0x01FC0001, - 0x81C, 0x01FE0001, - 0xFF0F0280, 0xCDEF, - 0x81C, 0xFC800001, - 0x81C, 0xFB820001, - 0x81C, 0xFA840001, - 0x81C, 0xF9860001, - 0x81C, 0xF8880001, - 0x81C, 0xF78A0001, - 0x81C, 0xF68C0001, - 0x81C, 0xF58E0001, - 0x81C, 0xF4900001, - 0x81C, 0xF3920001, - 0x81C, 0xF2940001, - 0x81C, 0xF1960001, - 0x81C, 0xF0980001, - 0x81C, 0xEF9A0001, - 0x81C, 0xEE9C0001, - 0x81C, 0xED9E0001, - 0x81C, 0xECA00001, - 0x81C, 0xEBA20001, - 0x81C, 0xEAA40001, - 0x81C, 0xE9A60001, - 0x81C, 0xE8A80001, - 0x81C, 0xE7AA0001, - 0x81C, 0xE6AC0001, - 0x81C, 0xE5AE0001, - 0x81C, 0xE4B00001, - 0x81C, 0xE3B20001, - 0x81C, 0xA8B40001, - 0x81C, 0xA7B60001, - 0x81C, 0xA6B80001, - 0x81C, 0xA5BA0001, - 0x81C, 0xA4BC0001, - 
0x81C, 0xA3BE0001, - 0x81C, 0xA2C00001, - 0x81C, 0xA1C20001, - 0x81C, 0x68C40001, - 0x81C, 0x67C60001, - 0x81C, 0x66C80001, - 0x81C, 0x65CA0001, - 0x81C, 0x64CC0001, - 0x81C, 0x47CE0001, - 0x81C, 0x46D00001, - 0x81C, 0x45D20001, - 0x81C, 0x44D40001, - 0x81C, 0x43D60001, - 0x81C, 0x42D80001, - 0x81C, 0x08DA0001, - 0x81C, 0x07DC0001, - 0x81C, 0x06DE0001, - 0x81C, 0x05E00001, - 0x81C, 0x04E20001, - 0x81C, 0x03E40001, - 0x81C, 0x02E60001, - 0x81C, 0x01E80001, - 0x81C, 0x01EA0001, - 0x81C, 0x01EC0001, - 0x81C, 0x01EE0001, - 0x81C, 0x01F00001, - 0x81C, 0x01F20001, - 0x81C, 0x01F40001, - 0x81C, 0x01F60001, - 0x81C, 0x01F80001, - 0x81C, 0x01FA0001, - 0x81C, 0x01FC0001, - 0x81C, 0x01FE0001, - 0xFF0F01C0, 0xCDEF, - 0x81C, 0xFC800001, - 0x81C, 0xFB820001, - 0x81C, 0xFA840001, - 0x81C, 0xF9860001, - 0x81C, 0xF8880001, - 0x81C, 0xF78A0001, - 0x81C, 0xF68C0001, - 0x81C, 0xF58E0001, - 0x81C, 0xF4900001, - 0x81C, 0xF3920001, - 0x81C, 0xF2940001, - 0x81C, 0xF1960001, - 0x81C, 0xF0980001, - 0x81C, 0xEF9A0001, - 0x81C, 0xEE9C0001, - 0x81C, 0xED9E0001, - 0x81C, 0xECA00001, - 0x81C, 0xEBA20001, - 0x81C, 0xEAA40001, - 0x81C, 0xE9A60001, - 0x81C, 0xE8A80001, - 0x81C, 0xE7AA0001, - 0x81C, 0xE6AC0001, - 0x81C, 0xE5AE0001, - 0x81C, 0xE4B00001, - 0x81C, 0xE3B20001, - 0x81C, 0xA8B40001, - 0x81C, 0xA7B60001, - 0x81C, 0xA6B80001, - 0x81C, 0xA5BA0001, - 0x81C, 0xA4BC0001, - 0x81C, 0xA3BE0001, - 0x81C, 0xA2C00001, - 0x81C, 0xA1C20001, - 0x81C, 0x68C40001, - 0x81C, 0x67C60001, - 0x81C, 0x66C80001, - 0x81C, 0x65CA0001, - 0x81C, 0x64CC0001, - 0x81C, 0x47CE0001, - 0x81C, 0x46D00001, - 0x81C, 0x45D20001, - 0x81C, 0x44D40001, - 0x81C, 0x43D60001, - 0x81C, 0x42D80001, - 0x81C, 0x08DA0001, - 0x81C, 0x07DC0001, - 0x81C, 0x06DE0001, - 0x81C, 0x05E00001, - 0x81C, 0x04E20001, - 0x81C, 0x03E40001, - 0x81C, 0x02E60001, - 0x81C, 0x01E80001, - 0x81C, 0x01EA0001, - 0x81C, 0x01EC0001, - 0x81C, 0x01EE0001, - 0x81C, 0x01F00001, - 0x81C, 0x01F20001, - 0x81C, 0x01F40001, - 0x81C, 0x01F60001, - 0x81C, 0x01F80001, - 0x81C, 0x01FA0001, - 0x81C, 0x01FC0001, - 0x81C, 0x01FE0001, - 0xFF0F02C0, 0xCDEF, - 0x81C, 0xFC800001, - 0x81C, 0xFB820001, - 0x81C, 0xFA840001, - 0x81C, 0xF9860001, - 0x81C, 0xF8880001, - 0x81C, 0xF78A0001, - 0x81C, 0xF68C0001, - 0x81C, 0xF58E0001, - 0x81C, 0xF4900001, - 0x81C, 0xF3920001, - 0x81C, 0xF2940001, - 0x81C, 0xF1960001, - 0x81C, 0xF0980001, - 0x81C, 0xEF9A0001, - 0x81C, 0xEE9C0001, - 0x81C, 0xED9E0001, - 0x81C, 0xECA00001, - 0x81C, 0xEBA20001, - 0x81C, 0xEAA40001, - 0x81C, 0xE9A60001, - 0x81C, 0xE8A80001, - 0x81C, 0xE7AA0001, - 0x81C, 0xE6AC0001, - 0x81C, 0xE5AE0001, - 0x81C, 0xE4B00001, - 0x81C, 0xE3B20001, - 0x81C, 0xA8B40001, - 0x81C, 0xA7B60001, - 0x81C, 0xA6B80001, - 0x81C, 0xA5BA0001, - 0x81C, 0xA4BC0001, - 0x81C, 0xA3BE0001, - 0x81C, 0xA2C00001, - 0x81C, 0xA1C20001, - 0x81C, 0x68C40001, - 0x81C, 0x67C60001, - 0x81C, 0x66C80001, - 0x81C, 0x65CA0001, - 0x81C, 0x64CC0001, - 0x81C, 0x47CE0001, - 0x81C, 0x46D00001, - 0x81C, 0x45D20001, - 0x81C, 0x44D40001, - 0x81C, 0x43D60001, - 0x81C, 0x42D80001, - 0x81C, 0x08DA0001, - 0x81C, 0x07DC0001, - 0x81C, 0x06DE0001, - 0x81C, 0x05E00001, - 0x81C, 0x04E20001, - 0x81C, 0x03E40001, - 0x81C, 0x02E60001, - 0x81C, 0x01E80001, - 0x81C, 0x01EA0001, - 0x81C, 0x01EC0001, - 0x81C, 0x01EE0001, - 0x81C, 0x01F00001, - 0x81C, 0x01F20001, - 0x81C, 0x01F40001, - 0x81C, 0x01F60001, - 0x81C, 0x01F80001, - 0x81C, 0x01FA0001, - 0x81C, 0x01FC0001, - 0x81C, 0x01FE0001, - 0xFF0F07D8, 0xCDEF, - 0x81C, 0xFC800001, - 0x81C, 0xFB820001, - 0x81C, 0xFA840001, - 0x81C, 0xF9860001, - 0x81C, 0xF8880001, - 
0x81C, 0xF78A0001, - 0x81C, 0xF68C0001, - 0x81C, 0xF58E0001, - 0x81C, 0xF4900001, - 0x81C, 0xF3920001, - 0x81C, 0xF2940001, - 0x81C, 0xF1960001, - 0x81C, 0xF0980001, - 0x81C, 0xEF9A0001, - 0x81C, 0xEE9C0001, - 0x81C, 0xED9E0001, - 0x81C, 0xECA00001, - 0x81C, 0xEBA20001, - 0x81C, 0xEAA40001, - 0x81C, 0xE9A60001, - 0x81C, 0xE8A80001, - 0x81C, 0xE7AA0001, - 0x81C, 0xE6AC0001, - 0x81C, 0xE5AE0001, - 0x81C, 0xE4B00001, - 0x81C, 0xE3B20001, - 0x81C, 0xA8B40001, - 0x81C, 0xA7B60001, - 0x81C, 0xA6B80001, - 0x81C, 0xA5BA0001, - 0x81C, 0xA4BC0001, - 0x81C, 0xA3BE0001, - 0x81C, 0xA2C00001, - 0x81C, 0xA1C20001, - 0x81C, 0x68C40001, - 0x81C, 0x67C60001, - 0x81C, 0x66C80001, - 0x81C, 0x65CA0001, - 0x81C, 0x64CC0001, - 0x81C, 0x47CE0001, - 0x81C, 0x46D00001, - 0x81C, 0x45D20001, - 0x81C, 0x44D40001, - 0x81C, 0x43D60001, - 0x81C, 0x42D80001, - 0x81C, 0x08DA0001, - 0x81C, 0x07DC0001, - 0x81C, 0x06DE0001, - 0x81C, 0x05E00001, - 0x81C, 0x04E20001, - 0x81C, 0x03E40001, - 0x81C, 0x02E60001, - 0x81C, 0x01E80001, - 0x81C, 0x01EA0001, - 0x81C, 0x01EC0001, - 0x81C, 0x01EE0001, - 0x81C, 0x01F00001, - 0x81C, 0x01F20001, - 0x81C, 0x01F40001, - 0x81C, 0x01F60001, - 0x81C, 0x01F80001, - 0x81C, 0x01FA0001, - 0x81C, 0x01FC0001, - 0x81C, 0x01FE0001, - 0xFF0F07D0, 0xCDEF, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x607, 0x00000007, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, +}; + +u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32); + +u32 RTL8812AE_AGC_TAB_ARRAY[] = { + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000001, + 0x81C, 0xFB020001, + 0x81C, 0xFA040001, + 0x81C, 0xF9060001, + 0x81C, 0xF8080001, + 0x81C, 0xF70A0001, + 0x81C, 0xF60C0001, + 0x81C, 0xF50E0001, + 0x81C, 0xF4100001, + 0x81C, 0xF3120001, + 0x81C, 0xF2140001, + 0x81C, 0xF1160001, + 0x81C, 0xF0180001, + 0x81C, 0xEF1A0001, + 0x81C, 0xEE1C0001, + 0x81C, 0xED1E0001, + 0x81C, 0xEC200001, + 0x81C, 0xEB220001, + 0x81C, 0xEA240001, + 0x81C, 0xCD260001, + 0x81C, 0xCC280001, + 0x81C, 0xCB2A0001, + 0x81C, 0xCA2C0001, + 0x81C, 0xC92E0001, + 0x81C, 0xC8300001, + 0x81C, 0xA6320001, + 0x81C, 0xA5340001, + 0x81C, 0xA4360001, + 0x81C, 0xA3380001, + 0x81C, 0xA23A0001, + 0x81C, 0x883C0001, + 0x81C, 0x873E0001, + 0x81C, 0x86400001, + 0x81C, 0x85420001, + 0x81C, 0x84440001, + 0x81C, 0x83460001, + 0x81C, 0x82480001, + 0x81C, 0x814A0001, + 0x81C, 0x484C0001, + 0x81C, 0x474E0001, + 0x81C, 0x46500001, + 0x81C, 0x45520001, + 0x81C, 0x44540001, + 0x81C, 0x43560001, + 0x81C, 0x42580001, + 0x81C, 0x415A0001, + 0x81C, 0x255C0001, + 0x81C, 0x245E0001, + 
0x81C, 0x23600001, + 0x81C, 0x22620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x216C0001, + 0x81C, 0x216E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0x81C, 0xF9000001, + 0x81C, 0xF8020001, + 0x81C, 0xF7040001, + 0x81C, 0xF6060001, + 0x81C, 0xF5080001, + 0x81C, 0xF40A0001, + 0x81C, 0xF30C0001, + 0x81C, 0xF20E0001, + 0x81C, 0xF1100001, + 0x81C, 0xF0120001, + 0x81C, 0xEF140001, + 0x81C, 0xEE160001, + 0x81C, 0xED180001, + 0x81C, 0xEC1A0001, + 0x81C, 0xEB1C0001, + 0x81C, 0xEA1E0001, + 0x81C, 0xCD200001, + 0x81C, 0xCC220001, + 0x81C, 0xCB240001, + 0x81C, 0xCA260001, + 0x81C, 0xC9280001, + 0x81C, 0xC82A0001, + 0x81C, 0xC72C0001, + 0x81C, 0xC62E0001, + 0x81C, 0xA5300001, + 0x81C, 0xA4320001, + 0x81C, 0xA3340001, + 0x81C, 0xA2360001, + 0x81C, 0x88380001, + 0x81C, 0x873A0001, + 0x81C, 0x863C0001, + 0x81C, 0x853E0001, + 0x81C, 0x84400001, + 0x81C, 0x83420001, + 0x81C, 0x82440001, + 0x81C, 0x81460001, + 0x81C, 0x48480001, + 0x81C, 0x474A0001, + 0x81C, 0x464C0001, + 0x81C, 0x454E0001, + 0x81C, 0x44500001, + 0x81C, 0x43520001, + 0x81C, 0x42540001, + 0x81C, 0x41560001, + 0x81C, 0x25580001, + 0x81C, 0x245A0001, + 0x81C, 0x235C0001, + 0x81C, 0x225E0001, + 0x81C, 0x21600001, + 0x81C, 0x21620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x236C0001, + 0x81C, 0x226E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000001, + 0x81C, 0xFF020001, + 0x81C, 0xFF040001, + 0x81C, 0xFF060001, + 0x81C, 0xFF080001, + 0x81C, 0xFE0A0001, + 0x81C, 0xFD0C0001, + 0x81C, 0xFC0E0001, + 0x81C, 0xFB100001, + 0x81C, 0xFA120001, + 0x81C, 0xF9140001, + 0x81C, 0xF8160001, + 0x81C, 0xF7180001, + 0x81C, 0xF61A0001, + 0x81C, 0xF51C0001, + 0x81C, 0xF41E0001, + 0x81C, 0xF3200001, + 0x81C, 0xF2220001, + 0x81C, 0xF1240001, + 0x81C, 0xF0260001, + 0x81C, 0xEF280001, + 0x81C, 0xEE2A0001, + 0x81C, 0xED2C0001, + 0x81C, 0xEC2E0001, + 0x81C, 0xEB300001, + 0x81C, 0xEA320001, + 0x81C, 0xE9340001, + 0x81C, 0xE8360001, + 0x81C, 0xE7380001, + 0x81C, 0xE63A0001, + 0x81C, 0xE53C0001, + 0x81C, 0xC73E0001, + 0x81C, 0xC6400001, + 0x81C, 0xC5420001, + 0x81C, 0xC4440001, + 0x81C, 0xC3460001, + 0x81C, 0xC2480001, + 0x81C, 0xC14A0001, + 0x81C, 0xA74C0001, + 0x81C, 0xA64E0001, + 0x81C, 0xA5500001, + 0x81C, 0xA4520001, + 0x81C, 0xA3540001, + 0x81C, 0xA2560001, + 0x81C, 0xA1580001, + 0x81C, 0x675A0001, + 0x81C, 0x665C0001, + 0x81C, 0x655E0001, + 0x81C, 0x64600001, + 0x81C, 0x63620001, + 0x81C, 0x48640001, + 0x81C, 0x47660001, + 0x81C, 0x46680001, + 0x81C, 0x456A0001, + 0x81C, 0x446C0001, + 0x81C, 0x436E0001, + 0x81C, 0x42700001, + 0x81C, 0x41720001, + 0x81C, 0x41740001, + 0x81C, 0x41760001, + 0x81C, 0x41780001, + 0x81C, 0x417A0001, + 0x81C, 0x417C0001, + 0x81C, 0x417E0001, + 0xB0000000, 0x00000000, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFC800001, 0x81C, 0xFB820001, 0x81C, 0xFA840001, @@ -3165,7 +2407,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = { 0x81C, 0x01FA0001, 0x81C, 0x01FC0001, 0x81C, 0x01FE0001, - 0xCDCDCDCD, 0xCDCD, + 0xA0000000, 0x00000000, 0x81C, 0xFF800001, 0x81C, 0xFF820001, 0x81C, 0xFF840001, @@ -3230,14 +2472,16 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = { 0x81C, 0x01FA0001, 0x81C, 0x01FC0001, 0x81C, 
0x01FE0001, - 0xFF0F0180, 0xDEAD, + 0xB0000000, 0x00000000, 0xC50, 0x00000022, 0xC50, 0x00000020, 0xE50, 0x00000022, 0xE50, 0x00000020, - }; +u32 RTL8812AE_AGC_TAB_1TARRAYLEN = + sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32); + u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0xBF000001, 0x81C, 0xBF020001, @@ -3430,9 +2674,11 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = { 0x81C, 0x017E0101, 0xC50, 0x00000022, 0xC50, 0x00000020, - }; +u32 RTL8821AE_AGC_TAB_1TARRAYLEN = + sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32); + /****************************************************************************** * TXPWR_LMT.TXT ******************************************************************************/ @@ -3717,9 +2963,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = { "FCC", "5G", "20M", "OFDM", "1T", "100", "30", "ETSI", "5G", "20M", "OFDM", "1T", "100", "32", "MKK", "5G", "20M", "OFDM", "1T", "100", "32", - "FCC", "5G", "20M", "OFDM", "1T", "114", "30", - "ETSI", "5G", "20M", "OFDM", "1T", "114", "32", - "MKK", "5G", "20M", "OFDM", "1T", "114", "32", + "FCC", "5G", "20M", "OFDM", "1T", "104", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "104", "32", + "MKK", "5G", "20M", "OFDM", "1T", "104", "32", "FCC", "5G", "20M", "OFDM", "1T", "108", "32", "ETSI", "5G", "20M", "OFDM", "1T", "108", "32", "MKK", "5G", "20M", "OFDM", "1T", "108", "32", @@ -3789,9 +3035,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = { "FCC", "5G", "20M", "HT", "1T", "100", "30", "ETSI", "5G", "20M", "HT", "1T", "100", "32", "MKK", "5G", "20M", "HT", "1T", "100", "32", - "FCC", "5G", "20M", "HT", "1T", "114", "30", - "ETSI", "5G", "20M", "HT", "1T", "114", "32", - "MKK", "5G", "20M", "HT", "1T", "114", "32", + "FCC", "5G", "20M", "HT", "1T", "104", "30", + "ETSI", "5G", "20M", "HT", "1T", "104", "32", + "MKK", "5G", "20M", "HT", "1T", "104", "32", "FCC", "5G", "20M", "HT", "1T", "108", "32", "ETSI", "5G", "20M", "HT", "1T", "108", "32", "MKK", "5G", "20M", "HT", "1T", "108", "32", @@ -3861,9 +3107,9 @@ u8 *RTL8812AE_TXPWR_LMT[] = { "FCC", "5G", "20M", "HT", "2T", "100", "28", "ETSI", "5G", "20M", "HT", "2T", "100", "30", "MKK", "5G", "20M", "HT", "2T", "100", "30", - "FCC", "5G", "20M", "HT", "2T", "114", "28", - "ETSI", "5G", "20M", "HT", "2T", "114", "30", - "MKK", "5G", "20M", "HT", "2T", "114", "30", + "FCC", "5G", "20M", "HT", "2T", "104", "28", + "ETSI", "5G", "20M", "HT", "2T", "104", "30", + "MKK", "5G", "20M", "HT", "2T", "104", "30", "FCC", "5G", "20M", "HT", "2T", "108", "30", "ETSI", "5G", "20M", "HT", "2T", "108", "30", "MKK", "5G", "20M", "HT", "2T", "108", "30", @@ -4004,6 +3250,8 @@ u8 *RTL8812AE_TXPWR_LMT[] = { "MKK", "5G", "80M", "VHT", "2T", "155", "63" }; +u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *); + u8 *RTL8821AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", @@ -4284,9 +3532,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = { "FCC", "5G", "20M", "OFDM", "1T", "100", "32", "ETSI", "5G", "20M", "OFDM", "1T", "100", "30", "MKK", "5G", "20M", "OFDM", "1T", "100", "30", - "FCC", "5G", "20M", "OFDM", "1T", "114", "32", - "ETSI", "5G", "20M", "OFDM", "1T", "114", "30", - "MKK", "5G", "20M", "OFDM", "1T", "114", "30", + "FCC", "5G", "20M", "OFDM", "1T", "104", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "104", "30", + "MKK", "5G", "20M", "OFDM", "1T", "104", "30", "FCC", "5G", "20M", "OFDM", "1T", "108", "32", "ETSI", "5G", "20M", "OFDM", "1T", "108", "30", "MKK", "5G", "20M", "OFDM", "1T", "108", "30", @@ -4356,9 +3604,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = { "FCC", "5G", "20M", "HT", "1T", 
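The TXPWR_LMT edits in the rows above replace channel "114" with "104" across the FCC/ETSI/MKK triplets; 114 is not a valid 20 MHz 5 GHz channel number, and 104 is the one missing from the surrounding 100/108/112 run. Each row is seven strings that the driver consumes positionally; a hypothetical struct view of one row (field names and the half-dB unit reading are assumptions, not taken from this patch):

/* Hypothetical decomposition of one RTL8812AE_TXPWR_LMT row. */
struct txpwr_lmt_row {
	const char *regulation;	/* "FCC", "ETSI", "MKK" */
	const char *band;	/* "2.4G", "5G" */
	const char *bandwidth;	/* "20M", "40M", "80M" */
	const char *rate_sec;	/* "CCK", "OFDM", "HT", "VHT" */
	const char *ntx;	/* "1T", "2T" */
	const char *channel;	/* "01" ... "155" */
	const char *limit;	/* power limit; "63" apparently means
				 * "no limit defined" */
};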
"100", "32", "ETSI", "5G", "20M", "HT", "1T", "100", "30", "MKK", "5G", "20M", "HT", "1T", "100", "30", - "FCC", "5G", "20M", "HT", "1T", "114", "32", - "ETSI", "5G", "20M", "HT", "1T", "114", "30", - "MKK", "5G", "20M", "HT", "1T", "114", "30", + "FCC", "5G", "20M", "HT", "1T", "104", "32", + "ETSI", "5G", "20M", "HT", "1T", "104", "30", + "MKK", "5G", "20M", "HT", "1T", "104", "30", "FCC", "5G", "20M", "HT", "1T", "108", "32", "ETSI", "5G", "20M", "HT", "1T", "108", "30", "MKK", "5G", "20M", "HT", "1T", "108", "30", @@ -4428,9 +3676,9 @@ u8 *RTL8821AE_TXPWR_LMT[] = { "FCC", "5G", "20M", "HT", "2T", "100", "28", "ETSI", "5G", "20M", "HT", "2T", "100", "30", "MKK", "5G", "20M", "HT", "2T", "100", "30", - "FCC", "5G", "20M", "HT", "2T", "114", "28", - "ETSI", "5G", "20M", "HT", "2T", "114", "30", - "MKK", "5G", "20M", "HT", "2T", "114", "30", + "FCC", "5G", "20M", "HT", "2T", "104", "28", + "ETSI", "5G", "20M", "HT", "2T", "104", "30", + "MKK", "5G", "20M", "HT", "2T", "104", "30", "FCC", "5G", "20M", "HT", "2T", "108", "30", "ETSI", "5G", "20M", "HT", "2T", "108", "30", "MKK", "5G", "20M", "HT", "2T", "108", "30", @@ -4570,3 +3818,5 @@ u8 *RTL8821AE_TXPWR_LMT[] = { "ETSI", "5G", "80M", "VHT", "2T", "155", "30", "MKK", "5G", "80M", "VHT", "2T", "155", "63" }; + +u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h index 24bcff6bc507bff6b27a03e939790db9d59bd5dd..36c2388b60bca303e3df7405996952a32ca3dcd7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h @@ -29,32 +29,30 @@ #define __RTL8821AE_TABLE__H_ #include -#define RTL8821AEPHY_REG_1TARRAYLEN 344 +extern u32 RTL8821AE_PHY_REG_1TARRAYLEN; extern u32 RTL8821AE_PHY_REG_ARRAY[]; -#define RTL8812AEPHY_REG_1TARRAYLEN 490 +extern u32 RTL8812AE_PHY_REG_1TARRAYLEN; extern u32 RTL8812AE_PHY_REG_ARRAY[]; -#define RTL8821AEPHY_REG_ARRAY_PGLEN 90 +extern u32 RTL8821AE_PHY_REG_ARRAY_PGLEN; extern u32 RTL8821AE_PHY_REG_ARRAY_PG[]; -#define RTL8812AEPHY_REG_ARRAY_PGLEN 276 +extern u32 RTL8812AE_PHY_REG_ARRAY_PGLEN; extern u32 RTL8812AE_PHY_REG_ARRAY_PG[]; -/* #define RTL8723BE_RADIOA_1TARRAYLEN 206 */ -/* extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[]; */ -#define RTL8812AE_RADIOA_1TARRAYLEN 1264 +extern u32 RTL8812AE_RADIOA_1TARRAYLEN; extern u32 RTL8812AE_RADIOA_ARRAY[]; -#define RTL8812AE_RADIOB_1TARRAYLEN 1240 +extern u32 RTL8812AE_RADIOB_1TARRAYLEN; extern u32 RTL8812AE_RADIOB_ARRAY[]; -#define RTL8821AE_RADIOA_1TARRAYLEN 1176 +extern u32 RTL8821AE_RADIOA_1TARRAYLEN; extern u32 RTL8821AE_RADIOA_ARRAY[]; -#define RTL8821AEMAC_1T_ARRAYLEN 194 +extern u32 RTL8821AE_MAC_1T_ARRAYLEN; extern u32 RTL8821AE_MAC_REG_ARRAY[]; -#define RTL8812AEMAC_1T_ARRAYLEN 214 +extern u32 RTL8812AE_MAC_1T_ARRAYLEN; extern u32 RTL8812AE_MAC_REG_ARRAY[]; -#define RTL8821AEAGCTAB_1TARRAYLEN 382 +extern u32 RTL8821AE_AGC_TAB_1TARRAYLEN; extern u32 RTL8821AE_AGC_TAB_ARRAY[]; -#define RTL8812AEAGCTAB_1TARRAYLEN 1312 +extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN; extern u32 RTL8812AE_AGC_TAB_ARRAY[]; -#define RTL8812AE_TXPWR_LMT_ARRAY_LEN 3948 +extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN; extern u8 *RTL8812AE_TXPWR_LMT[]; -#define RTL8821AE_TXPWR_LMT_ARRAY_LEN 3948 +extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN; extern u8 *RTL8821AE_TXPWR_LMT[]; #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 
index 108098152cf3c20f67e4b147c5bdb45759835449..03665e82065f18e3503fce0bfa7c3bc98a6ab564 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -520,18 +520,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (status->rx_packet_bw == HT_CHANNEL_WIDTH_20_40) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status->bw = RATE_INFO_BW_40; else if (status->rx_packet_bw == HT_CHANNEL_WIDTH_80) - rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + rx_status->bw = RATE_INFO_BW_80; if (status->is_ht) - rx_status->flag |= RX_FLAG_HT; + rx_status->encoding = RX_ENC_HT; if (status->is_vht) - rx_status->flag |= RX_FLAG_VHT; + rx_status->encoding = RX_ENC_VHT; if (status->is_short_gi) - rx_status->flag |= RX_FLAG_SHORT_GI; + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - rx_status->vht_nss = status->vht_nss; + rx_status->nss = status->vht_nss; rx_status->flag |= RX_FLAG_MACTIME_START; /* hw will set status->decrypted true, if it finds the diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 65ef42b376515dfac7de588ba5ea67504f8c6630..c0d2601bc55fbb337ea854afd4207b1e67b073eb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1529,6 +1529,10 @@ struct rtl_hal { u8 external_lna_2g; u8 external_pa_5g; u8 external_lna_5g; + u8 type_glna; + u8 type_gpa; + u8 type_alna; + u8 type_apa; u8 rfe_type; /*firmware */ @@ -2933,6 +2937,14 @@ static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8) rtlpriv->io.read8_sync(rtlpriv, addr); } +static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw, + u32 addr, u32 val8) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, addr, (u8)val8); +} + static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16) { rtlpriv->io.write16_async(rtlpriv, addr, val16); @@ -2966,6 +2978,12 @@ static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr, rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data); } +static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw, + u32 regaddr, u32 data) +{ + rtl_set_bbreg(hw, regaddr, 0xffffffff, data); +} + static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask) diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 785334f7a5386e82c32cd6fe41dee6d36b37b969..9935bd09db1fb309090222be94c85ec98917b5cc 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -479,7 +479,7 @@ struct rndis_wlan_private { */ static int rndis_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params); static int rndis_scan(struct wiphy *wiphy, @@ -1857,7 +1857,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, */ static int rndis_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); @@ -2830,15 +2830,22 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) } if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { - if (!roamed) + if (!roamed) { cfg80211_connect_result(usbdev->net, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, 0, GFP_KERNEL); - else - 
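The trx.c hunk above is one instance of a tree-wide mac80211 API move, repeated below in cw1200, wl1251 and wlcore: HT/VHT encoding, bandwidth and short-GI stop being RX_FLAG_* bits crammed into rx_status->flag and become dedicated fields. An illustrative fragment of the new fill pattern, with values hard-coded for a two-stream VHT80 short-GI frame:

static void fill_rx_status(struct ieee80211_rx_status *rx_status)
{
	rx_status->encoding = RX_ENC_VHT;		/* was RX_FLAG_VHT */
	rx_status->bw = RATE_INFO_BW_80;		/* was RX_VHT_FLAG_80MHZ */
	rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;	/* was RX_FLAG_SHORT_GI */
	rx_status->nss = 2;				/* was rx_status->vht_nss */
}

Splitting these out frees flag bits and lets rate reporting share the RATE_INFO_BW_* values cfg80211 already uses.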
cfg80211_roamed(usbdev->net, - get_current_channel(usbdev, NULL), - bssid, req_ie, req_ie_len, - resp_ie, resp_ie_len, GFP_KERNEL); + } else { + struct cfg80211_roam_info roam_info = { + .channel = get_current_channel(usbdev, NULL), + .bssid = bssid, + .req_ie = req_ie, + .req_ie_len = req_ie_len, + .resp_ie = resp_ie, + .resp_ie_len = resp_ie_len, + }; + + cfg80211_roamed(usbdev->net, &roam_info, GFP_KERNEL); + } } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) cfg80211_ibss_joined(usbdev->net, bssid, get_current_channel(usbdev, NULL), @@ -3392,6 +3399,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = rndis_wlan_set_multicast_list, @@ -3427,6 +3435,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) /* because rndis_command() sleeps we need to use workqueue */ priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + if (!priv->workqueue) { + wiphy_free(wiphy); + return -ENOMEM; + } INIT_WORK(&priv->work, rndis_wlan_worker); INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index e3216473aecb7a7e27afb707a66e3b00ddd3164d..021e5ac5f1073c2d3ba98815359ea3f88f8ef514 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1261,6 +1261,8 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->reg_notifier = rsi_reg_notify; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + status = ieee80211_register_hw(hw); if (status) return status; diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c index d3acc85932a569024a51d4b9d68b6dbc79dcfec8..709f56e5ad875c534dc44fac64d371c74b88fe1a 100644 --- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index 3d170287cd0b9df0428d8ec559f2f0f7e5c09a27..cd63ffef025af52d666861c53e7c8d9f353e9778 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -1085,7 +1085,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, hdr->band); if (arg->rx_rate >= 14) { - hdr->flag |= RX_FLAG_HT; + hdr->encoding = RX_ENC_HT; hdr->rate_idx = arg->rx_rate - 14; } else if (arg->rx_rate >= 4) { hdr->rate_idx = arg->rx_rate - 2; diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index a27d4c22b6e88aaa1058d42b2accf9760a639272..50fb2a4a5259d25d01c888bfc9a9283e8d049c52 100644 --- a/drivers/net/wireless/ti/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c @@ -141,7 +141,7 @@ static void wl1251_rx_status(struct wl1251 *wl, } if (desc->mod_pre & SHORT_PREAMBLE_BIT) - status->flag |= RX_FLAG_SHORTPRE; + status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } static void wl1251_rx_body(struct wl1251 *wl, diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index 58e148d7bc7b66f4d819209cb42b89c998496cbb..de7e2a5fdffa048e2f00f2edab72f9363b56e01a 100644 --- 
a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -1249,7 +1249,7 @@ static ssize_t fw_logger_write(struct file *file, } if (wl->conf.fwlog.output == 0) { - wl1271_warning("iligal opperation - fw logger disabled by default, please change mode via wlconf"); + wl1271_warning("invalid operation - fw logger disabled by default, please change mode via wlconf"); return -EINVAL; } diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index a21fda910529a45aff033ccbda6bfbdd758a66c9..382ec15ec1af0c90ee2fb9fbe6c6ca16a7a6a343 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6128,6 +6128,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); + wl->hw->wiphy->max_sched_scan_reqs = 1; wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); @@ -6135,7 +6136,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | - WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_CHANNEL_SWITCH; wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index b9e14045195fd734c5bff67585b36212305e7820..52a55f9acd80d41da600b16c2b379ff9a680c83a 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -72,7 +72,7 @@ static void wl1271_rx_status(struct wl1271 *wl, /* 11n support */ if (desc->rate <= wl->hw_min_ht_rate) - status->flag |= RX_FLAG_HT; + status->encoding = RX_ENC_HT; /* * Read the signal level and antenna diversity indication. 
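The rtl8821ae, cw1200, wl1251 and wlcore receive-path hunks above all apply the same mac80211 API migration: the old RX_FLAG_HT/RX_FLAG_VHT/RX_FLAG_40MHZ-style bits packed into rx_status->flag were split into dedicated encoding, bw, nss and enc_flags fields of struct ieee80211_rx_status. Below is a minimal sketch of the resulting pattern; struct demo_rx_desc and its fields are hypothetical stand-ins for a driver's own hardware descriptor, not taken from any of the drivers above.

#include <net/mac80211.h>

/* Hypothetical summary of what the hardware reported for one frame. */
struct demo_rx_desc {
	bool is_ht;
	bool is_vht;
	bool is_40mhz;
	bool is_80mhz;
	bool short_gi;
	u8 nss;
};

static void demo_fill_rx_status(struct ieee80211_rx_status *rx_status,
				const struct demo_rx_desc *desc)
{
	/* Modulation is now a single enum value, not independent flag bits. */
	if (desc->is_vht)
		rx_status->encoding = RX_ENC_VHT;
	else if (desc->is_ht)
		rx_status->encoding = RX_ENC_HT;
	/* otherwise encoding stays RX_ENC_LEGACY (zero) */

	/* Bandwidth: RATE_INFO_BW_* replaces RX_FLAG_40MHZ and
	 * RX_VHT_FLAG_80MHZ.
	 */
	if (desc->is_80mhz)
		rx_status->bw = RATE_INFO_BW_80;
	else if (desc->is_40mhz)
		rx_status->bw = RATE_INFO_BW_40;

	/* Modifier bits such as short guard interval move to enc_flags. */
	if (desc->short_gi)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	/* The spatial-stream count gets its own field (formerly vht_nss). */
	rx_status->nss = desc->nss;
}

Because the new fields hold plain values rather than OR-ed flag bits, the hunks above can replace |= with simple assignments, which is exactly the shape of each change.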
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index ddad58f614da4fa862a0e3f2b776fe0e994a742d..009ec07c4cec14f057b39fe122aa3a6acb943eb8 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -366,7 +366,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 nla_cmd; int err; - err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy); + err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy, + NULL); if (err) return err; diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index fd4e9ba176c9b1ab3f16878f95d6041f12e39903..5c0bcb1fe1a1f2e2468d629afe951cc4826e3719 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -41,7 +41,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, return -EINVAL; ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, - wlcore_vendor_attr_policy); + wlcore_vendor_attr_policy, NULL); if (ret) return ret; @@ -116,7 +116,7 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, return -EINVAL; ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, - wlcore_vendor_attr_policy); + wlcore_vendor_attr_policy, NULL); if (ret) return ret; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index 3e37a045f7025fa39cf55f55cc1d59d26b8feee9..fe6517a621b083ea3995703784de74a68a9655d4 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -1408,6 +1408,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + hw->max_signal = 100; hw->queues = 1; hw->extra_tx_headroom = sizeof(struct zd_ctrlset); diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index c5effd6c6be9beea8481d3c1a68e57f5ace403cf..01ca1d57b3d92074d2953195b9776b368f653ead 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -1278,6 +1278,9 @@ static int eject_installer(struct usb_interface *intf) u8 bulk_out_ep; int r; + if (iface_desc->desc.bNumEndpoints < 2) + return -ENODEV; + /* Find bulk out endpoint */ for (r = 1; r >= 0; r--) { endpoint = &iface_desc->endpoint[r].desc; diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 9d2369269abf141e9d993a50568637d7c4225e45..c4208487fadc5693d7c4d7dfda9cc3c5f614ab61 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -5,17 +5,6 @@ menu "Near Field Communication (NFC) devices" depends on NFC -config NFC_WILINK - tristate "Texas Instruments NFC WiLink driver" - depends on TI_ST && NFC_NCI - help - This enables the NFC driver for Texas Instrument's BT/FM/GPS/NFC - combo devices. This makes use of shared transport line discipline - core driver to communicate with the NFC core of the combo chip. - - Say Y here to compile support for Texas Instrument's NFC WiLink driver - into the kernel or say M to compile it as module. 
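One note on the wlcore testmode and vendor-command hunks above: nla_parse() grew a sixth parameter, a struct netlink_ext_ack pointer that lets validation failures name the offending attribute; callers with no extended-ack context to report through, like these wlcore paths, simply pass NULL. A hedged sketch of the new call shape follows, using an invented attribute set purely for illustration.

#include <net/netlink.h>

/* Illustrative attribute space; not from any driver above. */
enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_CHANNEL,
	__DEMO_ATTR_AFTER_LAST,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_AFTER_LAST - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_CHANNEL] = { .type = NLA_U32 },
};

static int demo_parse(const struct nlattr *head, int len)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];

	/* The final NULL is the new struct netlink_ext_ack * argument;
	 * passing NULL opts out of extended error reporting.
	 */
	return nla_parse(tb, DEMO_ATTR_MAX, head, len, demo_policy, NULL);
}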
- config NFC_TRF7970A tristate "Texas Instruments TRF7970a NFC driver" depends on SPI && NFC_DIGITAL diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile index bab8ef06ae354129b82d45b7dbbd9dce5641a4a9..640b7274371cfbdc3d67cde889d2b5b10c83b418 100644 --- a/drivers/nfc/Makefile +++ b/drivers/nfc/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_NFC_FDP) += fdp/ obj-$(CONFIG_NFC_PN544) += pn544/ obj-$(CONFIG_NFC_MICROREAD) += microread/ obj-$(CONFIG_NFC_PN533) += pn533/ -obj-$(CONFIG_NFC_WILINK) += nfcwilink.o obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o obj-$(CONFIG_NFC_SIM) += nfcsim.o obj-$(CONFIG_NFC_PORT100) += port100.o diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 5e797d5c38edce00356702bde47814a30d651d80..712936f5d2d64a200f16a28e044aeb4a362dc7ff 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -210,14 +210,14 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) struct sk_buff *skb; int r; - client = phy->i2c_dev; - dev_dbg(&client->dev, "%s\n", __func__); - if (!phy || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } + client = phy->i2c_dev; + dev_dbg(&client->dev, "%s\n", __func__); + r = fdp_nci_i2c_read(phy, &skb); if (r == -EREMOTEIO) diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c index f8dcdf4b24f6ff8b63a1a70e9e5249b997209780..c38bdd6a5a821899d16488f3b988b741316cb26e 100644 --- a/drivers/nfc/nfcmrvl/fw_dnld.c +++ b/drivers/nfc/nfcmrvl/fw_dnld.c @@ -17,7 +17,7 @@ */ #include -#include +#include #include #include #include @@ -281,12 +281,11 @@ static int process_state_fw_dnld(struct nfcmrvl_private *priv, return -EINVAL; } skb_pull(skb, 1); - memcpy(&len, skb->data, 2); + len = get_unaligned_le16(skb->data); skb_pull(skb, 2); - memcpy(&comp_len, skb->data, 2); + comp_len = get_unaligned_le16(skb->data); skb_pull(skb, 2); - len = get_unaligned_le16(&len); - comp_len = get_unaligned_le16(&comp_len); if (((~len) & 0xFFFF) != comp_len) { nfc_err(priv->dev, "bad len complement: %x %x %x", len, comp_len, (~len & 0xFFFF)); diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index a7faa0bcc01e8a101bfafefff157975bc448709b..8e0ddb43477042dfa035c1ebb05b175b39d13796 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -26,7 +26,6 @@ #include #include #include -#include #include "nfcmrvl.h" #define SPI_WAIT_HANDSHAKE 1 @@ -96,10 +95,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv, /* Send the SPI packet */ err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion, skb); - if (err != 0) { + if (err) nfc_err(priv->dev, "spi_send failed %d", err); - kfree_skb(skb); - } + return err; } diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c deleted file mode 100644 index 3fbd18b8e473f6966dd53ac61cee333b172981d4..0000000000000000000000000000000000000000 --- a/drivers/nfc/nfcwilink.c +++ /dev/null @@ -1,578 +0,0 @@ -/* - * Texas Instrument's NFC Driver For Shared Transport. - * - * NFC Driver acts as interface between NCI core and - * TI Shared Transport Layer. - * - * Copyright (C) 2011 Texas Instruments, Inc. - * - * Written by Ilan Elias - * - * Acknowledgements: - * This file is based on btwilink.c, which was written - * by Raja Mani and Pavan Savoy. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation.
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - * - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#define NFCWILINK_CHNL 12 -#define NFCWILINK_OPCODE 7 -#define NFCWILINK_MAX_FRAME_SIZE 300 -#define NFCWILINK_HDR_LEN 4 -#define NFCWILINK_OFFSET_LEN_IN_HDR 1 -#define NFCWILINK_LEN_SIZE 2 -#define NFCWILINK_REGISTER_TIMEOUT 8000 /* 8 sec */ -#define NFCWILINK_CMD_TIMEOUT 5000 /* 5 sec */ - -#define BTS_FILE_NAME_MAX_SIZE 40 -#define BTS_FILE_HDR_MAGIC 0x42535442 -#define BTS_FILE_CMD_MAX_LEN 0xff -#define BTS_FILE_ACTION_TYPE_SEND_CMD 1 - -#define NCI_VS_NFCC_INFO_CMD_GID 0x2f -#define NCI_VS_NFCC_INFO_CMD_OID 0x12 -#define NCI_VS_NFCC_INFO_RSP_GID 0x4f -#define NCI_VS_NFCC_INFO_RSP_OID 0x12 - -struct nfcwilink_hdr { - __u8 chnl; - __u8 opcode; - __le16 len; -} __packed; - -struct nci_vs_nfcc_info_cmd { - __u8 gid; - __u8 oid; - __u8 plen; -} __packed; - -struct nci_vs_nfcc_info_rsp { - __u8 gid; - __u8 oid; - __u8 plen; - __u8 status; - __u8 hw_id; - __u8 sw_ver_x; - __u8 sw_ver_z; - __u8 patch_id; -} __packed; - -struct bts_file_hdr { - __le32 magic; - __le32 ver; - __u8 rfu[24]; - __u8 actions[0]; -} __packed; - -struct bts_file_action { - __le16 type; - __le16 len; - __u8 data[0]; -} __packed; - -struct nfcwilink { - struct platform_device *pdev; - struct nci_dev *ndev; - unsigned long flags; - - int st_register_cb_status; - long (*st_write) (struct sk_buff *); - - struct completion completed; - - struct nci_vs_nfcc_info_rsp nfcc_info; -}; - -/* NFCWILINK driver flags */ -enum { - NFCWILINK_RUNNING, - NFCWILINK_FW_DOWNLOAD, -}; - -static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb); - -static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how) -{ - struct sk_buff *skb; - - skb = alloc_skb(len + NFCWILINK_HDR_LEN, how); - if (skb) - skb_reserve(skb, NFCWILINK_HDR_LEN); - - return skb; -} - -static void nfcwilink_fw_download_receive(struct nfcwilink *drv, - struct sk_buff *skb) -{ - struct nci_vs_nfcc_info_rsp *rsp = (void *)skb->data; - - /* Detect NCI_VS_NFCC_INFO_RSP and store the result */ - if ((skb->len > 3) && (rsp->gid == NCI_VS_NFCC_INFO_RSP_GID) && - (rsp->oid == NCI_VS_NFCC_INFO_RSP_OID)) { - memcpy(&drv->nfcc_info, rsp, - sizeof(struct nci_vs_nfcc_info_rsp)); - } - - kfree_skb(skb); - - complete(&drv->completed); -} - -static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) -{ - struct nci_vs_nfcc_info_cmd *cmd; - struct sk_buff *skb; - unsigned long comp_ret; - int rc; - - skb = nfcwilink_skb_alloc(sizeof(struct nci_vs_nfcc_info_cmd), - GFP_KERNEL); - if (!skb) { - nfc_err(&drv->pdev->dev, - "no memory for nci_vs_nfcc_info_cmd\n"); - return -ENOMEM; - } - - cmd = (struct nci_vs_nfcc_info_cmd *) - skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd)); - cmd->gid = NCI_VS_NFCC_INFO_CMD_GID; - cmd->oid = NCI_VS_NFCC_INFO_CMD_OID; - cmd->plen = 0; - - drv->nfcc_info.plen = 0; - - rc = nfcwilink_send(drv->ndev, skb); - if (rc) - return rc; - - comp_ret = wait_for_completion_timeout(&drv->completed, - msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT)); - dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n", - comp_ret); - if (comp_ret == 0) { - 
nfc_err(&drv->pdev->dev, - "timeout on wait_for_completion_timeout\n"); - return -ETIMEDOUT; - } - - dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d\n", - drv->nfcc_info.plen, drv->nfcc_info.status); - - if ((drv->nfcc_info.plen != 5) || (drv->nfcc_info.status != 0)) { - nfc_err(&drv->pdev->dev, "invalid nci_vs_nfcc_info_rsp\n"); - return -EINVAL; - } - - snprintf(file_name, BTS_FILE_NAME_MAX_SIZE, - "TINfcInit_%d.%d.%d.%d.bts", - drv->nfcc_info.hw_id, - drv->nfcc_info.sw_ver_x, - drv->nfcc_info.sw_ver_z, - drv->nfcc_info.patch_id); - - nfc_info(&drv->pdev->dev, "nfcwilink FW file name: %s\n", file_name); - - return 0; -} - -static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len) -{ - struct nfcwilink_hdr *hdr = (struct nfcwilink_hdr *)data; - struct sk_buff *skb; - unsigned long comp_ret; - int rc; - - /* verify valid cmd for the NFC channel */ - if ((len <= sizeof(struct nfcwilink_hdr)) || - (len > BTS_FILE_CMD_MAX_LEN) || - (hdr->chnl != NFCWILINK_CHNL) || - (hdr->opcode != NFCWILINK_OPCODE)) { - nfc_err(&drv->pdev->dev, - "ignoring invalid bts cmd, len %d, chnl %d, opcode %d\n", - len, hdr->chnl, hdr->opcode); - return 0; - } - - /* remove the ST header */ - len -= sizeof(struct nfcwilink_hdr); - data += sizeof(struct nfcwilink_hdr); - - skb = nfcwilink_skb_alloc(len, GFP_KERNEL); - if (!skb) { - nfc_err(&drv->pdev->dev, "no memory for bts cmd\n"); - return -ENOMEM; - } - - memcpy(skb_put(skb, len), data, len); - - rc = nfcwilink_send(drv->ndev, skb); - if (rc) - return rc; - - comp_ret = wait_for_completion_timeout(&drv->completed, - msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT)); - dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n", - comp_ret); - if (comp_ret == 0) { - nfc_err(&drv->pdev->dev, - "timeout on wait_for_completion_timeout\n"); - return -ETIMEDOUT; - } - - return 0; -} - -static int nfcwilink_download_fw(struct nfcwilink *drv) -{ - unsigned char file_name[BTS_FILE_NAME_MAX_SIZE]; - const struct firmware *fw; - __u16 action_type, action_len; - __u8 *ptr; - int len, rc; - - set_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags); - - rc = nfcwilink_get_bts_file_name(drv, file_name); - if (rc) - goto exit; - - rc = request_firmware(&fw, file_name, &drv->pdev->dev); - if (rc) { - nfc_err(&drv->pdev->dev, "request_firmware failed %d\n", rc); - - /* if the file is not found, don't exit with failure */ - if (rc == -ENOENT) - rc = 0; - - goto exit; - } - - len = fw->size; - ptr = (__u8 *)fw->data; - - if ((len == 0) || (ptr == NULL)) { - dev_dbg(&drv->pdev->dev, - "request_firmware returned size %d\n", len); - goto release_fw; - } - - if (__le32_to_cpu(((struct bts_file_hdr *)ptr)->magic) != - BTS_FILE_HDR_MAGIC) { - nfc_err(&drv->pdev->dev, "wrong bts magic number\n"); - rc = -EINVAL; - goto release_fw; - } - - /* remove the BTS header */ - len -= sizeof(struct bts_file_hdr); - ptr += sizeof(struct bts_file_hdr); - - while (len > 0) { - action_type = - __le16_to_cpu(((struct bts_file_action *)ptr)->type); - action_len = - __le16_to_cpu(((struct bts_file_action *)ptr)->len); - - dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d\n", - action_type, action_len); - - switch (action_type) { - case BTS_FILE_ACTION_TYPE_SEND_CMD: - rc = nfcwilink_send_bts_cmd(drv, - ((struct bts_file_action *)ptr)->data, - action_len); - if (rc) - goto release_fw; - break; - } - - /* advance to the next action */ - len -= (sizeof(struct bts_file_action) + action_len); - ptr += (sizeof(struct bts_file_action) + action_len); - } - -release_fw: - 
release_firmware(fw); - -exit: - clear_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags); - return rc; -} - -/* Called by ST when registration is complete */ -static void nfcwilink_register_complete(void *priv_data, int data) -{ - struct nfcwilink *drv = priv_data; - - /* store ST registration status */ - drv->st_register_cb_status = data; - - /* complete the wait in nfc_st_open() */ - complete(&drv->completed); -} - -/* Called by ST when receive data is available */ -static long nfcwilink_receive(void *priv_data, struct sk_buff *skb) -{ - struct nfcwilink *drv = priv_data; - int rc; - - if (!skb) - return -EFAULT; - - if (!drv) { - kfree_skb(skb); - return -EFAULT; - } - - dev_dbg(&drv->pdev->dev, "receive entry, len %d\n", skb->len); - - /* strip the ST header - (apart for the chnl byte, which is not received in the hdr) */ - skb_pull(skb, (NFCWILINK_HDR_LEN-1)); - - if (test_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags)) { - nfcwilink_fw_download_receive(drv, skb); - return 0; - } - - /* Forward skb to NCI core layer */ - rc = nci_recv_frame(drv->ndev, skb); - if (rc < 0) { - nfc_err(&drv->pdev->dev, "nci_recv_frame failed %d\n", rc); - return rc; - } - - return 0; -} - -/* protocol structure registered with ST */ -static struct st_proto_s nfcwilink_proto = { - .chnl_id = NFCWILINK_CHNL, - .max_frame_size = NFCWILINK_MAX_FRAME_SIZE, - .hdr_len = (NFCWILINK_HDR_LEN-1), /* not including chnl byte */ - .offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR, - .len_size = NFCWILINK_LEN_SIZE, - .reserve = 0, - .recv = nfcwilink_receive, - .reg_complete_cb = nfcwilink_register_complete, - .write = NULL, -}; - -static int nfcwilink_open(struct nci_dev *ndev) -{ - struct nfcwilink *drv = nci_get_drvdata(ndev); - unsigned long comp_ret; - int rc; - - if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) { - rc = -EBUSY; - goto exit; - } - - nfcwilink_proto.priv_data = drv; - - init_completion(&drv->completed); - drv->st_register_cb_status = -EINPROGRESS; - - rc = st_register(&nfcwilink_proto); - if (rc < 0) { - if (rc == -EINPROGRESS) { - comp_ret = wait_for_completion_timeout( - &drv->completed, - msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT)); - - dev_dbg(&drv->pdev->dev, - "wait_for_completion_timeout returned %ld\n", - comp_ret); - - if (comp_ret == 0) { - /* timeout */ - rc = -ETIMEDOUT; - goto clear_exit; - } else if (drv->st_register_cb_status != 0) { - rc = drv->st_register_cb_status; - nfc_err(&drv->pdev->dev, - "st_register_cb failed %d\n", rc); - goto clear_exit; - } - } else { - nfc_err(&drv->pdev->dev, "st_register failed %d\n", rc); - goto clear_exit; - } - } - - /* st_register MUST fill the write callback */ - BUG_ON(nfcwilink_proto.write == NULL); - drv->st_write = nfcwilink_proto.write; - - if (nfcwilink_download_fw(drv)) { - nfc_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d\n", - rc); - /* open should succeed, even if the FW download failed */ - } - - goto exit; - -clear_exit: - clear_bit(NFCWILINK_RUNNING, &drv->flags); - -exit: - return rc; -} - -static int nfcwilink_close(struct nci_dev *ndev) -{ - struct nfcwilink *drv = nci_get_drvdata(ndev); - int rc; - - if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags)) - return 0; - - rc = st_unregister(&nfcwilink_proto); - if (rc) - nfc_err(&drv->pdev->dev, "st_unregister failed %d\n", rc); - - drv->st_write = NULL; - - return rc; -} - -static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb) -{ - struct nfcwilink *drv = nci_get_drvdata(ndev); - struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000}; - long len; - - 
dev_dbg(&drv->pdev->dev, "send entry, len %d\n", skb->len); - - if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) { - kfree_skb(skb); - return -EINVAL; - } - - /* add the ST hdr to the start of the buffer */ - hdr.len = cpu_to_le16(skb->len); - memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN); - - /* Insert skb to shared transport layer's transmit queue. - * Freeing skb memory is taken care in shared transport layer, - * so don't free skb memory here. - */ - len = drv->st_write(skb); - if (len < 0) { - kfree_skb(skb); - nfc_err(&drv->pdev->dev, "st_write failed %ld\n", len); - return -EFAULT; - } - - return 0; -} - -static struct nci_ops nfcwilink_ops = { - .open = nfcwilink_open, - .close = nfcwilink_close, - .send = nfcwilink_send, -}; - -static int nfcwilink_probe(struct platform_device *pdev) -{ - struct nfcwilink *drv; - int rc; - __u32 protocols; - - drv = devm_kzalloc(&pdev->dev, sizeof(struct nfcwilink), GFP_KERNEL); - if (!drv) { - rc = -ENOMEM; - goto exit; - } - - drv->pdev = pdev; - - protocols = NFC_PROTO_JEWEL_MASK - | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK - | NFC_PROTO_ISO14443_MASK - | NFC_PROTO_ISO14443_B_MASK - | NFC_PROTO_NFC_DEP_MASK; - - drv->ndev = nci_allocate_device(&nfcwilink_ops, - protocols, - NFCWILINK_HDR_LEN, - 0); - if (!drv->ndev) { - nfc_err(&pdev->dev, "nci_allocate_device failed\n"); - rc = -ENOMEM; - goto exit; - } - - nci_set_parent_dev(drv->ndev, &pdev->dev); - nci_set_drvdata(drv->ndev, drv); - - rc = nci_register_device(drv->ndev); - if (rc < 0) { - nfc_err(&pdev->dev, "nci_register_device failed %d\n", rc); - goto free_dev_exit; - } - - dev_set_drvdata(&pdev->dev, drv); - - goto exit; - -free_dev_exit: - nci_free_device(drv->ndev); - -exit: - return rc; -} - -static int nfcwilink_remove(struct platform_device *pdev) -{ - struct nfcwilink *drv = dev_get_drvdata(&pdev->dev); - struct nci_dev *ndev; - - if (!drv) - return -EFAULT; - - ndev = drv->ndev; - - nci_unregister_device(ndev); - nci_free_device(ndev); - - return 0; -} - -static struct platform_driver nfcwilink_driver = { - .probe = nfcwilink_probe, - .remove = nfcwilink_remove, - .driver = { - .name = "nfcwilink", - }, -}; - -module_platform_driver(nfcwilink_driver); - -/* ------ Module Info ------ */ - -MODULE_AUTHOR("Ilan Elias "); -MODULE_DESCRIPTION("NFC Driver for TI Shared Transport"); -MODULE_LICENSE("GPL"); diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c index 5291797324bae0752098a15b6b3f7221c612b8ce..553011f583395f609b48f591289d0025a2975551 100644 --- a/drivers/nfc/nxp-nci/firmware.c +++ b/drivers/nfc/nxp-nci/firmware.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include "nxp-nci.h" diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 36099e557730e3cb47b151a36e02fc901b38f518..ff22d761183cbed59aef9cf38eee29c4bd1de407 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -29,14 +29,13 @@ #include #include #include -#include #include #include #include #include #include #include -#include +#include #include @@ -86,7 +85,7 @@ static int nxp_nci_i2c_write(void *phy_id, struct sk_buff *skb) r = i2c_master_send(client, skb->data, skb->len); if (r < 0) { /* Retry, chip was in standby */ - usleep_range(110000, 120000); + msleep(110); r = i2c_master_send(client, skb->data, skb->len); } @@ -127,7 +126,7 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy, goto fw_read_exit; } - frame_len = (get_unaligned_be16(&header) & NXP_NCI_FW_FRAME_LEN_MASK) + + frame_len = 
(be16_to_cpu(header) & NXP_NCI_FW_FRAME_LEN_MASK) + NXP_NCI_FW_CRC_LEN; *skb = alloc_skb(NXP_NCI_FW_HDR_LEN + frame_len, GFP_KERNEL); diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 1dc89248e58e5b2876490fb926c80c350d239cda..8f60ce039b0d43191cf9921877564aa920a5abfd 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -51,7 +51,7 @@ static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags) { struct pn533_i2c_phy *phy = dev->phy; struct i2c_client *client = phy->i2c_dev; - u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; + static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 6.2.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ int rc; @@ -206,14 +206,6 @@ static int pn533_i2c_probe(struct i2c_client *client, phy->i2c_dev = client; i2c_set_clientdata(client, phy); - r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn, - IRQF_TRIGGER_FALLING | - IRQF_SHARED | IRQF_ONESHOT, - PN533_I2C_DRIVER_NAME, phy); - - if (r < 0) - nfc_err(&client->dev, "Unable to register IRQ handler\n"); - priv = pn533_register_device(PN533_DEVICE_PN532, PN533_NO_TYPE_B_PROTOCOLS, PN533_PROTO_REQ_ACK_RESP, @@ -223,16 +215,32 @@ static int pn533_i2c_probe(struct i2c_client *client, if (IS_ERR(priv)) { r = PTR_ERR(priv); - goto err_register; + return r; } phy->priv = priv; + r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn, + IRQF_TRIGGER_FALLING | + IRQF_SHARED | IRQF_ONESHOT, + PN533_I2C_DRIVER_NAME, phy); + if (r < 0) { + nfc_err(&client->dev, "Unable to register IRQ handler\n"); + goto irq_rqst_err; + } + + r = pn533_finalize_setup(priv); + if (r) + goto fn_setup_err; + return 0; -err_register: +fn_setup_err: free_irq(client->irq, phy); +irq_rqst_err: + pn533_unregister_device(phy->priv); + return r; } @@ -242,10 +250,10 @@ static int pn533_i2c_remove(struct i2c_client *client) dev_dbg(&client->dev, "%s\n", __func__); - pn533_unregister_device(phy->priv); - free_irq(client->irq, phy); + pn533_unregister_device(phy->priv); + return 0; } diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index a966c6a85ea869384d295fe6468cbd3e56d02b6f..65bbaa5fcdda6c2a1dae0e415b4cc2c7b2029eac 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -383,14 +383,18 @@ static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code, static int pn533_send_async_complete(struct pn533 *dev) { struct pn533_cmd *cmd = dev->cmd; - int status = cmd->status; + struct sk_buff *resp; + int status, rc = 0; - struct sk_buff *req = cmd->req; - struct sk_buff *resp = cmd->resp; + if (!cmd) { + dev_dbg(dev->dev, "%s: cmd not set\n", __func__); + goto done; + } - int rc; + dev_kfree_skb(cmd->req); - dev_kfree_skb(req); + status = cmd->status; + resp = cmd->resp; if (status < 0) { rc = cmd->complete_cb(dev, cmd->complete_cb_context, @@ -399,8 +403,14 @@ static int pn533_send_async_complete(struct pn533 *dev) goto done; } - skb_pull(resp, dev->ops->rx_header_len); - skb_trim(resp, resp->len - dev->ops->rx_tail_len); + /* when no response is set we got interrupted */ + if (!resp) + resp = ERR_PTR(-EINTR); + + if (!IS_ERR(resp)) { + skb_pull(resp, dev->ops->rx_header_len); + skb_trim(resp, resp->len - dev->ops->rx_tail_len); + } rc = cmd->complete_cb(dev, cmd->complete_cb_context, resp); @@ -434,12 +444,14 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code, mutex_lock(&dev->cmd_lock); if (!dev->cmd_pending) { + dev->cmd = cmd; rc = dev->phy_ops->send_frame(dev, req); - if (rc) + if (rc) { + dev->cmd = NULL; goto 
error; + } dev->cmd_pending = 1; - dev->cmd = cmd; goto unlock; } @@ -511,11 +523,12 @@ static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code, pn533_build_cmd_frame(dev, cmd_code, req); + dev->cmd = cmd; rc = dev->phy_ops->send_frame(dev, req); - if (rc < 0) + if (rc < 0) { + dev->cmd = NULL; kfree(cmd); - else - dev->cmd = cmd; + } return rc; } @@ -550,14 +563,15 @@ static void pn533_wq_cmd(struct work_struct *work) mutex_unlock(&dev->cmd_lock); + dev->cmd = cmd; rc = dev->phy_ops->send_frame(dev, cmd->req); if (rc < 0) { + dev->cmd = NULL; dev_kfree_skb(cmd->req); kfree(cmd); return; } - dev->cmd = cmd; } struct pn533_sync_cmd_response { @@ -2556,6 +2570,31 @@ static int pn533_setup(struct pn533 *dev) return 0; } +int pn533_finalize_setup(struct pn533 *dev) +{ + + struct pn533_fw_version fw_ver; + int rc; + + memset(&fw_ver, 0, sizeof(fw_ver)); + + rc = pn533_get_firmware_version(dev, &fw_ver); + if (rc) { + nfc_err(dev->dev, "Unable to get FW version\n"); + return rc; + } + + nfc_info(dev->dev, "NXP PN5%02X firmware ver %d.%d now attached\n", + fw_ver.ic, fw_ver.ver, fw_ver.rev); + + rc = pn533_setup(dev); + if (rc) + return rc; + + return 0; +} +EXPORT_SYMBOL_GPL(pn533_finalize_setup); + struct pn533 *pn533_register_device(u32 device_type, u32 protocols, enum pn533_protocol_type protocol_type, @@ -2565,7 +2604,6 @@ struct pn533 *pn533_register_device(u32 device_type, struct device *dev, struct device *parent) { - struct pn533_fw_version fw_ver; struct pn533 *priv; int rc = -ENOMEM; @@ -2608,15 +2646,6 @@ struct pn533 *pn533_register_device(u32 device_type, INIT_LIST_HEAD(&priv->cmd_queue); - memset(&fw_ver, 0, sizeof(fw_ver)); - rc = pn533_get_firmware_version(priv, &fw_ver); - if (rc < 0) - goto destroy_wq; - - nfc_info(dev, "NXP PN5%02X firmware ver %d.%d now attached\n", - fw_ver.ic, fw_ver.ver, fw_ver.rev); - - priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, priv->ops->tx_header_len + PN533_CMD_DATAEXCH_HEAD_LEN, @@ -2633,15 +2662,8 @@ struct pn533 *pn533_register_device(u32 device_type, if (rc) goto free_nfc_dev; - rc = pn533_setup(priv); - if (rc) - goto unregister_nfc_dev; - return priv; -unregister_nfc_dev: - nfc_unregister_device(priv->nfc_dev); - free_nfc_dev: nfc_free_device(priv->nfc_dev); diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h index 553c7d171fd1112b6c4906b566aa66a760bb81dc..88d569666c51fd3ce6fff3b800900bd26d303eb5 100644 --- a/drivers/nfc/pn533/pn533.h +++ b/drivers/nfc/pn533/pn533.h @@ -231,6 +231,7 @@ struct pn533 *pn533_register_device(u32 device_type, struct device *dev, struct device *parent); +int pn533_finalize_setup(struct pn533 *dev); void pn533_unregister_device(struct pn533 *priv); void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status); diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 33ed78be2750455c34fd51d3b69611f177b52374..8ed203ea21eac53cd1db08c03e046aa7bd9d6bc5 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -148,11 +148,11 @@ static int pn533_submit_urb_for_ack(struct pn533_usb_phy *phy, gfp_t flags) static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) { struct pn533_usb_phy *phy = dev->phy; - u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; + static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ int rc; - phy->out_urb->transfer_buffer = ack; + phy->out_urb->transfer_buffer = (u8 *)ack; phy->out_urb->transfer_buffer_length = sizeof(ack); rc = 
usb_submit_urb(phy->out_urb, flags); @@ -543,6 +543,10 @@ static int pn533_usb_probe(struct usb_interface *interface, phy->priv = priv; + rc = pn533_finalize_setup(priv); + if (rc) + goto error; + usb_set_intfdata(interface, phy); return 0; diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index f837c39a801709a8a9be3f8c58cf74baf2ca196e..71ac0836c9f4955e569c0ed07f8af8cf9a35721c 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -21,17 +21,13 @@ #include #include #include -#include -#include -#include #include -#include #include #include #include #include #include -#include + #include #include @@ -165,8 +161,9 @@ struct pn544_i2c_phy { struct i2c_client *i2c_dev; struct nfc_hci_dev *hdev; - unsigned int gpio_en; - unsigned int gpio_fw; + struct gpio_desc *gpiod_en; + struct gpio_desc *gpiod_fw; + unsigned int en_polarity; u8 hw_variant; @@ -208,19 +205,18 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n"); /* Disable fw download */ - gpio_set_value_cansleep(phy->gpio_fw, 0); + gpiod_set_value_cansleep(phy->gpiod_fw, 0); for (polarity = 0; polarity < 2; polarity++) { phy->en_polarity = polarity; retry = 3; while (retry--) { /* power off */ - gpio_set_value_cansleep(phy->gpio_en, - !phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); /* power on */ - gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_en, phy->en_polarity); usleep_range(10000, 15000); /* send reset */ @@ -239,14 +235,13 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) "Could not detect nfc_en polarity, fallback to active high\n"); out: - gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); } static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode) { - gpio_set_value_cansleep(phy->gpio_fw, - run_mode == PN544_FW_MODE ? 1 : 0); - gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_fw, run_mode == PN544_FW_MODE ? 
1 : 0); + gpiod_set_value_cansleep(phy->gpiod_en, phy->en_polarity); usleep_range(10000, 15000); phy->run_mode = run_mode; @@ -269,14 +264,14 @@ static void pn544_hci_i2c_disable(void *phy_id) { struct pn544_i2c_phy *phy = phy_id; - gpio_set_value_cansleep(phy->gpio_fw, 0); - gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_fw, 0); + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); - gpio_set_value_cansleep(phy->gpio_en, phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_en, phy->en_polarity); usleep_range(10000, 15000); - gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity); + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); phy->powered = 0; @@ -874,106 +869,20 @@ static void pn544_hci_i2c_fw_work(struct work_struct *work) } } -static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client) -{ - struct pn544_i2c_phy *phy = i2c_get_clientdata(client); - struct gpio_desc *gpiod_en, *gpiod_fw; - struct device *dev = &client->dev; - - /* Get EN GPIO from ACPI */ - gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1, - GPIOD_OUT_LOW); - if (IS_ERR(gpiod_en)) { - nfc_err(dev, "Unable to get EN GPIO\n"); - return -ENODEV; - } - - phy->gpio_en = desc_to_gpio(gpiod_en); - - /* Get FW GPIO from ACPI */ - gpiod_fw = devm_gpiod_get_index(dev, PN544_GPIO_NAME_FW, 2, - GPIOD_OUT_LOW); - if (IS_ERR(gpiod_fw)) { - nfc_err(dev, "Unable to get FW GPIO\n"); - return -ENODEV; - } - - phy->gpio_fw = desc_to_gpio(gpiod_fw); - - return 0; -} - -static int pn544_hci_i2c_of_request_resources(struct i2c_client *client) -{ - struct pn544_i2c_phy *phy = i2c_get_clientdata(client); - struct device_node *pp; - int ret; - - pp = client->dev.of_node; - if (!pp) { - ret = -ENODEV; - goto err_dt; - } - - /* Obtention of EN GPIO from device tree */ - ret = of_get_named_gpio(pp, "enable-gpios", 0); - if (ret < 0) { - if (ret != -EPROBE_DEFER) - nfc_err(&client->dev, - "Failed to get EN gpio, error: %d\n", ret); - goto err_dt; - } - phy->gpio_en = ret; - - /* Configuration of EN GPIO */ - ret = gpio_request(phy->gpio_en, PN544_GPIO_NAME_EN); - if (ret) { - nfc_err(&client->dev, "Fail EN pin\n"); - goto err_dt; - } - ret = gpio_direction_output(phy->gpio_en, 0); - if (ret) { - nfc_err(&client->dev, "Fail EN pin direction\n"); - goto err_gpio_en; - } - - /* Obtention of FW GPIO from device tree */ - ret = of_get_named_gpio(pp, "firmware-gpios", 0); - if (ret < 0) { - if (ret != -EPROBE_DEFER) - nfc_err(&client->dev, - "Failed to get FW gpio, error: %d\n", ret); - goto err_gpio_en; - } - phy->gpio_fw = ret; - - /* Configuration of FW GPIO */ - ret = gpio_request(phy->gpio_fw, PN544_GPIO_NAME_FW); - if (ret) { - nfc_err(&client->dev, "Fail FW pin\n"); - goto err_gpio_en; - } - ret = gpio_direction_output(phy->gpio_fw, 0); - if (ret) { - nfc_err(&client->dev, "Fail FW pin direction\n"); - goto err_gpio_fw; - } - - return 0; +static const struct acpi_gpio_params enable_gpios = { 1, 0, false }; +static const struct acpi_gpio_params firmware_gpios = { 2, 0, false }; -err_gpio_fw: - gpio_free(phy->gpio_fw); -err_gpio_en: - gpio_free(phy->gpio_en); -err_dt: - return ret; -} +static const struct acpi_gpio_mapping acpi_pn544_gpios[] = { + { "enable-gpios", &enable_gpios, 1 }, + { "firmware-gpios", &firmware_gpios, 1 }, + { }, +}; static int pn544_hci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct device *dev = &client->dev; struct pn544_i2c_phy *phy; - 
struct pn544_nfc_platform_data *pdata; int r = 0; dev_dbg(&client->dev, "%s\n", __func__); @@ -995,53 +904,33 @@ static int pn544_hci_i2c_probe(struct i2c_client *client, phy->i2c_dev = client; i2c_set_clientdata(client, phy); - pdata = client->dev.platform_data; - - /* No platform data, using device tree. */ - if (!pdata && client->dev.of_node) { - r = pn544_hci_i2c_of_request_resources(client); - if (r) { - nfc_err(&client->dev, "No DT data\n"); - return r; - } - /* Using platform data. */ - } else if (pdata) { - - if (pdata->request_resources == NULL) { - nfc_err(&client->dev, "request_resources() missing\n"); - return -EINVAL; - } + r = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), acpi_pn544_gpios); + if (r) + dev_dbg(dev, "Unable to add GPIO mapping table\n"); - r = pdata->request_resources(client); - if (r) { - nfc_err(&client->dev, - "Cannot get platform resources\n"); - return r; - } + /* Get EN GPIO */ + phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_en)) { + nfc_err(dev, "Unable to get EN GPIO\n"); + return PTR_ERR(phy->gpiod_en); + } - phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE); - phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET); - /* Using ACPI */ - } else if (ACPI_HANDLE(&client->dev)) { - r = pn544_hci_i2c_acpi_request_resources(client); - if (r) { - nfc_err(&client->dev, - "Cannot get ACPI data\n"); - return r; - } - } else { - nfc_err(&client->dev, "No platform data\n"); - return -EINVAL; + /* Get FW GPIO */ + phy->gpiod_fw = devm_gpiod_get(dev, "firmware", GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_fw)) { + nfc_err(dev, "Unable to get FW GPIO\n"); + return PTR_ERR(phy->gpiod_fw); } pn544_hci_i2c_platform_init(phy); - r = request_threaded_irq(client->irq, NULL, pn544_hci_i2c_irq_thread_fn, - IRQF_TRIGGER_RISING | IRQF_ONESHOT, - PN544_HCI_I2C_DRIVER_NAME, phy); + r = devm_request_threaded_irq(&client->dev, client->irq, NULL, + pn544_hci_i2c_irq_thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + PN544_HCI_I2C_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); - goto err_rti; + return r; } r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, @@ -1049,28 +938,14 @@ static int pn544_hci_i2c_probe(struct i2c_client *client, PN544_HCI_I2C_LLC_MAX_PAYLOAD, pn544_hci_i2c_fw_download, &phy->hdev); if (r < 0) - goto err_hci; + return r; return 0; - -err_hci: - free_irq(client->irq, phy); - -err_rti: - if (!pdata) { - gpio_free(phy->gpio_en); - gpio_free(phy->gpio_fw); - } else if (pdata->free_resources) { - pdata->free_resources(); - } - - return r; } static int pn544_hci_i2c_remove(struct i2c_client *client) { struct pn544_i2c_phy *phy = i2c_get_clientdata(client); - struct pn544_nfc_platform_data *pdata = client->dev.platform_data; dev_dbg(&client->dev, "%s\n", __func__); @@ -1083,17 +958,7 @@ static int pn544_hci_i2c_remove(struct i2c_client *client) if (phy->powered) pn544_hci_i2c_disable(phy); - free_irq(client->irq, phy); - - /* No platform data, GPIOs have been requested by this driver */ - if (!pdata) { - gpio_free(phy->gpio_en); - gpio_free(phy->gpio_fw); - /* Using platform data */ - } else if (pdata->free_resources) { - pdata->free_resources(); - } - + acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev)); return 0; } diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 2b2330b235e63a251755bcd23ae4e066cd4aa7db..19be93e177fe563fce1765e170f6752270e1aab0 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -21,8 +21,9 @@ #define VERSION "0.1" -#define 
SONY_VENDOR_ID 0x054c -#define RCS380_PRODUCT_ID 0x06c1 +#define SONY_VENDOR_ID 0x054c +#define RCS380S_PRODUCT_ID 0x06c1 +#define RCS380P_PRODUCT_ID 0x06c3 #define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ NFC_PROTO_MIFARE_MASK | \ @@ -725,23 +726,33 @@ static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags) static int port100_send_ack(struct port100 *dev) { - int rc; + int rc = 0; mutex_lock(&dev->out_urb_lock); - init_completion(&dev->cmd_cancel_done); + /* + * If a prior cancel is already in-flight (dev->cmd_cancel == true), + * we can skip sending another one. This call will then wait for the + * prior cancel to complete or, rarely, be merged into the next + * cancel if one is started before the wait finishes. Either way, + * the waiter is woken up sooner or later. + */ + if (!dev->cmd_cancel) { + reinit_completion(&dev->cmd_cancel_done); - usb_kill_urb(dev->out_urb); + usb_kill_urb(dev->out_urb); - dev->out_urb->transfer_buffer = ack_frame; - dev->out_urb->transfer_buffer_length = sizeof(ack_frame); - rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); + dev->out_urb->transfer_buffer = ack_frame; + dev->out_urb->transfer_buffer_length = sizeof(ack_frame); + rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); - /* Set the cmd_cancel flag only if the URB has been successfully - * submitted. It will be reset by the out URB completion callback - * port100_send_complete(). - */ - dev->cmd_cancel = !rc; + /* + * Set the cmd_cancel flag only if the URB has been + * successfully submitted. It will be reset by the out + * URB completion callback port100_send_complete(). + */ + dev->cmd_cancel = !rc; + } mutex_unlock(&dev->out_urb_lock); @@ -928,8 +939,8 @@ static void port100_send_complete(struct urb *urb) struct port100 *dev = urb->context; if (dev->cmd_cancel) { + complete_all(&dev->cmd_cancel_done); dev->cmd_cancel = false; - complete(&dev->cmd_cancel_done); } switch (urb->status) { @@ -1477,7 +1488,8 @@ static struct nfc_digital_ops port100_digital_ops = { }; static const struct usb_device_id port100_table[] = { - { USB_DEVICE(SONY_VENDOR_ID, RCS380_PRODUCT_ID), }, + { USB_DEVICE(SONY_VENDOR_ID, RCS380S_PRODUCT_ID), }, + { USB_DEVICE(SONY_VENDOR_ID, RCS380P_PRODUCT_ID), }, { } }; MODULE_DEVICE_TABLE(usb, port100_table); @@ -1538,11 +1550,13 @@ static int port100_probe(struct usb_interface *interface, usb_fill_bulk_urb(dev->out_urb, dev->udev, usb_sndbulkpipe(dev->udev, out_endpoint), NULL, 0, port100_send_complete, dev); + dev->out_urb->transfer_flags = URB_ZERO_PACKET; dev->skb_headroom = PORT100_FRAME_HEADER_LEN + PORT100_COMM_RF_HEAD_MAX_LEN; dev->skb_tailroom = PORT100_FRAME_TAIL_LEN; + init_completion(&dev->cmd_cancel_done); INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete); /* The first thing to do with the Port-100 is to set the command type diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index dacb9166081b9e152a70002404c5f6eb544ad746..50be3b788f1cca7ca5265c11f9c60faa1df0b8dd 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -959,10 +959,8 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, unsigned long quirks = 0; info = kzalloc(sizeof(struct st21nfca_hci_info), GFP_KERNEL); - if (!info) { - r = -ENOMEM; - goto err_alloc_hdev; - } + if (!info) + return -ENOMEM; info->phy_ops = phy_ops; info->phy_id = phy_id; @@ -978,8 +976,10 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, * persistent info to discriminate 2 identical chips */ dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES); - if (dev_num >= 
ST21NFCA_NUM_DEVICES) - return -ENODEV; + if (dev_num >= ST21NFCA_NUM_DEVICES) { + r = -ENODEV; + goto err_alloc_hdev; + } set_bit(dev_num, dev_mask); diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 5a82f553906cbc778f53d94a07f63db1e05d7edc..02a920ca07c8477a526856d229d286c129de72f6 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -20,17 +20,15 @@ #include #include #include -#include #include #include #include #include -#include #include #include #include #include -#include + #include #include @@ -60,6 +58,7 @@ #define IS_START_OF_FRAME(buf) (buf[0] == ST21NFCA_SOF_EOF && \ buf[1] == 0) +#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" #define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c" #define ST21NFCA_GPIO_NAME_EN "enable" @@ -68,9 +67,7 @@ struct st21nfca_i2c_phy { struct i2c_client *i2c_dev; struct nfc_hci_dev *hdev; - unsigned int gpio_ena; - unsigned int irq_polarity; - + struct gpio_desc *gpiod_ena; struct st21nfca_se_status se_status; struct sk_buff *pending_skb; @@ -151,7 +148,7 @@ static int st21nfca_hci_i2c_enable(void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; - gpio_set_value(phy->gpio_ena, 1); + gpiod_set_value(phy->gpiod_ena, 1); phy->powered = 1; phy->run_mode = ST21NFCA_HCI_MODE; @@ -164,7 +161,7 @@ static void st21nfca_hci_i2c_disable(void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; - gpio_set_value(phy->gpio_ena, 0); + gpiod_set_value(phy->gpiod_ena, 0); phy->powered = 0; } @@ -507,33 +504,14 @@ static struct nfc_phy_ops i2c_phy_ops = { static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) { struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); - struct gpio_desc *gpiod_ena; struct device *dev = &client->dev; - u8 tmp; /* Get EN GPIO from ACPI */ - gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1, - GPIOD_OUT_LOW); - if (!IS_ERR(gpiod_ena)) { + phy->gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1, + GPIOD_OUT_LOW); + if (IS_ERR(phy->gpiod_ena)) { nfc_err(dev, "Unable to get ENABLE GPIO\n"); - return -ENODEV; - } - - phy->gpio_ena = desc_to_gpio(gpiod_ena); - - phy->irq_polarity = irq_get_trigger_type(client->irq); - - phy->se_status.is_ese_present = false; - phy->se_status.is_uicc_present = false; - - if (device_property_present(dev, "ese-present")) { - device_property_read_u8(dev, "ese-present", &tmp); - phy->se_status.is_ese_present = tmp; - } - - if (device_property_present(dev, "uicc-present")) { - device_property_read_u8(dev, "uicc-present", &tmp); - phy->se_status.is_uicc_present = tmp; + return PTR_ERR(phy->gpiod_ena); } return 0; @@ -542,70 +520,16 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client) static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) { struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); - struct device_node *pp; - int gpio; - int r; - - pp = client->dev.of_node; - if (!pp) - return -ENODEV; + struct device *dev = &client->dev; /* Get GPIO from device tree */ - gpio = of_get_named_gpio(pp, "enable-gpios", 0); - if (gpio < 0) { - nfc_err(&client->dev, "Failed to retrieve enable-gpios from device tree\n"); - return gpio; - } - - /* GPIO request and configuration */ - r = devm_gpio_request_one(&client->dev, gpio, GPIOF_OUT_INIT_HIGH, - ST21NFCA_GPIO_NAME_EN); - if (r) { - nfc_err(&client->dev, "Failed to request enable pin\n"); - return r; + phy->gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 0, + GPIOD_OUT_HIGH); + if (IS_ERR(phy->gpiod_ena)) { + 
nfc_err(dev, "Failed to request enable pin\n"); + return PTR_ERR(phy->gpiod_ena); } - phy->gpio_ena = gpio; - - phy->irq_polarity = irq_get_trigger_type(client->irq); - - phy->se_status.is_ese_present = - of_property_read_bool(pp, "ese-present"); - phy->se_status.is_uicc_present = - of_property_read_bool(pp, "uicc-present"); - - return 0; -} - -static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) -{ - struct st21nfca_nfc_platform_data *pdata; - struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); - int r; - - pdata = client->dev.platform_data; - if (pdata == NULL) { - nfc_err(&client->dev, "No platform data\n"); - return -EINVAL; - } - - /* store for later use */ - phy->gpio_ena = pdata->gpio_ena; - phy->irq_polarity = pdata->irq_polarity; - - if (phy->gpio_ena > 0) { - r = devm_gpio_request_one(&client->dev, phy->gpio_ena, - GPIOF_OUT_INIT_HIGH, - ST21NFCA_GPIO_NAME_EN); - if (r) { - pr_err("%s : ena gpio_request failed\n", __FILE__); - return r; - } - } - - phy->se_status.is_ese_present = pdata->is_ese_present; - phy->se_status.is_uicc_present = pdata->is_uicc_present; - return 0; } @@ -613,7 +537,6 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct st21nfca_i2c_phy *phy; - struct st21nfca_nfc_platform_data *pdata; int r; dev_dbg(&client->dev, "%s\n", __func__); @@ -639,19 +562,12 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, mutex_init(&phy->phy_lock); i2c_set_clientdata(client, phy); - pdata = client->dev.platform_data; - if (!pdata && client->dev.of_node) { + if (client->dev.of_node) { r = st21nfca_hci_i2c_of_request_resources(client); if (r) { nfc_err(&client->dev, "No platform data\n"); return r; } - } else if (pdata) { - r = st21nfca_hci_i2c_request_resources(client); - if (r) { - nfc_err(&client->dev, "Cannot get platform resources\n"); - return r; - } } else if (ACPI_HANDLE(&client->dev)) { r = st21nfca_hci_i2c_acpi_request_resources(client); if (r) { @@ -663,6 +579,11 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, return -ENODEV; } + phy->se_status.is_ese_present = + device_property_read_bool(&client->dev, "ese-present"); + phy->se_status.is_uicc_present = + device_property_read_bool(&client->dev, "uicc-present"); + r = st21nfca_hci_platform_init(phy); if (r < 0) { nfc_err(&client->dev, "Unable to reboot st21nfca\n"); @@ -671,7 +592,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client, r = devm_request_threaded_irq(&client->dev, client->irq, NULL, st21nfca_hci_irq_thread_fn, - phy->irq_polarity | IRQF_ONESHOT, + IRQF_ONESHOT, ST21NFCA_HCI_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index 26c9dbbccb0c504c59fcec5157adf2310c323587..2d1c8ca6e67907002a05d284316925b951d178de 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -124,6 +124,9 @@ NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK) #define TRF7970A_AUTOSUSPEND_DELAY 30000 /* 30 seconds */ +#define TRF7970A_13MHZ_CLOCK_FREQUENCY 13560000 +#define TRF7970A_27MHZ_CLOCK_FREQUENCY 27120000 + #define TRF7970A_RX_SKB_ALLOC_SIZE 256 @@ -441,6 +444,7 @@ struct trf7970a { u8 iso_ctrl_tech; u8 modulator_sys_clk_ctrl; u8 special_fcn_reg1; + u8 io_ctrl; unsigned int guard_time; int technology; int framing; @@ -1048,6 +1052,11 @@ static int trf7970a_init(struct trf7970a *trf) if (ret) goto err_out; + ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL, + trf->io_ctrl | 
TRF7970A_REG_IO_CTRL_VRS(0x1)); + if (ret) + goto err_out; + ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0); if (ret) goto err_out; @@ -1056,12 +1065,11 @@ static int trf7970a_init(struct trf7970a *trf) trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON; - ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, 0); + ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, + trf->modulator_sys_clk_ctrl); if (ret) goto err_out; - trf->modulator_sys_clk_ctrl = 0; - ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 | TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32); @@ -1181,27 +1189,37 @@ static int trf7970a_in_config_rf_tech(struct trf7970a *trf, int tech) switch (tech) { case NFC_DIGITAL_RF_TECH_106A: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_OOK; trf->guard_time = TRF7970A_GUARD_TIME_NFCA; break; case NFC_DIGITAL_RF_TECH_106B: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_ASK10; trf->guard_time = TRF7970A_GUARD_TIME_NFCB; break; case NFC_DIGITAL_RF_TECH_212F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_212; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_ASK10; trf->guard_time = TRF7970A_GUARD_TIME_NFCF; break; case NFC_DIGITAL_RF_TECH_424F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_424; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_ASK10; trf->guard_time = TRF7970A_GUARD_TIME_NFCF; break; case NFC_DIGITAL_RF_TECH_ISO15693: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_OOK; trf->guard_time = TRF7970A_GUARD_TIME_15693; break; default: @@ -1571,17 +1589,23 @@ static int trf7970a_tg_config_rf_tech(struct trf7970a *trf, int tech) trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE | TRF7970A_ISO_CTRL_NFC_CE | TRF7970A_ISO_CTRL_NFC_CE_14443A; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_OOK; break; case NFC_DIGITAL_RF_TECH_212F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE | TRF7970A_ISO_CTRL_NFC_NFCF_212; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_ASK10; break; case NFC_DIGITAL_RF_TECH_424F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE | TRF7970A_ISO_CTRL_NFC_NFCF_424; - trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->modulator_sys_clk_ctrl = + (trf->modulator_sys_clk_ctrl & 0xf8) | + TRF7970A_MODULATOR_DEPTH_ASK10; break; default: dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech); @@ -1749,7 +1773,7 @@ static int _trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, goto out_err; ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL, - TRF7970A_REG_IO_CTRL_VRS(0x1)); + trf->io_ctrl | 
TRF7970A_REG_IO_CTRL_VRS(0x1)); if (ret) goto out_err; @@ -1885,8 +1909,10 @@ static int trf7970a_power_up(struct trf7970a *trf) usleep_range(5000, 6000); if (!(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW)) { - gpio_set_value(trf->en2_gpio, 1); - usleep_range(1000, 2000); + if (gpio_is_valid(trf->en2_gpio)) { + gpio_set_value(trf->en2_gpio, 1); + usleep_range(1000, 2000); + } } gpio_set_value(trf->en_gpio, 1); @@ -1914,7 +1940,8 @@ static int trf7970a_power_down(struct trf7970a *trf) } gpio_set_value(trf->en_gpio, 0); - gpio_set_value(trf->en2_gpio, 0); + if (gpio_is_valid(trf->en2_gpio)) + gpio_set_value(trf->en2_gpio, 0); ret = regulator_disable(trf->regulator); if (ret) @@ -1987,6 +2014,7 @@ static int trf7970a_probe(struct spi_device *spi) struct device_node *np = spi->dev.of_node; struct trf7970a *trf; int uvolts, autosuspend_delay, ret; + u32 clk_freq = TRF7970A_13MHZ_CLOCK_FREQUENCY; if (!np) { dev_err(&spi->dev, "No Device Tree entry\n"); @@ -2032,15 +2060,23 @@ static int trf7970a_probe(struct spi_device *spi) trf->en2_gpio = of_get_named_gpio(np, "ti,enable-gpios", 1); if (!gpio_is_valid(trf->en2_gpio)) { - dev_err(trf->dev, "No EN2 GPIO property\n"); - return trf->en2_gpio; + dev_info(trf->dev, "No EN2 GPIO property\n"); + } else { + ret = devm_gpio_request_one(trf->dev, trf->en2_gpio, + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "trf7970a EN2"); + if (ret) { + dev_err(trf->dev, "Can't request EN2 GPIO: %d\n", ret); + return ret; + } } - ret = devm_gpio_request_one(trf->dev, trf->en2_gpio, - GPIOF_DIR_OUT | GPIOF_INIT_LOW, "trf7970a EN2"); - if (ret) { - dev_err(trf->dev, "Can't request EN2 GPIO: %d\n", ret); - return ret; + of_property_read_u32(np, "clock-frequency", &clk_freq); + if ((clk_freq != TRF7970A_27MHZ_CLOCK_FREQUENCY) && + (clk_freq != TRF7970A_13MHZ_CLOCK_FREQUENCY)) { + dev_err(trf->dev, + "clock-frequency (%u Hz) unsupported\n", + clk_freq); + return -EINVAL; } if (of_property_read_bool(np, "en2-rf-quirk")) @@ -2077,6 +2113,24 @@ static int trf7970a_probe(struct spi_device *spi) if (uvolts > 4000000) trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3; + trf->regulator = devm_regulator_get(&spi->dev, "vdd-io"); + if (IS_ERR(trf->regulator)) { + ret = PTR_ERR(trf->regulator); + dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret); + goto err_destroy_lock; + } + + ret = regulator_enable(trf->regulator); + if (ret) { + dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret); + goto err_destroy_lock; + } + + if (regulator_get_voltage(trf->regulator) == 1800000) { + trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW; + dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n"); + } + trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops, TRF7970A_SUPPORTED_PROTOCOLS, NFC_DIGITAL_DRV_CAPS_IN_CRC | diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 0b2979816dbf5726ee1d7864fd6046308e503498..7e4c80f9b6cda0d37fe7ecca5fb276687ffa9c1e 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -22,6 +22,8 @@ #include #include +#define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ + MODULE_AUTHOR("Grant Likely "); MODULE_LICENSE("GPL"); @@ -221,6 +223,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) mdio->dev.of_node = np; + /* Get bus level PHY reset GPIO details */ + mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; + of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us); + mdio->num_reset_gpios = of_gpio_named_count(np, "reset-gpios"); + /* Register the MDIO bus */ rc = mdiobus_register(mdio); if (rc) diff --git a/drivers/pci/msi.c 
b/drivers/pci/msi.c index d571bc330686517a389efec290d0cf499fb0dbd5..0042c365b29b9b94c791ca99d00dc7263354bb71 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -973,27 +973,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, return msix_capability_init(dev, entries, nvec, affd); } -/** - * pci_enable_msix - configure device's MSI-X capability structure - * @dev: pointer to the pci_dev data structure of MSI-X device function - * @entries: pointer to an array of MSI-X entries (optional) - * @nvec: number of MSI-X irqs requested for allocation by device driver - * - * Setup the MSI-X capability structure of device function with the number - * of requested irqs upon its software driver call to request for - * MSI-X mode enabled on its hardware device function. A return of zero - * indicates the successful configuration of MSI-X capability structure - * with new allocated MSI-X irqs. A return of < 0 indicates a failure. - * Or a return of > 0 indicates that driver request is exceeding the number - * of irqs or MSI-X vectors available. Driver should use the returned value to - * re-send its request. - **/ -int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) -{ - return __pci_enable_msix(dev, entries, nvec, NULL); -} -EXPORT_SYMBOL(pci_enable_msix); - void pci_msix_shutdown(struct pci_dev *dev) { struct msi_desc *entry; diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 1dc43fc5f65f38d8028388548452bd12218641fc..faad69a1a5974b8f76acde68a9e8c8922e27d01e 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -76,7 +76,7 @@ config QCOM_ADSP_PIL depends on OF && ARCH_QCOM depends on REMOTEPROC depends on QCOM_SMEM - depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) select MFD_SYSCON select QCOM_MDT_LOADER select QCOM_RPROC_COMMON @@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL depends on OF && ARCH_QCOM depends on QCOM_SMEM depends on REMOTEPROC - depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) select MFD_SYSCON select QCOM_RPROC_COMMON select QCOM_SCM @@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL config QCOM_WCNSS_PIL tristate "Qualcomm WCNSS Peripheral Image Loader" depends on OF && ARCH_QCOM - depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) + depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) depends on QCOM_SMEM depends on REMOTEPROC select QCOM_MDT_LOADER diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig index f12ac0b28263f1dc6ae646554cc5a0b4226a1a2a..edc008f556632b03d182efc2e306601d38519b42 100644 --- a/drivers/rpmsg/Kconfig +++ b/drivers/rpmsg/Kconfig @@ -16,7 +16,6 @@ config RPMSG_CHAR config RPMSG_QCOM_SMD tristate "Qualcomm Shared Memory Driver (SMD)" depends on QCOM_SMEM - depends on QCOM_SMD=n select RPMSG help Say y here to enable support for the Qualcomm Shared Memory Driver diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index fd5944bbe224964a5afce64c1a82ba4d33c27dc2..730d9619400e5b1515b41371e6bc6f3adfa2118f 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1283,7 +1283,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) p_header = (struct pdu *) (skb_tail_pointer(ch->trans_skb) - skb->len); p_header->pdu_flag = 0x00; - if (skb->protocol == 
ntohs(ETH_P_SNAP)) + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) p_header->pdu_flag |= 0x60; else p_header->pdu_flag |= 0x20; diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index ac65f12bcd43c9dca411940ecabbf315e1b8585f..198842ce6876e812b07d57ab826ca8934920bd44 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -106,7 +106,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) priv->stats.rx_frame_errors++; return; } - pskb->protocol = ntohs(header->type); + pskb->protocol = cpu_to_be16(header->type); if ((header->length <= LL_HEADER_LENGTH) || (len <= LL_HEADER_LENGTH)) { if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { @@ -125,7 +125,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) header->length -= LL_HEADER_LENGTH; len -= LL_HEADER_LENGTH; if ((header->length > skb_tailroom(pskb)) || - (header->length > len)) { + (header->length > len)) { if (!(ch->logflags & LOG_FLAG_OVERRUN)) { CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): Packet size %d (overrun)" @@ -485,7 +485,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) } else { atomic_inc(&skb->users); header.length = l; - header.type = skb->protocol; + header.type = be16_to_cpu(skb->protocol); header.unused = 0; memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH); @@ -503,7 +503,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) atomic_inc(&skb->users); ch->prof.txlen += skb->len; header.length = skb->len + LL_HEADER_LENGTH; - header.type = skb->protocol; + header.type = be16_to_cpu(skb->protocol); header.unused = 0; memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH); block_len = skb->len + 2; @@ -690,7 +690,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) p_header->pdu_offset = skb->len; p_header->pdu_proto = 0x01; p_header->pdu_flag = 0x00; - if (skb->protocol == ntohs(ETH_P_SNAP)) { + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; } else { p_header->pdu_flag |= PDU_FIRST; @@ -745,7 +745,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) p_header->pdu_proto = 0x01; p_header->pdu_flag = 0x00; p_header->pdu_seq = 0; - if (skb->protocol == ntohs(ETH_P_SNAP)) { + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; } else { p_header->pdu_flag |= PDU_FIRST; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 3f85b97ab8d2704820ba1f6f038a99534a14461a..dba94b486f057822ba45d29bea1589adb4026ea5 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, skb_put(pskb, NETIUCV_HDRLEN); pskb->dev = dev; pskb->ip_summed = CHECKSUM_NONE; - pskb->protocol = ntohs(ETH_P_IP); + pskb->protocol = cpu_to_be16(ETH_P_IP); while (1) { struct sk_buff *skb; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d9561e39c3b237078a2c1fe888042e1321be7883..f6aa21176d89780e9e745f9045255ff37f1cf788 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -240,7 +240,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_TX_TIMEOUT 100 * HZ #define QETH_RCD_TIMEOUT 60 * HZ #define QETH_RECLAIM_WORK_TIME HZ -#define QETH_HEADER_SIZE 32 #define QETH_MAX_PORTNO 15 /*IPv6 address autoconfiguration stuff*/ @@ -447,7 +446,7 @@ struct qeth_qdio_out_buffer { atomic_t state; int 
next_element_to_fill; struct sk_buff_head skb_list; - int is_header[16]; + int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; struct qaob *aob; struct qeth_qdio_out_q *q; @@ -503,22 +502,12 @@ struct qeth_qdio_info { int default_out_queue; }; -enum qeth_send_errors { - QETH_SEND_ERROR_NONE, - QETH_SEND_ERROR_LINK_FAILURE, - QETH_SEND_ERROR_RETRY, - QETH_SEND_ERROR_KICK_IT, -}; - #define QETH_ETH_MAC_V4 0x0100 /* like v4 */ #define QETH_ETH_MAC_V6 0x3333 /* like v6 */ /* tr mc mac is longer, but that will be enough to detect mc frames */ #define QETH_TR_MAC_NC 0xc000 /* non-canonical */ #define QETH_TR_MAC_C 0x0300 /* canonical */ -#define DEFAULT_ADD_HHLEN 0 -#define MAX_ADD_HHLEN 1024 - /** * buffer stuff for read channel */ @@ -644,7 +633,6 @@ struct qeth_reply { atomic_t refcnt; }; - struct qeth_card_blkt { int time_total; int inter_packet; @@ -685,7 +673,6 @@ struct qeth_card_options { struct qeth_ipa_info ipa6; struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ int fake_broadcast; - int add_hhlen; int layer2; int performance_stats; int rx_sg_cb; @@ -717,17 +704,16 @@ struct qeth_discipline { void (*start_poll)(struct ccw_device *, int, unsigned long); qdio_handler_t *input_handler; qdio_handler_t *output_handler; + int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); int (*recover)(void *ptr); int (*setup) (struct ccwgroup_device *); void (*remove) (struct ccwgroup_device *); int (*set_online) (struct ccwgroup_device *); int (*set_offline) (struct ccwgroup_device *); - void (*shutdown)(struct ccwgroup_device *); - int (*prepare) (struct ccwgroup_device *); - void (*complete) (struct ccwgroup_device *); int (*freeze)(struct ccwgroup_device *); int (*thaw) (struct ccwgroup_device *); int (*restore)(struct ccwgroup_device *); + int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd); int (*control_event_handler)(struct qeth_card *card, struct qeth_ipa_cmd *cmd); }; @@ -856,9 +842,9 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) { __be16 *p = &((struct ethhdr *)skb->data)->h_proto; - if (*p == ETH_P_8021Q) + if (be16_to_cpu(*p) == ETH_P_8021Q) p += 2; - switch (*p) { + switch (be16_to_cpu(*p)) { case ETH_P_IPV6: return 6; case ETH_P_IP: @@ -920,14 +906,12 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); int qeth_query_setadapterparms(struct qeth_card *); -int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *, - unsigned int, const char *); -void qeth_queue_input_buffer(struct qeth_card *, int); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long); +int qeth_poll(struct napi_struct *napi, int budget); void qeth_qdio_input_handler(struct ccw_device *, unsigned int, unsigned int, int, int, unsigned long); @@ -948,9 +932,6 @@ void qeth_prepare_control_data(struct qeth_card *, int, void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); -int qeth_mdio_read(struct net_device *, int, int); -int qeth_snmp_command(struct qeth_card *, char __user *); -int qeth_query_oat_command(struct qeth_card *, char __user *); int 
qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, @@ -965,16 +946,18 @@ int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int, int, int); + struct sk_buff *, struct qeth_hdr *, int, int); int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int); +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct net_device *, struct ethtool_stats *, u64 *); void qeth_core_get_strings(struct net_device *, u32, u8 *); void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); -int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); +int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9a5f99ccb122bab3918f136f403bdec04b98d2f4..38114a8d56e00f471360ab400767dcb8ba8a1a7b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -55,7 +55,6 @@ static struct mutex qeth_mod_mutex; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); -static int qeth_issue_next_read(struct qeth_card *); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void qeth_free_buffer_pool(struct qeth_card *); @@ -1202,7 +1201,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, while (skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); - if (skb->protocol == ETH_P_AF_IUCV) { + if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { if (skb->sk) { struct iucv_sock *iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, notification); @@ -1233,7 +1232,8 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) while (skb) { QETH_CARD_TEXT(buf->q->card, 5, "skbr"); QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); - if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { + if (notify_general_error && + be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { if (skb->sk) { iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); @@ -1396,7 +1396,6 @@ static void qeth_set_intial_options(struct qeth_card *card) card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; card->options.fake_broadcast = 0; - card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; @@ -3217,8 +3216,10 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) } EXPORT_SYMBOL_GPL(qeth_hw_trap); -int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, - unsigned int qdio_error, const char *dbftext) +static int 
qeth_check_qdio_errors(struct qeth_card *card, + struct qdio_buffer *buf, + unsigned int qdio_error, + const char *dbftext) { if (qdio_error) { QETH_CARD_TEXT(card, 2, dbftext); @@ -3235,18 +3236,8 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, } return 0; } -EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); -static void qeth_buffer_reclaim_work(struct work_struct *work) -{ - struct qeth_card *card = container_of(work, struct qeth_card, - buffer_reclaim_work.work); - - QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); - qeth_queue_input_buffer(card, card->reclaim_index); -} - -void qeth_queue_input_buffer(struct qeth_card *card, int index) +static void qeth_queue_input_buffer(struct qeth_card *card, int index) { struct qeth_qdio_q *queue = card->qdio.in_q; struct list_head *lh; @@ -3320,9 +3311,17 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) QDIO_MAX_BUFFERS_PER_Q; } } -EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); -static int qeth_handle_send_error(struct qeth_card *card, +static void qeth_buffer_reclaim_work(struct work_struct *work) +{ + struct qeth_card *card = container_of(work, struct qeth_card, + buffer_reclaim_work.work); + + QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); + qeth_queue_input_buffer(card, card->reclaim_index); +} + +static void qeth_handle_send_error(struct qeth_card *card, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) { int sbalf15 = buffer->buffer->element[15].sflags; @@ -3338,15 +3337,14 @@ static int qeth_handle_send_error(struct qeth_card *card, qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); if (!qdio_err) - return QETH_SEND_ERROR_NONE; + return; if ((sbalf15 >= 15) && (sbalf15 <= 31)) - return QETH_SEND_ERROR_RETRY; + return; QETH_CARD_TEXT(card, 1, "lnkfail"); QETH_CARD_TEXT_(card, 1, "%04x %02x", (u16)qdio_err, (u8)sbalf15); - return QETH_SEND_ERROR_LINK_FAILURE; } /* @@ -3799,9 +3797,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); case QETH_PRIO_Q_ING_VLAN: tci = &((struct ethhdr *)skb->data)->h_proto; - if (*tci == ETH_P_8021Q) - return qeth_cut_iqd_prio(card, ~*(tci + 1) >> - (VLAN_PRIO_SHIFT + 1) & 3); + if (be16_to_cpu(*tci) == ETH_P_8021Q) + return qeth_cut_iqd_prio(card, + ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3); break; default: break; @@ -4026,8 +4024,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, - struct qeth_hdr *hdr, int elements_needed, - int offset, int hd_len) + struct qeth_hdr *hdr, int offset, int hd_len) { struct qeth_qdio_out_buffer *buffer; int index; @@ -4419,7 +4416,7 @@ void qeth_tx_timeout(struct net_device *dev) } EXPORT_SYMBOL_GPL(qeth_tx_timeout); -int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) +static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) { struct qeth_card *card = dev->ml_priv; int rc = 0; @@ -4482,7 +4479,6 @@ int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) } return rc; } -EXPORT_SYMBOL_GPL(qeth_mdio_read); static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int len, @@ -4572,7 +4568,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card, return 0; } -int qeth_snmp_command(struct qeth_card *card, char __user *udata) +static int qeth_snmp_command(struct qeth_card *card, char __user *udata) { struct 
qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@ -4632,7 +4628,6 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) kfree(qinfo.udata); return rc; } -EXPORT_SYMBOL_GPL(qeth_snmp_command); static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -4664,7 +4659,7 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, return 0; } -int qeth_query_oat_command(struct qeth_card *card, char __user *udata) +static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) { int rc = 0; struct qeth_cmd_buffer *iob; @@ -4734,7 +4729,6 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata) out: return rc; } -EXPORT_SYMBOL_GPL(qeth_query_oat_command); static int qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -4775,12 +4769,10 @@ static int qeth_query_card_info(struct qeth_card *card, static inline int qeth_get_qdio_q_format(struct qeth_card *card) { - switch (card->info.type) { - case QETH_CARD_TYPE_IQD: - return 2; - default: - return 0; - } + if (card->info.type == QETH_CARD_TYPE_IQD) + return QDIO_IQDIO_QFMT; + else + return QDIO_QETH_QFMT; } static void qeth_determine_capabilities(struct qeth_card *card) @@ -4819,8 +4811,9 @@ static void qeth_determine_capabilities(struct qeth_card *card) QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); - QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1); - QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3); + QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1); + QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2); + QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3); QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || @@ -5288,6 +5281,83 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); +int qeth_poll(struct napi_struct *napi, int budget) +{ + struct qeth_card *card = container_of(napi, struct qeth_card, napi); + int work_done = 0; + struct qeth_qdio_buffer *buffer; + int done; + int new_budget = budget; + + if (card->options.performance_stats) { + card->perf_stats.inbound_cnt++; + card->perf_stats.inbound_start_time = qeth_get_micros(); + } + + while (1) { + if (!card->rx.b_count) { + card->rx.qdio_err = 0; + card->rx.b_count = qdio_get_next_buffers( + card->data.ccwdev, 0, &card->rx.b_index, + &card->rx.qdio_err); + if (card->rx.b_count <= 0) { + card->rx.b_count = 0; + break; + } + card->rx.b_element = + &card->qdio.in_q->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + + while (card->rx.b_count) { + buffer = &card->qdio.in_q->bufs[card->rx.b_index]; + if (!(card->rx.qdio_err && + qeth_check_qdio_errors(card, buffer->buffer, + card->rx.qdio_err, "qinerr"))) + work_done += + card->discipline->process_rx_buffer( + card, new_budget, &done); + else + done = 1; + + if (done) { + if (card->options.performance_stats) + card->perf_stats.bufs_rec++; + qeth_put_buffer_pool_entry(card, + buffer->pool_entry); + qeth_queue_input_buffer(card, card->rx.b_index); + card->rx.b_count--; + if (card->rx.b_count) { + card->rx.b_index = + (card->rx.b_index + 1) % + QDIO_MAX_BUFFERS_PER_Q; + card->rx.b_element = + &card->qdio.in_q + ->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + } + + if (work_done >= budget) + goto out; + else + 
new_budget = budget - work_done; + } + } + + napi_complete(napi); + if (qdio_start_irq(card->data.ccwdev, 0)) + napi_schedule(&card->napi); +out: + if (card->options.performance_stats) + card->perf_stats.inbound_time += qeth_get_micros() - + card->perf_stats.inbound_start_time; + return work_done; +} +EXPORT_SYMBOL_GPL(qeth_poll); + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -5678,23 +5748,12 @@ static int qeth_core_set_offline(struct ccwgroup_device *gdev) static void qeth_core_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline && card->discipline->shutdown) - card->discipline->shutdown(gdev); -} - -static int qeth_core_prepare(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline && card->discipline->prepare) - return card->discipline->prepare(gdev); - return 0; -} - -static void qeth_core_complete(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline && card->discipline->complete) - card->discipline->complete(gdev); + qeth_set_allowed_threads(card, 0, 1); + if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + qeth_qdio_clear_card(card, 0); + qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); } static int qeth_core_freeze(struct ccwgroup_device *gdev) @@ -5731,8 +5790,8 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, - .prepare = qeth_core_prepare, - .complete = qeth_core_complete, + .prepare = NULL, + .complete = NULL, .freeze = qeth_core_freeze, .thaw = qeth_core_thaw, .restore = qeth_core_restore, @@ -5762,6 +5821,60 @@ static const struct attribute_group *qeth_drv_attr_groups[] = { NULL, }; +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = dev->ml_priv; + struct mii_ioctl_data *mii_data; + int rc = 0; + + if (!card) + return -ENODEV; + + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + + if (card->info.type == QETH_CARD_TYPE_OSN) + return -EPERM; + + switch (cmd) { + case SIOC_QETH_ADP_SET_SNMP_CONTROL: + rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_GET_CARD_TYPE: + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) && + !card->info.guestlan) + return 1; + else + return 0; + case SIOCGMIIPHY: + mii_data = if_mii(rq); + mii_data->phy_id = 0; + break; + case SIOCGMIIREG: + mii_data = if_mii(rq); + if (mii_data->phy_id != 0) + rc = -EINVAL; + else + mii_data->val_out = qeth_mdio_read(dev, + mii_data->phy_id, mii_data->reg_num); + break; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); + break; + default: + if (card->discipline->do_ioctl) + rc = card->discipline->do_ioctl(dev, rq, cmd); + else + rc = -EOPNOTSUPP; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "ioce%x", rc); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_ioctl); + static struct { const char str[ETH_GSTRING_LEN]; } qeth_ethtool_stats_keys[] = { @@ -5896,104 +6009,124 @@ void qeth_core_get_drvinfo(struct net_device *dev, } EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); -/* Helper function to fill 'advertizing' and 'supported' which are the same. 
*/ -/* Autoneg and full-duplex are supported and advertized uncondionally. */ -/* Always advertize and support all speeds up to specified, and only one */ +/* Helper function to fill 'advertising' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertised unconditionally. */ +/* Always advertise and support all speeds up to specified, and only one */ /* specified port type. */ -static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd, +static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, int maxspeed, int porttype) { - int port_sup, port_adv, spd_sup, spd_adv; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); switch (porttype) { case PORT_TP: - port_sup = SUPPORTED_TP; - port_adv = ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); break; case PORT_FIBRE: - port_sup = SUPPORTED_FIBRE; - port_adv = ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); break; default: - port_sup = SUPPORTED_TP; - port_adv = ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); WARN_ON_ONCE(1); } - /* "Fallthrough" case'es ordered from high to low result in setting */ - /* flags cumulatively, starting from the specified speed and down to */ - /* the lowest possible. */ - spd_sup = 0; - spd_adv = 0; + /* fallthrough from high to low, to select all legal speeds: */ switch (maxspeed) { case SPEED_10000: - spd_sup |= SUPPORTED_10000baseT_Full; - spd_adv |= ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); case SPEED_1000: - spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; - spd_adv |= ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); case SPEED_100: - spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; - spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); case SPEED_10: - spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; - spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - break; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + /* end fallthrough */ + break; default: - spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; - 
spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); WARN_ON_ONCE(1); } - ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv; - ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup; } -int qeth_core_ethtool_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; struct carrier_info carrier_info; int rc; - u32 speed; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) link_type = QETH_LINK_TYPE_10GBIT_ETH; else link_type = card->info.link_type; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.phy_address = 0; + cmd->base.mdio_support = 0; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; switch (link_type) { case QETH_LINK_TYPE_FAST_ETH: case QETH_LINK_TYPE_LANE_ETH100: - qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP); - speed = SPEED_100; - ecmd->port = PORT_TP; + cmd->base.speed = SPEED_100; + cmd->base.port = PORT_TP; break; - case QETH_LINK_TYPE_GBIT_ETH: case QETH_LINK_TYPE_LANE_ETH1000: - qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE); - speed = SPEED_1000; - ecmd->port = PORT_FIBRE; + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; break; - case QETH_LINK_TYPE_10GBIT_ETH: - qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE); - speed = SPEED_10000; - ecmd->port = PORT_FIBRE; + cmd->base.speed = SPEED_10000; + cmd->base.port = PORT_FIBRE; break; - default: - qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP); - speed = SPEED_10; - ecmd->port = PORT_TP; + cmd->base.speed = SPEED_10; + cmd->base.port = PORT_TP; } - ethtool_cmd_speed_set(ecmd, speed); + qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); /* Check if we can obtain more accurate information. 
*/ /* If QUERY_CARD_INFO command is not supported or fails, */ @@ -6018,49 +6151,48 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, switch (carrier_info.card_type) { case CARD_INFO_TYPE_1G_COPPER_A: case CARD_INFO_TYPE_1G_COPPER_B: - qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); break; case CARD_INFO_TYPE_1G_FIBRE_A: case CARD_INFO_TYPE_1G_FIBRE_B: - qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); break; case CARD_INFO_TYPE_10G_FIBRE_A: case CARD_INFO_TYPE_10G_FIBRE_B: - qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); break; } switch (carrier_info.port_mode) { case CARD_INFO_PORTM_FULLDUPLEX: - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; break; case CARD_INFO_PORTM_HALFDUPLEX: - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; break; } switch (carrier_info.port_speed) { case CARD_INFO_PORTS_10M: - speed = SPEED_10; + cmd->base.speed = SPEED_10; break; case CARD_INFO_PORTS_100M: - speed = SPEED_100; + cmd->base.speed = SPEED_100; break; case CARD_INFO_PORTS_1G: - speed = SPEED_1000; + cmd->base.speed = SPEED_1000; break; case CARD_INFO_PORTS_10G: - speed = SPEED_10000; + cmd->base.speed = SPEED_10000; break; } - ethtool_cmd_speed_set(ecmd, speed); return 0; } -EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); +EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings); /* Callback to handle checksum offload command reply from OSA card. * Verify that required features have been enabled on the card. 
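The hunk above is a textbook get_settings to get_link_ksettings conversion: the u32 supported/advertising words become link-mode bitmaps manipulated through helper macros, and speed, duplex, autoneg and port move into cmd->base, which makes ethtool_cmd_speed_set() unnecessary. A condensed sketch of the new callback shape, using a hypothetical "foo" driver rather than qeth symbols:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	/* Link-mode bitmaps replace the old u32 supported/advertising. */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);

	/* Speed, duplex and port now live in cmd->base. */
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.port = PORT_TP;

	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link_ksettings = foo_get_link_ksettings,
};

The ethtool core translates for userspace binaries that still issue the legacy ETHTOOL_GSET ioctl, so a converted driver only needs the new callback.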
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index bc69d0a338ad715316b5e1aa271969e07c8da799..4accb0a61ce0be1001db084248506f480242cdcd 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -29,7 +29,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_TIMEOUT (10 * HZ) #define QETH_IPA_TIMEOUT (45 * HZ) #define QETH_IDX_COMMAND_SEQNO 0xffff0000 -#define SR_INFO_LEN 16 #define QETH_CLEAR_CHANNEL_PARM -10 #define QETH_HALT_CHANNEL_PARM -11 @@ -65,7 +64,6 @@ enum qeth_link_types { QETH_LINK_TYPE_LANE_TR = 0x82, QETH_LINK_TYPE_LANE_ETH1000 = 0x83, QETH_LINK_TYPE_LANE = 0x88, - QETH_LINK_TYPE_ATM_NATIVE = 0x90, }; /* @@ -185,8 +183,6 @@ enum qeth_ipa_return_codes { IPA_RC_ENOMEM = 0xfffe, IPA_RC_FFFF = 0xffff }; -/* for DELIP */ -#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED /* for SET_DIAGNOSTIC_ASSIST */ #define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL #define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR @@ -631,14 +627,6 @@ enum qeth_ipa_addr_change_code { IPA_ADDR_CHANGE_CODE_MACADDR = 0x02, IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */ }; -enum qeth_ipa_addr_change_retcode { - IPA_ADDR_CHANGE_RETCODE_OK = 0x0000, - IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS = 0x0010, -}; -enum qeth_ipa_addr_change_lostmask { - IPA_ADDR_CHANGE_MASK_OVERFLOW = 0x01, - IPA_ADDR_CHANGE_MASK_STATECHANGE = 0x02, -}; struct qeth_ipacmd_addr_change_entry { struct net_if_token token; @@ -817,9 +805,4 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; ((buffer) && \ (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1)) -#define ADDR_FRAME_TYPE_DIX 1 -#define ADDR_FRAME_TYPE_802_3 2 -#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10 -#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20 - #endif diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index af4e6a639fecf20fc5d4c15b5a398ae4b460c4d5..1b07f382d74c955974d138271bdce2bbbf594f63 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -28,63 +27,12 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); static void qeth_l2_set_rx_mode(struct net_device *); -static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); static void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd); -static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct qeth_card *card = dev->ml_priv; - struct mii_ioctl_data *mii_data; - int rc = 0; - - if (!card) - return -ENODEV; - - if (!qeth_card_hw_is_reachable(card)) - return -ENODEV; - - if (card->info.type == QETH_CARD_TYPE_OSN) - return -EPERM; - - switch (cmd) { - case SIOC_QETH_ADP_SET_SNMP_CONTROL: - rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); - break; - case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) && - !card->info.guestlan) - return 1; - return 0; - break; - case SIOCGMIIPHY: - mii_data = if_mii(rq); - mii_data->phy_id = 0; - break; - case SIOCGMIIREG: - mii_data = if_mii(rq); - if (mii_data->phy_id != 0) - rc = -EINVAL; - else - mii_data->val_out = qeth_mdio_read(dev, - mii_data->phy_id, mii_data->reg_num); - break; - case SIOC_QETH_QUERY_OAT: - rc = 
qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); - break; - default: - rc = -EOPNOTSUPP; - } - if (rc) - QETH_CARD_TEXT_(card, 2, "ioce%d", rc); - return rc; -} - static int qeth_l2_verify_dev(struct net_device *dev) { struct qeth_card *card; @@ -332,7 +280,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, else hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; - hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE; + hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr); /* VSWITCH relies on the VLAN * information to be present in * the QDIO header */ @@ -552,81 +500,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, return work_done; } -static int qeth_l2_poll(struct napi_struct *napi, int budget) -{ - struct qeth_card *card = container_of(napi, struct qeth_card, napi); - int work_done = 0; - struct qeth_qdio_buffer *buffer; - int done; - int new_budget = budget; - - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - - while (1) { - if (!card->rx.b_count) { - card->rx.qdio_err = 0; - card->rx.b_count = qdio_get_next_buffers( - card->data.ccwdev, 0, &card->rx.b_index, - &card->rx.qdio_err); - if (card->rx.b_count <= 0) { - card->rx.b_count = 0; - break; - } - card->rx.b_element = - &card->qdio.in_q->bufs[card->rx.b_index] - .buffer->element[0]; - card->rx.e_offset = 0; - } - - while (card->rx.b_count) { - buffer = &card->qdio.in_q->bufs[card->rx.b_index]; - if (!(card->rx.qdio_err && - qeth_check_qdio_errors(card, buffer->buffer, - card->rx.qdio_err, "qinerr"))) - work_done += qeth_l2_process_inbound_buffer( - card, new_budget, &done); - else - done = 1; - - if (done) { - if (card->options.performance_stats) - card->perf_stats.bufs_rec++; - qeth_put_buffer_pool_entry(card, - buffer->pool_entry); - qeth_queue_input_buffer(card, card->rx.b_index); - card->rx.b_count--; - if (card->rx.b_count) { - card->rx.b_index = - (card->rx.b_index + 1) % - QDIO_MAX_BUFFERS_PER_Q; - card->rx.b_element = - &card->qdio.in_q - ->bufs[card->rx.b_index] - .buffer->element[0]; - card->rx.e_offset = 0; - } - } - - if (work_done >= budget) - goto out; - else - new_budget = budget - work_done; - } - } - - napi_complete(napi); - if (qdio_start_irq(card->data.ccwdev, 0)) - napi_schedule(&card->napi); -out: - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; - return work_done; -} - static int qeth_l2_request_initial_mac(struct qeth_card *card) { int rc = 0; @@ -808,7 +681,8 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) { int rc; struct qeth_hdr *hdr = NULL; @@ -910,7 +784,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - elements, data_offset, hd_len); + data_offset, hd_len); if (!rc) { card->stats.tx_packets++; card->stats.tx_bytes += tx_bytes; @@ -1043,7 +917,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = { .get_ethtool_stats = qeth_core_get_ethtool_stats, .get_sset_count = qeth_core_get_sset_count, .get_drvinfo = qeth_core_get_drvinfo, - .get_settings = qeth_core_ethtool_get_settings, + .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, }; 
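/*
 * Aside on the skb->protocol changes running through this series: the field
 * is __be16, so sparse-clean code converts at the boundary instead of
 * comparing it against a host-order constant. A minimal sketch, assuming a
 * hypothetical handle_frame() helper that is not a qeth symbol:
 */
#include <linux/if_ether.h>
#include <linux/printk.h>
#include <linux/skbuff.h>

static void handle_frame(struct sk_buff *skb)
{
	/* Compare in CPU order; testing against htons(ETH_P_IP) is equivalent. */
	if (be16_to_cpu(skb->protocol) == ETH_P_IP)
		pr_debug("IPv4 frame\n");

	/* Assign in network byte order, never a bare host-order constant. */
	skb->protocol = cpu_to_be16(ETH_P_SNAP);
}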
static const struct ethtool_ops qeth_l2_osn_ops = { @@ -1060,7 +934,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_start_xmit = qeth_l2_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, - .ndo_do_ioctl = qeth_l2_do_ioctl, + .ndo_do_ioctl = qeth_do_ioctl, .ndo_set_mac_address = qeth_l2_set_mac_address, .ndo_change_mtu = qeth_change_mtu, .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, @@ -1117,7 +991,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); - netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); return register_netdev(card->dev); } @@ -1327,17 +1201,6 @@ static void __exit qeth_l2_exit(void) pr_info("unregister layer 2 discipline\n"); } -static void qeth_l2_shutdown(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_set_allowed_threads(card, 0, 1); - if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); - qdio_free(CARD_DDEV(card)); -} - static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); @@ -1409,15 +1272,16 @@ struct qeth_discipline qeth_l2_discipline = { .start_poll = qeth_qdio_start_poll, .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, + .process_rx_buffer = qeth_l2_process_inbound_buffer, .recover = qeth_l2_recover, .setup = qeth_l2_probe_device, .remove = qeth_l2_remove_device, .set_online = qeth_l2_set_online, .set_offline = qeth_l2_set_offline, - .shutdown = qeth_l2_shutdown, .freeze = qeth_l2_pm_suspend, .thaw = qeth_l2_pm_resume, .restore = qeth_l2_pm_resume, + .do_ioctl = NULL, .control_event_handler = qeth_l2_control_event, }; EXPORT_SYMBOL_GPL(qeth_l2_discipline); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index 692db49e3d2a368d36f76bfffe6200db6782627c..687972356d6b00f8776cb332e31b1ae0063183a5 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -8,9 +8,6 @@ #include "qeth_core.h" #include "qeth_l2.h" -#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ -struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) - static ssize_t qeth_bridge_port_role_state_show(struct device *dev, struct device_attribute *attr, char *buf, int show_state) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 653f0fb76573ab66c0178615ebcd81a5fe04c16e..6e0354ef4b8629827e4c8ef40b9d569bc11e354b 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -36,16 +35,12 @@ static int qeth_l3_set_offline(struct ccwgroup_device *); -static int qeth_l3_recover(void *); static int qeth_l3_stop(struct net_device *); static void qeth_l3_set_multicast_list(struct net_device *); -static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *); static int qeth_l3_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *); static int qeth_l3_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *); -static int __qeth_l3_set_online(struct 
ccwgroup_device *, int); -static int __qeth_l3_set_offline(struct ccwgroup_device *, int); static int qeth_l3_isxdigit(char *buf) { @@ -1341,7 +1336,7 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); } -static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac) +static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac) { ip_eth_mc_map(ipm, mac); } @@ -1414,7 +1409,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) im4 = rcu_dereference(im4->next_rcu)) { qeth_l3_get_mac_for_ipm(im4->multiaddr, buf); - tmp->u.a4.addr = im4->multiaddr; + tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); memcpy(tmp->mac, buf, sizeof(tmp->mac)); ipm = qeth_l3_ip_from_hash(card, tmp); @@ -1425,7 +1420,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) if (!ipm) continue; memcpy(ipm->mac, buf, sizeof(tmp->mac)); - ipm->u.a4.addr = im4->multiaddr; + ipm->u.a4.addr = be32_to_cpu(im4->multiaddr); ipm->is_multicast = 1; ipm->disp_flag = QETH_DISP_ADDR_ADD; hash_add(card->ip_mc_htable, @@ -1598,8 +1593,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, spin_lock_bh(&card->ip_lock); for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { - addr->u.a4.addr = ifa->ifa_address; - addr->u.a4.mask = ifa->ifa_mask; + addr->u.a4.addr = be32_to_cpu(ifa->ifa_address); + addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask); addr->type = QETH_IP_TYPE_NORMAL; qeth_l3_delete_ip(card, addr); } @@ -1690,25 +1685,25 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned short *vlan_id) { - __be16 prot; + __u16 prot; struct iphdr *ip_hdr; unsigned char tg_addr[MAX_ADDR_LEN]; int is_vlan = 0; if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { - prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : - ETH_P_IP); + prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? 
ETH_P_IPV6 : + ETH_P_IP; switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { case QETH_CAST_MULTICAST: switch (prot) { #ifdef CONFIG_QETH_IPV6 - case __constant_htons(ETH_P_IPV6): + case ETH_P_IPV6: ndisc_mc_map((struct in6_addr *) skb->data + 24, tg_addr, card->dev, 0); break; #endif - case __constant_htons(ETH_P_IP): + case ETH_P_IP: ip_hdr = (struct iphdr *)skb->data; ip_eth_mc_map(ip_hdr->daddr, tg_addr); break; @@ -1795,7 +1790,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, magic = *(__u16 *)skb->data; if ((card->info.type == QETH_CARD_TYPE_IQD) && (magic == ETH_P_AF_IUCV)) { - skb->protocol = ETH_P_AF_IUCV; + skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); skb->pkt_type = PACKET_HOST; skb->mac_header = NET_SKB_PAD; skb->dev = card->dev; @@ -1834,81 +1829,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, return work_done; } -static int qeth_l3_poll(struct napi_struct *napi, int budget) -{ - struct qeth_card *card = container_of(napi, struct qeth_card, napi); - int work_done = 0; - struct qeth_qdio_buffer *buffer; - int done; - int new_budget = budget; - - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - - while (1) { - if (!card->rx.b_count) { - card->rx.qdio_err = 0; - card->rx.b_count = qdio_get_next_buffers( - card->data.ccwdev, 0, &card->rx.b_index, - &card->rx.qdio_err); - if (card->rx.b_count <= 0) { - card->rx.b_count = 0; - break; - } - card->rx.b_element = - &card->qdio.in_q->bufs[card->rx.b_index] - .buffer->element[0]; - card->rx.e_offset = 0; - } - - while (card->rx.b_count) { - buffer = &card->qdio.in_q->bufs[card->rx.b_index]; - if (!(card->rx.qdio_err && - qeth_check_qdio_errors(card, buffer->buffer, - card->rx.qdio_err, "qinerr"))) - work_done += qeth_l3_process_inbound_buffer( - card, new_budget, &done); - else - done = 1; - - if (done) { - if (card->options.performance_stats) - card->perf_stats.bufs_rec++; - qeth_put_buffer_pool_entry(card, - buffer->pool_entry); - qeth_queue_input_buffer(card, card->rx.b_index); - card->rx.b_count--; - if (card->rx.b_count) { - card->rx.b_index = - (card->rx.b_index + 1) % - QDIO_MAX_BUFFERS_PER_Q; - card->rx.b_element = - &card->qdio.in_q - ->bufs[card->rx.b_index] - .buffer->element[0]; - card->rx.e_offset = 0; - } - } - - if (work_done >= budget) - goto out; - else - new_budget = budget - work_done; - } - } - - napi_complete(napi); - if (qdio_start_irq(card->data.ccwdev, 0)) - napi_schedule(&card->napi); -out: - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; - return work_done; -} - static int qeth_l3_verify_vlan_dev(struct net_device *dev, struct qeth_card *card) { @@ -2461,15 +2381,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct qeth_card *card = dev->ml_priv; struct qeth_arp_cache_entry arp_entry; - struct mii_ioctl_data *mii_data; int rc = 0; - if (!card) - return -ENODEV; - - if (!qeth_card_hw_is_reachable(card)) - return -ENODEV; - switch (cmd) { case SIOC_QETH_ARP_SET_NO_ENTRIES: if (!capable(CAP_NET_ADMIN)) { @@ -2514,37 +2427,9 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } rc = qeth_l3_arp_flush_cache(card); break; - case SIOC_QETH_ADP_SET_SNMP_CONTROL: - rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); - break; - case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == 
QETH_CARD_TYPE_OSX) && - !card->info.guestlan) - return 1; - return 0; - break; - case SIOCGMIIPHY: - mii_data = if_mii(rq); - mii_data->phy_id = 0; - break; - case SIOCGMIIREG: - mii_data = if_mii(rq); - if (mii_data->phy_id != 0) - rc = -EINVAL; - else - mii_data->val_out = qeth_mdio_read(dev, - mii_data->phy_id, - mii_data->reg_num); - break; - case SIOC_QETH_QUERY_OAT: - rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); - break; default: rc = -EOPNOTSUPP; } - if (rc) - QETH_CARD_TEXT_(card, 2, "ioce%d", rc); return rc; } @@ -2572,10 +2457,10 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) rcu_read_unlock(); /* try something else */ - if (skb->protocol == ETH_P_IPV6) + if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) return (skb_network_header(skb)[24] == 0xff) ? RTN_MULTICAST : 0; - else if (skb->protocol == ETH_P_IP) + else if (be16_to_cpu(skb->protocol) == ETH_P_IP) return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; /* ... */ @@ -2726,7 +2611,7 @@ static void qeth_tso_fill_header(struct qeth_card *card, hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - sizeof(struct qeth_hdr_tso)); tcph->check = 0; - if (skb->protocol == ETH_P_IPV6) { + if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) { ip6h->payload_len = 0; tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 0, IPPROTO_TCP, 0); @@ -2770,10 +2655,11 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card, return elements; } -static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) { int rc; - u16 *tag; + __be16 *tag; struct qeth_hdr *hdr = NULL; int hdr_elements = 0; int elements; @@ -2794,7 +2680,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if (((card->info.type == QETH_CARD_TYPE_IQD) && (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || ((card->options.cq == QETH_CQ_ENABLED) && - (skb->protocol != ETH_P_AF_IUCV)))) || + (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) || card->options.sniffer) goto tx_drop; @@ -2847,9 +2733,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) new_skb->data + 8, 4); skb_copy_to_linear_data_offset(new_skb, 8, new_skb->data + 12, 4); - tag = (u16 *)(new_skb->data + 12); - *tag = __constant_htons(ETH_P_8021Q); - *(tag + 1) = htons(skb_vlan_tag_get(new_skb)); + tag = (__be16 *)(new_skb->data + 12); + *tag = cpu_to_be16(ETH_P_8021Q); + *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); } } @@ -2887,7 +2773,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); } else { - if (new_skb->protocol == ETH_P_AF_IUCV) + if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV) qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb); else { qeth_l3_fill_header(card, hdr, new_skb, ipv, @@ -2925,7 +2811,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - elements, data_offset, 0); + data_offset, 0); if (!rc) { card->stats.tx_packets++; @@ -3026,7 +2912,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = { .get_ethtool_stats = qeth_core_get_ethtool_stats, .get_sset_count = qeth_core_get_sset_count, .get_drvinfo = qeth_core_get_drvinfo, - .get_settings = qeth_core_ethtool_get_settings, + .get_link_ksettings 
= qeth_core_ethtool_get_link_ksettings, }; /* @@ -3060,7 +2946,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_multicast_list, - .ndo_do_ioctl = qeth_l3_do_ioctl, + .ndo_do_ioctl = qeth_do_ioctl, .ndo_change_mtu = qeth_change_mtu, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, @@ -3076,7 +2962,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_multicast_list, - .ndo_do_ioctl = qeth_l3_do_ioctl, + .ndo_do_ioctl = qeth_do_ioctl, .ndo_change_mtu = qeth_change_mtu, .ndo_fix_features = qeth_fix_features, .ndo_set_features = qeth_set_features, @@ -3145,7 +3031,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); - netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); return register_netdev(card->dev); } @@ -3366,17 +3252,6 @@ static int qeth_l3_recover(void *ptr) return 0; } -static void qeth_l3_shutdown(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_set_allowed_threads(card, 0, 1); - if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); - qdio_free(CARD_DDEV(card)); -} - static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); @@ -3434,15 +3309,16 @@ struct qeth_discipline qeth_l3_discipline = { .start_poll = qeth_qdio_start_poll, .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, + .process_rx_buffer = qeth_l3_process_inbound_buffer, .recover = qeth_l3_recover, .setup = qeth_l3_probe_device, .remove = qeth_l3_remove_device, .set_online = qeth_l3_set_online, .set_offline = qeth_l3_set_offline, - .shutdown = qeth_l3_shutdown, .freeze = qeth_l3_pm_suspend, .thaw = qeth_l3_pm_resume, .restore = qeth_l3_pm_resume, + .do_ioctl = qeth_l3_do_ioctl, .control_event_handler = qeth_l3_control_event, }; EXPORT_SYMBOL_GPL(qeth_l3_discipline); @@ -3466,8 +3342,8 @@ static int qeth_l3_ip_event(struct notifier_block *this, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (addr) { - addr->u.a4.addr = ifa->ifa_address; - addr->u.a4.mask = ifa->ifa_mask; + addr->u.a4.addr = be32_to_cpu(ifa->ifa_address); + addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask); addr->type = QETH_IP_TYPE_NORMAL; } else return NOTIFY_DONE; diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 05e9471e3d3fc0052e7866786486a8590f7b1129..ff29a4b416b45db98b1ee65f0d341c3a95c5f975 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -286,7 +286,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, if (!addr) return -ENOMEM; - addr->u.a6.addr.s6_addr32[0] = 0xfe800000; + addr->u.a6.addr.s6_addr32[0] = cpu_to_be32(0xfe800000); addr->u.a6.addr.s6_addr32[1] = 0x00000000; for (i = 8; i < 16; i++) addr->u.a6.addr.s6_addr[i] = @@ -320,7 +320,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (addr != NULL) { - addr->u.a6.addr.s6_addr32[0] = 0xfe800000; + 
addr->u.a6.addr.s6_addr32[0] = cpu_to_be32(0xfe800000); addr->u.a6.addr.s6_addr32[1] = 0x00000000; for (i = 8; i < 16; i++) addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8]; diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile index 64e9f507ce328637cb8675ef32a2d94631d1b0c2..414f2a772a5fca9b6b0079e24e359444ae3be577 100644 --- a/drivers/scsi/qedf/Makefile +++ b/drivers/scsi/qedf/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDF) := qedf.o qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \ - qedf_attr.o qedf_els.o + qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c new file mode 100644 index 0000000000000000000000000000000000000000..8c65e3b034dc85e7b2fcbfd18cfbbebb3a2cdf38 --- /dev/null +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c @@ -0,0 +1,190 @@ +/* QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "drv_fcoe_fw_funcs.h" +#include "drv_scsi_fw_funcs.h" + +#define FCOE_RX_ID (0xFFFFu) + +static inline void init_common_sqe(struct fcoe_task_params *task_params, + enum fcoe_sqe_request_type request_type) +{ + memset(task_params->sqe, 0, sizeof(*(task_params->sqe))); + SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, + request_type); + task_params->sqe->task_id = task_params->itid; +} + +int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, + struct scsi_sgl_task_params *sgl_task_params, + struct regpair sense_data_buffer_phys_addr, + u32 task_retry_id, + u8 fcp_cmd_payload[32]) +{ + struct fcoe_task_context *ctx = task_params->context; + struct ystorm_fcoe_task_st_ctx *y_st_ctx; + struct tstorm_fcoe_task_st_ctx *t_st_ctx; + struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; + struct mstorm_fcoe_task_st_ctx *m_st_ctx; + u32 io_size, val; + bool slow_sgl; + + memset(ctx, 0, sizeof(*(ctx))); + slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge); + io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ? + task_params->tx_io_size : task_params->rx_io_size); + + /* Ystorm ctx */ + y_st_ctx = &ctx->ystorm_st_context; + y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); + y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id); + y_st_ctx->task_type = task_params->task_type; + memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload, + fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload)); + + /* Tstorm ctx */ + t_st_ctx = &ctx->tstorm_st_context; + t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ? 
+ FCOE_TASK_DEV_TYPE_TAPE : + FCOE_TASK_DEV_TYPE_DISK); + t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); + val = cpu_to_le32(task_params->cq_rss_number); + t_st_ctx->read_only.glbl_q_num = val; + t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size); + t_st_ctx->read_only.task_type = task_params->task_type; + SET_FIELD(t_st_ctx->read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); + t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); + + /* Ustorm ctx */ + u_ag_ctx = &ctx->ustorm_ag_context; + u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number); + + /* Mstorm buffer for sense/rsp data placement */ + m_st_ctx = &ctx->mstorm_st_context; + val = cpu_to_le32(sense_data_buffer_phys_addr.hi); + m_st_ctx->rsp_buf_addr.hi = val; + val = cpu_to_le32(sense_data_buffer_phys_addr.lo); + m_st_ctx->rsp_buf_addr.lo = val; + + if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) { + /* Ystorm ctx */ + y_st_ctx->expect_first_xfer = 1; + + /* Set the amount of super SGEs. Can be up to 4. */ + SET_FIELD(y_st_ctx->sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + init_scsi_sgl_context(&y_st_ctx->sgl_params, + &y_st_ctx->data_desc, + sgl_task_params); + + /* Mstorm ctx */ + SET_FIELD(m_st_ctx->flags, + MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, + (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + } else { + /* Tstorm ctx */ + SET_FIELD(t_st_ctx->read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE, + (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + + /* Mstorm ctx */ + m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); + init_scsi_sgl_context(&m_st_ctx->sgl_params, + &m_st_ctx->data_desc, + sgl_task_params); + } + + init_common_sqe(task_params, SEND_FCOE_CMD); + return 0; +} + +int init_initiator_midpath_unsolicited_fcoe_task( + struct fcoe_task_params *task_params, + struct fcoe_tx_mid_path_params *mid_path_fc_header, + struct scsi_sgl_task_params *tx_sgl_task_params, + struct scsi_sgl_task_params *rx_sgl_task_params, + u8 fw_to_place_fc_header) +{ + struct fcoe_task_context *ctx = task_params->context; + struct ystorm_fcoe_task_st_ctx *y_st_ctx; + struct tstorm_fcoe_task_st_ctx *t_st_ctx; + struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; + struct mstorm_fcoe_task_st_ctx *m_st_ctx; + u32 val; + + memset(ctx, 0, sizeof(*(ctx))); + + /* Init Ystorm */ + y_st_ctx = &ctx->ystorm_st_context; + init_scsi_sgl_context(&y_st_ctx->sgl_params, + &y_st_ctx->data_desc, + tx_sgl_task_params); + SET_FIELD(y_st_ctx->sgl_mode, + YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL); + y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size); + y_st_ctx->task_type = task_params->task_type; + memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path, + mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params)); + + /* Init Mstorm */ + m_st_ctx = &ctx->mstorm_st_context; + init_scsi_sgl_context(&m_st_ctx->sgl_params, + &m_st_ctx->data_desc, + rx_sgl_task_params); + SET_FIELD(m_st_ctx->flags, + MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER, + fw_to_place_fc_header); + m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size); + + /* Init Tstorm */ + t_st_ctx = &ctx->tstorm_st_context; + t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); + val = cpu_to_le32(task_params->cq_rss_number); + t_st_ctx->read_only.glbl_q_num = val; + t_st_ctx->read_only.task_type = task_params->task_type; + SET_FIELD(t_st_ctx->read_write.flags, + FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); + 
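/*
 * Editor's aside, not part of the patch: SET_FIELD() used throughout this
 * file is the qed mask-and-shift helper for packing values into firmware
 * flag words. A generic sketch of the usual pattern behind such macros;
 * the EX_* names are illustrative, not the real qed definitions:
 */
#include <linux/types.h>

#define EX_RX_SGL_MODE_MASK	0x3
#define EX_RX_SGL_MODE_SHIFT	2
#define EX_SET_FIELD(word, name, val)				\
	((word) = ((word) & ~(name##_MASK << name##_SHIFT)) |	\
		  (((val) & name##_MASK) << name##_SHIFT))

static inline void ex_set_field_demo(u8 *flags)
{
	/* clears bits [3:2] of *flags, then stores the 2-bit value 1 there */
	EX_SET_FIELD(*flags, EX_RX_SGL_MODE, 1);
}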
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); + + /* Init Ustorm */ + u_ag_ctx = &ctx->ustorm_ag_context; + u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number); + + /* Init SQE */ + init_common_sqe(task_params, SEND_FCOE_MIDPATH); + task_params->sqe->additional_info_union.burst_length = + tx_sgl_task_params->total_buffer_size; + SET_FIELD(task_params->sqe->flags, + FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges); + SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE, + SCSI_FAST_SGL); + + return 0; +} + +int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params) +{ + init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST); + return 0; +} + +int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params) +{ + init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP); + return 0; +} + +int init_initiator_sequence_recovery_fcoe_task( + struct fcoe_task_params *task_params, u32 off) +{ + init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY); + task_params->sqe->additional_info_union.seq_rec_updated_offset = off; + return 0; +} diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h new file mode 100644 index 0000000000000000000000000000000000000000..617529b058f4ad2fdbd630f02664c89fc9cd79dd --- /dev/null +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h @@ -0,0 +1,93 @@ +/* QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef _FCOE_FW_FUNCS_H +#define _FCOE_FW_FUNCS_H +#include "drv_scsi_fw_funcs.h" +#include "qedf_hsi.h" +#include + +struct fcoe_task_params { + /* Output parameter [set/filled by the HSI function] */ + struct fcoe_task_context *context; + + /* Output parameter [set/filled by the HSI function] */ + struct fcoe_wqe *sqe; + enum fcoe_task_type task_type; + u32 tx_io_size; /* in bytes */ + u32 rx_io_size; /* in bytes */ + u32 conn_cid; + u16 itid; + u8 cq_rss_number; + + /* Whether it's Tape device or not (0=Disk, 1=Tape) */ + u8 is_tape_device; +}; + +/** + * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for + * read/write task types and init fcoe_sqe + * + * @param task_params - Pointer to task parameters struct + * @param sgl_task_params - Pointer to SGL task params + * @param sense_data_buffer_phys_addr - Pointer to sense data buffer + * @param task_retry_id - retry identification - Used only for Tape device + * @param fcp_cmd_payload - FCP CMD payload + */ +int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, + struct scsi_sgl_task_params *sgl_task_params, + struct regpair sense_data_buffer_phys_addr, + u32 task_retry_id, + u8 fcp_cmd_payload[32]); + +/** + * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task + * context for midpath/unsolicited task types and init fcoe_sqe + * + * @param task_params - Pointer to task parameters struct + * @param mid_path_fc_header - FC header + * @param tx_sgl_task_params - Pointer to Tx SGL task params + * @param rx_sgl_task_params - Pointer to Rx SGL task params + * @param fw_to_place_fc_header - Indication whether the FW should place the + * FC header in addition to the data that arrives.
+ */ +int init_initiator_midpath_unsolicited_fcoe_task( + struct fcoe_task_params *task_params, + struct fcoe_tx_mid_path_params *mid_path_fc_header, + struct scsi_sgl_task_params *tx_sgl_task_params, + struct scsi_sgl_task_params *rx_sgl_task_params, + u8 fw_to_place_fc_header); + +/** + * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for + * abort task types and init fcoe_sqe + * + * @param task_params - Pointer to task parameters struct + */ +int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params); + +/** + * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for + * cleanup task types and init fcoe_sqe + * + * + * @param task_params - Pointer to task parameters struct + */ +int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params); + +/** + * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task + * context for sequence recovery task types and init fcoe_sqe + * + * + * @param task_params - Pointer to task parameters struct + * @param desired_offset - The desired offset the task will be re-sent from + */ +int init_initiator_sequence_recovery_fcoe_task( + struct fcoe_task_params *task_params, + u32 desired_offset); +#endif diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c new file mode 100644 index 0000000000000000000000000000000000000000..11e0cc082ec0d65d642057bb0505686a22127639 --- /dev/null +++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c @@ -0,0 +1,44 @@ +/* QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#include "drv_scsi_fw_funcs.h" + +#define SCSI_NUM_SGES_IN_CACHE 0x4 + +bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge) +{ + return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); +} + +void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct scsi_sgl_task_params *sgl_task_params) +{ + /* no need to check for sgl_task_params->sgl validity */ + u8 num_sges_to_init = sgl_task_params->num_sges > + SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE : + sgl_task_params->num_sges; + u8 sge_index; + u32 val; + + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo); + ctx_sgl_params->sgl_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi); + ctx_sgl_params->sgl_addr.hi = val; + val = cpu_to_le32(sgl_task_params->total_buffer_size); + ctx_sgl_params->sgl_total_length = val; + ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); + + for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) { + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo); + ctx_data_desc->sge[sge_index].sge_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi); + ctx_data_desc->sge[sge_index].sge_addr.hi = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len); + ctx_data_desc->sge[sge_index].sge_len = val; + } +} diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h new file mode 100644 index 0000000000000000000000000000000000000000..9cb45410bc457f79a02c8477dbe6de87a148f5fc --- /dev/null +++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h @@ -0,0 +1,85 @@ +/* QLogic FCoE Offload Driver + * Copyright (c) 2016 Cavium Inc.
+ * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef _SCSI_FW_FUNCS_H +#define _SCSI_FW_FUNCS_H +#include +#include +#include + +struct scsi_sgl_task_params { + struct scsi_sge *sgl; + struct regpair sgl_phys_addr; + u32 total_buffer_size; + u16 num_sges; + + /* true if SGL contains a small (< 4KB) SGE in the middle (not 1st or + * last) -> relevant for tx only + */ + bool small_mid_sge; +}; + +struct scsi_dif_task_params { + u32 initial_ref_tag; + bool initial_ref_tag_is_valid; + u16 application_tag; + u16 application_tag_mask; + u16 dif_block_size_log; + bool dif_on_network; + bool dif_on_host; + u8 host_guard_type; + u8 protection_type; + u8 ref_tag_mask; + bool crc_seed; + + /* Enable Connection error upon DIF error (segments with DIF errors are + * dropped) + */ + bool tx_dif_conn_err_en; + bool ignore_app_tag; + bool keep_ref_tag_const; + bool validate_guard; + bool validate_app_tag; + bool validate_ref_tag; + bool forward_guard; + bool forward_app_tag; + bool forward_ref_tag; + bool forward_app_tag_with_mask; + bool forward_ref_tag_with_mask; +}; + +struct scsi_initiator_cmd_params { + /* for cdb_size > default CDB size (extended CDB > 16 bytes) -> + * pointer to the CDB buffer SGE + */ + struct scsi_sge extended_cdb_sge; + + /* Physical address of sense data buffer for sense data - 256B buffer */ + struct regpair sense_data_buffer_phys_addr; +}; + +/** + * @brief scsi_is_slow_sgl - checks for slow SGL + * + * @param num_sges - number of sges in SGL + * @param small_mid_sge - True if the SGL contains an SGE which is smaller than + * 4KB and it's not the 1st or last SGE in the SGL + */ +bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge); + +/** + * @brief init_scsi_sgl_context - initializes SGL task context + * + * @param sgl_params - SGL context parameters to initialize (output parameter) + * @param data_desc - context struct containing SGEs array to set (output + * parameter) + * @param sgl_task_params - SGL parameters (input) + */ +void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct scsi_sgl_task_params *sgl_task_params); +#endif diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 96346a1b1515e81b6c17a7035fb4051a263b8a4a..40aeb6bb96a2afd11c3264b7ea0004110033e2d1 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -26,6 +26,7 @@ #include #include "qedf_version.h" #include "qedf_dbg.h" +#include "drv_fcoe_fw_funcs.h" /* Helpers to extract upper and lower 32-bits of pointer */ #define U64_HI(val) ((u32)(((u64)(val)) >> 32)) @@ -59,19 +60,17 @@ #define UPSTREAM_KEEP 1 struct qedf_mp_req { - uint8_t tm_flags; - uint32_t req_len; void *req_buf; dma_addr_t req_buf_dma; - struct fcoe_sge *mp_req_bd; + struct scsi_sge *mp_req_bd; dma_addr_t mp_req_bd_dma; struct fc_frame_header req_fc_hdr; uint32_t resp_len; void *resp_buf; dma_addr_t resp_buf_dma; - struct fcoe_sge *mp_resp_bd; + struct scsi_sge *mp_resp_bd; dma_addr_t mp_resp_bd_dma; struct fc_frame_header resp_fc_hdr; }; @@ -119,6 +118,7 @@ struct qedf_ioreq { #define QEDF_CMD_IN_CLEANUP 0x2 #define QEDF_CMD_SRR_SENT 0x3 u8 io_req_flags; + uint8_t tm_flags; struct qedf_rport *fcport; unsigned long flags; enum qedf_ioreq_event event; @@ -130,6 +130,8 @@ struct qedf_ioreq { struct completion tm_done; struct completion abts_done; struct fcoe_task_context *task; + struct fcoe_task_params
*task_params; + struct scsi_sgl_task_params *sgl_task_params; int idx; /* * Need to allocate enough room for both sense data and FCP response data @@ -199,8 +201,8 @@ struct qedf_rport { dma_addr_t sq_pbl_dma; u32 sq_pbl_size; u32 sid; -#define QEDF_RPORT_TYPE_DISK 1 -#define QEDF_RPORT_TYPE_TAPE 2 +#define QEDF_RPORT_TYPE_DISK 0 +#define QEDF_RPORT_TYPE_TAPE 1 uint dev_type; /* Disk or tape */ struct list_head peers; }; @@ -391,7 +393,7 @@ struct qedf_ctx { struct io_bdt { struct qedf_ioreq *io_req; - struct fcoe_sge *bd_tbl; + struct scsi_sge *bd_tbl; dma_addr_t bd_tbl_dma; u16 bd_valid; }; @@ -400,7 +402,7 @@ struct qedf_cmd_mgr { struct qedf_ctx *qedf; u16 idx; struct io_bdt **io_bdt_pool; -#define FCOE_PARAMS_NUM_TASKS 4096 +#define FCOE_PARAMS_NUM_TASKS 2048 struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS]; spinlock_t lock; atomic_t free_list_cnt; @@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, unsigned int timer_msec); extern int qedf_init_mp_req(struct qedf_ioreq *io_req); extern void qedf_init_mp_task(struct qedf_ioreq *io_req, - struct fcoe_task_context *task_ctx); -extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, - u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset); + struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe); +extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport); extern void qedf_ring_doorbell(struct qedf_rport *fcport); extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *els_req); diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 59f3e5c73a139b9324a4307dc1f500a1153a4536..c505d41f6dc843825fb52fabaaca33e5dd25d1ce 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, uint16_t xid; uint32_t start_time = jiffies / HZ; uint32_t current_time; + struct fcoe_wqe *sqe; + unsigned long flags; + u16 sqe_idx; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n"); @@ -113,20 +116,25 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, /* Obtain exchange id */ xid = els_req->xid; + spin_lock_irqsave(&fcport->rport_lock, flags); + + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + /* Initialize task context for this IO request */ task = qedf_get_task_mem(&qedf->tasks, xid); - qedf_init_mp_task(els_req, task); + qedf_init_mp_task(els_req, task, sqe); /* Put timer on original I/O request */ if (timer_msec) qedf_cmd_timer_set(qedf, els_req, timer_msec); - qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0); - /* Ring doorbell */ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " "req\n"); qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); els_err: return rc; } @@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req, struct qedf_rport *fcport; unsigned long flags; struct qedf_els_cb_arg *cb_arg; + struct fcoe_wqe *sqe; + u16 sqe_idx; fcport = orig_io_req->fcport; @@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req, spin_lock_irqsave(&fcport->rport_lock, flags); - qedf_add_to_sq(fcport, orig_io_req->xid, 0, - FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset); + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + orig_io_req->task_params->sqe = sqe; + + 
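/*
 * Editor's aside, not part of the patch: qedf_get_sqe_idx() introduced by
 * this series replaces qedf_add_to_sq() with a bare wrapping producer
 * index; the caller grabs a slot under fcport->rport_lock, fills in the
 * fcoe_wqe, and rings the doorbell. The same pattern in a generic,
 * illustrative form (names are made up):
 */
struct ex_send_queue {
	unsigned int prod_idx;	/* next free slot to hand out */
	unsigned int num_slots;	/* total slots in the ring */
};

/* caller must hold the queue lock, mirroring rport_lock in the driver */
static unsigned int ex_sq_get_slot(struct ex_send_queue *sq)
{
	unsigned int slot = sq->prod_idx;

	if (++sq->prod_idx == sq->num_slots)
		sq->prod_idx = 0;	/* wrap to the start of the ring */
	return slot;
}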
init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params, + offset); qedf_ring_doorbell(fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 46debe5034af102710a574a80254433b94265271..1d7f90d0adc1a0c4f55ade57f0bf18f622372184 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) if (!cmgr->io_bdt_pool) goto free_cmd_pool; - bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge); + bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge); for (i = 0; i < num_ios; i++) { bdt_info = cmgr->io_bdt_pool[i]; if (bdt_info->bd_tbl) { @@ -119,6 +121,8 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) for (i = 0; i < num_ios; i++) { io_req = &cmgr->cmds[i]; + kfree(io_req->sgl_task_params); + kfree(io_req->task_params); /* Make sure we free per command sense buffer */ if (io_req->sense_buffer) dma_free_coherent(&qedf->pdev->dev, @@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) spin_lock_init(&cmgr->lock); /* - * Initialize list of qedf_ioreq. + * Initialize I/O request fields. */ xid = QEDF_MIN_XID; @@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) GFP_KERNEL); if (!io_req->sense_buffer) goto mem_err; + + /* Allocate task parameters to pass to f/w init functions */ + io_req->task_params = kzalloc(sizeof(*io_req->task_params), + GFP_KERNEL); + if (!io_req->task_params) { + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to allocate task_params for xid=0x%x\n", + i); + goto mem_err; + } + + /* + * Allocate scatter/gather list info to pass to f/w init + * functions. + */ + io_req->sgl_task_params = kzalloc( + sizeof(struct scsi_sgl_task_params), GFP_KERNEL); + if (!io_req->sgl_task_params) { + QEDF_ERR(&(qedf->dbg_ctx), + "Failed to allocate sgl_task_params for xid=0x%x\n", + i); + goto mem_err; + } } /* Allocate pool of io_bdts - one for each qedf_ioreq */ @@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), GFP_KERNEL); if (!cmgr->io_bdt_pool[i]) { - QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " - "io_bdt_pool[%d].\n", i); + QEDF_WARN(&(qedf->dbg_ctx), + "Failed to alloc io_bdt_pool[%d].\n", i); goto mem_err; } } @@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) for (i = 0; i < num_ios; i++) { bdt_info = cmgr->io_bdt_pool[i]; bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, - QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge), + QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge), &bdt_info->bd_tbl_dma, GFP_KERNEL); if (!bdt_info->bd_tbl) { - QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc " - "bdt_tbl[%d].\n", i); + QEDF_WARN(&(qedf->dbg_ctx), + "Failed to alloc bdt_tbl[%d].\n", i); goto mem_err; } } @@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) } bd_tbl->io_req = io_req; io_req->cmd_type = cmd_type; + io_req->tm_flags = 0; /* Reset sequence offset data */ io_req->rx_buf_off = 0; @@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req) { struct qedf_mp_req *mp_req = &(io_req->mp_req); struct qedf_ctx *qedf = io_req->fcport->qedf; - uint64_t sz = sizeof(struct fcoe_sge); + uint64_t sz = sizeof(struct scsi_sge); /* clear tm flags */ - mp_req->tm_flags = 0; if (mp_req->mp_req_bd) { dma_free_coherent(&qedf->pdev->dev, sz, mp_req->mp_req_bd, mp_req->mp_req_bd_dma); @@ -387,7 +412,7
@@ void qedf_release_cmd(struct kref *ref) static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len, int bd_index) { - struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; int frag_size, sg_frags; sg_frags = 0; @@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len, frag_size = sg_len; bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr); bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr); - bd[bd_index + sg_frags].size = (uint16_t)frag_size; + bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size; addr += (u64)frag_size; sg_frags++; @@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req) struct Scsi_Host *host = sc->device->host; struct fc_lport *lport = shost_priv(host); struct qedf_ctx *qedf = lport_priv(lport); - struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; struct scatterlist *sg; int byte_count = 0; int sg_count = 0; @@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req) bd[bd_count].sge_addr.lo = (addr & 0xffffffff); bd[bd_count].sge_addr.hi = (addr >> 32); - bd[bd_count].size = (u16)sg_len; + bd[bd_count].sge_len = (u16)sg_len; return ++bd_count; } @@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req) sg_frags = 1; bd[bd_count].sge_addr.lo = U64_LO(addr); bd[bd_count].sge_addr.hi = U64_HI(addr); - bd[bd_count].size = (uint16_t)sg_len; + bd[bd_count].sge_len = (uint16_t)sg_len; } bd_count += sg_frags; @@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req) static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; - struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl; + struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; int bd_count; if (scsi_sg_count(sc)) { @@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) } else { bd_count = 0; bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; - bd[0].size = 0; + bd[0].sge_len = 0; } io_req->bd_tbl->bd_valid = bd_count; @@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, /* 4 bytes: flag info */ fcp_cmnd->fc_pri_ta = 0; - fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; + fcp_cmnd->fc_tm_flags = io_req->tm_flags; fcp_cmnd->fc_flags = io_req->io_req_flags; fcp_cmnd->fc_cmdref = 0; /* Populate data direction */ - if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) - fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; - else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) + if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; + } else { + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; + else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) + fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; + } fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; /* 16 bytes: CDB information */ - memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); /* 4 bytes: FCP data length */ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); - } static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, - struct qedf_ioreq *io_req, u32 *ptu_invalidate, - struct fcoe_task_context *task_ctx) + struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, + struct fcoe_wqe *sqe) { enum fcoe_task_type task_type; struct scsi_cmnd *sc_cmd = io_req->sc_cmd; struct io_bdt *bd_tbl = io_req->bd_tbl; - union fcoe_data_desc_ctx *data_desc; - u32 
*fcp_cmnd; + u8 fcp_cmnd[32]; u32 tmp_fcp_cmnd[8]; - int cnt, i; - int bd_count; + int bd_count = 0; struct qedf_ctx *qedf = fcport->qedf; uint16_t cq_idx = smp_processor_id() % qedf->num_queues; - u8 tmp_sgl_mode = 0; - u8 mst_sgl_mode = 0; + struct regpair sense_data_buffer_phys_addr; + u32 tx_io_size = 0; + u32 rx_io_size = 0; + int i, cnt; - memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + /* Note init_initiator_rw_fcoe_task memsets the task context */ io_req->task = task_ctx; + memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(io_req->task_params, 0, sizeof(struct fcoe_task_params)); + memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); - if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) - task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; - else + /* Set task type based on the DMA direction of the command */ + if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { task_type = FCOE_TASK_TYPE_READ_INITIATOR; - - /* Y Storm context */ - task_ctx->ystorm_st_context.expect_first_xfer = 1; - task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len; - /* Check if this is required */ - task_ctx->ystorm_st_context.ox_id = io_req->xid; - task_ctx->ystorm_st_context.task_rety_identifier = - io_req->task_retry_identifier; - - /* T Storm ag context */ - SET_FIELD(task_ctx->tstorm_ag_context.flags0, - TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE); - task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid; - - /* T Storm st context */ - SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, - FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, - 1); - task_ctx->tstorm_st_context.read_write.rx_id = 0xffff; - - task_ctx->tstorm_st_context.read_only.dev_type = - FCOE_TASK_DEV_TYPE_DISK; - task_ctx->tstorm_st_context.read_only.conf_supported = 0; - task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid; - - /* Completion queue for response. */ - task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx; - task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size = - io_req->data_xfer_len; - task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val = - lport->e_d_tov; - - task_ctx->ustorm_ag_context.global_cq_num = cq_idx; - io_req->fp_idx = cq_idx; - - bd_count = bd_tbl->bd_valid; - if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) { - /* Setup WRITE task */ - struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl; - - task_ctx->ystorm_st_context.task_type = - FCOE_TASK_TYPE_WRITE_INITIATOR; - data_desc = &task_ctx->ystorm_st_context.data_desc; - - if (io_req->use_slowpath) { - SET_FIELD(task_ctx->ystorm_st_context.sgl_mode, - YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, - FCOE_SLOW_SGL); - data_desc->slow.base_sgl_addr.lo = - U64_LO(bd_tbl->bd_tbl_dma); - data_desc->slow.base_sgl_addr.hi = - U64_HI(bd_tbl->bd_tbl_dma); - data_desc->slow.remainder_num_sges = bd_count; - data_desc->slow.curr_sge_off = 0; - data_desc->slow.curr_sgl_index = 0; - qedf->slow_sge_ios++; - io_req->sge_type = QEDF_IOREQ_SLOW_SGE; - } else { - SET_FIELD(task_ctx->ystorm_st_context.sgl_mode, - YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, - (bd_count <= 4) ?
(enum fcoe_sgl_mode)bd_count : - FCOE_MUL_FAST_SGES); - - if (bd_count == 1) { - data_desc->single_sge.sge_addr.lo = - fcoe_bd_tbl->sge_addr.lo; - data_desc->single_sge.sge_addr.hi = - fcoe_bd_tbl->sge_addr.hi; - data_desc->single_sge.size = - fcoe_bd_tbl->size; - data_desc->single_sge.is_valid_sge = 0; - qedf->single_sge_ios++; - io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; - } else { - data_desc->fast.sgl_start_addr.lo = - U64_LO(bd_tbl->bd_tbl_dma); - data_desc->fast.sgl_start_addr.hi = - U64_HI(bd_tbl->bd_tbl_dma); - data_desc->fast.sgl_byte_offset = - data_desc->fast.sgl_start_addr.lo & - (QEDF_PAGE_SIZE - 1); - if (data_desc->fast.sgl_byte_offset > 0) - QEDF_ERR(&(qedf->dbg_ctx), - "byte_offset=%u for xid=0x%x.\n", - io_req->xid, - data_desc->fast.sgl_byte_offset); - data_desc->fast.task_reuse_cnt = - io_req->reuse_count; - io_req->reuse_count++; - if (io_req->reuse_count == QEDF_MAX_REUSE) { - *ptu_invalidate = 1; - io_req->reuse_count = 0; - } - qedf->fast_sge_ios++; - io_req->sge_type = QEDF_IOREQ_FAST_SGE; - } - } - - /* T Storm context */ - task_ctx->tstorm_st_context.read_only.task_type = - FCOE_TASK_TYPE_WRITE_INITIATOR; - - /* M Storm context */ - tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode, - YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE); - SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, - FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE, - tmp_sgl_mode); - } else { - /* Setup READ task */ - - /* M Storm context */ - struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl; - - data_desc = &task_ctx->mstorm_st_context.fp.data_desc; - task_ctx->mstorm_st_context.fp.data_2_trns_rem = - io_req->data_xfer_len; - - if (io_req->use_slowpath) { - SET_FIELD( - task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, - FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE, - FCOE_SLOW_SGL); - data_desc->slow.base_sgl_addr.lo = - U64_LO(bd_tbl->bd_tbl_dma); - data_desc->slow.base_sgl_addr.hi = - U64_HI(bd_tbl->bd_tbl_dma); - data_desc->slow.remainder_num_sges = - bd_count; - data_desc->slow.curr_sge_off = 0; - data_desc->slow.curr_sgl_index = 0; - qedf->slow_sge_ios++; - io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; + tx_io_size = io_req->data_xfer_len; } else { - SET_FIELD( - task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, - FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE, - (bd_count <= 4) ? 
(enum fcoe_sgl_mode)bd_count : - FCOE_MUL_FAST_SGES); - - if (bd_count == 1) { - data_desc->single_sge.sge_addr.lo = - fcoe_bd_tbl->sge_addr.lo; - data_desc->single_sge.sge_addr.hi = - fcoe_bd_tbl->sge_addr.hi; - data_desc->single_sge.size = - fcoe_bd_tbl->size; - data_desc->single_sge.is_valid_sge = 0; - qedf->single_sge_ios++; - io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; - } else { - data_desc->fast.sgl_start_addr.lo = - U64_LO(bd_tbl->bd_tbl_dma); - data_desc->fast.sgl_start_addr.hi = - U64_HI(bd_tbl->bd_tbl_dma); - data_desc->fast.sgl_byte_offset = 0; - data_desc->fast.task_reuse_cnt = - io_req->reuse_count; - io_req->reuse_count++; - if (io_req->reuse_count == QEDF_MAX_REUSE) { - *ptu_invalidate = 1; - io_req->reuse_count = 0; - } - qedf->fast_sge_ios++; - io_req->sge_type = QEDF_IOREQ_FAST_SGE; - } + task_type = FCOE_TASK_TYPE_READ_INITIATOR; + rx_io_size = io_req->data_xfer_len; } - - /* Y Storm context */ - task_ctx->ystorm_st_context.expect_first_xfer = 0; - task_ctx->ystorm_st_context.task_type = - FCOE_TASK_TYPE_READ_INITIATOR; - - /* T Storm context */ - task_ctx->tstorm_st_context.read_only.task_type = - FCOE_TASK_TYPE_READ_INITIATOR; - mst_sgl_mode = GET_FIELD( - task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode, - FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE); - SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, - FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE, - mst_sgl_mode); } + /* Setup the fields for fcoe_task_params */ + io_req->task_params->context = task_ctx; + io_req->task_params->sqe = sqe; + io_req->task_params->task_type = task_type; + io_req->task_params->tx_io_size = tx_io_size; + io_req->task_params->rx_io_size = rx_io_size; + io_req->task_params->conn_cid = fcport->fw_cid; + io_req->task_params->itid = io_req->xid; + io_req->task_params->cq_rss_number = cq_idx; + io_req->task_params->is_tape_device = fcport->dev_type; + + /* Fill in information for scatter/gather list */ + if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) { + bd_count = bd_tbl->bd_valid; + io_req->sgl_task_params->sgl = bd_tbl->bd_tbl; + io_req->sgl_task_params->sgl_phys_addr.lo = + U64_LO(bd_tbl->bd_tbl_dma); + io_req->sgl_task_params->sgl_phys_addr.hi = + U64_HI(bd_tbl->bd_tbl_dma); + io_req->sgl_task_params->num_sges = bd_count; + io_req->sgl_task_params->total_buffer_size = + scsi_bufflen(io_req->sc_cmd); + io_req->sgl_task_params->small_mid_sge = + io_req->use_slowpath; + } + + /* Fill in physical address of sense buffer */ + sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma); + sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma); + /* fill FCP_CMND IU */ - fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque; - qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd); /* Swap fcp_cmnd since FC is big endian */ cnt = sizeof(struct fcp_cmnd) / sizeof(u32); - for (i = 0; i < cnt; i++) { - *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]); - fcp_cmnd++; + tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]); + } + memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd)); + + init_initiator_rw_fcoe_task(io_req->task_params, + io_req->sgl_task_params, + sense_data_buffer_phys_addr, + io_req->task_retry_identifier, fcp_cmnd); + + /* Increment SGL type counters */ + if (bd_count == 1) { + qedf->single_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SINGLE_SGE; + } else if (io_req->use_slowpath) { + qedf->slow_sge_ios++; + io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + } else { + 
qedf->fast_sge_ios++; + io_req->sge_type = QEDF_IOREQ_FAST_SGE; } - - /* M Storm context - Sense buffer */ - task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo = - U64_LO(io_req->sense_buffer_dma); - task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi = - U64_HI(io_req->sense_buffer_dma); } void qedf_init_mp_task(struct qedf_ioreq *io_req, - struct fcoe_task_context *task_ctx) + struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { struct qedf_mp_req *mp_req = &(io_req->mp_req); struct qedf_rport *fcport = io_req->fcport; struct qedf_ctx *qedf = io_req->fcport->qedf; struct fc_frame_header *fc_hdr; - enum fcoe_task_type task_type = 0; - union fcoe_data_desc_ctx *data_desc; + struct fcoe_tx_mid_path_params task_fc_hdr; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task " - "for cmd_type = %d\n", io_req->cmd_type); + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, + "Initializing MP task for cmd_type=%d\n", + io_req->cmd_type); qedf->control_requests++; - /* Obtain task_type */ - if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) || - (io_req->cmd_type == QEDF_ELS)) { - task_type = FCOE_TASK_TYPE_MIDPATH; - } else if (io_req->cmd_type == QEDF_ABTS) { - task_type = FCOE_TASK_TYPE_ABTS; - } - + memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); /* Setup the task from io_req for easy reference */ io_req->task = task_ctx; - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n", - task_type); - - /* YSTORM only */ - { - /* Initialize YSTORM task context */ - struct fcoe_tx_mid_path_params *task_fc_hdr = - &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path; - memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); - task_ctx->ystorm_st_context.task_rety_identifier = - io_req->task_retry_identifier; - - /* Init SGL parameters */ - if ((task_type == FCOE_TASK_TYPE_MIDPATH) || - (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { - data_desc = &task_ctx->ystorm_st_context.data_desc; - data_desc->slow.base_sgl_addr.lo = - U64_LO(mp_req->mp_req_bd_dma); - data_desc->slow.base_sgl_addr.hi = - U64_HI(mp_req->mp_req_bd_dma); - data_desc->slow.remainder_num_sges = 1; - data_desc->slow.curr_sge_off = 0; - data_desc->slow.curr_sgl_index = 0; - } - - fc_hdr = &(mp_req->req_fc_hdr); - if (task_type == FCOE_TASK_TYPE_MIDPATH) { - fc_hdr->fh_ox_id = io_req->xid; - fc_hdr->fh_rx_id = htons(0xffff); - } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { - fc_hdr->fh_rx_id = io_req->xid; - } + /* Setup the fields for fcoe_task_params */ + io_req->task_params->context = task_ctx; + io_req->task_params->sqe = sqe; + io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH; + io_req->task_params->tx_io_size = io_req->data_xfer_len; + /* rx_io_size tells the f/w how large a response buffer we have */ + io_req->task_params->rx_io_size = PAGE_SIZE; + io_req->task_params->conn_cid = fcport->fw_cid; + io_req->task_params->itid = io_req->xid; + /* Return middle path commands on CQ 0 */ + io_req->task_params->cq_rss_number = 0; + io_req->task_params->is_tape_device = fcport->dev_type; + + fc_hdr = &(mp_req->req_fc_hdr); + /* Set OX_ID and RX_ID based on driver task id */ + fc_hdr->fh_ox_id = io_req->xid; + fc_hdr->fh_rx_id = htons(0xffff); + + /* Set up FC header information */ + 
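/*
 * Editor's aside, not part of the patch: fcoe_tx_mid_path_params is a
 * firmware-defined layout, so the FC header is copied into it field by
 * field below rather than memcpy'd from struct fc_frame_header. A
 * hypothetical sketch of the same idea (ex_fw_hdr is made up; the fh_*
 * fields are the real libfc ones, already in big-endian wire order):
 */
#include <linux/types.h>
#include <scsi/fc/fc_fs.h>

struct ex_fw_hdr {
	__be32 parameter;
	__be16 ox_id;
	__be16 rx_id;
	u8 r_ctl;
	u8 type;
};

static void ex_copy_fc_hdr(struct ex_fw_hdr *dst,
			   const struct fc_frame_header *src)
{
	dst->parameter = src->fh_parm_offset;	/* both sides big-endian */
	dst->ox_id = src->fh_ox_id;
	dst->rx_id = src->fh_rx_id;
	dst->r_ctl = src->fh_r_ctl;
	dst->type = src->fh_type;
}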
task_fc_hdr.parameter = fc_hdr->fh_parm_offset; + task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl; + task_fc_hdr.type = fc_hdr->fh_type; + task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl; + task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl; + task_fc_hdr.rx_id = fc_hdr->fh_rx_id; + task_fc_hdr.ox_id = fc_hdr->fh_ox_id; + + /* Set up s/g list parameters for request buffer */ + tx_sgl_task_params.sgl = mp_req->mp_req_bd; + tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma); + tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma); + tx_sgl_task_params.num_sges = 1; + /* Set PAGE_SIZE for now since sg element is that size ??? */ + tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len; + tx_sgl_task_params.small_mid_sge = 0; + + /* Set up s/g list parameters for response buffer */ + rx_sgl_task_params.sgl = mp_req->mp_resp_bd; + rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma); + rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma); + rx_sgl_task_params.num_sges = 1; + /* Set PAGE_SIZE for now since sg element is that size ??? */ + rx_sgl_task_params.total_buffer_size = PAGE_SIZE; + rx_sgl_task_params.small_mid_sge = 0; - /* Fill FC Header into middle path buffer */ - task_fc_hdr->parameter = fc_hdr->fh_parm_offset; - task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl; - task_fc_hdr->type = fc_hdr->fh_type; - task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl; - task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl; - task_fc_hdr->rx_id = fc_hdr->fh_rx_id; - task_fc_hdr->ox_id = fc_hdr->fh_ox_id; - - task_ctx->ystorm_st_context.data_2_trns_rem = - io_req->data_xfer_len; - task_ctx->ystorm_st_context.task_type = task_type; - } - - /* TSTORM ONLY */ - { - task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid; - task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid; - /* Always send middle-path repsonses on CQ #0 */ - task_ctx->tstorm_st_context.read_only.glbl_q_num = 0; - io_req->fp_idx = 0; - SET_FIELD(task_ctx->tstorm_ag_context.flags0, - TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, - PROTOCOLID_FCOE); - task_ctx->tstorm_st_context.read_only.task_type = task_type; - SET_FIELD(task_ctx->tstorm_st_context.read_write.flags, - FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, - 1); - task_ctx->tstorm_st_context.read_write.rx_id = 0xffff; - } - - /* MSTORM only */ - { - if (task_type == FCOE_TASK_TYPE_MIDPATH) { - /* Initialize task context */ - data_desc = &task_ctx->mstorm_st_context.fp.data_desc; - - /* Set cache sges address and length */ - data_desc->slow.base_sgl_addr.lo = - U64_LO(mp_req->mp_resp_bd_dma); - data_desc->slow.base_sgl_addr.hi = - U64_HI(mp_req->mp_resp_bd_dma); - data_desc->slow.remainder_num_sges = 1; - data_desc->slow.curr_sge_off = 0; - data_desc->slow.curr_sgl_index = 0; - /* - * Also need to fil in non-fastpath response address - * for middle path commands. - */ - task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo = - U64_LO(mp_req->mp_resp_bd_dma); - task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi = - U64_HI(mp_req->mp_resp_bd_dma); - } - } - - /* USTORM ONLY */ - { - task_ctx->ustorm_ag_context.global_cq_num = 0; - } + /* + * Last arg is 0 as previous code did not set that we wanted the + * fc header information. + */ + init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params, + &task_fc_hdr, + &tx_sgl_task_params, + &rx_sgl_task_params, 0); - /* I/O stats.
Middle path commands always use slow SGEs */ - qedf->slow_sge_ios++; - io_req->sge_type = QEDF_IOREQ_SLOW_SGE; + /* Midpath requests always consume 1 SGE */ + qedf->single_sge_ios++; } -void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate, - enum fcoe_task_type req_type, u32 offset) +/* Presumed that fcport->rport_lock is held */ +u16 qedf_get_sqe_idx(struct qedf_rport *fcport) { - struct fcoe_wqe *sqe; uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); + u16 rval; - sqe = &fcport->sq[fcport->sq_prod_idx]; + rval = fcport->sq_prod_idx; + /* Adjust ring index */ fcport->sq_prod_idx++; fcport->fw_sq_prod_idx++; if (fcport->sq_prod_idx == total_sqe) fcport->sq_prod_idx = 0; - switch (req_type) { - case FCOE_TASK_TYPE_WRITE_INITIATOR: - case FCOE_TASK_TYPE_READ_INITIATOR: - SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD); - if (ptu_invalidate) - SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1); - break; - case FCOE_TASK_TYPE_MIDPATH: - SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH); - break; - case FCOE_TASK_TYPE_ABTS: - SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, - SEND_FCOE_ABTS_REQUEST); - break; - case FCOE_TASK_TYPE_EXCHANGE_CLEANUP: - SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, - FCOE_EXCHANGE_CLEANUP); - break; - case FCOE_TASK_TYPE_SEQUENCE_CLEANUP: - SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, - FCOE_SEQUENCE_RECOVERY); - /* NOTE: offset param only used for sequence recovery */ - sqe->additional_info_union.seq_rec_updated_offset = offset; - break; - case FCOE_TASK_TYPE_UNSOLICITED: - break; - default: - break; - } - - sqe->task_id = xid; - - /* Make sure SQ data is coherent */ - wmb(); - + return rval; } void qedf_ring_doorbell(struct qedf_rport *fcport) @@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) struct fcoe_task_context *task_ctx; u16 xid; enum fcoe_task_type req_type = 0; - u32 ptu_invalidate = 0; + struct fcoe_wqe *sqe; + u16 sqe_idx; /* Initialize rest of io_req fileds */ io_req->data_xfer_len = scsi_bufflen(sc_cmd); @@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) return -EAGAIN; } + if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { + QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + kref_put(&io_req->refcount, qedf_release_cmd); + } + + /* Obtain free SQE */ + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + /* Get the task context */ task_ctx = qedf_get_task_mem(&qedf->tasks, xid); if (!task_ctx) { @@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) return -EINVAL; } - qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx); - - if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { - QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); - kref_put(&io_req->refcount, qedf_release_cmd); - } - - /* Obtain free SQ entry */ - qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0); + qedf_init_task(fcport, lport, io_req, task_ctx, sqe); /* Ring doorbell */ qedf_ring_doorbell(fcport); @@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) u32 r_a_tov = 0; int rc = 0; unsigned long flags; + struct fcoe_wqe *sqe; + u16 sqe_idx; r_a_tov = rdata->r_a_tov; lport = qedf->lport; @@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) spin_lock_irqsave(&fcport->rport_lock, flags); - /* Add ABTS to 
send queue */ - qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0); + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + io_req->task_params->sqe = sqe; - /* Ring doorbell */ + init_initiator_abort_fcoe_task(io_req->task_params); qedf_ring_doorbell(fcport); spin_unlock_irqrestore(&fcport->rport_lock, flags); @@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, int qedf_init_mp_req(struct qedf_ioreq *io_req) { struct qedf_mp_req *mp_req; - struct fcoe_sge *mp_req_bd; - struct fcoe_sge *mp_resp_bd; + struct scsi_sge *mp_req_bd; + struct scsi_sge *mp_resp_bd; struct qedf_ctx *qedf = io_req->fcport->qedf; dma_addr_t addr; uint64_t sz; @@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req) } /* Allocate and map mp_req_bd and mp_resp_bd */ - sz = sizeof(struct fcoe_sge); + sz = sizeof(struct scsi_sge); mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, &mp_req->mp_req_bd_dma, GFP_KERNEL); if (!mp_req->mp_req_bd) { @@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req) mp_req_bd = mp_req->mp_req_bd; mp_req_bd->sge_addr.lo = U64_LO(addr); mp_req_bd->sge_addr.hi = U64_HI(addr); - mp_req_bd->size = QEDF_PAGE_SIZE; + mp_req_bd->sge_len = QEDF_PAGE_SIZE; /* * MP buffer is either a task mgmt command or an ELS. @@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req) addr = mp_req->resp_buf_dma; mp_resp_bd->sge_addr.lo = U64_LO(addr); mp_resp_bd->sge_addr.hi = U64_HI(addr); - mp_resp_bd->size = QEDF_PAGE_SIZE; + mp_resp_bd->sge_len = QEDF_PAGE_SIZE; return 0; } @@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, int tmo = 0; int rc = SUCCESS; unsigned long flags; + struct fcoe_wqe *sqe; + u16 sqe_idx; fcport = io_req->fcport; if (!fcport) { @@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, init_completion(&io_req->tm_done); - /* Obtain free SQ entry */ spin_lock_irqsave(&fcport->rport_lock, flags); - qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0); - /* Ring doorbell */ + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + io_req->task_params->sqe = sqe; + + init_initiator_cleanup_fcoe_task(io_req->task_params); qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); tmo = wait_for_completion_timeout(&io_req->tm_done, @@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, uint8_t tm_flags) { struct qedf_ioreq *io_req; - struct qedf_mp_req *tm_req; struct fcoe_task_context *task; - struct fc_frame_header *fc_hdr; - struct fcp_cmnd *fcp_cmnd; struct qedf_ctx *qedf = fcport->qedf; + struct fc_lport *lport = qedf->lport; int rc = 0; uint16_t xid; - uint32_t sid, did; int tmo = 0; unsigned long flags; + struct fcoe_wqe *sqe; + u16 sqe_idx; if (!sc_cmd) { QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); @@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, /* Set the return CPU to be the same as the request one */ io_req->cpu = smp_processor_id(); - tm_req = (struct qedf_mp_req *)&(io_req->mp_req); - - rc = qedf_init_mp_req(io_req); - if (rc == FAILED) { - QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init " - "failed\n"); - kref_put(&io_req->refcount, qedf_release_cmd); - goto reset_tmf_err; - } - /* Set TM flags */ - io_req->io_req_flags = 0; - tm_req->tm_flags = tm_flags; + io_req->io_req_flags = 
QEDF_READ; + io_req->data_xfer_len = 0; + io_req->tm_flags = tm_flags; /* Default is to return a SCSI command when an error occurs */ io_req->return_scsi_cmd_on_abts = true; - /* Fill FCP_CMND */ - qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); - fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; - memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN); - fcp_cmnd->fc_dl = 0; - - /* Fill FC header */ - fc_hdr = &(tm_req->req_fc_hdr); - sid = fcport->sid; - did = fcport->rdata->ids.port_id; - __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did, - FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | - FC_FC_SEQ_INIT, 0); /* Obtain exchange id */ xid = io_req->xid; @@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, /* Initialize task context for this IO request */ task = qedf_get_task_mem(&qedf->tasks, xid); - qedf_init_mp_task(io_req, task); init_completion(&io_req->tm_done); - /* Obtain free SQ entry */ spin_lock_irqsave(&fcport->rport_lock, flags); - qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0); - /* Ring doorbell */ + sqe_idx = qedf_get_sqe_idx(fcport); + sqe = &fcport->sq[sqe_idx]; + memset(sqe, 0, sizeof(struct fcoe_wqe)); + + qedf_init_task(fcport, lport, io_req, task, sqe); qedf_ring_doorbell(fcport); + spin_unlock_irqrestore(&fcport->rport_lock, flags); tmo = wait_for_completion_timeout(&io_req->tm_done, @@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { struct fcoe_cqe_rsp_info *fcp_rsp; - struct fcoe_cqe_midpath_info *mp_info; - - - /* Get TMF response length from CQE */ - mp_info = &cqe->cqe_info.midpath_info; - io_req->mp_req.resp_len = mp_info->data_placement_size; - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, - "Response len is %d.\n", io_req->mp_req.resp_len); fcp_rsp = &cqe->cqe_info.rsp_info; qedf_parse_fcp_rsp(io_req, fcp_rsp); diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile index 2b3e16b24299ee94bf752d361d7b479c7958c385..90a6925577cca6d5ea147f3028f645af844b91b6 100644 --- a/drivers/scsi/qedi/Makefile +++ b/drivers/scsi/qedi/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QEDI) := qedi.o qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \ - qedi_dbg.o + qedi_dbg.o qedi_fw_api.o qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 2bce3efc66a4b4bda8bae40768ca3e961a6d33b0..d6978cbc56f0586aa8a075191433184c50c93b01 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -14,6 +14,8 @@ #include "qedi.h" #include "qedi_iscsi.h" #include "qedi_gbl.h" +#include "qedi_fw_iscsi.h" +#include "qedi_fw_scsi.h" static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask); @@ -53,8 +55,8 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi, resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn); resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn); - resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait); - resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain); + resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait); + resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, "Freeing tid=0x%x for cid=0x%x\n", @@ -975,81 +977,6 @@ void qedi_fp_process_cqes(struct qedi_work *work) return; } -static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task, - u16 tid, uint16_t 
ptu_invalidate, int is_cleanup) -{ - struct iscsi_wqe *wqe; - struct iscsi_wqe_field *cont_field; - struct qedi_endpoint *ep; - struct scsi_cmnd *sc = task->sc; - struct iscsi_login_req *login_hdr; - struct qedi_cmd *cmd = task->dd_data; - - login_hdr = (struct iscsi_login_req *)task->hdr; - ep = qedi_conn->ep; - wqe = &ep->sq[ep->sq_prod_idx]; - - memset(wqe, 0, sizeof(*wqe)); - - ep->sq_prod_idx++; - ep->fw_sq_prod_idx++; - if (ep->sq_prod_idx == QEDI_SQ_SIZE) - ep->sq_prod_idx = 0; - - if (is_cleanup) { - SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, - ISCSI_WQE_TYPE_TASK_CLEANUP); - wqe->task_id = tid; - return; - } - - if (ptu_invalidate) { - SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE, - ISCSI_WQE_SET_PTU_INVALIDATE); - } - - cont_field = &wqe->cont_prevtid_union.cont_field; - - switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { - case ISCSI_OP_LOGIN: - case ISCSI_OP_TEXT: - SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, - ISCSI_WQE_TYPE_MIDDLE_PATH); - SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, - 1); - cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength); - break; - case ISCSI_OP_LOGOUT: - case ISCSI_OP_NOOP_OUT: - case ISCSI_OP_SCSI_TMFUNC: - SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, - ISCSI_WQE_TYPE_NORMAL); - break; - default: - if (!sc) - break; - - SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, - ISCSI_WQE_TYPE_NORMAL); - cont_field->contlen_cdbsize_field = - (sc->sc_data_direction == DMA_TO_DEVICE) ? - scsi_bufflen(sc) : 0; - if (cmd->use_slowpath) - SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0); - else - SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, - (sc->sc_data_direction == - DMA_TO_DEVICE) ? - min((u16)QEDI_FAST_SGE_COUNT, - (u16)cmd->io_tbl.sge_valid) : 0); - break; - } - - wqe->task_id = tid; - /* Make sure SQ data is coherent */ - wmb(); -} - static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) { struct iscsi_db_data dbell = { 0 }; @@ -1076,96 +1003,116 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) qedi_conn->iscsi_conn_id); } +static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn) +{ + struct qedi_endpoint *ep; + u16 rval; + + ep = qedi_conn->ep; + rval = ep->sq_prod_idx; + + /* Increment SQ index */ + ep->sq_prod_idx++; + ep->fw_sq_prod_idx++; + if (ep->sq_prod_idx == QEDI_SQ_SIZE) + ep->sq_prod_idx = 0; + + return rval; +} + int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, struct iscsi_task *task) { - struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_login_req_hdr login_req_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; struct iscsi_task_context *fw_task_ctx; + struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_login_req *login_hdr; - struct iscsi_login_req_hdr *fw_login_req = NULL; - struct iscsi_cached_sge_ctx *cached_sge = NULL; - struct iscsi_sge *single_sge = NULL; - struct iscsi_sge *req_sge = NULL; - struct iscsi_sge *resp_sge = NULL; + struct scsi_sge *req_sge = NULL; + struct scsi_sge *resp_sge = NULL; struct qedi_cmd *qedi_cmd; - s16 ptu_invalidate = 0; + struct qedi_endpoint *ep; s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; - req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; - resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; qedi_cmd = (struct qedi_cmd *)task->dd_data; + ep = qedi_conn->ep; login_hdr = (struct iscsi_login_req *)task->hdr; tid =
qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; - fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; - /* Ystorm context */ - fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req; - fw_login_req->opcode = login_hdr->opcode; - fw_login_req->version_min = login_hdr->min_version; - fw_login_req->version_max = login_hdr->max_version; - fw_login_req->flags_attr = login_hdr->flags; - fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2); - fw_login_req->isid_d = *((u32 *)login_hdr->isid); - fw_login_req->tsih = login_hdr->tsih; - qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); - fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt)); - fw_login_req->cid = qedi_conn->iscsi_conn_id; - fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn); - fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); - fw_login_req->exp_stat_sn = 0; - - if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { - ptu_invalidate = 1; - qedi->tid_reuse_count[tid] = 0; - } + memset(&task_params, 0, sizeof(task_params)); + memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + /* Update header info */ + login_req_pdu_header.opcode = login_hdr->opcode; + login_req_pdu_header.version_min = login_hdr->min_version; + login_req_pdu_header.version_max = login_hdr->max_version; + login_req_pdu_header.flags_attr = login_hdr->flags; + login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid); + login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]); + + login_req_pdu_header.tsih = login_hdr->tsih; + login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength); - fw_task_ctx->ystorm_st_context.state.reuse_count = - qedi->tid_reuse_count[tid]; - fw_task_ctx->mstorm_st_context.reuse_count = - qedi->tid_reuse_count[tid]++; - cached_sge = - &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; - cached_sge->sge.sge_len = req_sge->sge_len; - cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); - cached_sge->sge.sge_addr.hi = - (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); - - /* Mstorm context */ - single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; - fw_task_ctx->mstorm_st_context.task_type = 0x2; - fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; - single_sge->sge_addr.lo = resp_sge->sge_addr.lo; - single_sge->sge_addr.hi = resp_sge->sge_addr.hi; - single_sge->sge_len = resp_sge->sge_len; - - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SINGLE_SGE, 1); - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 0); - fw_task_ctx->mstorm_st_context.sgl_size = 1; - fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; - - /* Ustorm context */ - fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; - fw_task_ctx->ustorm_st_context.exp_data_transfer_len = - ntoh24(login_hdr->dlength); - fw_task_ctx->ustorm_st_context.exp_data_sn = 0; - fw_task_ctx->ustorm_st_context.cq_rss_number = 0; - fw_task_ctx->ustorm_st_context.task_type = 0x2; - fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; - fw_task_ctx->ustorm_ag_context.exp_data_acked = - ntoh24(login_hdr->dlength); - SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, - 
USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); - SET_FIELD(fw_task_ctx->ustorm_st_context.flags, - USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + login_req_pdu_header.cid = qedi_conn->iscsi_conn_id; + login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn); + login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + login_req_pdu_header.exp_stat_sn = 0; + + /* Fill tx AHS and rx buffer */ + tx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.req_dma_addr); + tx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength); + tx_sgl_task_params.num_sges = 1; + + rx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.resp_dma_addr); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; + rx_sgl_task_params.num_sges = 1; + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = ntoh24(login_hdr->dlength); + task_params.rx_io_size = resp_sge->sge_len; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_login_request_task(&task_params, + &login_req_pdu_header, + &tx_sgl_task_params, + &rx_sgl_task_params); + if (rval) + return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); @@ -1173,7 +1120,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); - qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); qedi_ring_doorbell(qedi_conn); return 0; } @@ -1181,65 +1127,64 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, struct iscsi_task *task) { - struct qedi_ctx *qedi = qedi_conn->qedi; - struct iscsi_logout_req_hdr *fw_logout_req = NULL; - struct iscsi_task_context *fw_task_ctx = NULL; + struct iscsi_logout_req_hdr logout_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; + struct iscsi_task_context *fw_task_ctx; struct iscsi_logout *logout_hdr = NULL; - struct qedi_cmd *qedi_cmd = NULL; - s16 tid = 0; - s16 ptu_invalidate = 0; + struct qedi_ctx *qedi = qedi_conn->qedi; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; + s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; qedi_cmd = (struct qedi_cmd *)task->dd_data; logout_hdr = (struct iscsi_logout *)task->hdr; + ep = qedi_conn->ep; tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; - fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); - + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + qedi_cmd->task_id = tid; - /* Ystorm context */ - fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req; - fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST; - fw_logout_req->reason_code = 0x80 | 
logout_hdr->flags; - qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); - fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt)); - fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn); - fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + memset(&task_params, 0, sizeof(task_params)); + memset(&logout_pdu_header, 0, sizeof(logout_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); - if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { - ptu_invalidate = 1; - qedi->tid_reuse_count[tid] = 0; - } - fw_task_ctx->ystorm_st_context.state.reuse_count = - qedi->tid_reuse_count[tid]; - fw_task_ctx->mstorm_st_context.reuse_count = - qedi->tid_reuse_count[tid]++; - fw_logout_req->cid = qedi_conn->iscsi_conn_id; - fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; - - /* Mstorm context */ - fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; - fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; - - /* Ustorm context */ - fw_task_ctx->ustorm_st_context.rem_rcv_len = 0; - fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0; - fw_task_ctx->ustorm_st_context.exp_data_sn = 0; - fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; - fw_task_ctx->ustorm_st_context.cq_rss_number = 0; - - SET_FIELD(fw_task_ctx->ustorm_st_context.flags, - USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, 0); - - fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; - SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, - USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + /* Update header info */ + logout_pdu_header.opcode = logout_hdr->opcode; + logout_pdu_header.reason_code = 0x80 | logout_hdr->flags; + qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); + logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn); + logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + logout_pdu_header.cid = qedi_conn->iscsi_conn_id; + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = 0; + task_params.rx_io_size = 0; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + + rval = init_initiator_logout_request_task(&task_params, + &logout_pdu_header, + NULL, NULL); + if (rval) + return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); @@ -1247,9 +1192,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); - qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); qedi_ring_doorbell(qedi_conn); - return 0; } @@ -1533,47 +1476,46 @@ static void qedi_tmf_work(struct work_struct *work) static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask) { - struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; + struct iscsi_tmf_request_hdr tmf_pdu_header; + struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_task_context *fw_task_ctx; - struct iscsi_tmf_request_hdr *fw_tmf_request; - struct iscsi_sge *single_sge; - struct qedi_cmd *qedi_cmd; - struct qedi_cmd *cmd; + struct iscsi_conn *conn = 
qedi_conn->cls_conn->dd_data; struct iscsi_task *ctask; struct iscsi_tm *tmf_hdr; - struct iscsi_sge *req_sge; - struct iscsi_sge *resp_sge; - u32 lun[2]; - s16 tid = 0, ptu_invalidate = 0; + struct qedi_cmd *qedi_cmd; + struct qedi_cmd *cmd; + struct qedi_endpoint *ep; + u32 scsi_lun[2]; + s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; - req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; - resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; - qedi_cmd = (struct qedi_cmd *)mtask->dd_data; tmf_hdr = (struct iscsi_tm *)mtask->hdr; + qedi_cmd = (struct qedi_cmd *)mtask->dd_data; + ep = qedi_conn->ep; - tid = qedi_cmd->task_id; - qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; - fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); - fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request; - fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt)); - fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn); + qedi_cmd->task_id = tid; - memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); - fw_tmf_request->lun.lo = be32_to_cpu(lun[0]); - fw_tmf_request->lun.hi = be32_to_cpu(lun[1]); + memset(&task_params, 0, sizeof(task_params)); + memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header)); - if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { - ptu_invalidate = 1; - qedi->tid_reuse_count[tid] = 0; - } - fw_task_ctx->ystorm_st_context.state.reuse_count = - qedi->tid_reuse_count[tid]; - fw_task_ctx->mstorm_st_context.reuse_count = - qedi->tid_reuse_count[tid]++; + /* Update header info */ + qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); + tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt)); + tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn); + + memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); + tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); + tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == ISCSI_TM_FUNC_ABORT_TASK) { @@ -1584,53 +1526,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, return 0; } cmd = (struct qedi_cmd *)ctask->dd_data; - fw_tmf_request->rtt = + tmf_pdu_header.rtt = qedi_set_itt(cmd->task_id, get_itt(tmf_hdr->rtt)); } else { - fw_tmf_request->rtt = ISCSI_RESERVED_TAG; + tmf_pdu_header.rtt = ISCSI_RESERVED_TAG; } - fw_tmf_request->opcode = tmf_hdr->opcode; - fw_tmf_request->function = tmf_hdr->flags; - fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength); - fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); - - single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; - fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; - fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; - single_sge->sge_addr.lo = resp_sge->sge_addr.lo; - single_sge->sge_addr.hi = resp_sge->sge_addr.hi; - single_sge->sge_len = resp_sge->sge_len; - - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SINGLE_SGE, 1); - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 0); - fw_task_ctx->mstorm_st_context.sgl_size = 1; - fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; - - /* Ustorm context */ - fw_task_ctx->ustorm_st_context.rem_rcv_len = 0; - fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0; - fw_task_ctx->ustorm_st_context.exp_data_sn = 0; 
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; - fw_task_ctx->ustorm_st_context.cq_rss_number = 0; - - SET_FIELD(fw_task_ctx->ustorm_st_context.flags, - USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, 0); - - fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; - SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, - USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); - fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]); - fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]); + tmf_pdu_header.opcode = tmf_hdr->opcode; + tmf_pdu_header.function = tmf_hdr->flags; + tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength); + tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, - "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n", - tid, mtask->itt, qedi_conn->iscsi_conn_id); + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = 0; + task_params.rx_io_size = 0; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_tmf_request_task(&task_params, + &tmf_pdu_header); + if (rval) + return -1; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); @@ -1638,7 +1561,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); - qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false); qedi_ring_doorbell(qedi_conn); return 0; } @@ -1689,101 +1611,98 @@ int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn, int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, struct iscsi_task *task) { - struct qedi_ctx *qedi = qedi_conn->qedi; + struct iscsi_text_request_hdr text_request_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; struct iscsi_task_context *fw_task_ctx; - struct iscsi_text_request_hdr *fw_text_request; - struct iscsi_cached_sge_ctx *cached_sge; - struct iscsi_sge *single_sge; - struct qedi_cmd *qedi_cmd; - /* For 6.5 hdr iscsi_hdr */ + struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_text *text_hdr; - struct iscsi_sge *req_sge; - struct iscsi_sge *resp_sge; - s16 ptu_invalidate = 0; + struct scsi_sge *req_sge = NULL; + struct scsi_sge *resp_sge = NULL; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; - req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; - resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; qedi_cmd = (struct qedi_cmd *)task->dd_data; text_hdr = (struct iscsi_text *)task->hdr; + ep = qedi_conn->ep; tid = qedi_get_task_idx(qedi); if (tid == -1) return -ENOMEM; - fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); qedi_cmd->task_id = tid; - /* Ystorm context */ - fw_text_request = - &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request; - fw_text_request->opcode = text_hdr->opcode; - 
fw_text_request->flags_attr = text_hdr->flags; + memset(&task_params, 0, sizeof(task_params)); + memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + + /* Update header info */ + text_request_pdu_header.opcode = text_hdr->opcode; + text_request_pdu_header.flags_attr = text_hdr->flags; qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); - fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt)); - fw_text_request->ttt = text_hdr->ttt; - fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn); - fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); - fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength); - - if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { - ptu_invalidate = 1; - qedi->tid_reuse_count[tid] = 0; - } - fw_task_ctx->ystorm_st_context.state.reuse_count = - qedi->tid_reuse_count[tid]; - fw_task_ctx->mstorm_st_context.reuse_count = - qedi->tid_reuse_count[tid]++; - - cached_sge = - &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; - cached_sge->sge.sge_len = req_sge->sge_len; - cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); - cached_sge->sge.sge_addr.hi = + text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + text_request_pdu_header.ttt = text_hdr->ttt; + text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn); + text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); + text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength); + + /* Fill tx AHS and rx buffer */ + tx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.req_dma_addr); + tx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + tx_sgl_task_params.total_buffer_size = req_sge->sge_len; + tx_sgl_task_params.num_sges = 1; + + rx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.resp_dma_addr); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; + rx_sgl_task_params.num_sges = 1; + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = ntoh24(text_hdr->dlength); + task_params.rx_io_size = resp_sge->sge_len; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_text_request_task(&task_params, + &text_request_pdu_header, + &tx_sgl_task_params, + &rx_sgl_task_params); + if (rval) + return -1; - /* Mstorm context */ - single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; - fw_task_ctx->mstorm_st_context.task_type = 0x2; - fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; - single_sge->sge_addr.lo = resp_sge->sge_addr.lo; - single_sge->sge_addr.hi = resp_sge->sge_addr.hi; - single_sge->sge_len = resp_sge->sge_len; - - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SINGLE_SGE, 1); - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 0); - fw_task_ctx->mstorm_st_context.sgl_size = 1; - 
fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; - - /* Ustorm context */ - fw_task_ctx->ustorm_ag_context.exp_data_acked = - ntoh24(text_hdr->dlength); - fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; - fw_task_ctx->ustorm_st_context.exp_data_transfer_len = - ntoh24(text_hdr->dlength); - fw_task_ctx->ustorm_st_context.exp_data_sn = - be32_to_cpu(text_hdr->exp_statsn); - fw_task_ctx->ustorm_st_context.cq_rss_number = 0; - fw_task_ctx->ustorm_st_context.task_type = 0x2; - fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; - SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, - USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); - - /* Add command in active command list */ spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); qedi_cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); - qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); qedi_ring_doorbell(qedi_conn); - return 0; } @@ -1791,58 +1710,62 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, struct iscsi_task *task, char *datap, int data_len, int unsol) { + struct iscsi_nop_out_hdr nop_out_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_task_context *fw_task_ctx; - struct iscsi_nop_out_hdr *fw_nop_out; - struct qedi_cmd *qedi_cmd; - /* For 6.5 hdr iscsi_hdr */ struct iscsi_nopout *nopout_hdr; - struct iscsi_cached_sge_ctx *cached_sge; - struct iscsi_sge *single_sge; - struct iscsi_sge *req_sge; - struct iscsi_sge *resp_sge; - u32 lun[2]; - s16 ptu_invalidate = 0; + struct scsi_sge *req_sge = NULL; + struct scsi_sge *resp_sge = NULL; + struct qedi_cmd *qedi_cmd; + struct qedi_endpoint *ep; + u32 scsi_lun[2]; s16 tid = 0; + u16 sq_idx = 0; + int rval = 0; - req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; - resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; qedi_cmd = (struct qedi_cmd *)task->dd_data; nopout_hdr = (struct iscsi_nopout *)task->hdr; + ep = qedi_conn->ep; tid = qedi_get_task_idx(qedi); - if (tid == -1) { - QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n"); + if (tid == -1) return -ENOMEM; - } - - fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + qedi_cmd->task_id = tid; - /* Ystorm context */ - fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out; - SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1); - SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); + memset(&task_params, 0, sizeof(task_params)); + memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + + /* Update header info */ + nop_out_pdu_header.opcode = nopout_hdr->opcode; + SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1); + SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); - memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); - fw_nop_out->lun.lo = be32_to_cpu(lun[0]); - fw_nop_out->lun.hi = be32_to_cpu(lun[1]); + memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); + 
nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); + nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); + nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); + nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) { - fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt); - fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt); - fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; - fw_task_ctx->ystorm_st_context.state.local_comp = 1; - SET_FIELD(fw_task_ctx->ustorm_st_context.flags, - USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1); + nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt); + nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt); } else { - fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt)); - fw_nop_out->ttt = ISCSI_TTT_ALL_ONES; - fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0; + nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES; spin_lock(&qedi_conn->list_lock); list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); @@ -1851,53 +1774,46 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, spin_unlock(&qedi_conn->list_lock); } - fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT; - fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); - fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); - - cached_sge = - &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge; - cached_sge->sge.sge_len = req_sge->sge_len; - cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr); - cached_sge->sge.sge_addr.hi = - (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); - - /* Mstorm context */ - fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; - fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id; - - single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge; - single_sge->sge_addr.lo = resp_sge->sge_addr.lo; - single_sge->sge_addr.hi = resp_sge->sge_addr.hi; - single_sge->sge_len = resp_sge->sge_len; - fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len; - - if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { - ptu_invalidate = 1; - qedi->tid_reuse_count[tid] = 0; - } - fw_task_ctx->ystorm_st_context.state.reuse_count = - qedi->tid_reuse_count[tid]; - fw_task_ctx->mstorm_st_context.reuse_count = - qedi->tid_reuse_count[tid]++; - /* Ustorm context */ - fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len; - fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len; - fw_task_ctx->ustorm_st_context.exp_data_sn = 0; - fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH; - fw_task_ctx->ustorm_st_context.cq_rss_number = 0; - - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, 0); - - fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; - SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, - USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); - - fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]); - fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]); - - qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); + /* Fill tx AHS and rx buffer */ + if (data_len) { + tx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.req_dma_addr); + tx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); + 
tx_sgl_task_params.total_buffer_size = data_len; + tx_sgl_task_params.num_sges = 1; + + rx_sgl_task_params.sgl = + (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(qedi_conn->gen_pdu.resp_dma_addr); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); + rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; + rx_sgl_task_params.num_sges = 1; + } + + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = 0; + task_params.tx_io_size = data_len; + task_params.rx_io_size = resp_sge->sge_len; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + rval = init_initiator_nop_out_task(&task_params, + &nop_out_pdu_header, + &tx_sgl_task_params, + &rx_sgl_task_params); + if (rval) + return -1; + qedi_ring_doorbell(qedi_conn); return 0; } @@ -1905,7 +1821,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, int bd_index) { - struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + struct scsi_sge *bd = cmd->io_tbl.sge_tbl; int frag_size, sg_frags; sg_frags = 0; @@ -1938,7 +1854,7 @@ static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; - struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + struct scsi_sge *bd = cmd->io_tbl.sge_tbl; struct scatterlist *sg; int byte_count = 0; int bd_count = 0; @@ -2040,7 +1956,7 @@ static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd) if (bd_count == 0) return; } else { - struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; + struct scsi_sge *bd = cmd->io_tbl.sge_tbl; bd[0].sge_addr.lo = 0; bd[0].sge_addr.hi = 0; @@ -2136,244 +2052,182 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_cmd *cmd = task->dd_data; struct scsi_cmnd *sc = task->sc; + struct iscsi_cmd_hdr cmd_pdu_header; + struct scsi_sgl_task_params tx_sgl_task_params; + struct scsi_sgl_task_params rx_sgl_task_params; + struct scsi_sgl_task_params *prx_sgl = NULL; + struct scsi_sgl_task_params *ptx_sgl = NULL; + struct iscsi_task_params task_params; + struct iscsi_conn_params conn_params; + struct scsi_initiator_cmd_params cmd_params; struct iscsi_task_context *fw_task_ctx; - struct iscsi_cached_sge_ctx *cached_sge; - struct iscsi_phys_sgl_ctx *phys_sgl; - struct iscsi_virt_sgl_ctx *virt_sgl; - struct ystorm_iscsi_task_st_ctx *yst_cxt; - struct mstorm_iscsi_task_st_ctx *mst_cxt; - struct iscsi_sgl *sgl_struct; - struct iscsi_sge *single_sge; + struct iscsi_cls_conn *cls_conn; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; - struct iscsi_sge *bd = cmd->io_tbl.sge_tbl; - enum iscsi_task_type task_type; - struct iscsi_cmd_hdr *fw_cmd; - u32 lun[2]; - u32 exp_data; - u16 cq_idx = smp_processor_id() % qedi->num_queues; - s16 ptu_invalidate = 0; + enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; + struct qedi_endpoint *ep; + u32 scsi_lun[2]; s16 tid = 0; - u8 num_fast_sgs; + u16 sq_idx = 0; + u16 cq_idx; + int rval = 0; - tid = qedi_get_task_idx(qedi); - if (tid == -1) - return -ENOMEM; + ep = qedi_conn->ep; + cls_conn = qedi_conn->cls_conn; + conn = cls_conn->dd_data; qedi_iscsi_map_sg_list(cmd); + int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun); 
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun); - fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid); + tid = qedi_get_task_idx(qedi); + if (tid == -1) + return -ENOMEM; + fw_task_ctx = + (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); - cmd->task_id = tid; - /* Ystorm context */ - fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd; - SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE); + cmd->task_id = tid; + memset(&task_params, 0, sizeof(task_params)); + memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header)); + memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); + memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); + memset(&conn_params, 0, sizeof(conn_params)); + memset(&cmd_params, 0, sizeof(cmd_params)); + + cq_idx = smp_processor_id() % qedi->num_queues; + /* Update header info */ + SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR, + ISCSI_ATTR_SIMPLE); if (sc->sc_data_direction == DMA_TO_DEVICE) { - if (conn->session->initial_r2t_en) { - exp_data = min((conn->session->imm_data_en * - conn->max_xmit_dlength), - conn->session->first_burst); - exp_data = min(exp_data, scsi_bufflen(sc)); - fw_task_ctx->ustorm_ag_context.exp_data_acked = - cpu_to_le32(exp_data); - } else { - fw_task_ctx->ustorm_ag_context.exp_data_acked = - min(conn->session->first_burst, scsi_bufflen(sc)); - } - - SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1); + SET_FIELD(cmd_pdu_header.flags_attr, + ISCSI_CMD_HDR_WRITE, 1); task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; } else { - if (scsi_bufflen(sc)) - SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1); + SET_FIELD(cmd_pdu_header.flags_attr, + ISCSI_CMD_HDR_READ, 1); task_type = ISCSI_TASK_TYPE_INITIATOR_READ; } - fw_cmd->lun.lo = be32_to_cpu(lun[0]); - fw_cmd->lun.hi = be32_to_cpu(lun[1]); + cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); + cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); qedi_update_itt_map(qedi, tid, task->itt, cmd); - fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt)); - fw_cmd->expected_transfer_length = scsi_bufflen(sc); - fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); - fw_cmd->opcode = hdr->opcode; - qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb); - - /* Mstorm context */ - fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma; - fw_task_ctx->mstorm_st_context.sense_db.hi = - (u32)((u64)cmd->sense_buffer_dma >> 32); - fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id; - fw_task_ctx->mstorm_st_context.task_type = task_type; - - if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) { - ptu_invalidate = 1; - qedi->tid_reuse_count[tid] = 0; - } - fw_task_ctx->ystorm_st_context.state.reuse_count = - qedi->tid_reuse_count[tid]; - fw_task_ctx->mstorm_st_context.reuse_count = - qedi->tid_reuse_count[tid]++; - - /* Ustorm context */ - fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc); - fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc); - fw_task_ctx->ustorm_st_context.exp_data_sn = - be32_to_cpu(hdr->exp_statsn); - fw_task_ctx->ustorm_st_context.task_type = task_type; - fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx; - fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id; - - SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1, - USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); - SET_FIELD(fw_task_ctx->ustorm_st_context.flags, - USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0); - - num_fast_sgs = (cmd->io_tbl.sge_valid ? 
- min((u16)QEDI_FAST_SGE_COUNT, - (u16)cmd->io_tbl.sge_valid) : 0); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs); - - fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]); - fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]); - - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n", - cmd->io_tbl.sge_valid); - - yst_cxt = &fw_task_ctx->ystorm_st_context; - mst_cxt = &fw_task_ctx->mstorm_st_context; - /* Tx path */ + cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); + cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length); + cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength); + cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn); + cmd_pdu_header.opcode = hdr->opcode; + qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb); + + /* Fill tx AHS and rx buffer */ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { - /* not considering superIO or FastIO */ - if (cmd->io_tbl.sge_valid == 1) { - cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge; - cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo; - cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi; - cached_sge->sge.sge_len = bd[0].sge_len; - qedi->cached_sgls++; - } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) { - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 1); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, 0); - phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl; - phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma); - phys_sgl->sgl_base.hi = - (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); - phys_sgl->sgl_size = cmd->io_tbl.sge_valid; - qedi->slow_sgls++; - } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) { - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 0); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, - min((u16)QEDI_FAST_SGE_COUNT, - (u16)cmd->io_tbl.sge_valid)); - virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl; - virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma); - virt_sgl->sgl_base.hi = + tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; + tx_sgl_task_params.sgl_phys_addr.lo = + (u32)(cmd->io_tbl.sge_tbl_dma); + tx_sgl_task_params.sgl_phys_addr.hi = (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); - virt_sgl->sgl_initial_offset = - (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1); - qedi->fast_sgls++; - } - fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid; - fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc); - } else { - /* Rx path */ - if (cmd->io_tbl.sge_valid == 1) { - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 0); - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SINGLE_SGE, 1); - single_sge = &mst_cxt->sgl_union.single_sge; - single_sge->sge_addr.lo = bd[0].sge_addr.lo; - single_sge->sge_addr.hi = bd[0].sge_addr.hi; - single_sge->sge_len = bd[0].sge_len; - qedi->cached_sgls++; - } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) { - sgl_struct = &mst_cxt->sgl_union.sgl_struct; - sgl_struct->sgl_addr.lo = - (u32)(cmd->io_tbl.sge_tbl_dma); - sgl_struct->sgl_addr.hi = - (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 1); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, 0); - sgl_struct->updated_sge_size = 0; - sgl_struct->updated_sge_offset = 0; - 
qedi->slow_sgls++; - } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) { - sgl_struct = &mst_cxt->sgl_union.sgl_struct; - sgl_struct->sgl_addr.lo = - (u32)(cmd->io_tbl.sge_tbl_dma); - sgl_struct->sgl_addr.hi = - (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); - sgl_struct->byte_offset = - (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1); - SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags, - ISCSI_MFLAGS_SLOW_IO, 0); - SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map, - ISCSI_REG1_NUM_FAST_SGES, 0); - sgl_struct->updated_sge_size = 0; - sgl_struct->updated_sge_offset = 0; - qedi->fast_sgls++; - } - fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid; - fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc); - } - - if (cmd->io_tbl.sge_valid == 1) - /* Singel-SGL */ - qedi->use_cached_sge = true; - else { + tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); + tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; if (cmd->use_slowpath) - qedi->use_slow_sge = true; - else - qedi->use_fast_sge = true; - } + tx_sgl_task_params.small_mid_sge = true; + } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { + rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; + rx_sgl_task_params.sgl_phys_addr.lo = + (u32)(cmd->io_tbl.sge_tbl_dma); + rx_sgl_task_params.sgl_phys_addr.hi = + (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); + rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); + rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; + } + + /* Add conn param */ + conn_params.first_burst_length = conn->session->first_burst; + conn_params.max_send_pdu_length = conn->max_xmit_dlength; + conn_params.max_burst_length = conn->session->max_burst; + if (conn->session->initial_r2t_en) + conn_params.initial_r2t = true; + if (conn->session->imm_data_en) + conn_params.immediate_data = true; + + /* Add cmd params */ + cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma; + cmd_params.sense_data_buffer_phys_addr.hi = + (u32)((u64)cmd->sense_buffer_dma >> 32); + /* Fill fw input params */ + task_params.context = fw_task_ctx; + task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; + task_params.itid = tid; + task_params.cq_rss_number = cq_idx; + if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) + task_params.tx_io_size = scsi_bufflen(sc); + else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) + task_params.rx_io_size = scsi_bufflen(sc); + + sq_idx = qedi_get_wqe_idx(qedi_conn); + task_params.sqe = &ep->sq[sq_idx]; + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, - "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x", + "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n", (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ? "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ? "Single" : (cmd->use_slowpath ? 
"SLOW" : "FAST"), - (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma), + (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc), + (u32)(cmd->io_tbl.sge_tbl_dma), (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32)); - /* Add command in active command list */ + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + + if (task_params.tx_io_size != 0) + ptx_sgl = &tx_sgl_task_params; + if (task_params.rx_io_size != 0) + prx_sgl = &rx_sgl_task_params; + + rval = init_initiator_rw_iscsi_task(&task_params, &conn_params, + &cmd_params, &cmd_pdu_header, + ptx_sgl, prx_sgl, + NULL); + if (rval) + return -1; + spin_lock(&qedi_conn->list_lock); list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list); cmd->io_cmd_in_list = true; qedi_conn->active_cmd_count++; spin_unlock(&qedi_conn->list_lock); - qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false); qedi_ring_doorbell(qedi_conn); - if (qedi_io_tracing) - qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ); - return 0; } int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) { + struct iscsi_task_params task_params; + struct qedi_endpoint *ep; struct iscsi_conn *conn = task->conn; struct qedi_conn *qedi_conn = conn->dd_data; struct qedi_cmd *cmd = task->dd_data; - s16 ptu_invalidate = 0; + u16 sq_idx = 0; + int rval = 0; QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n", cmd->task_id, get_itt(task->itt), task->state, cmd->state, qedi_conn->iscsi_conn_id); - qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true); - qedi_ring_doorbell(qedi_conn); + memset(&task_params, 0, sizeof(task_params)); + ep = qedi_conn->ep; + + sq_idx = qedi_get_wqe_idx(qedi_conn); + + task_params.sqe = &ep->sq[sq_idx]; + memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); + task_params.itid = cmd->task_id; + rval = init_cleanup_task(&task_params); + if (rval) + return rval; + + qedi_ring_doorbell(qedi_conn); return 0; } diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c new file mode 100644 index 0000000000000000000000000000000000000000..fd354d4e03ebdf7f3ff87b6fc5d2f0c8864d851b --- /dev/null +++ b/drivers/scsi/qedi/qedi_fw_api.c @@ -0,0 +1,781 @@ +/* QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include +#include +#include "qedi_hsi.h" +#include + +#include "qedi_fw_iscsi.h" +#include "qedi_fw_scsi.h" + +#define SCSI_NUM_SGES_IN_CACHE 0x4 + +static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge) +{ + return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); +} + +static +void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct scsi_sgl_task_params *sgl_task_params) +{ + u8 sge_index; + u8 num_sges; + u32 val; + + num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ? 
+ SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges; + + /* sgl params */ + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo); + ctx_sgl_params->sgl_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi); + ctx_sgl_params->sgl_addr.hi = val; + val = cpu_to_le32(sgl_task_params->total_buffer_size); + ctx_sgl_params->sgl_total_length = val; + ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges); + + for (sge_index = 0; sge_index < num_sges; sge_index++) { + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo); + ctx_data_desc->sge[sge_index].sge_addr.lo = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi); + ctx_data_desc->sge[sge_index].sge_addr.hi = val; + val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len); + ctx_data_desc->sge[sge_index].sge_len = val; + } +} + +static u32 calc_rw_task_size(struct iscsi_task_params *task_params, + enum iscsi_task_type task_type, + struct scsi_sgl_task_params *sgl_task_params, + struct scsi_dif_task_params *dif_task_params) +{ + u32 io_size; + + if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE || + task_type == ISCSI_TASK_TYPE_TARGET_READ) + io_size = task_params->tx_io_size; + else + io_size = task_params->rx_io_size; + + if (!io_size) + return 0; + + if (!dif_task_params) + return io_size; + + return !dif_task_params->dif_on_network ? + io_size : sgl_task_params->total_buffer_size; +} + +static void +init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags, + struct scsi_dif_task_params *dif_task_params) +{ + if (!dif_task_params) + return; + + SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG, + dif_task_params->dif_block_size_log); + SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER, + dif_task_params->dif_on_network ? 1 : 0); + SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE, + dif_task_params->dif_on_host ? 
1 : 0); +} + +static void init_sqe(struct iscsi_task_params *task_params, + struct scsi_sgl_task_params *sgl_task_params, + struct scsi_dif_task_params *dif_task_params, + struct iscsi_common_hdr *pdu_header, + struct scsi_initiator_cmd_params *cmd_params, + enum iscsi_task_type task_type, + bool is_cleanup) +{ + if (!task_params->sqe) + return; + + memset(task_params->sqe, 0, sizeof(*task_params->sqe)); + task_params->sqe->task_id = cpu_to_le16(task_params->itid); + if (is_cleanup) { + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_TASK_CLEANUP); + return; + } + + switch (task_type) { + case ISCSI_TASK_TYPE_INITIATOR_WRITE: + { + u32 buf_size = 0; + u32 num_sges = 0; + + init_dif_context_flags(&task_params->sqe->prot_flags, + dif_task_params); + + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_NORMAL); + + if (task_params->tx_io_size) { + buf_size = calc_rw_task_size(task_params, task_type, + sgl_task_params, + dif_task_params); + + if (scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge)) + num_sges = ISCSI_WQE_NUM_SGES_SLOWIO; + else + num_sges = min(sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR); + } + + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges); + SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, + buf_size); + + if (GET_FIELD(pdu_header->hdr_second_dword, + ISCSI_CMD_HDR_TOTAL_AHS_LEN)) + SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE, + cmd_params->extended_cdb_sge.sge_len); + } + break; + case ISCSI_TASK_TYPE_INITIATOR_READ: + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_NORMAL); + + if (GET_FIELD(pdu_header->hdr_second_dword, + ISCSI_CMD_HDR_TOTAL_AHS_LEN)) + SET_FIELD(task_params->sqe->contlen_cdbsize, + ISCSI_WQE_CDB_SIZE, + cmd_params->extended_cdb_sge.sge_len); + break; + case ISCSI_TASK_TYPE_LOGIN_RESPONSE: + case ISCSI_TASK_TYPE_MIDPATH: + { + bool advance_statsn = true; + + if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE) + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_LOGIN); + else + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE, + ISCSI_WQE_TYPE_MIDDLE_PATH); + + if (task_type == ISCSI_TASK_TYPE_MIDPATH) { + u8 opcode = GET_FIELD(pdu_header->hdr_first_byte, + ISCSI_COMMON_HDR_OPCODE); + + if (opcode != ISCSI_OPCODE_TEXT_RESPONSE && + (opcode != ISCSI_OPCODE_NOP_IN || + pdu_header->itt == ISCSI_TTT_ALL_ONES)) + advance_statsn = false; + } + + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE, + advance_statsn ? 
1 : 0); + + if (task_params->tx_io_size) { + SET_FIELD(task_params->sqe->contlen_cdbsize, + ISCSI_WQE_CONT_LEN, task_params->tx_io_size); + + if (scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge)) + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, + ISCSI_WQE_NUM_SGES_SLOWIO); + else + SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, + min(sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR)); + } + } + break; + default: + break; + } +} + +static void init_default_iscsi_task(struct iscsi_task_params *task_params, + struct data_hdr *pdu_header, + enum iscsi_task_type task_type) +{ + struct iscsi_task_context *context; + u16 index; + u32 val; + + context = task_params->context; + memset(context, 0, sizeof(*context)); + + for (index = 0; index < + ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data); + index++) { + val = cpu_to_le32(pdu_header->data[index]); + context->ystorm_st_context.pdu_hdr.data.data[index] = val; + } + + context->mstorm_st_context.task_type = task_type; + context->mstorm_ag_context.task_cid = + cpu_to_le16(task_params->conn_icid); + + SET_FIELD(context->ustorm_ag_context.flags1, + USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + + context->ustorm_st_context.task_type = task_type; + context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; + context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid); +} + +static +void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc, + struct scsi_initiator_cmd_params *cmd) +{ + union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr; + u32 val; + + if (!cmd->extended_cdb_sge.sge_len) + return; + + SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword, + ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE, + cmd->extended_cdb_sge.sge_len); + val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo); + ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val; + val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi); + ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val; + val = cpu_to_le32(cmd->extended_cdb_sge.sge_len); + ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val; +} + +static +void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, + struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, + u32 remaining_recv_len, + u32 expected_data_transfer_len, + u8 num_sges, bool tx_dif_conn_err_en) +{ + u32 val; + + ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len); + ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len); + val = cpu_to_le32(expected_data_transfer_len); + ustorm_st_cxt->exp_data_transfer_len = val; + SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges); + SET_FIELD(ustorm_ag_cxt->flags2, + USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, + tx_dif_conn_err_en ? 
1 : 0); +} + +static +void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context, + struct iscsi_conn_params *conn_params, + enum iscsi_task_type task_type, + u32 task_size, + u32 exp_data_transfer_len, + u8 total_ahs_length) +{ + u32 max_unsolicited_data = 0, val; + + if (total_ahs_length && + (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE || + task_type == ISCSI_TASK_TYPE_INITIATOR_READ)) + SET_FIELD(context->ustorm_st_context.flags2, + USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1); + + switch (task_type) { + case ISCSI_TASK_TYPE_INITIATOR_WRITE: + if (!conn_params->initial_r2t) + max_unsolicited_data = conn_params->first_burst_length; + else if (conn_params->immediate_data) + max_unsolicited_data = + min(conn_params->first_burst_length, + conn_params->max_send_pdu_length); + + context->ustorm_ag_context.exp_data_acked = + cpu_to_le32(total_ahs_length == 0 ? + min(exp_data_transfer_len, + max_unsolicited_data) : + ((u32)(total_ahs_length + + ISCSI_AHS_CNTL_SIZE))); + break; + case ISCSI_TASK_TYPE_TARGET_READ: + val = cpu_to_le32(exp_data_transfer_len); + context->ustorm_ag_context.exp_data_acked = val; + break; + case ISCSI_TASK_TYPE_INITIATOR_READ: + context->ustorm_ag_context.exp_data_acked = + cpu_to_le32((total_ahs_length == 0 ? 0 : + total_ahs_length + + ISCSI_AHS_CNTL_SIZE)); + break; + case ISCSI_TASK_TYPE_TARGET_WRITE: + val = cpu_to_le32(task_size); + context->ustorm_ag_context.exp_cont_len = val; + break; + default: + break; + } +} + +static +void init_rtdif_task_context(struct rdif_task_context *rdif_context, + struct tdif_task_context *tdif_context, + struct scsi_dif_task_params *dif_task_params, + enum iscsi_task_type task_type) +{ + u32 val; + + if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host) + return; + + if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE || + task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { + rdif_context->app_tag_value = + cpu_to_le16(dif_task_params->application_tag); + rdif_context->partial_crc_value = cpu_to_le16(0xffff); + val = cpu_to_le32(dif_task_params->initial_ref_tag); + rdif_context->initial_ref_tag = val; + rdif_context->app_tag_mask = + cpu_to_le16(dif_task_params->application_tag_mask); + SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED, + dif_task_params->crc_seed ? 1 : 0); + SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE, + dif_task_params->host_guard_type); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_PROTECTIONTYPE, + dif_task_params->protection_type); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1); + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_KEEPREFTAGCONST, + dif_task_params->keep_ref_tag_const ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_VALIDATEAPPTAG, + (dif_task_params->validate_app_tag && + dif_task_params->dif_on_network) ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_VALIDATEGUARD, + (dif_task_params->validate_guard && + dif_task_params->dif_on_network) ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_VALIDATEREFTAG, + (dif_task_params->validate_ref_tag && + dif_task_params->dif_on_network) ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_HOSTINTERFACE, + dif_task_params->dif_on_host ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_NETWORKINTERFACE, + dif_task_params->dif_on_network ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARDGUARD, + dif_task_params->forward_guard ? 
1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARDAPPTAG, + dif_task_params->forward_app_tag ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARDREFTAG, + dif_task_params->forward_ref_tag ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK, + dif_task_params->forward_app_tag_with_mask ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK, + dif_task_params->forward_ref_tag_with_mask ? 1 : 0); + SET_FIELD(rdif_context->flags1, + RDIF_TASK_CONTEXT_INTERVALSIZE, + dif_task_params->dif_block_size_log - 9); + SET_FIELD(rdif_context->state, + RDIF_TASK_CONTEXT_REFTAGMASK, + dif_task_params->ref_tag_mask); + SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG, + dif_task_params->ignore_app_tag); + } + + if (task_type == ISCSI_TASK_TYPE_TARGET_READ || + task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { + tdif_context->app_tag_value = + cpu_to_le16(dif_task_params->application_tag); + tdif_context->partial_crc_valueB = + cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); + tdif_context->partial_crc_value_a = + cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); + SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED, + dif_task_params->crc_seed ? 1 : 0); + + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_SETERRORWITHEOP, + dif_task_params->tx_dif_conn_err_en ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD, + dif_task_params->forward_guard ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG, + dif_task_params->forward_app_tag ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG, + dif_task_params->forward_ref_tag ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE, + dif_task_params->dif_block_size_log - 9); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE, + dif_task_params->dif_on_host ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_NETWORKINTERFACE, + dif_task_params->dif_on_network ? 1 : 0); + val = cpu_to_le32(dif_task_params->initial_ref_tag); + tdif_context->initial_ref_tag = val; + tdif_context->app_tag_mask = + cpu_to_le16(dif_task_params->application_tag_mask); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_HOSTGUARDTYPE, + dif_task_params->host_guard_type); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_PROTECTIONTYPE, + dif_task_params->protection_type); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_INITIALREFTAGVALID, + dif_task_params->initial_ref_tag_is_valid ? 1 : 0); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_KEEPREFTAGCONST, + dif_task_params->keep_ref_tag_const ? 1 : 0); + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD, + (dif_task_params->validate_guard && + dif_task_params->dif_on_host) ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_VALIDATEAPPTAG, + (dif_task_params->validate_app_tag && + dif_task_params->dif_on_host) ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_VALIDATEREFTAG, + (dif_task_params->validate_ref_tag && + dif_task_params->dif_on_host) ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK, + dif_task_params->forward_app_tag_with_mask ? 1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK, + dif_task_params->forward_ref_tag_with_mask ? 
1 : 0); + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_REFTAGMASK, + dif_task_params->ref_tag_mask); + SET_FIELD(tdif_context->flags0, + TDIF_TASK_CONTEXT_IGNOREAPPTAG, + dif_task_params->ignore_app_tag ? 1 : 0); + } +} + +static void set_local_completion_context(struct iscsi_task_context *context) +{ + SET_FIELD(context->ystorm_st_context.state.flags, + YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1); + SET_FIELD(context->ustorm_st_context.flags, + USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1); +} + +static int init_rw_iscsi_task(struct iscsi_task_params *task_params, + enum iscsi_task_type task_type, + struct iscsi_conn_params *conn_params, + struct iscsi_common_hdr *pdu_header, + struct scsi_sgl_task_params *sgl_task_params, + struct scsi_initiator_cmd_params *cmd_params, + struct scsi_dif_task_params *dif_task_params) +{ + u32 exp_data_transfer_len = conn_params->max_burst_length; + struct iscsi_task_context *cxt; + bool slow_io = false; + u32 task_size, val; + u8 num_sges = 0; + + task_size = calc_rw_task_size(task_params, task_type, sgl_task_params, + dif_task_params); + + init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header, + task_type); + + cxt = task_params->context; + + val = cpu_to_le32(task_size); + cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val; + init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context, + cmd_params); + val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo); + cxt->mstorm_st_context.sense_db.lo = val; + + val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi); + cxt->mstorm_st_context.sense_db.hi = val; + + if (task_params->tx_io_size) { + init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags, + dif_task_params); + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + sgl_task_params); + + slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge); + + num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR) : + ISCSI_WQE_NUM_SGES_SLOWIO; + + if (slow_io) { + SET_FIELD(cxt->ystorm_st_context.state.flags, + YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1); + } + } else if (task_params->rx_io_size) { + init_dif_context_flags(&cxt->mstorm_st_context.dif_flags, + dif_task_params); + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + sgl_task_params); + num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge) ? + min_t(u16, sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR) : + ISCSI_WQE_NUM_SGES_SLOWIO; + cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size); + } + + if (exp_data_transfer_len > task_size || + task_type != ISCSI_TASK_TYPE_TARGET_WRITE) + exp_data_transfer_len = task_size; + + init_ustorm_task_contexts(&task_params->context->ustorm_st_context, + &task_params->context->ustorm_ag_context, + task_size, exp_data_transfer_len, num_sges, + dif_task_params ? 
+ dif_task_params->tx_dif_conn_err_en : false); + + set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params, + task_type, task_size, + exp_data_transfer_len, + GET_FIELD(pdu_header->hdr_second_dword, + ISCSI_CMD_HDR_TOTAL_AHS_LEN)); + + if (dif_task_params) + init_rtdif_task_context(&task_params->context->rdif_context, + &task_params->context->tdif_context, + dif_task_params, task_type); + + init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header, + cmd_params, task_type, false); + + return 0; +} + +int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params, + struct iscsi_conn_params *conn_params, + struct scsi_initiator_cmd_params *cmd_params, + struct iscsi_cmd_hdr *cmd_header, + struct scsi_sgl_task_params *tx_sgl_params, + struct scsi_sgl_task_params *rx_sgl_params, + struct scsi_dif_task_params *dif_task_params) +{ + if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE)) + return init_rw_iscsi_task(task_params, + ISCSI_TASK_TYPE_INITIATOR_WRITE, + conn_params, + (struct iscsi_common_hdr *)cmd_header, + tx_sgl_params, cmd_params, + dif_task_params); + else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ)) + return init_rw_iscsi_task(task_params, + ISCSI_TASK_TYPE_INITIATOR_READ, + conn_params, + (struct iscsi_common_hdr *)cmd_header, + rx_sgl_params, cmd_params, + dif_task_params); + else + return -1; +} + +int init_initiator_login_request_task(struct iscsi_task_params *task_params, + struct iscsi_login_req_hdr *login_header, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)login_header, + ISCSI_TASK_TYPE_MIDPATH); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? + rx_params->total_buffer_size : 0, + task_params->tx_io_size ? + tx_params->total_buffer_size : 0, 0, + 0); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_params); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_params->total_buffer_size : 0); + + init_sqe(task_params, tx_params, NULL, + (struct iscsi_common_hdr *)login_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_nop_out_task(struct iscsi_task_params *task_params, + struct iscsi_nop_out_hdr *nop_out_pdu_header, + struct scsi_sgl_task_params *tx_sgl_task_params, + struct scsi_sgl_task_params *rx_sgl_task_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)nop_out_pdu_header, + ISCSI_TASK_TYPE_MIDPATH); + + if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES) + set_local_completion_context(task_params->context); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_sgl_task_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_sgl_task_params); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? 
+ rx_sgl_task_params->total_buffer_size : 0, + task_params->tx_io_size ? + tx_sgl_task_params->total_buffer_size : 0, + 0, 0); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_sgl_task_params->total_buffer_size : + 0); + + init_sqe(task_params, tx_sgl_task_params, NULL, + (struct iscsi_common_hdr *)nop_out_pdu_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_logout_request_task(struct iscsi_task_params *task_params, + struct iscsi_logout_req_hdr *logout_hdr, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)logout_hdr, + ISCSI_TASK_TYPE_MIDPATH); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_params); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? + rx_params->total_buffer_size : 0, + task_params->tx_io_size ? + tx_params->total_buffer_size : 0, + 0, 0); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_params->total_buffer_size : 0); + + init_sqe(task_params, tx_params, NULL, + (struct iscsi_common_hdr *)logout_hdr, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_tmf_request_task(struct iscsi_task_params *task_params, + struct iscsi_tmf_request_hdr *tmf_header) +{ + init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header, + ISCSI_TASK_TYPE_MIDPATH); + + init_sqe(task_params, NULL, NULL, + (struct iscsi_common_hdr *)tmf_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_initiator_text_request_task(struct iscsi_task_params *task_params, + struct iscsi_text_request_hdr *text_header, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params) +{ + struct iscsi_task_context *cxt; + + cxt = task_params->context; + + init_default_iscsi_task(task_params, + (struct data_hdr *)text_header, + ISCSI_TASK_TYPE_MIDPATH); + + if (task_params->tx_io_size) + init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, + &cxt->ystorm_st_context.state.data_desc, + tx_params); + + if (task_params->rx_io_size) + init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params, + &cxt->mstorm_st_context.data_desc, + rx_params); + + cxt->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_params->total_buffer_size : 0); + + init_ustorm_task_contexts(&cxt->ustorm_st_context, + &cxt->ustorm_ag_context, + task_params->rx_io_size ? + rx_params->total_buffer_size : 0, + task_params->tx_io_size ? 
+ tx_params->total_buffer_size : 0, 0, 0); + + init_sqe(task_params, tx_params, NULL, + (struct iscsi_common_hdr *)text_header, NULL, + ISCSI_TASK_TYPE_MIDPATH, false); + + return 0; +} + +int init_cleanup_task(struct iscsi_task_params *task_params) +{ + init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH, + true); + return 0; +} diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h new file mode 100644 index 0000000000000000000000000000000000000000..b6f24f91849d16d522c13b51549b10459951fb6d --- /dev/null +++ b/drivers/scsi/qedi/qedi_fw_iscsi.h @@ -0,0 +1,117 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QEDI_FW_ISCSI_H_ +#define _QEDI_FW_ISCSI_H_ + +#include "qedi_fw_scsi.h" + +struct iscsi_task_params { + struct iscsi_task_context *context; + struct iscsi_wqe *sqe; + u32 tx_io_size; + u32 rx_io_size; + u16 conn_icid; + u16 itid; + u8 cq_rss_number; +}; + +struct iscsi_conn_params { + u32 first_burst_length; + u32 max_send_pdu_length; + u32 max_burst_length; + bool initial_r2t; + bool immediate_data; +}; + +/* @brief init_initiator_rw_iscsi_task - initializes an iSCSI Initiator Read + * or Write task context. + * + * @param task_params - Pointer to task parameters struct + * @param conn_params - Connection Parameters + * @param cmd_params - command specific parameters + * @param cmd_pdu_header - PDU Header Parameters + * @param tx_sgl_params - Pointer to Tx SGL task params + * @param rx_sgl_params - Pointer to Rx SGL task params + * @param dif_task_params - Pointer to DIF parameters struct + */ +int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params, + struct iscsi_conn_params *conn_params, + struct scsi_initiator_cmd_params *cmd_params, + struct iscsi_cmd_hdr *cmd_pdu_header, + struct scsi_sgl_task_params *tx_sgl_params, + struct scsi_sgl_task_params *rx_sgl_params, + struct scsi_dif_task_params *dif_task_params); + +/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login + * Request task context. + * + * @param task_params - Pointer to task parameters struct + * @param login_header - PDU Header Parameters + * @param tx_params - Pointer to Tx SGL task params + * @param rx_params - Pointer to Rx SGL task params + */ +int init_initiator_login_request_task(struct iscsi_task_params *task_params, + struct iscsi_login_req_hdr *login_header, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params); + +/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out + * task context. + * + * @param task_params - Pointer to task parameters struct + * @param nop_out_pdu_header - PDU Header Parameters + * @param tx_sgl_params - Pointer to Tx SGL task params + * @param rx_sgl_params - Pointer to Rx SGL task params + */ +int init_initiator_nop_out_task(struct iscsi_task_params *task_params, + struct iscsi_nop_out_hdr *nop_out_pdu_header, + struct scsi_sgl_task_params *tx_sgl_params, + struct scsi_sgl_task_params *rx_sgl_params); + +/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator + * Logout Request task context.
+ * + * @param task_params - Pointer to task parameters struct + * @param logout_hdr - PDU Header Parameters + * @param tx_params - Pointer to Tx SGL task params + * @param rx_params - Pointer to Rx SGL task params + */ +int init_initiator_logout_request_task(struct iscsi_task_params *task_params, + struct iscsi_logout_req_hdr *logout_hdr, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params); + +/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF + * task context. + * + * @param task_params - Pointer to task parameters struct + * @param tmf_header - PDU Header Parameters + */ +int init_initiator_tmf_request_task(struct iscsi_task_params *task_params, + struct iscsi_tmf_request_hdr *tmf_header); + +/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text + * Request task context. + * + * @param task_params - Pointer to task parameters struct + * @param text_header - PDU Header Parameters + * @param tx_params - Pointer to Tx SGL task params + * @param rx_params - Pointer to Rx SGL task params + */ +int init_initiator_text_request_task(struct iscsi_task_params *task_params, + struct iscsi_text_request_hdr *text_header, + struct scsi_sgl_task_params *tx_params, + struct scsi_sgl_task_params *rx_params); + +/* @brief init_cleanup_task - initializes a cleanup task (SQE) + * + * @param task_params - Pointer to task parameters struct + */ +int init_cleanup_task(struct iscsi_task_params *task_params); +#endif diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h new file mode 100644 index 0000000000000000000000000000000000000000..cdaf918f10198fc294bd8248daf787c42b9447a0 --- /dev/null +++ b/drivers/scsi/qedi/qedi_fw_scsi.h @@ -0,0 +1,55 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree.
+ */ + +#ifndef _QEDI_FW_SCSI_H_ +#define _QEDI_FW_SCSI_H_ + +#include +#include +#include "qedi_hsi.h" +#include + +struct scsi_sgl_task_params { + struct scsi_sge *sgl; + struct regpair sgl_phys_addr; + u32 total_buffer_size; + u16 num_sges; + bool small_mid_sge; +}; + +struct scsi_dif_task_params { + u32 initial_ref_tag; + bool initial_ref_tag_is_valid; + u16 application_tag; + u16 application_tag_mask; + u16 dif_block_size_log; + bool dif_on_network; + bool dif_on_host; + u8 host_guard_type; + u8 protection_type; + u8 ref_tag_mask; + bool crc_seed; + bool tx_dif_conn_err_en; + bool ignore_app_tag; + bool keep_ref_tag_const; + bool validate_guard; + bool validate_app_tag; + bool validate_ref_tag; + bool forward_guard; + bool forward_app_tag; + bool forward_ref_tag; + bool forward_app_tag_with_mask; + bool forward_ref_tag_with_mask; +}; + +struct scsi_initiator_cmd_params { + struct scsi_sge extended_cdb_sge; + struct regpair sense_data_buffer_phys_addr; +}; +#endif diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 4cc474364c50568806b16520ddd66239b9f3ebfd..d1de172bebac626b9b61b2da4e1588a36519bb9d 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi, if (cmd->io_tbl.sge_tbl) dma_free_coherent(&qedi->pdev->dev, QEDI_ISCSI_MAX_BDS_PER_CMD * - sizeof(struct iscsi_sge), + sizeof(struct scsi_sge), cmd->io_tbl.sge_tbl, cmd->io_tbl.sge_tbl_dma); @@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session, struct qedi_cmd *cmd) { struct qedi_io_bdt *io = &cmd->io_tbl; - struct iscsi_sge *sge; + struct scsi_sge *sge; io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev, QEDI_ISCSI_MAX_BDS_PER_CMD * @@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn, static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn) { - struct iscsi_sge *bd_tbl; + struct scsi_sge *bd_tbl; - bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; + bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; bd_tbl->sge_addr.hi = (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr; bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr - qedi_conn->gen_pdu.req_buf; - bd_tbl->reserved0 = 0; - bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; + bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; bd_tbl->sge_addr.hi = (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr; bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN; - bd_tbl->reserved0 = 0; } static int qedi_iscsi_send_generic_request(struct iscsi_task *task) diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index d3c06bbddb4e8195d0ba1809a1ed6ad671d402c1..3247287cb0e7e5c4b16a2ad043ec636754e77266 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -102,7 +102,7 @@ struct qedi_endpoint { #define QEDI_SQ_WQES_MIN 16 struct qedi_io_bdt { - struct iscsi_sge *sge_tbl; + struct scsi_sge *sge_tbl; dma_addr_t sge_tbl_dma; u16 sge_valid; }; diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h index 9543a1b139d4e9a00fbac1c9c8474d6bec0e7653..d61e3ac22e675bd1145003c9064e885de116d108 100644 --- a/drivers/scsi/qedi/qedi_version.h +++ b/drivers/scsi/qedi/qedi_version.h @@ -7,8 +7,8 @@ * this source tree. 
*/ -#define QEDI_MODULE_VERSION "8.10.3.0" +#define QEDI_MODULE_VERSION "8.10.4.0" #define QEDI_DRIVER_MAJOR_VER 8 #define QEDI_DRIVER_MINOR_VER 10 -#define QEDI_DRIVER_REV_VER 3 +#define QEDI_DRIVER_REV_VER 4 #define QEDI_DRIVER_ENG_VER 0 diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 109802f776ed71cea6857eda9ae6ccc3e0b41f80..50e624fb8307922cf1cb35f244aad13f4da035df 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -111,7 +111,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) next_msg: if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) - netlink_ack(skb, nlh, err); + netlink_ack(skb, nlh, err, NULL); skb_pull(skb, rlen); } diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 78b1bb7bcf20ab1e3c39d7d3817b3ae3570825be..9fca977ef18d2fd4638132988215dd5b2d327967 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -33,17 +33,10 @@ config QCOM_SMEM The driver provides an interface to items in a heap shared among all processors in a Qualcomm platform. -config QCOM_SMD - tristate "Qualcomm Shared Memory Driver (SMD)" - depends on QCOM_SMEM - help - Say y here to enable support for the Qualcomm Shared Memory Driver - providing communication channels to remote processors in Qualcomm - platforms. - config QCOM_SMD_RPM tristate "Qualcomm Resource Power Manager (RPM) over SMD" - depends on QCOM_SMD && OF + depends on ARCH_QCOM + depends on RPMSG && OF help If you say yes to this option, support will be included for the Resource Power Manager system found in the Qualcomm 8974 based @@ -76,7 +69,8 @@ config QCOM_SMSM config QCOM_WCNSS_CTRL tristate "Qualcomm WCNSS control driver" - depends on QCOM_SMD + depends on ARCH_QCOM + depends on RPMSG help Client driver for the WCNSS_CTRL SMD channel, used to download nv firmware to a newly booted WCNSS chip. 
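The two Kconfig entries above swap their QCOM_SMD dependency for RPMSG because their client drivers are moved onto the rpmsg framework in the hunks that follow. For orientation, a minimal client on the new framework might look like the sketch below; it is illustrative only, and the driver name, compatible string, and payload are invented rather than part of this patch:

	#include <linux/module.h>
	#include <linux/rpmsg.h>

	/* Invoked for each message arriving on the channel's endpoint. */
	static int example_callback(struct rpmsg_device *rpdev, void *data,
				    int count, void *priv, u32 addr)
	{
		dev_dbg(&rpdev->dev, "received %d bytes\n", count);
		return 0;
	}

	static int example_probe(struct rpmsg_device *rpdev)
	{
		/* rpdev->ept is the channel's primary endpoint; rpmsg_send()
		 * blocks until there is room in the ring, much as
		 * qcom_smd_send() did on the removed SMD bus. */
		return rpmsg_send(rpdev->ept, "ping", 4);
	}

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "example,rpmsg-client" },
		{}
	};

	static struct rpmsg_driver example_driver = {
		.probe = example_probe,
		.callback = example_callback,
		.drv = {
			.name = "example_rpmsg_client",
			.of_match_table = example_of_match,
		},
	};
	module_rpmsg_driver(example_driver);

	MODULE_LICENSE("GPL v2");

Registration goes through register_rpmsg_driver(), here via the module_rpmsg_driver() helper; the smd-rpm conversion below keeps an explicit arch_initcall() instead because it must register earlier than ordinary module init.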
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 1f30260b06b8f39ffa04d51e9b13b41c19c7b988..414f0de274fae462c78d188bca7369c1e4795f85 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_PM) += spm.o -obj-$(CONFIG_QCOM_SMD) += smd.o obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o obj-$(CONFIG_QCOM_SMEM) += smem.o obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 6609d7e0edb045c3f26e159ee8938037de397dd2..c2346752b3eaacdc64fb30bfefd2a53bc05add83 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #define RPM_REQUEST_TIMEOUT (5 * HZ) @@ -32,7 +32,7 @@ * @ack_status: result of the rpm request */ struct qcom_smd_rpm { - struct qcom_smd_channel *rpm_channel; + struct rpmsg_endpoint *rpm_channel; struct device *dev; struct completion ack; @@ -133,7 +133,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, pkt->req.data_len = cpu_to_le32(count); memcpy(pkt->payload, buf, count); - ret = qcom_smd_send(rpm->rpm_channel, pkt, size); + ret = rpmsg_send(rpm->rpm_channel, pkt, size); if (ret) goto out; @@ -150,14 +150,16 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, } EXPORT_SYMBOL(qcom_rpm_smd_write); -static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, - const void *data, - size_t count) +static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) { const struct qcom_rpm_header *hdr = data; size_t hdr_length = le32_to_cpu(hdr->length); const struct qcom_rpm_message *msg; - struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel); + struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev); const u8 *buf = data + sizeof(struct qcom_rpm_header); const u8 *end = buf + hdr_length; char msgbuf[32]; @@ -196,59 +198,57 @@ static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, return 0; } -static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev) +static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { struct qcom_smd_rpm *rpm; - rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL); + rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; mutex_init(&rpm->lock); init_completion(&rpm->ack); - rpm->dev = &sdev->dev; - rpm->rpm_channel = sdev->channel; - qcom_smd_set_drvdata(sdev->channel, rpm); + rpm->dev = &rpdev->dev; + rpm->rpm_channel = rpdev->ept; + dev_set_drvdata(&rpdev->dev, rpm); - dev_set_drvdata(&sdev->dev, rpm); - - return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev); + return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev); } -static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev) +static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) { - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&rpdev->dev); } static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-apq8084" }, { .compatible = "qcom,rpm-msm8916" }, { .compatible = "qcom,rpm-msm8974" }, + { .compatible = "qcom,rpm-msm8996" }, {} }; MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); -static struct qcom_smd_driver qcom_smd_rpm_driver = { +static struct rpmsg_driver qcom_smd_rpm_driver = { .probe = qcom_smd_rpm_probe, .remove = qcom_smd_rpm_remove, .callback = qcom_smd_rpm_callback, - .driver = { + .drv = { .name = "qcom_smd_rpm", 
- .owner = THIS_MODULE, .of_match_table = qcom_smd_rpm_of_match, }, }; static int __init qcom_smd_rpm_init(void) { - return qcom_smd_driver_register(&qcom_smd_rpm_driver); + return register_rpmsg_driver(&qcom_smd_rpm_driver); } arch_initcall(qcom_smd_rpm_init); static void __exit qcom_smd_rpm_exit(void) { - qcom_smd_driver_unregister(&qcom_smd_rpm_driver); + unregister_rpmsg_driver(&qcom_smd_rpm_driver); } module_exit(qcom_smd_rpm_exit); diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c deleted file mode 100644 index 322034ab9d374b30c38ce7e8f5ede220578cc440..0000000000000000000000000000000000000000 --- a/drivers/soc/qcom/smd.c +++ /dev/null @@ -1,1560 +0,0 @@ -/* - * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * The Qualcomm Shared Memory communication solution provides point-to-point - * channels for clients to send and receive streaming or packet based data. - * - * Each channel consists of a control item (channel info) and a ring buffer - * pair. The channel info carry information related to channel state, flow - * control and the offsets within the ring buffer. - * - * All allocated channels are listed in an allocation table, identifying the - * pair of items by name, type and remote processor. - * - * Upon creating a new channel the remote processor allocates channel info and - * ring buffer items from the smem heap and populate the allocation table. An - * interrupt is sent to the other end of the channel and a scan for new - * channels should be done. A channel never goes away, it will only change - * state. - * - * The remote processor signals it intent for bring up the communication - * channel by setting the state of its end of the channel to "opening" and - * sends out an interrupt. We detect this change and register a smd device to - * consume the channel. Upon finding a consumer we finish the handshake and the - * channel is up. - * - * Upon closing a channel, the remote processor will update the state of its - * end of the channel and signal us, we will then unregister any attached - * device and close our end of the channel. - * - * Devices attached to a channel can use the qcom_smd_send function to push - * data to the channel, this is done by copying the data into the tx ring - * buffer, updating the pointers in the channel info and signaling the remote - * processor. - * - * The remote processor does the equivalent when it transfer data and upon - * receiving the interrupt we check the channel info for new data and delivers - * this to the attached device. If the device is not ready to receive the data - * we leave it in the ring buffer for now. 
- */ - -struct smd_channel_info; -struct smd_channel_info_pair; -struct smd_channel_info_word; -struct smd_channel_info_word_pair; - -#define SMD_ALLOC_TBL_COUNT 2 -#define SMD_ALLOC_TBL_SIZE 64 - -/* - * This lists the various smem heap items relevant for the allocation table and - * smd channel entries. - */ -static const struct { - unsigned alloc_tbl_id; - unsigned info_base_id; - unsigned fifo_base_id; -} smem_items[SMD_ALLOC_TBL_COUNT] = { - { - .alloc_tbl_id = 13, - .info_base_id = 14, - .fifo_base_id = 338 - }, - { - .alloc_tbl_id = 266, - .info_base_id = 138, - .fifo_base_id = 202, - }, -}; - -/** - * struct qcom_smd_edge - representing a remote processor - * @dev: device for this edge - * @of_node: of_node handle for information related to this edge - * @edge_id: identifier of this edge - * @remote_pid: identifier of remote processor - * @irq: interrupt for signals on this edge - * @ipc_regmap: regmap handle holding the outgoing ipc register - * @ipc_offset: offset within @ipc_regmap of the register for ipc - * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap - * @channels: list of all channels detected on this edge - * @channels_lock: guard for modifications of @channels - * @allocated: array of bitmaps representing already allocated channels - * @smem_available: last available amount of smem triggering a channel scan - * @scan_work: work item for discovering new channels - * @state_work: work item for edge state changes - */ -struct qcom_smd_edge { - struct device dev; - - struct device_node *of_node; - unsigned edge_id; - unsigned remote_pid; - - int irq; - - struct regmap *ipc_regmap; - int ipc_offset; - int ipc_bit; - - struct list_head channels; - spinlock_t channels_lock; - - DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); - - unsigned smem_available; - - wait_queue_head_t new_channel_event; - - struct work_struct scan_work; - struct work_struct state_work; -}; - -#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev) - -/* - * SMD channel states. 
- */ -enum smd_channel_state { - SMD_CHANNEL_CLOSED, - SMD_CHANNEL_OPENING, - SMD_CHANNEL_OPENED, - SMD_CHANNEL_FLUSHING, - SMD_CHANNEL_CLOSING, - SMD_CHANNEL_RESET, - SMD_CHANNEL_RESET_OPENING -}; - -/** - * struct qcom_smd_channel - smd channel struct - * @edge: qcom_smd_edge this channel is living on - * @qsdev: reference to a associated smd client device - * @name: name of the channel - * @state: local state of the channel - * @remote_state: remote state of the channel - * @info: byte aligned outgoing/incoming channel info - * @info_word: word aligned outgoing/incoming channel info - * @tx_lock: lock to make writes to the channel mutually exclusive - * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR - * @tx_fifo: pointer to the outgoing ring buffer - * @rx_fifo: pointer to the incoming ring buffer - * @fifo_size: size of each ring buffer - * @bounce_buffer: bounce buffer for reading wrapped packets - * @cb: callback function registered for this channel - * @recv_lock: guard for rx info modifications and cb pointer - * @pkt_size: size of the currently handled packet - * @list: lite entry for @channels in qcom_smd_edge - */ -struct qcom_smd_channel { - struct qcom_smd_edge *edge; - - struct qcom_smd_device *qsdev; - - char *name; - enum smd_channel_state state; - enum smd_channel_state remote_state; - - struct smd_channel_info_pair *info; - struct smd_channel_info_word_pair *info_word; - - struct mutex tx_lock; - wait_queue_head_t fblockread_event; - - void *tx_fifo; - void *rx_fifo; - int fifo_size; - - void *bounce_buffer; - qcom_smd_cb_t cb; - - spinlock_t recv_lock; - - int pkt_size; - - void *drvdata; - - struct list_head list; -}; - -/* - * Format of the smd_info smem items, for byte aligned channels. - */ -struct smd_channel_info { - __le32 state; - u8 fDSR; - u8 fCTS; - u8 fCD; - u8 fRI; - u8 fHEAD; - u8 fTAIL; - u8 fSTATE; - u8 fBLOCKREADINTR; - __le32 tail; - __le32 head; -}; - -struct smd_channel_info_pair { - struct smd_channel_info tx; - struct smd_channel_info rx; -}; - -/* - * Format of the smd_info smem items, for word aligned channels. - */ -struct smd_channel_info_word { - __le32 state; - __le32 fDSR; - __le32 fCTS; - __le32 fCD; - __le32 fRI; - __le32 fHEAD; - __le32 fTAIL; - __le32 fSTATE; - __le32 fBLOCKREADINTR; - __le32 tail; - __le32 head; -}; - -struct smd_channel_info_word_pair { - struct smd_channel_info_word tx; - struct smd_channel_info_word rx; -}; - -#define GET_RX_CHANNEL_FLAG(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ - channel->info_word ? \ - le32_to_cpu(channel->info_word->rx.param) : \ - channel->info->rx.param; \ - }) - -#define GET_RX_CHANNEL_INFO(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ - le32_to_cpu(channel->info_word ? 
\ - channel->info_word->rx.param : \ - channel->info->rx.param); \ - }) - -#define SET_RX_CHANNEL_FLAG(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ - if (channel->info_word) \ - channel->info_word->rx.param = cpu_to_le32(value); \ - else \ - channel->info->rx.param = value; \ - }) - -#define SET_RX_CHANNEL_INFO(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ - if (channel->info_word) \ - channel->info_word->rx.param = cpu_to_le32(value); \ - else \ - channel->info->rx.param = cpu_to_le32(value); \ - }) - -#define GET_TX_CHANNEL_FLAG(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ - channel->info_word ? \ - le32_to_cpu(channel->info_word->tx.param) : \ - channel->info->tx.param; \ - }) - -#define GET_TX_CHANNEL_INFO(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ - le32_to_cpu(channel->info_word ? \ - channel->info_word->tx.param : \ - channel->info->tx.param); \ - }) - -#define SET_TX_CHANNEL_FLAG(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ - if (channel->info_word) \ - channel->info_word->tx.param = cpu_to_le32(value); \ - else \ - channel->info->tx.param = value; \ - }) - -#define SET_TX_CHANNEL_INFO(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ - if (channel->info_word) \ - channel->info_word->tx.param = cpu_to_le32(value); \ - else \ - channel->info->tx.param = cpu_to_le32(value); \ - }) - -/** - * struct qcom_smd_alloc_entry - channel allocation entry - * @name: channel name - * @cid: channel index - * @flags: channel flags and edge id - * @ref_count: reference count of the channel - */ -struct qcom_smd_alloc_entry { - u8 name[20]; - __le32 cid; - __le32 flags; - __le32 ref_count; -} __packed; - -#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff -#define SMD_CHANNEL_FLAGS_STREAM BIT(8) -#define SMD_CHANNEL_FLAGS_PACKET BIT(9) - -/* - * Each smd packet contains a 20 byte header, with the first 4 being the length - * of the packet. - */ -#define SMD_PACKET_HEADER_LEN 20 - -/* - * Signal the remote processor associated with 'channel'. 
- */ -static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) -{ - struct qcom_smd_edge *edge = channel->edge; - - regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit)); -} - -/* - * Initialize the tx channel info - */ -static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) -{ - SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_TX_CHANNEL_FLAG(channel, fDSR, 0); - SET_TX_CHANNEL_FLAG(channel, fCTS, 0); - SET_TX_CHANNEL_FLAG(channel, fCD, 0); - SET_TX_CHANNEL_FLAG(channel, fRI, 0); - SET_TX_CHANNEL_FLAG(channel, fHEAD, 0); - SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); - SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); - SET_TX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); - - qcom_smd_signal_channel(channel); - - channel->state = SMD_CHANNEL_CLOSED; - channel->pkt_size = 0; -} - -/* - * Set the callback for a channel, with appropriate locking - */ -static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel, - qcom_smd_cb_t cb) -{ - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - channel->cb = cb; - spin_unlock_irqrestore(&channel->recv_lock, flags); -}; - -/* - * Calculate the amount of data available in the rx fifo - */ -static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel) -{ - unsigned head; - unsigned tail; - - head = GET_RX_CHANNEL_INFO(channel, head); - tail = GET_RX_CHANNEL_INFO(channel, tail); - - return (head - tail) & (channel->fifo_size - 1); -} - -/* - * Set tx channel state and inform the remote processor - */ -static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, - int state) -{ - struct qcom_smd_edge *edge = channel->edge; - bool is_open = state == SMD_CHANNEL_OPENED; - - if (channel->state == state) - return; - - dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state); - - SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); - SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); - SET_TX_CHANNEL_FLAG(channel, fCD, is_open); - - SET_TX_CHANNEL_INFO(channel, state, state); - SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); - - channel->state = state; - qcom_smd_signal_channel(channel); -} - -/* - * Copy count bytes of data using 32bit accesses, if that's required. - */ -static void smd_copy_to_fifo(void __iomem *dst, - const void *src, - size_t count, - bool word_aligned) -{ - if (word_aligned) { - __iowrite32_copy(dst, src, count / sizeof(u32)); - } else { - memcpy_toio(dst, src, count); - } -} - -/* - * Copy count bytes of data using 32bit accesses, if that is required. - */ -static void smd_copy_from_fifo(void *dst, - const void __iomem *src, - size_t count, - bool word_aligned) -{ - if (word_aligned) { - __ioread32_copy(dst, src, count / sizeof(u32)); - } else { - memcpy_fromio(dst, src, count); - } -} - -/* - * Read count bytes of data from the rx fifo into buf, but don't advance the - * tail. - */ -static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, - void *buf, size_t count) -{ - bool word_aligned; - unsigned tail; - size_t len; - - word_aligned = channel->info_word; - tail = GET_RX_CHANNEL_INFO(channel, tail); - - len = min_t(size_t, count, channel->fifo_size - tail); - if (len) { - smd_copy_from_fifo(buf, - channel->rx_fifo + tail, - len, - word_aligned); - } - - if (len != count) { - smd_copy_from_fifo(buf + len, - channel->rx_fifo, - count - len, - word_aligned); - } - - return count; -} - -/* - * Advance the rx tail by count bytes. 
- */ -static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, - size_t count) -{ - unsigned tail; - - tail = GET_RX_CHANNEL_INFO(channel, tail); - tail += count; - tail &= (channel->fifo_size - 1); - SET_RX_CHANNEL_INFO(channel, tail, tail); -} - -/* - * Read out a single packet from the rx fifo and deliver it to the device - */ -static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) -{ - unsigned tail; - size_t len; - void *ptr; - int ret; - - if (!channel->cb) - return 0; - - tail = GET_RX_CHANNEL_INFO(channel, tail); - - /* Use bounce buffer if the data wraps */ - if (tail + channel->pkt_size >= channel->fifo_size) { - ptr = channel->bounce_buffer; - len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size); - } else { - ptr = channel->rx_fifo + tail; - len = channel->pkt_size; - } - - ret = channel->cb(channel, ptr, len); - if (ret < 0) - return ret; - - /* Only forward the tail if the client consumed the data */ - qcom_smd_channel_advance(channel, len); - - channel->pkt_size = 0; - - return 0; -} - -/* - * Per channel interrupt handling - */ -static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) -{ - bool need_state_scan = false; - int remote_state; - __le32 pktlen; - int avail; - int ret; - - /* Handle state changes */ - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state != channel->remote_state) { - channel->remote_state = remote_state; - need_state_scan = true; - } - /* Indicate that we have seen any state change */ - SET_RX_CHANNEL_FLAG(channel, fSTATE, 0); - - /* Signal waiting qcom_smd_send() about the interrupt */ - if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) - wake_up_interruptible(&channel->fblockread_event); - - /* Don't consume any data until we've opened the channel */ - if (channel->state != SMD_CHANNEL_OPENED) - goto out; - - /* Indicate that we've seen the new data */ - SET_RX_CHANNEL_FLAG(channel, fHEAD, 0); - - /* Consume data */ - for (;;) { - avail = qcom_smd_channel_get_rx_avail(channel); - - if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { - qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); - qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); - channel->pkt_size = le32_to_cpu(pktlen); - } else if (channel->pkt_size && avail >= channel->pkt_size) { - ret = qcom_smd_channel_recv_single(channel); - if (ret) - break; - } else { - break; - } - } - - /* Indicate that we have seen and updated tail */ - SET_RX_CHANNEL_FLAG(channel, fTAIL, 1); - - /* Signal the remote that we've consumed the data (if requested) */ - if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { - /* Ensure ordering of channel info updates */ - wmb(); - - qcom_smd_signal_channel(channel); - } - -out: - return need_state_scan; -} - -/* - * The edge interrupts are triggered by the remote processor on state changes, - * channel info updates or when new channels are created. 
- */ -static irqreturn_t qcom_smd_edge_intr(int irq, void *data) -{ - struct qcom_smd_edge *edge = data; - struct qcom_smd_channel *channel; - unsigned available; - bool kick_scanner = false; - bool kick_state = false; - - /* - * Handle state changes or data on each of the channels on this edge - */ - spin_lock(&edge->channels_lock); - list_for_each_entry(channel, &edge->channels, list) { - spin_lock(&channel->recv_lock); - kick_state |= qcom_smd_channel_intr(channel); - spin_unlock(&channel->recv_lock); - } - spin_unlock(&edge->channels_lock); - - /* - * Creating a new channel requires allocating an smem entry, so we only - * have to scan if the amount of available space in smem have changed - * since last scan. - */ - available = qcom_smem_get_free_space(edge->remote_pid); - if (available != edge->smem_available) { - edge->smem_available = available; - kick_scanner = true; - } - - if (kick_scanner) - schedule_work(&edge->scan_work); - if (kick_state) - schedule_work(&edge->state_work); - - return IRQ_HANDLED; -} - -/* - * Delivers any outstanding packets in the rx fifo, can be used after probe of - * the clients to deliver any packets that wasn't delivered before the client - * was setup. - */ -static void qcom_smd_channel_resume(struct qcom_smd_channel *channel) -{ - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - qcom_smd_channel_intr(channel); - spin_unlock_irqrestore(&channel->recv_lock, flags); -} - -/* - * Calculate how much space is available in the tx fifo. - */ -static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel) -{ - unsigned head; - unsigned tail; - unsigned mask = channel->fifo_size - 1; - - head = GET_TX_CHANNEL_INFO(channel, head); - tail = GET_TX_CHANNEL_INFO(channel, tail); - - return mask - ((head - tail) & mask); -} - -/* - * Write count bytes of data into channel, possibly wrapping in the ring buffer - */ -static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, - const void *data, - size_t count) -{ - bool word_aligned; - unsigned head; - size_t len; - - word_aligned = channel->info_word; - head = GET_TX_CHANNEL_INFO(channel, head); - - len = min_t(size_t, count, channel->fifo_size - head); - if (len) { - smd_copy_to_fifo(channel->tx_fifo + head, - data, - len, - word_aligned); - } - - if (len != count) { - smd_copy_to_fifo(channel->tx_fifo, - data + len, - count - len, - word_aligned); - } - - head += count; - head &= (channel->fifo_size - 1); - SET_TX_CHANNEL_INFO(channel, head, head); - - return count; -} - -/** - * qcom_smd_send - write data to smd channel - * @channel: channel handle - * @data: buffer of data to write - * @len: number of bytes to write - * - * This is a blocking write of len bytes into the channel's tx ring buffer and - * signal the remote end. It will sleep until there is enough space available - * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid - * polling. 
- */ -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) -{ - __le32 hdr[5] = { cpu_to_le32(len), }; - int tlen = sizeof(hdr) + len; - int ret; - - /* Word aligned channels only accept word size aligned data */ - if (channel->info_word && len % 4) - return -EINVAL; - - /* Reject packets that are too big */ - if (tlen >= channel->fifo_size) - return -EINVAL; - - ret = mutex_lock_interruptible(&channel->tx_lock); - if (ret) - return ret; - - while (qcom_smd_get_tx_avail(channel) < tlen) { - if (channel->state != SMD_CHANNEL_OPENED) { - ret = -EPIPE; - goto out; - } - - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0); - - ret = wait_event_interruptible(channel->fblockread_event, - qcom_smd_get_tx_avail(channel) >= tlen || - channel->state != SMD_CHANNEL_OPENED); - if (ret) - goto out; - - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); - } - - SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); - - qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); - qcom_smd_write_fifo(channel, data, len); - - SET_TX_CHANNEL_FLAG(channel, fHEAD, 1); - - /* Ensure ordering of channel info updates */ - wmb(); - - qcom_smd_signal_channel(channel); - -out: - mutex_unlock(&channel->tx_lock); - - return ret; -} -EXPORT_SYMBOL(qcom_smd_send); - -static struct qcom_smd_device *to_smd_device(struct device *dev) -{ - return container_of(dev, struct qcom_smd_device, dev); -} - -static struct qcom_smd_driver *to_smd_driver(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - - return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver); -} - -static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver); - const struct qcom_smd_id *match = qsdrv->smd_match_table; - const char *name = qsdev->channel->name; - - if (match) { - while (match->name[0]) { - if (!strcmp(match->name, name)) - return 1; - match++; - } - } - - return of_driver_match_device(dev, drv); -} - -/* - * Helper for opening a channel - */ -static int qcom_smd_channel_open(struct qcom_smd_channel *channel, - qcom_smd_cb_t cb) -{ - size_t bb_size; - - /* - * Packets are maximum 4k, but reduce if the fifo is smaller - */ - bb_size = min(channel->fifo_size, SZ_4K); - channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); - if (!channel->bounce_buffer) - return -ENOMEM; - - qcom_smd_channel_set_callback(channel, cb); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); - - return 0; -} - -/* - * Helper for closing and resetting a channel - */ -static void qcom_smd_channel_close(struct qcom_smd_channel *channel) -{ - qcom_smd_channel_set_callback(channel, NULL); - - kfree(channel->bounce_buffer); - channel->bounce_buffer = NULL; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); - qcom_smd_channel_reset(channel); -} - -/* - * Probe the smd client. - * - * The remote side have indicated that it want the channel to be opened, so - * complete the state handshake and probe our client driver. 
- */ -static int qcom_smd_dev_probe(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; - int ret; - - ret = qcom_smd_channel_open(channel, qsdrv->callback); - if (ret) - return ret; - - ret = qsdrv->probe(qsdev); - if (ret) - goto err; - - qcom_smd_channel_resume(channel); - - return 0; - -err: - dev_err(&qsdev->dev, "probe failed\n"); - - qcom_smd_channel_close(channel); - return ret; -} - -/* - * Remove the smd client. - * - * The channel is going away, for some reason, so remove the smd client and - * reset the channel state. - */ -static int qcom_smd_dev_remove(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); - - /* - * Make sure we don't race with the code receiving data. - */ - qcom_smd_channel_set_callback(channel, NULL); - - /* Wake up any sleepers in qcom_smd_send() */ - wake_up_interruptible(&channel->fblockread_event); - - /* - * We expect that the client might block in remove() waiting for any - * outstanding calls to qcom_smd_send() to wake up and finish. - */ - if (qsdrv->remove) - qsdrv->remove(qsdev); - - /* The client is now gone, close the primary channel */ - qcom_smd_channel_close(channel); - channel->qsdev = NULL; - - return 0; -} - -static struct bus_type qcom_smd_bus = { - .name = "qcom_smd", - .match = qcom_smd_dev_match, - .probe = qcom_smd_dev_probe, - .remove = qcom_smd_dev_remove, -}; - -/* - * Release function for the qcom_smd_device object. - */ -static void qcom_smd_release_device(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - - kfree(qsdev); -} - -/* - * Finds the device_node for the smd child interested in this channel. - */ -static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, - const char *channel) -{ - struct device_node *child; - const char *name; - const char *key; - int ret; - - for_each_available_child_of_node(edge_node, child) { - key = "qcom,smd-channels"; - ret = of_property_read_string(child, key, &name); - if (ret) - continue; - - if (strcmp(name, channel) == 0) - return child; - } - - return NULL; -} - -/* - * Create a smd client device for channel that is being opened. - */ -static int qcom_smd_create_device(struct qcom_smd_channel *channel) -{ - struct qcom_smd_device *qsdev; - struct qcom_smd_edge *edge = channel->edge; - struct device_node *node; - int ret; - - if (channel->qsdev) - return -EEXIST; - - dev_dbg(&edge->dev, "registering '%s'\n", channel->name); - - qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); - if (!qsdev) - return -ENOMEM; - - node = qcom_smd_match_channel(edge->of_node, channel->name); - dev_set_name(&qsdev->dev, "%s.%s", - edge->of_node->name, - node ? node->name : channel->name); - - qsdev->dev.parent = &edge->dev; - qsdev->dev.bus = &qcom_smd_bus; - qsdev->dev.release = qcom_smd_release_device; - qsdev->dev.of_node = node; - - qsdev->channel = channel; - - channel->qsdev = qsdev; - - ret = device_register(&qsdev->dev); - if (ret) { - dev_err(&edge->dev, "device_register failed: %d\n", ret); - put_device(&qsdev->dev); - } - - return ret; -} - -/* - * Destroy a smd client device for a channel that's going away. 
- */ -static void qcom_smd_destroy_device(struct qcom_smd_channel *channel) -{ - struct device *dev; - - BUG_ON(!channel->qsdev); - - dev = &channel->qsdev->dev; - - device_unregister(dev); - of_node_put(dev->of_node); - put_device(dev); -} - -/** - * qcom_smd_driver_register - register a smd driver - * @qsdrv: qcom_smd_driver struct - */ -int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv) -{ - qsdrv->driver.bus = &qcom_smd_bus; - return driver_register(&qsdrv->driver); -} -EXPORT_SYMBOL(qcom_smd_driver_register); - -void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) -{ - return channel->drvdata; -} -EXPORT_SYMBOL(qcom_smd_get_drvdata); - -void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) -{ - channel->drvdata = data; -} -EXPORT_SYMBOL(qcom_smd_set_drvdata); - -/** - * qcom_smd_driver_unregister - unregister a smd driver - * @qsdrv: qcom_smd_driver struct - */ -void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv) -{ - driver_unregister(&qsdrv->driver); -} -EXPORT_SYMBOL(qcom_smd_driver_unregister); - -static struct qcom_smd_channel * -qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_channel *ret = NULL; - unsigned long flags; - unsigned state; - - spin_lock_irqsave(&edge->channels_lock, flags); - list_for_each_entry(channel, &edge->channels, list) { - if (strcmp(channel->name, name)) - continue; - - state = GET_RX_CHANNEL_INFO(channel, state); - if (state != SMD_CHANNEL_OPENING && - state != SMD_CHANNEL_OPENED) - continue; - - ret = channel; - break; - } - spin_unlock_irqrestore(&edge->channels_lock, flags); - - return ret; -} - -/** - * qcom_smd_open_channel() - claim additional channels on the same edge - * @sdev: smd_device handle - * @name: channel name - * @cb: callback method to use for incoming data - * - * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't - * ready. - * - * Any channels returned must be closed with a call to qcom_smd_close_channel() - */ -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent, - const char *name, - qcom_smd_cb_t cb) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_device *sdev = parent->qsdev; - struct qcom_smd_edge *edge = parent->edge; - int ret; - - /* Wait up to HZ for the channel to appear */ - ret = wait_event_interruptible_timeout(edge->new_channel_event, - (channel = qcom_smd_find_channel(edge, name)) != NULL, - HZ); - if (!ret) - return ERR_PTR(-ETIMEDOUT); - - if (channel->state != SMD_CHANNEL_CLOSED) { - dev_err(&sdev->dev, "channel %s is busy\n", channel->name); - return ERR_PTR(-EBUSY); - } - - channel->qsdev = sdev; - ret = qcom_smd_channel_open(channel, cb); - if (ret) { - channel->qsdev = NULL; - return ERR_PTR(ret); - } - - return channel; -} -EXPORT_SYMBOL(qcom_smd_open_channel); - -/** - * qcom_smd_close_channel() - close an additionally opened channel - * @channel: channel handle, returned by qcom_smd_open_channel() - */ -void qcom_smd_close_channel(struct qcom_smd_channel *channel) -{ - qcom_smd_channel_close(channel); - channel->qsdev = NULL; -} -EXPORT_SYMBOL(qcom_smd_close_channel); - -/* - * Allocate the qcom_smd_channel object for a newly found smd channel, - * retrieving and validating the smem items involved. 
- */ -static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge, - unsigned smem_info_item, - unsigned smem_fifo_item, - char *name) -{ - struct qcom_smd_channel *channel; - size_t fifo_size; - size_t info_size; - void *fifo_base; - void *info; - int ret; - - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL); - if (!channel) - return ERR_PTR(-ENOMEM); - - channel->edge = edge; - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL); - if (!channel->name) - return ERR_PTR(-ENOMEM); - - mutex_init(&channel->tx_lock); - spin_lock_init(&channel->recv_lock); - init_waitqueue_head(&channel->fblockread_event); - - info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto free_name_and_channel; - } - - /* - * Use the size of the item to figure out which channel info struct to - * use. - */ - if (info_size == 2 * sizeof(struct smd_channel_info_word)) { - channel->info_word = info; - } else if (info_size == 2 * sizeof(struct smd_channel_info)) { - channel->info = info; - } else { - dev_err(&edge->dev, - "channel info of size %zu not supported\n", info_size); - ret = -EINVAL; - goto free_name_and_channel; - } - - fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); - if (IS_ERR(fifo_base)) { - ret = PTR_ERR(fifo_base); - goto free_name_and_channel; - } - - /* The channel consist of a rx and tx fifo of equal size */ - fifo_size /= 2; - - dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", - name, info_size, fifo_size); - - channel->tx_fifo = fifo_base; - channel->rx_fifo = fifo_base + fifo_size; - channel->fifo_size = fifo_size; - - qcom_smd_channel_reset(channel); - - return channel; - -free_name_and_channel: - devm_kfree(&edge->dev, channel->name); - devm_kfree(&edge->dev, channel); - - return ERR_PTR(ret); -} - -/* - * Scans the allocation table for any newly allocated channels, calls - * qcom_smd_create_channel() to create representations of these and add - * them to the edge's list of channels. 
- */ -static void qcom_channel_scan_worker(struct work_struct *work) -{ - struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work); - struct qcom_smd_alloc_entry *alloc_tbl; - struct qcom_smd_alloc_entry *entry; - struct qcom_smd_channel *channel; - unsigned long flags; - unsigned fifo_id; - unsigned info_id; - int tbl; - int i; - u32 eflags, cid; - - for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { - alloc_tbl = qcom_smem_get(edge->remote_pid, - smem_items[tbl].alloc_tbl_id, NULL); - if (IS_ERR(alloc_tbl)) - continue; - - for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { - entry = &alloc_tbl[i]; - eflags = le32_to_cpu(entry->flags); - if (test_bit(i, edge->allocated[tbl])) - continue; - - if (entry->ref_count == 0) - continue; - - if (!entry->name[0]) - continue; - - if (!(eflags & SMD_CHANNEL_FLAGS_PACKET)) - continue; - - if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) - continue; - - cid = le32_to_cpu(entry->cid); - info_id = smem_items[tbl].info_base_id + cid; - fifo_id = smem_items[tbl].fifo_base_id + cid; - - channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); - if (IS_ERR(channel)) - continue; - - spin_lock_irqsave(&edge->channels_lock, flags); - list_add(&channel->list, &edge->channels); - spin_unlock_irqrestore(&edge->channels_lock, flags); - - dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name); - set_bit(i, edge->allocated[tbl]); - - wake_up_interruptible(&edge->new_channel_event); - } - } - - schedule_work(&edge->state_work); -} - -/* - * This per edge worker scans smem for any new channels and register these. It - * then scans all registered channels for state changes that should be handled - * by creating or destroying smd client devices for the registered channels. - * - * LOCKING: edge->channels_lock only needs to cover the list operations, as the - * worker is killed before any channels are deallocated - */ -static void qcom_channel_state_worker(struct work_struct *work) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = container_of(work, - struct qcom_smd_edge, - state_work); - unsigned remote_state; - unsigned long flags; - - /* - * Register a device for any closed channel where the remote processor - * is showing interest in opening the channel. - */ - spin_lock_irqsave(&edge->channels_lock, flags); - list_for_each_entry(channel, &edge->channels, list) { - if (channel->state != SMD_CHANNEL_CLOSED) - continue; - - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state != SMD_CHANNEL_OPENING && - remote_state != SMD_CHANNEL_OPENED) - continue; - - spin_unlock_irqrestore(&edge->channels_lock, flags); - qcom_smd_create_device(channel); - spin_lock_irqsave(&edge->channels_lock, flags); - } - - /* - * Unregister the device for any channel that is opened where the - * remote processor is closing the channel. - */ - list_for_each_entry(channel, &edge->channels, list) { - if (channel->state != SMD_CHANNEL_OPENING && - channel->state != SMD_CHANNEL_OPENED) - continue; - - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state == SMD_CHANNEL_OPENING || - remote_state == SMD_CHANNEL_OPENED) - continue; - - spin_unlock_irqrestore(&edge->channels_lock, flags); - qcom_smd_destroy_device(channel); - spin_lock_irqsave(&edge->channels_lock, flags); - } - spin_unlock_irqrestore(&edge->channels_lock, flags); -} - -/* - * Parses an of_node describing an edge. 
- */ -static int qcom_smd_parse_edge(struct device *dev, - struct device_node *node, - struct qcom_smd_edge *edge) -{ - struct device_node *syscon_np; - const char *key; - int irq; - int ret; - - INIT_LIST_HEAD(&edge->channels); - spin_lock_init(&edge->channels_lock); - - INIT_WORK(&edge->scan_work, qcom_channel_scan_worker); - INIT_WORK(&edge->state_work, qcom_channel_state_worker); - - edge->of_node = of_node_get(node); - - key = "qcom,smd-edge"; - ret = of_property_read_u32(node, key, &edge->edge_id); - if (ret) { - dev_err(dev, "edge missing %s property\n", key); - return -EINVAL; - } - - edge->remote_pid = QCOM_SMEM_HOST_ANY; - key = "qcom,remote-pid"; - of_property_read_u32(node, key, &edge->remote_pid); - - syscon_np = of_parse_phandle(node, "qcom,ipc", 0); - if (!syscon_np) { - dev_err(dev, "no qcom,ipc node\n"); - return -ENODEV; - } - - edge->ipc_regmap = syscon_node_to_regmap(syscon_np); - if (IS_ERR(edge->ipc_regmap)) - return PTR_ERR(edge->ipc_regmap); - - key = "qcom,ipc"; - ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); - if (ret < 0) { - dev_err(dev, "no offset in %s\n", key); - return -EINVAL; - } - - ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); - if (ret < 0) { - dev_err(dev, "no bit in %s\n", key); - return -EINVAL; - } - - irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { - dev_err(dev, "required smd interrupt missing\n"); - return -EINVAL; - } - - ret = devm_request_irq(dev, irq, - qcom_smd_edge_intr, IRQF_TRIGGER_RISING, - node->name, edge); - if (ret) { - dev_err(dev, "failed to request smd irq\n"); - return ret; - } - - edge->irq = irq; - - return 0; -} - -/* - * Release function for an edge. - * Reset the state of each associated channel and free the edge context. - */ -static void qcom_smd_edge_release(struct device *dev) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = to_smd_edge(dev); - - list_for_each_entry(channel, &edge->channels, list) { - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_RX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); - } - - kfree(edge); -} - -/** - * qcom_smd_register_edge() - register an edge based on an device_node - * @parent: parent device for the edge - * @node: device_node describing the edge - * - * Returns an edge reference, or negative ERR_PTR() on failure. 
- */ -struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, - struct device_node *node) -{ - struct qcom_smd_edge *edge; - int ret; - - edge = kzalloc(sizeof(*edge), GFP_KERNEL); - if (!edge) - return ERR_PTR(-ENOMEM); - - init_waitqueue_head(&edge->new_channel_event); - - edge->dev.parent = parent; - edge->dev.release = qcom_smd_edge_release; - dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name); - ret = device_register(&edge->dev); - if (ret) { - pr_err("failed to register smd edge\n"); - return ERR_PTR(ret); - } - - ret = qcom_smd_parse_edge(&edge->dev, node, edge); - if (ret) { - dev_err(&edge->dev, "failed to parse smd edge\n"); - goto unregister_dev; - } - - schedule_work(&edge->scan_work); - - return edge; - -unregister_dev: - put_device(&edge->dev); - return ERR_PTR(ret); -} -EXPORT_SYMBOL(qcom_smd_register_edge); - -static int qcom_smd_remove_device(struct device *dev, void *data) -{ - device_unregister(dev); - of_node_put(dev->of_node); - put_device(dev); - - return 0; -} - -/** - * qcom_smd_unregister_edge() - release an edge and its children - * @edge: edge reference acquired from qcom_smd_register_edge - */ -int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) -{ - int ret; - - disable_irq(edge->irq); - cancel_work_sync(&edge->scan_work); - cancel_work_sync(&edge->state_work); - - ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device); - if (ret) - dev_warn(&edge->dev, "can't remove smd device: %d\n", ret); - - device_unregister(&edge->dev); - - return 0; -} -EXPORT_SYMBOL(qcom_smd_unregister_edge); - -static int qcom_smd_probe(struct platform_device *pdev) -{ - struct device_node *node; - void *p; - - /* Wait for smem */ - p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL); - if (PTR_ERR(p) == -EPROBE_DEFER) - return PTR_ERR(p); - - for_each_available_child_of_node(pdev->dev.of_node, node) - qcom_smd_register_edge(&pdev->dev, node); - - return 0; -} - -static int qcom_smd_remove_edge(struct device *dev, void *data) -{ - struct qcom_smd_edge *edge = to_smd_edge(dev); - - return qcom_smd_unregister_edge(edge); -} - -/* - * Shut down all smd clients by making sure that each edge stops processing - * events and scanning for new channels, then call destroy on the devices. 
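Since qcom_smd_register_edge() reports failure through ERR_PTR(), callers outside this file were expected to check the result before use; a minimal sketch (parent_dev and edge_node are hypothetical):

struct qcom_smd_edge *edge;

edge = qcom_smd_register_edge(parent_dev, edge_node);
if (IS_ERR(edge))
	return PTR_ERR(edge);

/* ... use the edge; on teardown: */
qcom_smd_unregister_edge(edge);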
- */ -static int qcom_smd_remove(struct platform_device *pdev) -{ - int ret; - - ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge); - if (ret) - dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret); - - return ret; -} - -static const struct of_device_id qcom_smd_of_match[] = { - { .compatible = "qcom,smd" }, - {} -}; -MODULE_DEVICE_TABLE(of, qcom_smd_of_match); - -static struct platform_driver qcom_smd_driver = { - .probe = qcom_smd_probe, - .remove = qcom_smd_remove, - .driver = { - .name = "qcom-smd", - .of_match_table = qcom_smd_of_match, - }, -}; - -static int __init qcom_smd_init(void) -{ - int ret; - - ret = bus_register(&qcom_smd_bus); - if (ret) { - pr_err("failed to register smd bus: %d\n", ret); - return ret; - } - - return platform_driver_register(&qcom_smd_driver); -} -postcore_initcall(qcom_smd_init); - -static void __exit qcom_smd_exit(void) -{ - platform_driver_unregister(&qcom_smd_driver); - bus_unregister(&qcom_smd_bus); -} -module_exit(qcom_smd_exit); - -MODULE_AUTHOR("Bjorn Andersson "); -MODULE_DESCRIPTION("Qualcomm Shared Memory Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index 520aedd29965498295b3992e4c8c76d2ea6b8c04..b9069184df193f34aa6d9c8bfa4f6502d6764574 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -14,10 +14,10 @@ #include #include #include -#include #include #include #include +#include #include #define WCNSS_REQUEST_TIMEOUT (5 * HZ) @@ -40,7 +40,7 @@ */ struct wcnss_ctrl { struct device *dev; - struct qcom_smd_channel *channel; + struct rpmsg_endpoint *channel; struct completion ack; struct completion cbc; @@ -122,11 +122,13 @@ struct wcnss_download_nv_resp { * * Handles any incoming packets from the remote WCNSS_CTRL service. 
*/ -static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, - const void *data, - size_t count) +static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) { - struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel); + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); const struct wcnss_download_nv_resp *nvresp; const struct wcnss_version_resp *version; const struct wcnss_msg_hdr *hdr = data; @@ -180,7 +182,7 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss) msg.type = WCNSS_VERSION_REQ; msg.len = sizeof(msg); - ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg)); + ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg)); if (ret < 0) return ret; @@ -238,7 +240,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) memcpy(req->fragment, data, req->frag_size); - ret = qcom_smd_send(wcnss->channel, req, req->hdr.len); + ret = rpmsg_send(wcnss->channel, req, req->hdr.len); if (ret < 0) { dev_err(wcnss->dev, "failed to send smd packet\n"); goto release_fw; @@ -274,11 +276,16 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) * @name: SMD channel name * @cb: callback to handle incoming data on the channel */ -struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) +struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv) { + struct rpmsg_channel_info chinfo; struct wcnss_ctrl *_wcnss = wcnss; - return qcom_smd_open_channel(_wcnss->channel, name, cb); + strncpy(chinfo.name, name, sizeof(chinfo.name)); + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo); } EXPORT_SYMBOL(qcom_wcnss_open_channel); @@ -306,35 +313,34 @@ static void wcnss_async_probe(struct work_struct *work) of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev); } -static int wcnss_ctrl_probe(struct qcom_smd_device *sdev) +static int wcnss_ctrl_probe(struct rpmsg_device *rpdev) { struct wcnss_ctrl *wcnss; - wcnss = devm_kzalloc(&sdev->dev, sizeof(*wcnss), GFP_KERNEL); + wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL); if (!wcnss) return -ENOMEM; - wcnss->dev = &sdev->dev; - wcnss->channel = sdev->channel; + wcnss->dev = &rpdev->dev; + wcnss->channel = rpdev->ept; init_completion(&wcnss->ack); init_completion(&wcnss->cbc); INIT_WORK(&wcnss->probe_work, wcnss_async_probe); - qcom_smd_set_drvdata(sdev->channel, wcnss); - dev_set_drvdata(&sdev->dev, wcnss); + dev_set_drvdata(&rpdev->dev, wcnss); schedule_work(&wcnss->probe_work); return 0; } -static void wcnss_ctrl_remove(struct qcom_smd_device *sdev) +static void wcnss_ctrl_remove(struct rpmsg_device *rpdev) { - struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel); + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); cancel_work_sync(&wcnss->probe_work); - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&rpdev->dev); } static const struct of_device_id wcnss_ctrl_of_match[] = { @@ -342,18 +348,18 @@ static const struct of_device_id wcnss_ctrl_of_match[] = { {} }; -static struct qcom_smd_driver wcnss_ctrl_driver = { +static struct rpmsg_driver wcnss_ctrl_driver = { .probe = wcnss_ctrl_probe, .remove = wcnss_ctrl_remove, .callback = wcnss_ctrl_smd_callback, - .driver = { + .drv = { .name = "qcom_wcnss_ctrl", .owner = THIS_MODULE, .of_match_table = wcnss_ctrl_of_match, }, }; -module_qcom_smd_driver(wcnss_ctrl_driver); 
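The wcnss_ctrl conversion around this point follows a mechanical recipe: struct qcom_smd_device becomes struct rpmsg_device, qcom_smd_send() becomes rpmsg_send() on the device's primary endpoint (rpdev->ept), and the receive callback gains the rpmsg signature. A minimal skeleton of the resulting driver shape (driver name hypothetical; matching table and error handling elided):

#include <linux/module.h>
#include <linux/rpmsg.h>

static int demo_callback(struct rpmsg_device *rpdev, void *data,
			 int count, void *priv, u32 addr)
{
	dev_dbg(&rpdev->dev, "received %d bytes\n", count);
	return 0;
}

static int demo_probe(struct rpmsg_device *rpdev)
{
	static const u8 ping;

	/* rpdev->ept is the channel's primary endpoint */
	return rpmsg_send(rpdev->ept, (void *)&ping, sizeof(ping));
}

static struct rpmsg_driver demo_driver = {
	.probe		= demo_probe,
	.callback	= demo_callback,
	.drv		= {
		.name	= "demo_rpmsg_client",
	},
};
module_rpmsg_driver(demo_driver);

MODULE_DESCRIPTION("Sketch of an rpmsg client");
MODULE_LICENSE("GPL v2");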
+module_rpmsg_driver(wcnss_ctrl_driver); MODULE_DESCRIPTION("Qualcomm WCNSS control driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 7961d1c56847392c6d977e065cab76f435ea31de..2b4536318ca62b6bb464c2c16dcde34b6540e0c0 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -1837,7 +1837,7 @@ static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, } static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, struct vif_params *params) + enum nl80211_iftype type, struct vif_params *params) { struct wilc_priv *priv; struct wilc_vif *vif; @@ -2099,7 +2099,6 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, - u32 *flags, struct vif_params *params) { struct wilc_vif *vif; diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 11870cb3f2548e28df9018918bda3a72e6f1e2be..178f6f5d4613daa4468815649ed438d2b51cdff2 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -100,7 +100,7 @@ static int prism2_domibset_pstr32(struct wlandevice *wlandev, /* The interface functions, called by the cfg80211 layer */ static int prism2_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct wlandevice *wlandev = dev->ml_priv; @@ -666,8 +666,11 @@ void prism2_disconnected(struct wlandevice *wlandev) void prism2_roamed(struct wlandevice *wlandev) { - cfg80211_roamed(wlandev->netdev, NULL, wlandev->bssid, - NULL, 0, NULL, 0, GFP_KERNEL); + struct cfg80211_roam_info roam_info = { + .bssid = wlandev->bssid, + }; + + cfg80211_roamed(wlandev->netdev, &roam_info, GFP_KERNEL); } /* Structures for declaring wiphy interface */ diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index f4c6c90add7882e92e43cea88dd2cc0d436fb21b..1e1cbae3a0ead04e2e82e847459b39b257145d0f 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -173,6 +173,39 @@ void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable) } EXPORT_SYMBOL_GPL(serdev_device_set_flow_control); +void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout) +{ + struct serdev_controller *ctrl = serdev->ctrl; + + if (!ctrl || !ctrl->ops->wait_until_sent) + return; + + ctrl->ops->wait_until_sent(ctrl, timeout); +} +EXPORT_SYMBOL_GPL(serdev_device_wait_until_sent); + +int serdev_device_get_tiocm(struct serdev_device *serdev) +{ + struct serdev_controller *ctrl = serdev->ctrl; + + if (!ctrl || !ctrl->ops->get_tiocm) + return -ENOTSUPP; + + return ctrl->ops->get_tiocm(ctrl); +} +EXPORT_SYMBOL_GPL(serdev_device_get_tiocm); + +int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear) +{ + struct serdev_controller *ctrl = serdev->ctrl; + + if (!ctrl || !ctrl->ops->set_tiocm) + return -ENOTSUPP; + + return ctrl->ops->set_tiocm(ctrl, set, clear); +} +EXPORT_SYMBOL_GPL(serdev_device_set_tiocm); + static int serdev_drv_probe(struct device *dev) { const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver); diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 
d05393594f1505b4e82686e417114adfcf1d1e80..487c88f6aa0e36b5f986f5c02477e65f0f55143c 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -14,6 +14,7 @@ #include #include #include +#include #define SERPORT_ACTIVE 1 @@ -46,11 +47,11 @@ static void ttyport_write_wakeup(struct tty_port *port) struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); - if (!test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags)) - return; - - if (test_bit(SERPORT_ACTIVE, &serport->flags)) + if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) && + test_bit(SERPORT_ACTIVE, &serport->flags)) serdev_controller_write_wakeup(ctrl); + + wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT); } static const struct tty_port_client_operations client_ops = { @@ -167,6 +168,36 @@ static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable tty_set_termios(tty, &ktermios); } +static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout) +{ + struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty = serport->tty; + + tty_wait_until_sent(tty, timeout); +} + +static int ttyport_get_tiocm(struct serdev_controller *ctrl) +{ + struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty = serport->tty; + + if (!tty->ops->tiocmget) + return -ENOTSUPP; + + return tty->driver->ops->tiocmget(tty); +} + +static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, unsigned int clear) +{ + struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty = serport->tty; + + if (!tty->ops->tiocmset) + return -ENOTSUPP; + + return tty->driver->ops->tiocmset(tty, set, clear); +} + static const struct serdev_controller_ops ctrl_ops = { .write_buf = ttyport_write_buf, .write_flush = ttyport_write_flush, @@ -175,6 +206,9 @@ static const struct serdev_controller_ops ctrl_ops = { .close = ttyport_close, .set_flow_control = ttyport_set_flow_control, .set_baudrate = ttyport_set_baudrate, + .wait_until_sent = ttyport_wait_until_sent, + .get_tiocm = ttyport_get_tiocm, + .set_tiocm = ttyport_set_tiocm, }; struct device *serdev_tty_port_register(struct tty_port *port, diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 6c6f82ad8d5cb63982fc35c9047e64a8a18135a0..a4734649a0f0f8b0ea3de17c5c26c3a2acc54c19 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1597,6 +1597,9 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev) of_property_read_u32(dev->of_node, "clock-frequency", &omap_up_info->uartclk); + + omap_up_info->flags = UPF_BOOT_AUTOCONF; + return omap_up_info; } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index c6fc7141d7b2814cc122d430c38d61bbde675358..677f0ddc986c11005dc9469dc0fb3adf96d1f5a5 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -446,7 +446,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { */ NULL, /* a */ &sysrq_reboot_op, /* b */ - &sysrq_crash_op, /* c & ibm_emac driver debug */ + &sysrq_crash_op, /* c */ &sysrq_showlocks_op, /* d */ &sysrq_term_op, /* e */ &sysrq_moom_op, /* f */ diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 224717e63a5300970867a663cd030d8cd62068f6..864819ff9a7d362962eb837755886423b57906fb 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -16,6 
+16,7 @@ */ #include +#include #include #include #include diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 44eed8eb0725b25e3c9765e19387e7c338ab9bbb..d939ac1a499747ed80dd060a5a2984686375359e 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -176,6 +176,11 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, restart_tx = true; } + /* Deliver to monitoring devices all correctly transmitted + * packets. + */ + virtio_transport_deliver_tap_pkt(pkt); + virtio_transport_free_pkt(pkt); } if (added) @@ -383,6 +388,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) len = pkt->len; + /* Deliver to monitoring devices all received packets */ + virtio_transport_deliver_tap_pkt(pkt); + /* Only accept correctly addressed packets */ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) virtio_transport_recv_pkt(pkt); diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 8f76b13d55494bddec9e81203c0734a0f6d811d7..d5990eb160bdf49a3a916c0195d04fbe521ef2b2 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -419,7 +419,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, call->state = AFS_CALL_COMPLETE; if (ret != -ECONNABORTED) { rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, - -ret, "KSD"); + ret, "KSD"); } else { abort_code = 0; offset = 0; @@ -478,12 +478,12 @@ static void afs_deliver_to_call(struct afs_call *call) case -ENOTCONN: abort_code = RX_CALL_DEAD; rxrpc_kernel_abort_call(afs_socket, call->rxcall, - abort_code, -ret, "KNC"); + abort_code, ret, "KNC"); goto save_error; case -ENOTSUPP: abort_code = RXGEN_OPCODE; rxrpc_kernel_abort_call(afs_socket, call->rxcall, - abort_code, -ret, "KIV"); + abort_code, ret, "KIV"); goto save_error; case -ENODATA: case -EBADMSG: @@ -493,7 +493,7 @@ static void afs_deliver_to_call(struct afs_call *call) if (call->state != AFS_CALL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; rxrpc_kernel_abort_call(afs_socket, call->rxcall, - abort_code, EBADMSG, "KUM"); + abort_code, -EBADMSG, "KUM"); goto save_error; } } @@ -754,7 +754,7 @@ void afs_send_empty_reply(struct afs_call *call) case -ENOMEM: _debug("oom"); rxrpc_kernel_abort_call(afs_socket, call->rxcall, - RX_USER_ABORT, ENOMEM, "KOO"); + RX_USER_ABORT, -ENOMEM, "KOO"); default: _leave(" [error]"); return; @@ -792,7 +792,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) if (n == -ENOMEM) { _debug("oom"); rxrpc_kernel_abort_call(afs_socket, call->rxcall, - RX_USER_ABORT, ENOMEM, "KOO"); + RX_USER_ABORT, -ENOMEM, "KOO"); } _leave(" [error]"); } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 34fee9fb7e4fe2e27b19e6b10f2496fb944b6b13..d0d11b73b2afdd5c150e5394623c478bdf9bd824 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include "cifsfs.h" diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0f2781677cccd2063fb9abf5fb2121c8c9bce3c4..9bc0b4d6d0658d6e7c6fa267003b3caaad672bad 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 02da648041fcd58d6e37431a60d596a5bfd4ca06..fb0da096c2ce48b40828aeedd6359d7d92977d2e 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include "smb2pdu.h" diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 
341251421ced00ab1be854c4145cf408cfc93c2f..5420767c9b686a7a9db30bbf932e85071c83c9f6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -42,6 +42,7 @@ #include #include #include +#include /* * LOCKING: @@ -224,6 +225,11 @@ struct eventpoll { /* used to optimize loop detection check */ int visited; struct list_head visited_list_link; + +#ifdef CONFIG_NET_RX_BUSY_POLL + /* used to track busy poll napi_id */ + unsigned int napi_id; +#endif }; /* Wait structure used by the poll hooks */ @@ -384,6 +390,77 @@ static inline int ep_events_available(struct eventpoll *ep) return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; } +#ifdef CONFIG_NET_RX_BUSY_POLL +static bool ep_busy_loop_end(void *p, unsigned long start_time) +{ + struct eventpoll *ep = p; + + return ep_events_available(ep) || busy_loop_timeout(start_time); +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/* + * Busy poll if globally on and supporting sockets found && no events, + * busy loop will return if need_resched or ep_events_available. + * + * we must do our busy polling with irqs enabled + */ +static void ep_busy_loop(struct eventpoll *ep, int nonblock) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int napi_id = READ_ONCE(ep->napi_id); + + if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) + napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep); +#endif +} + +static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + if (ep->napi_id) + ep->napi_id = 0; +#endif +} + +/* + * Set epoll busy poll NAPI ID from sk. + */ +static inline void ep_set_busy_poll_napi_id(struct epitem *epi) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + struct eventpoll *ep; + unsigned int napi_id; + struct socket *sock; + struct sock *sk; + int err; + + if (!net_busy_loop_on()) + return; + + sock = sock_from_file(epi->ffd.file, &err); + if (!sock) + return; + + sk = sock->sk; + if (!sk) + return; + + napi_id = READ_ONCE(sk->sk_napi_id); + ep = epi->ep; + + /* Non-NAPI IDs can be rejected + * or + * Nothing to do if we already have this ID + */ + if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id) + return; + + /* record NAPI ID for use in next busy poll */ + ep->napi_id = napi_id; +#endif +} + /** * ep_call_nested - Perform a bound (possibly) nested call, by checking * that the recursion limit is not exceeded, and that @@ -1022,6 +1099,8 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k spin_lock_irqsave(&ep->lock, flags); + ep_set_busy_poll_napi_id(epi); + /* * If the event mask does not contain any poll(2) event, we consider the * descriptor to be disabled. This condition is likely the effect of the @@ -1363,6 +1442,9 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irqsave(&ep->lock, flags); + /* record NAPI ID of new item if present */ + ep_set_busy_poll_napi_id(epi); + /* If the file is already "ready" we drop it inside the ready list */ if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { list_add_tail(&epi->rdllink, &ep->rdllist); @@ -1637,9 +1719,20 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, } fetch_events: + + if (!ep_events_available(ep)) + ep_busy_loop(ep, timed_out); + spin_lock_irqsave(&ep->lock, flags); if (!ep_events_available(ep)) { + /* + * Busy poll timed out. Drop NAPI ID for now, we can add + * it back in when we have moved a socket with a valid NAPI + * ID onto the ready list. 
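Note that the new epoll busy polling stays opt-in: ep_busy_loop() only spins when the global sysctl is enabled (net_busy_loop_on()) and a NAPI ID has been recorded from a ready socket. No new userspace API is involved; assuming net.core.busy_poll has been set (e.g. sysctl -w net.core.busy_poll=50), an ordinary epoll loop like the sketch below benefits automatically:

#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

static int drain(int udp_fd)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = udp_fd };
	struct epoll_event out;
	int epfd = epoll_create1(0);
	char buf[2048];

	if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, udp_fd, &ev) < 0)
		return -1;

	/* With net.core.busy_poll set, epoll_wait() now busy-polls the
	 * NAPI context recorded for udp_fd before going to sleep. */
	while (epoll_wait(epfd, &out, 1, -1) == 1)
		recv(out.data.fd, buf, sizeof(buf), 0);

	close(epfd);
	return 0;
}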
+ */ + ep_reset_busy_poll_napi_id(ep); + /* * We don't have any available event to return to the caller. * We need to sleep here, and we will be woken up by diff --git a/fs/select.c b/fs/select.c index dd70937ddb60a6ece08de1b761b23908335c4578..bd4b2ccfd34680b1648570e7b20db235cb31a89c 100644 --- a/fs/select.c +++ b/fs/select.c @@ -456,7 +456,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) int retval, i, timed_out = 0; u64 slack = 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; - unsigned long busy_end = 0; + unsigned long busy_start = 0; rcu_read_lock(); retval = max_select_fd(n, fds); @@ -559,11 +559,11 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) /* only if found POLL_BUSY_LOOP sockets && not out of time */ if (can_busy_loop && !need_resched()) { - if (!busy_end) { - busy_end = busy_loop_end_time(); + if (!busy_start) { + busy_start = busy_loop_current_time(); continue; } - if (!busy_loop_timeout(busy_end)) + if (!busy_loop_timeout(busy_start)) continue; } busy_flag = 0; @@ -847,7 +847,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, int timed_out = 0, count = 0; u64 slack = 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; - unsigned long busy_end = 0; + unsigned long busy_start = 0; /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -900,11 +900,11 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, /* only if found POLL_BUSY_LOOP sockets && not out of time */ if (can_busy_loop && !need_resched()) { - if (!busy_end) { - busy_end = busy_loop_end_time(); + if (!busy_start) { + busy_start = busy_loop_current_time(); continue; } - if (!busy_loop_timeout(busy_end)) + if (!busy_loop_timeout(busy_start)) continue; } busy_flag = 0; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 909fc033173a7c893ffe7113f0e32568392b76ae..6bb38d76faf42a18437eb7565380b75a71096f78 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -35,6 +35,7 @@ struct bpf_map_ops { void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd); void (*map_fd_put_ptr)(void *ptr); + u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); }; struct bpf_map { @@ -49,12 +50,7 @@ struct bpf_map { const struct bpf_map_ops *ops; struct work_struct work; atomic_t usercnt; -}; - -struct bpf_map_type_list { - struct list_head list_node; - const struct bpf_map_ops *ops; - enum bpf_map_type type; + struct bpf_map *inner_map_meta; }; /* function argument constraints */ @@ -167,12 +163,8 @@ struct bpf_verifier_ops { const struct bpf_insn *src, struct bpf_insn *dst, struct bpf_prog *prog); -}; - -struct bpf_prog_type_list { - struct list_head list_node; - const struct bpf_verifier_ops *ops; - enum bpf_prog_type type; + int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); }; struct bpf_prog_aux { @@ -231,11 +223,21 @@ typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); +int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); +int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr); + #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); -void bpf_register_prog_type(struct bpf_prog_type_list *tl);
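The two registration calls removed here are not replaced at run time; instead, linux/bpf_types.h (added below) is expanded several times under different definitions of BPF_PROG_TYPE()/BPF_MAP_TYPE() to build the type tables at compile time. A self-contained sketch of the same X-macro technique, using a table macro rather than re-inclusion:

#include <stdio.h>

/* One table, expanded twice; mirrors how bpf_types.h is consumed. */
#define DEMO_TYPES(X)			\
	X(DEMO_TYPE_FIRST,  "first")	\
	X(DEMO_TYPE_SECOND, "second")

enum demo_type {
#define X(id, name) id,
	DEMO_TYPES(X)
#undef X
	DEMO_TYPE_MAX
};

static const char * const demo_names[] = {
#define X(id, name) [id] = name,
	DEMO_TYPES(X)
#undef X
};

int main(void)
{
	printf("%s\n", demo_names[DEMO_TYPE_SECOND]); /* prints "second" */
	return 0;
}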
-void bpf_register_map_type(struct bpf_map_type_list *tl); +#define BPF_PROG_TYPE(_id, _ops) \ + extern const struct bpf_verifier_ops _ops; +#define BPF_MAP_TYPE(_id, _ops) \ + extern const struct bpf_map_ops _ops; +#include +#undef BPF_PROG_TYPE +#undef BPF_MAP_TYPE struct bpf_prog *bpf_prog_get(u32 ufd); struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); @@ -275,6 +277,8 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); void bpf_fd_array_map_clear(struct bpf_map *map); +int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. @@ -295,10 +299,6 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else -static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) -{ -} - static inline struct bpf_prog *bpf_prog_get(u32 ufd) { return ERR_PTR(-EOPNOTSUPP); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h new file mode 100644 index 0000000000000000000000000000000000000000..03bf223f18be0001c80f4307ed4f521361984104 --- /dev/null +++ b/include/linux/bpf_types.h @@ -0,0 +1,36 @@ +/* internal file - do not include directly */ + +#ifdef CONFIG_NET +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops) +#endif +#ifdef CONFIG_BPF_EVENTS +BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops) +#endif + +BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops) +#ifdef CONFIG_CGROUPS +BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) +#endif +BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) +#ifdef CONFIG_PERF_EVENTS +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops) +#endif +BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index a13b031dc6b807f38c0e7e687ba2cfbfe4c65fb7..5efb4db44e1ef3223d984296ccf1ca0737224d10 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -66,7 +66,10 @@ struct bpf_verifier_state_list { }; struct bpf_insn_aux_data { - enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ + 
union { + enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ + struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ + }; }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 55e517130311980b36ad628054043130f88626af..abcda9b458ab65143acb1fb28b50225adbec83fd 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -25,6 +25,9 @@ #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7260 0xae025190 +#define PHY_ID_BCM7268 0xae025090 +#define PHY_ID_BCM7271 0xae0253b0 #define PHY_ID_BCM7278 0xae0251a0 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 diff --git a/include/linux/can/core.h b/include/linux/can/core.h index df08a41d5be5f26cfa4cdc74935f5eae7fa51385..c9a17bb1221c326f7569d423d93304fe241f72e0 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -5,7 +5,7 @@ * * Authors: Oliver Hartkopp * Urs Thuermann - * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. * */ @@ -17,7 +17,7 @@ #include #include -#define CAN_VERSION "20120528" +#define CAN_VERSION "20170425" /* increment this number each time you change some user-space interface */ #define CAN_ABI_VERSION "9" @@ -45,12 +45,13 @@ struct can_proto { extern int can_proto_register(const struct can_proto *cp); extern void can_proto_unregister(const struct can_proto *cp); -int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, +int can_rx_register(struct net *net, struct net_device *dev, + canid_t can_id, canid_t mask, void (*func)(struct sk_buff *, void *), void *data, char *ident, struct sock *sk); -extern void can_rx_unregister(struct net_device *dev, canid_t can_id, - canid_t mask, +extern void can_rx_unregister(struct net *net, struct net_device *dev, + canid_t can_id, canid_t mask, void (*func)(struct sk_buff *, void *), void *data); diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/include/linux/can/dev/peak_canfd.h similarity index 73% rename from drivers/net/can/usb/peak_usb/pcan_ucan.h rename to include/linux/can/dev/peak_canfd.h index 2147678f0225344f1f40ee4a42781f778cee588c..46dceef2cfa6c0efa6d4a52df5467dba6b6a7c2c 100644 --- a/drivers/net/can/usb/peak_usb/pcan_ucan.h +++ b/include/linux/can/dev/peak_canfd.h @@ -23,11 +23,14 @@ #define PUCAN_CMD_LISTEN_ONLY_MODE 0x003 #define PUCAN_CMD_TIMING_SLOW 0x004 #define PUCAN_CMD_TIMING_FAST 0x005 +#define PUCAN_CMD_SET_STD_FILTER 0x006 +#define PUCAN_CMD_RESERVED2 0x007 #define PUCAN_CMD_FILTER_STD 0x008 #define PUCAN_CMD_TX_ABORT 0x009 #define PUCAN_CMD_WR_ERR_CNT 0x00a #define PUCAN_CMD_SET_EN_OPTION 0x00b #define PUCAN_CMD_CLR_DIS_OPTION 0x00c +#define PUCAN_CMD_RX_BARRIER 0x010 #define PUCAN_CMD_END_OF_COLLECTION 0x3ff /* uCAN received messages list */ @@ -35,6 +38,10 @@ #define PUCAN_MSG_ERROR 0x0002 #define PUCAN_MSG_STATUS 0x0003 #define PUCAN_MSG_BUSLOAD 0x0004 + +#define PUCAN_MSG_CACHE_CRITICAL 0x0102 + +/* uCAN transmitted messages */ #define PUCAN_MSG_CAN_TX 0x1000 /* uCAN command common header */ @@ -43,6 +50,12 @@ struct __packed pucan_command { u16 args[3]; }; +/* return the opcode from the opcode_channel field of a command */ +static inline u16 pucan_cmd_get_opcode(struct pucan_command *c) +{ + return le16_to_cpu(c->opcode_channel) & 0x3ff; +} + #define PUCAN_TSLOW_BRP_BITS 10 #define PUCAN_TSLOW_TSGEG1_BITS 8 #define 
PUCAN_TSLOW_TSGEG2_BITS 7 @@ -108,6 +121,27 @@ struct __packed pucan_filter_std { __le32 mask; /* CAN-ID bitmask in idx range */ }; +#define PUCAN_FLTSTD_ROW_IDX_MAX ((1 << PUCAN_FLTSTD_ROW_IDX_BITS) - 1) + +/* uCAN SET_STD_FILTER command fields */ +struct __packed pucan_std_filter { + __le16 opcode_channel; + + u8 unused; + u8 idx; + __le32 mask; /* CAN-ID bitmask in idx range */ +}; + +/* uCAN TX_ABORT commands fields */ +#define PUCAN_TX_ABORT_FLUSH 0x0001 + +struct __packed pucan_tx_abort { + __le16 opcode_channel; + + __le16 flags; + u32 unused; +}; + /* uCAN WR_ERR_CNT command fields */ #define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */ #define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */ @@ -184,6 +218,12 @@ struct __packed pucan_error_msg { u8 rx_err_cnt; }; +static inline int pucan_error_get_channel(const struct pucan_error_msg *msg) +{ + return msg->channel_type_d & 0x0f; +} + +#define PUCAN_RX_BARRIER 0x10 #define PUCAN_BUS_PASSIVE 0x20 #define PUCAN_BUS_WARNING 0x40 #define PUCAN_BUS_BUSOFF 0x80 @@ -197,6 +237,31 @@ struct __packed pucan_status_msg { u8 unused[3]; }; +static inline int pucan_status_get_channel(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & 0x0f; +} + +static inline int pucan_status_is_rx_barrier(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_RX_BARRIER; +} + +static inline int pucan_status_is_passive(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_PASSIVE; +} + +static inline int pucan_status_is_warning(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_WARNING; +} + +static inline int pucan_status_is_busoff(const struct pucan_status_msg *msg) +{ + return msg->channel_p_w_b & PUCAN_BUS_BUSOFF; +} + /* uCAN transmitted message format */ #define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4)) @@ -213,32 +278,31 @@ struct __packed pucan_tx_msg { }; /* build the cmd opcode_channel field with respect to the correct endianness */ -static inline __le16 pucan_cmd_opcode_channel(struct peak_usb_device *dev, - int opcode) +static inline __le16 pucan_cmd_opcode_channel(int index, int opcode) { - return cpu_to_le16(((dev->ctrl_idx) << 12) | ((opcode) & 0x3ff)); + return cpu_to_le16(((index) << 12) | ((opcode) & 0x3ff)); } /* return the channel number part from any received message channel_dlc field */ -static inline int pucan_msg_get_channel(struct pucan_rx_msg *rm) +static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg) { - return rm->channel_dlc & 0xf; + return msg->channel_dlc & 0xf; } /* return the dlc value from any received message channel_dlc field */ -static inline int pucan_msg_get_dlc(struct pucan_rx_msg *rm) +static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg) { - return rm->channel_dlc >> 4; + return msg->channel_dlc >> 4; } -static inline int pucan_ermsg_get_channel(struct pucan_error_msg *em) +static inline int pucan_ermsg_get_channel(const struct pucan_error_msg *msg) { - return em->channel_type_d & 0x0f; + return msg->channel_type_d & 0x0f; } -static inline int pucan_stmsg_get_channel(struct pucan_status_msg *sm) +static inline int pucan_stmsg_get_channel(const struct pucan_status_msg *msg) { - return sm->channel_p_w_b & 0x0f; + return msg->channel_p_w_b & 0x0f; } #endif diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h deleted file mode 100644 index a52f47ca6c8ad9c5159c34b4f568b5d84eccbd18..0000000000000000000000000000000000000000 --- 
a/include/linux/can/platform/ti_hecc.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _CAN_PLATFORM_TI_HECC_H -#define _CAN_PLATFORM_TI_HECC_H - -/* - * TI HECC (High End CAN Controller) driver platform header - * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed as is WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -/** - * struct hecc_platform_data - HECC Platform Data - * - * @scc_hecc_offset: mostly 0 - should really never change - * @scc_ram_offset: SCC RAM offset - * @hecc_ram_offset: HECC RAM offset - * @mbx_offset: Mailbox RAM offset - * @int_line: Interrupt line to use - 0 or 1 - * @version: version for future use - * @transceiver_switch: platform specific callback fn for transceiver control - * - * Platform data structure to get all platform specific settings. - * this structure also accounts for the fact that the IP may have different - * RAM and mailbox offsets for different SOC's - */ -struct ti_hecc_platform_data { - u32 scc_hecc_offset; - u32 scc_ram_offset; - u32 hecc_ram_offset; - u32 mbx_offset; - u32 int_line; - u32 version; - void (*transceiver_switch) (int); -}; -#endif /* !_CAN_PLATFORM_TI_HECC_H */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index c62b709b1ce087b7891f5d9c76aa2940b7f4a9a9..2d9f80848d4bd2b6e60dc06f1053ba91256c37ac 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -446,21 +446,6 @@ static inline void eth_addr_dec(u8 *addr) u64_to_ether_addr(u, addr); } -/** - * ether_addr_greater - Compare two Ethernet addresses - * @addr1: Pointer to a six-byte array containing the Ethernet address - * @addr2: Pointer to other six-byte array containing the Ethernet address - * - * Compare two Ethernet addresses, returns true if addr1 is greater than addr2 - */ -static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2) -{ - u64 u1 = ether_addr_to_u64(addr1); - u64 u2 = ether_addr_to_u64(addr2); - - return u1 > u2; -} - /** * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
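ether_addr_greater() is dropped above, apparently for lack of users; should the comparison be needed again it can be open-coded with the u64 helpers that remain in etherdevice.h, e.g.:

#include <linux/etherdevice.h>

/* Open-coded stand-in for the removed helper. */
static inline bool mac_greater(const u8 *addr1, const u8 *addr2)
{
	return ether_addr_to_u64(addr1) > ether_addr_to_u64(addr2);
}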
* @dev: Pointer to a device structure diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 9ded8c6d8176b909cf68da0e125eef4441b7c9a9..83cc9863444b078765255b9010b015578768be09 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -60,6 +60,7 @@ enum ethtool_phys_id_state { enum { ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ + ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */ /* * Add your fresh new hash function bits above and remember to update @@ -73,6 +74,7 @@ enum { #define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) #define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) +#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) #define ETH_RSS_HASH_UNKNOWN 0 #define ETH_RSS_HASH_NO_CHANGE 0 diff --git a/include/linux/filter.h b/include/linux/filter.h index fbf7b39e81035506b73ddc860e3029119104b255..9a7786db14fa5307dff297487d2fb50245ddc60f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -412,8 +413,7 @@ struct bpf_prog { locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1, /* Do we need dst entry? */ - xdp_adjust_head:1; /* Adjusting pkt head? */ + dst_needed:1; /* Do we need dst entry? */ kmemcheck_bitfield_end(meta); enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ @@ -430,7 +430,7 @@ struct bpf_prog { }; struct sk_filter { - atomic_t refcnt; + refcount_t refcnt; struct rcu_head rcu; struct bpf_prog *prog; }; @@ -693,6 +693,11 @@ static inline bool bpf_jit_is_ebpf(void) # endif } +static inline bool ebpf_jit_enabled(void) +{ + return bpf_jit_enable && bpf_jit_is_ebpf(); +} + static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { return fp->jited && bpf_jit_is_ebpf(); @@ -753,6 +758,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp); #else /* CONFIG_BPF_JIT */ +static inline bool ebpf_jit_enabled(void) +{ + return false; +} + static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { return false; diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 970771a5f7390268abb5c43ba1e1c8df48f2caa4..0c170a3f0d8b0653b97d598c2a0c61d6b9abe1d2 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1508,14 +1508,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel) return; } -static inline void -init_cached_read_index(struct vmbus_channel *channel) -{ - struct hv_ring_buffer_info *rbi = &channel->inbound; - - rbi->cached_read_index = rbi->ring_buffer->read_index; -} - /* * Mask off host interrupt callback notifications */ @@ -1549,76 +1541,48 @@ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) /* * An API to support in-place processing of incoming VMBUS packets. */ -#define VMBUS_PKT_TRAILER 8 -static inline struct vmpacket_descriptor * -get_next_pkt_raw(struct vmbus_channel *channel) +/* Get data payload associated with descriptor */ +static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc) { - struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 priv_read_loc = ring_info->priv_read_index; - void *ring_buffer = hv_get_ring_buffer(ring_info); - u32 dsize = ring_info->ring_datasize; - /* - * delta is the difference between what is available to read and - * what was already consumed in place. We commit read index after - * the whole batch is processed. 
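The replacement API, added in the hunks that follow, folds this get/put/commit choreography into an iterator that hides the private read index entirely. A hedged usage sketch (channel is a connected struct vmbus_channel; consume() is a hypothetical per-packet handler):

struct vmpacket_descriptor *pkt;

foreach_vmbus_pkt(pkt, channel) {
	void *data = hv_pkt_data(pkt);
	u32 len = hv_pkt_datalen(pkt);

	consume(data, len);
}
/* hv_pkt_iter_next() calls hv_pkt_iter_close() once the ring is
 * drained, which commits the read index and signals the host. */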
- */ - u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? - priv_read_loc - ring_info->ring_buffer->read_index : - (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; - u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); - - if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) - return NULL; - - return ring_buffer + priv_read_loc; + return (void *)((unsigned long)desc + (desc->offset8 << 3)); } -/* - * A helper function to step through packets "in-place" - * This API is to be called after each successful call - * get_next_pkt_raw(). - */ -static inline void put_pkt_raw(struct vmbus_channel *channel, - struct vmpacket_descriptor *desc) +/* Get data size associated with descriptor */ +static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) { - struct hv_ring_buffer_info *ring_info = &channel->inbound; - u32 packetlen = desc->len8 << 3; - u32 dsize = ring_info->ring_datasize; - - /* - * Include the packet trailer. - */ - ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; - ring_info->priv_read_index %= dsize; + return (desc->len8 << 3) - (desc->offset8 << 3); } + +struct vmpacket_descriptor * +hv_pkt_iter_first(struct vmbus_channel *channel); + +struct vmpacket_descriptor * +__hv_pkt_iter_next(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt); + +void hv_pkt_iter_close(struct vmbus_channel *channel); + /* - * This call commits the read index and potentially signals the host. - * Here is the pattern for using the "in-place" consumption APIs: - * - * init_cached_read_index(); - * - * while (get_next_pkt_raw() { - * process the packet "in-place"; - * put_pkt_raw(); - * } - * if (packets processed in place) - * commit_rd_index(); + * Get next packet descriptor from iterator + * If at end of list, return NULL and update host. */ -static inline void commit_rd_index(struct vmbus_channel *channel) +static inline struct vmpacket_descriptor * +hv_pkt_iter_next(struct vmbus_channel *channel, + const struct vmpacket_descriptor *pkt) { - struct hv_ring_buffer_info *ring_info = &channel->inbound; - /* - * Make sure all reads are done before we update the read index since - * the writer may start writing to the read area once the read index - * is updated. - */ - virt_rmb(); - ring_info->ring_buffer->read_index = ring_info->priv_read_index; + struct vmpacket_descriptor *nxt; + + nxt = __hv_pkt_iter_next(channel, pkt); + if (!nxt) + hv_pkt_iter_close(channel); - hv_signal_on_read(channel); + return nxt; } +#define foreach_vmbus_pkt(pkt, channel) \ + for (pkt = hv_pkt_iter_first(channel); pkt; \ + pkt = hv_pkt_iter_next(channel, pkt)) #endif /* _HYPERV_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 0dd9498c694f95c166508e40896c94ccc724075c..69033353d0d1c2bc28cc831b4cea67ca49c0bfdf 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -7,7 +7,7 @@ * Copyright (c) 2005, Devicescape Software, Inc. 
* Copyright (c) 2006, Michael Wu * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (c) 2016 Intel Deutschland GmbH + * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1411,6 +1411,8 @@ struct ieee80211_ht_operation { #define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 #define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 +#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5 +#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0 /* for stbc_param */ #define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 @@ -1525,14 +1527,14 @@ enum ieee80211_vht_chanwidth { * This structure is the "VHT operation element" as * described in 802.11ac D3.0 8.4.2.161 * @chan_width: Operating channel width + * @center_freq_seg0_idx: center freq segment 0 index * @center_freq_seg1_idx: center freq segment 1 index - * @center_freq_seg2_idx: center freq segment 2 index * @basic_mcs_set: VHT Basic MCS rate set */ struct ieee80211_vht_operation { u8 chan_width; + u8 center_freq_seg0_idx; u8 center_freq_seg1_idx; - u8 center_freq_seg2_idx; __le16 basic_mcs_set; } __packed; @@ -1721,6 +1723,9 @@ enum ieee80211_statuscode { WLAN_STATUS_REJECT_DSE_BAND = 96, WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99, WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103, + /* 802.11ai */ + WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, + WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, }; @@ -2102,6 +2107,12 @@ enum ieee80211_key_len { #define FILS_NONCE_LEN 16 #define FILS_MAX_KEK_LEN 64 +#define FILS_ERP_MAX_USERNAME_LEN 16 +#define FILS_ERP_MAX_REALM_LEN 253 +#define FILS_ERP_MAX_RRK_LEN 64 + +#define PMK_MAX_LEN 48 + /* Public action codes */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, @@ -2166,37 +2177,37 @@ enum ieee80211_tdls_actioncode { #define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) /** - * enum - mesh synchronization method identifier + * enum ieee80211_mesh_sync_method - mesh synchronization method identifier * * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method * that will be specified in a vendor specific information element */ -enum { +enum ieee80211_mesh_sync_method { IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1, IEEE80211_SYNC_METHOD_VENDOR = 255, }; /** - * enum - mesh path selection protocol identifier + * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier * * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will * be specified in a vendor specific information element */ -enum { +enum ieee80211_mesh_path_protocol { IEEE80211_PATH_PROTOCOL_HWMP = 1, IEEE80211_PATH_PROTOCOL_VENDOR = 255, }; /** - * enum - mesh path selection metric identifier + * enum ieee80211_mesh_path_metric - mesh path selection metric identifier * * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be * specified in a vendor specific information element */ -enum { +enum ieee80211_mesh_path_metric { IEEE80211_PATH_METRIC_AIRTIME = 1, IEEE80211_PATH_METRIC_VENDOR = 255, }; @@ -2305,6 +2316,32 @@ struct ieee80211_timeout_interval_ie { __le32 value; } __packed; +/** + * enum ieee80211_idle_options - BSS idle options + * 
@WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN + * protected frame to the AP to reset the idle timer at the AP for + * the station. + */ +enum ieee80211_idle_options { + WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0), +}; + +/** + * struct ieee80211_bss_max_idle_period_ie + * + * This structure refers to "BSS Max idle period element" + * + * @max_idle_period: indicates the time period during which a station can + * refrain from transmitting frames to its associated AP without being + * disassociated. In units of 1000 TUs. + * @idle_options: indicates the options associated with the BSS idle capability + * as specified in &enum ieee80211_idle_options. + */ +struct ieee80211_bss_max_idle_period_ie { + __le16 max_idle_period; + u8 idle_options; +} __packed; + /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, @@ -2345,13 +2382,21 @@ enum ieee80211_sa_query_action { #define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1) /* AKM suite selectors */ -#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) -#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) -#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) -#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) -#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) -#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) -#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) +#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1) +#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2) +#define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3) +#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4) +#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5) +#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6) +#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) +#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) +#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) +#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) +#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) +#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) +#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) +#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) +#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) #define WLAN_MAX_KEY_LEN 32 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index c5847dc75a937a5ceed4a762ffcc9f2644b499bc..0c16866a7aacc0cbc361e8d5b902252f71323f3b 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -48,6 +48,7 @@ struct br_ip_list { #define BR_MCAST_FLOOD BIT(11) #define BR_MULTICAST_TO_UNICAST BIT(12) #define BR_VLAN_TUNNEL BIT(13) +#define BR_BCAST_FLOOD BIT(14) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ee971f335a8b659f04e5a3048ca70e5bc361ee5f..a2e9d6ea1349fb85418a9ebafc55dd08d16ca6b0 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -153,8 +153,8 @@ struct in_ifaddr { int register_inetaddr_notifier(struct notifier_block *nb); int unregister_inetaddr_notifier(struct notifier_block *nb); -void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, - struct ipv4_devconf *devconf); +void inet_netconf_notify_devconf(struct net *net, int event, int type, + int ifindex, struct ipv4_devconf *devconf); struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 71be5b330d21305af23f8f7e1779988930755ed8..e1b442996f810529a755533270b6d21c350fbd5a 
100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -37,6 +37,7 @@ struct ipv6_devconf { __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; #ifdef CONFIG_IPV6_ROUTE_INFO + __s32 accept_ra_rt_info_min_plen; __s32 accept_ra_rt_info_max_plen; #endif #endif @@ -70,6 +71,7 @@ struct ipv6_devconf { #endif __u32 enhanced_dad; __u32 addr_gen_mode; + __s32 disable_policy; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2fcff6b4503f6a4824bea50c189b072ef6c486cb..3fece51dcf136f7694d1202b95a0f90914b938df 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -540,6 +540,7 @@ struct mlx5_fc_stats { struct workqueue_struct *wq; struct delayed_work work; unsigned long next_query; + unsigned long sampling_interval; /* jiffies */ }; struct mlx5_eswitch; @@ -728,6 +729,7 @@ struct mlx5e_resources { u32 pdn; struct mlx5_td td; struct mlx5_core_mkey mkey; + struct mlx5_sq_bfreg bfreg; }; struct mlx5_core_dev { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 949b24b6c4794ce14909d779b7dbfd2534aa53db..1b166d2e19c57829279faa20a88796fabb5d3f5e 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -104,12 +104,18 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, u32 level, u32 flags); +struct mlx5_flow_table_attr { + int prio; + int max_fte; + u32 level; + u32 flags; + u32 underlay_qpn; +}; + struct mlx5_flow_table * mlx5_create_flow_table(struct mlx5_flow_namespace *ns, - int prio, - int num_flow_table_entries, - u32 level, - u32 flags); + struct mlx5_flow_table_attr *ft_attr); + struct mlx5_flow_table * mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, @@ -134,8 +140,13 @@ struct mlx5_flow_act { u32 action; u32 flow_tag; u32 encap_id; + u32 modify_id; }; +#define MLX5_DECLARE_FLOW_ACT(name) \ + struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ + MLX5_FS_DEFAULT_FLOW_TAG, 0, 0} + /* Single destination per rule. * Group ID is implied by the match criteria. 
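With this change, callers of mlx5_create_flow_table() pass everything through the attribute structure, which also carries the new underlay_qpn field. A sketch of a converted call site (namespace, priority and table size hypothetical):

struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;

ft_attr.prio    = 0;
ft_attr.max_fte = 1024;		/* was the num_flow_table_entries argument */
ft_attr.level   = 0;

ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
	return PTR_ERR(ft);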
*/ @@ -156,5 +167,4 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); - #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 838242697541a28fdda4d90bf7b604e25f3bfba2..7c50bd39b297e5d359043f7545be8ca04d9045c8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -227,6 +227,8 @@ enum { MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, + MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, + MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_MAX }; @@ -302,7 +304,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; - u8 reserved_at_28[0x10]; + u8 log_max_modify_header_context[0x8]; + u8 max_modify_header_actions[0x8]; u8 max_ft_level[0x8]; u8 reserved_at_40[0x20]; @@ -869,7 +872,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; - u8 reserved_at_202[0x2]; + u8 reserved_at_202[0x1]; + u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_basic_offloads[0x1]; u8 reserved_at_205[0xa]; u8 drain_sigerr[0x1]; @@ -2190,6 +2194,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, }; struct mlx5_ifc_flow_context_bits { @@ -2211,7 +2216,9 @@ struct mlx5_ifc_flow_context_bits { u8 encap_id[0x20]; - u8 reserved_at_e0[0x120]; + u8 modify_header_id[0x20]; + + u8 reserved_at_100[0x100]; struct mlx5_ifc_fte_match_param_bits match_value; @@ -2287,7 +2294,9 @@ struct mlx5_ifc_tisc_bits { u8 reserved_at_120[0x8]; u8 transport_domain[0x18]; - u8 reserved_at_140[0x3c0]; + u8 reserved_at_140[0x8]; + u8 underlay_qpn[0x18]; + u8 reserved_at_160[0x3a0]; }; enum { @@ -4534,6 +4543,109 @@ struct mlx5_ifc_dealloc_encap_header_in_bits { u8 reserved_60[0x20]; }; +struct mlx5_ifc_set_action_in_bits { + u8 action_type[0x4]; + u8 field[0xc]; + u8 reserved_at_10[0x3]; + u8 offset[0x5]; + u8 reserved_at_18[0x3]; + u8 length[0x5]; + + u8 data[0x20]; +}; + +struct mlx5_ifc_add_action_in_bits { + u8 action_type[0x4]; + u8 field[0xc]; + u8 reserved_at_10[0x10]; + + u8 data[0x20]; +}; + +union mlx5_ifc_set_action_in_add_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; + u8 reserved_at_0[0x40]; +}; + +enum { + MLX5_ACTION_TYPE_SET = 0x1, + MLX5_ACTION_TYPE_ADD = 0x2, +}; + +enum { + MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1, + MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2, + MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3, + MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4, + MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5, + MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6, + MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7, + MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8, + MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9, + MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa, + MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb, + MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf, + MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12, + MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13, + 
MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14, + MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15, + MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, +}; + +struct mlx5_ifc_alloc_modify_header_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 modify_header_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_modify_header_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 table_type[0x8]; + u8 reserved_at_68[0x10]; + u8 num_of_actions[0x8]; + + union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0]; +}; + +struct mlx5_ifc_dealloc_modify_header_context_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_modify_header_context_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 modify_header_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5013,6 +5125,7 @@ struct mlx5_ifc_modify_rq_out_bits { enum { MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2, MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3, }; @@ -8108,7 +8221,9 @@ struct mlx5_ifc_set_flow_table_root_in_bits { u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_at_c0[0x140]; + u8 reserved_at_c0[0x8]; + u8 underlay_qpn[0x18]; + u8 reserved_at_e0[0x120]; }; enum { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 3096370fe8319ec76f823b920ded745a10ebb6fd..bef80d0a0e30bcfb446abf14417789a34eb28af8 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -295,6 +295,16 @@ struct mlx5_av { u8 rgid[16]; }; +struct mlx5_ib_ah { + struct ib_ah ibah; + struct mlx5_av av; +}; + +static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct mlx5_ib_ah, ibah); +} + struct mlx5_wqe_datagram_seg { struct mlx5_av av; }; diff --git a/include/linux/mpls.h b/include/linux/mpls.h index 9999145bc19007105db13aef9d473296f2f0c5a9..384fb22b6c436e357919656232342c0d538b8de3 100644 --- a/include/linux/mpls.h +++ b/include/linux/mpls.h @@ -3,4 +3,9 @@ #include +#define MPLS_TTL_MASK (MPLS_LS_TTL_MASK >> MPLS_LS_TTL_SHIFT) +#define MPLS_BOS_MASK (MPLS_LS_S_MASK >> MPLS_LS_S_SHIFT) +#define MPLS_TC_MASK (MPLS_LS_TC_MASK >> MPLS_LS_TC_SHIFT) +#define MPLS_LABEL_MASK (MPLS_LS_LABEL_MASK >> MPLS_LS_LABEL_SHIFT) + #endif /* _LINUX_MPLS_H */ diff --git a/include/linux/net.h b/include/linux/net.h index 0620f5e18c96b706b70fb71005b29008a11fffb1..abcfa46a2bd9a6f9eb242dacb199070a05d61014 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -298,6 +298,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); +/* Routine returns the IP overhead imposed by a (caller-protected) socket. 
*/ +u32 kernel_sock_ip_overhead(struct sock *sk); + #define MODULE_ALIAS_NETPROTO(proto) \ MODULE_ALIAS("net-pf-" __stringify(proto)) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9a0419594e842ca00a5ecfca53823b38bad207bb..1d4737cffc7116c898b42787680b2e9b0dd46812 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -54,8 +54,9 @@ enum { */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ + NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_SCTP_BIT, + NETIF_F_GSO_ESP_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ @@ -73,6 +74,8 @@ enum { NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ + NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */ + NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ /* * Add your fresh new feature above and remember to update @@ -129,11 +132,14 @@ enum { #define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) +#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_HW_TC __NETIF_F(HW_TC) +#define NETIF_F_HW_ESP __NETIF_F(HW_ESP) +#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM) #define for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 97456b2539e46d6232dda804f6a434db6fd7134f..9c23bd2efb56393e7c616d42d427101c3f5f51d2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -41,7 +41,6 @@ #include #include -#include #ifdef CONFIG_DCB #include #endif @@ -57,6 +56,8 @@ struct netpoll_info; struct device; struct phy_device; +struct dsa_switch_tree; + /* 802.11 specific */ struct wireless_dev; /* 802.15.4 specific */ @@ -236,8 +237,7 @@ struct netdev_hw_addr_list { netdev_hw_addr_list_for_each(ha, &(dev)->mc) struct hh_cache { - u16 hh_len; - u16 __pad; + unsigned int hh_len; seqlock_t hh_lock; /* cached hardware header; allow for machine alignment needs. 
*/ @@ -786,11 +786,11 @@ struct tc_cls_u32_offload; struct tc_to_netdev { unsigned int type; union { - u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; struct tc_cls_matchall_offload *cls_mall; struct tc_cls_bpf_offload *cls_bpf; + struct tc_mqprio_qopt *mqprio; }; bool egress_dev; }; @@ -813,16 +813,31 @@ enum xdp_netdev_command { XDP_QUERY_PROG, }; +struct netlink_ext_ack; + struct netdev_xdp { enum xdp_netdev_command command; union { /* XDP_SETUP_PROG */ - struct bpf_prog *prog; + struct { + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; /* XDP_QUERY_PROG */ bool prog_attached; }; }; +#ifdef CONFIG_XFRM_OFFLOAD +struct xfrmdev_ops { + int (*xdo_dev_state_add) (struct xfrm_state *x); + void (*xdo_dev_state_delete) (struct xfrm_state *x); + void (*xdo_dev_state_free) (struct xfrm_state *x); + bool (*xdo_dev_offload_ok) (struct sk_buff *skb, + struct xfrm_state *x); +}; +#endif + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1696,6 +1711,10 @@ struct net_device { const struct ndisc_ops *ndisc_ops; #endif +#ifdef CONFIG_XFRM + const struct xfrmdev_ops *xfrmdev_ops; +#endif + const struct header_ops *header_ops; unsigned int flags; @@ -1715,7 +1734,7 @@ struct net_device { unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; - unsigned short min_header_len; + unsigned char min_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; @@ -1776,6 +1795,7 @@ struct net_device { unsigned int real_num_rx_queues; #endif + struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -1894,6 +1914,13 @@ struct net_device { }; #define to_net_dev(d) container_of(d, struct net_device, dev) +static inline bool netif_elide_gro(const struct net_device *dev) +{ + if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) + return true; + return false; +} + #define NETDEV_ALIGN 32 static inline @@ -2004,15 +2031,6 @@ void dev_net_set(struct net_device *dev, struct net *net) write_pnet(&dev->nd_net, net); } -static inline bool netdev_uses_dsa(struct net_device *dev) -{ -#if IS_ENABLED(CONFIG_NET_DSA) - if (dev->dsa_ptr != NULL) - return dsa_uses_tagged_protocol(dev->dsa_ptr); -#endif - return false; -} - /** * netdev_priv - access network device private data * @dev: network device @@ -3278,7 +3296,8 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); -int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags); +int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, + int fd, u32 flags); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3305,6 +3324,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; +extern unsigned int netdev_budget_usecs; /* Called by rtnetlink.c:rtnl_unlock() */ void netdev_run_todo(void); @@ -3394,10 +3414,10 @@ static inline void netif_dormant_off(struct net_device *dev) } /** - * netif_dormant - test if carrier present + * netif_dormant - test if device is dormant * @dev: network device * - 
* Check if carrier is present on device + * Check if device is dormant. */ static inline bool netif_dormant(const struct net_device *dev) { @@ -4078,6 +4098,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -4180,6 +4201,11 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) return dev->priv_flags & IFF_OPENVSWITCH; } +static inline bool netif_is_ovs_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_OVS_DATAPATH; +} + static inline bool netif_is_team_master(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM; diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 1b49209dd5c7cd74644624007ce57df107c3744b..996711d8a7b4b536990c05d698243cdbb3413e44 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -41,6 +41,11 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, int flags); +static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) +{ + return subsys << 8 | msg_type; +} + void nfnl_lock(__u8 subsys_id); void nfnl_unlock(__u8 subsys_id); #ifdef CONFIG_PROVE_LOCKING diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 984b2112c77b642f75e3f121387e955aa3f22b93..a30efb437e6d1cfa42a75ec72997bf12b9cdf59d 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -109,8 +109,10 @@ struct ebt_table { #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ ~(__alignof__(struct _xt_align)-1)) extern struct ebt_table *ebt_register_table(struct net *net, - const struct ebt_table *table); -extern void ebt_unregister_table(struct net *net, struct ebt_table *table); + const struct ebt_table *table, + const struct nf_hook_ops *); +extern void ebt_unregister_table(struct net *net, struct ebt_table *table, + const struct nf_hook_ops *); extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index da14ab61f3634cae5c9ec1636c2774a6f388083d..c20395edf2de3e928713a9b5cd6e4292e15f4f11 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -62,11 +62,50 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg) return __netlink_kernel_create(net, unit, THIS_MODULE, cfg); } +/* this can be increased when necessary - don't expose to userland */ +#define NETLINK_MAX_COOKIE_LEN 20 + +/** + * struct netlink_ext_ack - netlink extended ACK report struct + * @_msg: message string to report - don't access directly, use + * %NL_SET_ERR_MSG + * @bad_attr: attribute with error + * @cookie: cookie data to return to userspace (for success) + * @cookie_len: actual cookie data length + */ +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +/* Always use this macro, this allows later putting the + * message into a separate section or such for things + * like translation or listing 
all possible messages. + * Currently string formatting is not supported (due + * to the lack of an output buffer.) + */ +#define NL_SET_ERR_MSG(extack, msg) do { \ + static const char _msg[] = (msg); \ + \ + (extack)->_msg = _msg; \ +} while (0) + +#define NL_MOD_TRY_SET_ERR_MSG(extack, msg) do { \ + static const char _msg[] = KBUILD_MODNAME ": " msg; \ + struct netlink_ext_ack *_extack = (extack); \ + \ + if (_extack) \ + _extack->_msg = _msg; \ +} while (0) + extern void netlink_kernel_release(struct sock *sk); extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); -extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); +extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack); extern int netlink_has_listeners(struct sock *sk, unsigned int group); extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index a58cca8bcb29d8c9bdcda71538cfa8f25c913916..ba35ba5204871a69b5d5522f170432d24aa32e96 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -12,7 +12,7 @@ #include #include -#ifdef CONFIG_OF +#if IS_ENABLED(CONFIG_OF_MDIO) extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); extern struct phy_device *of_phy_find_device(struct device_node *phy_np); extern struct phy_device *of_phy_connect(struct net_device *dev, @@ -32,7 +32,7 @@ extern int of_phy_register_fixed_link(struct device_node *np); extern void of_phy_deregister_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct device_node *np); -#else /* CONFIG_OF */ +#else /* CONFIG_OF_MDIO */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { /* diff --git a/include/linux/pci.h b/include/linux/pci.h index eb3da1a04e6cdc7d3f4efab5532f53abcdf3a28f..82dec36845e61a77a2673275982bbca5fdc73bc6 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1300,7 +1300,6 @@ int pci_msi_vec_count(struct pci_dev *dev); void pci_msi_shutdown(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); int pci_msix_vec_count(struct pci_dev *dev); -int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); void pci_msix_shutdown(struct pci_dev *dev); void pci_disable_msix(struct pci_dev *dev); void pci_restore_msi_state(struct pci_dev *dev); @@ -1330,9 +1329,6 @@ static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } static inline void pci_msi_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msi(struct pci_dev *dev) { } static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } -static inline int pci_enable_msix(struct pci_dev *dev, - struct msix_entry *entries, int nvec) -{ return -ENOSYS; } static inline void pci_msix_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msix(struct pci_dev *dev) { } static inline void pci_restore_msi_state(struct pci_dev *dev) { } diff --git a/include/linux/phy.h b/include/linux/phy.h index fb38573371512338a7876652d95c7f811be1bd3f..e76e4adbc7c728efdbadae302af8259d5d55dc05 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -217,6 +217,13 @@ struct mii_bus { * matching its address */ int irq[PHY_MAX_ADDR]; + + /* GPIO reset pulse width in microseconds */ + int 
reset_delay_us; + /* Number of reset GPIOs */ + int num_reset_gpios; + /* Array of RESET GPIO descriptors */ + struct gpio_desc **reset_gpiod; }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) @@ -587,23 +594,29 @@ struct phy_driver { */ void (*link_change_notify)(struct phy_device *dev); - /* A function provided by a phy specific driver to override the - * the PHY driver framework support for reading a MMD register - * from the PHY. If not supported, return -1. This function is - * optional for PHY specific drivers, if not provided then the - * default MMD read function is used by the PHY framework. + /* + * Phy specific driver override for reading a MMD register. + * This function is optional for PHY specific drivers. When + * not provided, the default MMD read function will be used + * by phy_read_mmd(), which will use either a direct read for + * Clause 45 PHYs or an indirect read for Clause 22 PHYs. + * devnum is the MMD device number within the PHY device, + * regnum is the register within the selected MMD device. */ - int (*read_mmd_indirect)(struct phy_device *dev, int ptrad, - int devnum, int regnum); - - /* A function provided by a phy specific driver to override the - * the PHY driver framework support for writing a MMD register - * from the PHY. This function is optional for PHY specific drivers, - * if not provided then the default MMD read function is used by - * the PHY framework. + int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum); + + /* + * Phy specific driver override for writing a MMD register. + * This function is optional for PHY specific drivers. When + * not provided, the default MMD write function will be used + * by phy_write_mmd(), which will use either a direct write for + * Clause 45 PHYs, or an indirect write for Clause 22 PHYs. + * devnum is the MMD device number within the PHY device, + * regnum is the register within the selected MMD device. + * val is the value to be written. */ - void (*write_mmd_indirect)(struct phy_device *dev, int ptrad, - int devnum, int regnum, u32 val); + int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, + u16 val); /* Get the size and type of the eeprom contained within a plug-in * module */ @@ -651,25 +664,7 @@ struct phy_fixup { * * Same rules as for phy_read(); */ -static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) -{ - if (!phydev->is_c45) - return -EOPNOTSUPP; - - return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, - MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff)); -} - -/** - * phy_read_mmd_indirect - reads data from the MMD registers - * @phydev: The PHY device bus - * @prtad: MMD Address - * @addr: PHY address on the MII bus - * - * Description: it reads data from the MMD registers (clause 22 to access to - * clause 45) of the specified phy address. 
- */ -int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad); +int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); /** * phy_read - Convenience function for reading a given PHY register @@ -752,35 +747,29 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) * * Same rules as for phy_write(); */ -static inline int phy_write_mmd(struct phy_device *phydev, int devad, - u32 regnum, u16 val) -{ - if (!phydev->is_c45) - return -EOPNOTSUPP; - - regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff); - - return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); -} - -/** - * phy_write_mmd_indirect - writes data to the MMD registers - * @phydev: The PHY device - * @prtad: MMD Address - * @devad: MMD DEVAD - * @data: data to write in the MMD register - * - * Description: Write data from the MMD registers of the specified - * phy address. - */ -void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, u32 data); +int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); +#if IS_ENABLED(CONFIG_PHYLIB) struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); +void phy_device_free(struct phy_device *phydev); +#else +static inline +struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) +{ + return NULL; +} + +static inline int phy_device_register(struct phy_device *phy) +{ + return 0; +} + +static inline void phy_device_free(struct phy_device *phydev) { } +#endif /* CONFIG_PHYLIB */ void phy_device_remove(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); @@ -862,7 +851,6 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); -void phy_device_free(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, @@ -889,8 +877,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); +#if IS_ENABLED(CONFIG_PHYLIB) int __init mdio_bus_init(void); void mdio_bus_exit(void); +#endif extern struct bus_type mdio_bus_type; @@ -901,7 +891,7 @@ struct mdio_board_info { const void *platform_data; }; -#if IS_ENABLED(CONFIG_PHYLIB) +#if IS_ENABLED(CONFIG_MDIO_DEVICE) int mdiobus_register_board_info(const struct mdio_board_info *info, unsigned int n); #else diff --git a/include/linux/platform_data/pn544.h b/include/linux/platform_data/pn544.h deleted file mode 100644 index 5ce1ab983f44d1c0191fe0a29f403cb5f6482507..0000000000000000000000000000000000000000 --- a/include/linux/platform_data/pn544.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Driver include for the PN544 NFC chip. - * - * Copyright (C) Nokia Corporation - * - * Author: Jari Vanhala - * Contact: Matti Aaltoenn - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#ifndef _PN544_H_ -#define _PN544_H_ - -#include - -enum { - NFC_GPIO_ENABLE, - NFC_GPIO_FW_RESET, - NFC_GPIO_IRQ -}; - -/* board config */ -struct pn544_nfc_platform_data { - int (*request_resources) (struct i2c_client *client); - void (*free_resources) (void); - void (*enable) (int fw); - int (*test) (void); - void (*disable) (void); - int (*get_gpio)(int type); -}; - -#endif /* _PN544_H_ */ diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h deleted file mode 100644 index cc2bdafb0c69cfe61d1ff99f69423b87d2322eee..0000000000000000000000000000000000000000 --- a/include/linux/platform_data/st21nfca.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Driver include for the ST21NFCA NFC chip. - * - * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#ifndef _ST21NFCA_HCI_H_ -#define _ST21NFCA_HCI_H_ - -#include - -#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" - -struct st21nfca_nfc_platform_data { - unsigned int gpio_ena; - unsigned int irq_polarity; - bool is_ese_present; - bool is_uicc_present; -}; - -#endif /* _ST21NFCA_HCI_H_ */ diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 52966b9bfde3740b506664bf2329596cf23695ef..fbab6e0514f07bf0f4a9ac481cb712c58d113b7f 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -100,8 +100,8 @@ #define MAX_NUM_LL2_TX_STATS_COUNTERS 32 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 10 -#define FW_REVISION_VERSION 10 +#define FW_MINOR_VERSION 15 +#define FW_REVISION_VERSION 3 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -187,6 +187,9 @@ /* DEMS */ #define DQ_DEMS_LEGACY 0 +#define DQ_DEMS_TOE_MORE_TO_SEND 3 +#define DQ_DEMS_TOE_LOCAL_ADV_WND 4 +#define DQ_DEMS_ROCE_CQ_CONS 7 /* XCM agg val selection */ #define DQ_XCM_AGG_VAL_SEL_WORD2 0 @@ -214,6 +217,9 @@ #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 #define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 #define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 @@ -269,6 +275,8 @@ #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 
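A usage sketch may help here (not from this patch): the netlink_ext_ack struct and NL_SET_ERR_MSG macro added to include/linux/netlink.h above are typically consumed by request handlers like the hypothetical validator below; my_validate_mtu and its MTU attribute are illustrative names, not kernel API.

#include <linux/netlink.h>	/* struct netlink_ext_ack, NL_SET_ERR_MSG */
#include <net/netlink.h>	/* nla_get_u32 */

static int my_validate_mtu(const struct nlattr *mtu_attr,
			   struct netlink_ext_ack *extack)
{
	/* hypothetical check; 68 is the IPv4 minimum MTU */
	if (nla_get_u32(mtu_attr) < 68) {
		/* The message and the offending attribute are carried
		 * back to userspace in the extended ACK.
		 */
		NL_SET_ERR_MSG(extack, "MTU below IPv4 minimum");
		extack->bad_attr = mtu_attr;
		return -EINVAL;
	}
	return 0;
}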
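Similarly, the new read_mmd/write_mmd phy_driver hooks (see the include/linux/phy.h hunks above) let a driver supply its own MMD accessors. A hypothetical override performing the standard Clause 22 indirect MMD sequence through registers 0x0d/0x0e, sketched for illustration and not taken from this patch:

#include <linux/phy.h>

static int my_phy_read_mmd(struct phy_device *dev, int devnum, u16 regnum)
{
	/* hypothetical driver; 0x0d/0x0e are the standard MMD
	 * access-control and address-data registers
	 */
	phy_write(dev, 0x0d, devnum);		/* address operation */
	phy_write(dev, 0x0e, regnum);		/* register within the MMD */
	phy_write(dev, 0x0d, devnum | 0x4000);	/* data op, no post-increment */
	return phy_read(dev, 0x0e);		/* value read, or -errno */
}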
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 @@ -285,6 +293,9 @@ #define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) #define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) #define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) +#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3) +#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) +#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) /* TCM agg counter flag selection (HW) */ #define DQ_TCM_AGG_FLG_SHIFT_CF0 0 @@ -301,6 +312,9 @@ #define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) /* PWM address mapping */ #define DQ_PWM_OFFSET_DPM_BASE 0x0 @@ -689,6 +703,16 @@ struct iscsi_eqe_data { #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 }; +struct rdma_eqe_destroy_qp { + __le32 cid; + u8 reserved[4]; +}; + +union rdma_eqe_data { + struct regpair async_handle; + struct rdma_eqe_destroy_qp rdma_destroy_qp_data; +}; + struct malicious_vf_eqe_data { u8 vf_id; u8 err_id; @@ -705,9 +729,9 @@ union event_ring_data { u8 bytes[8]; struct vf_pf_channel_eqe_data vf_pf_channel; struct iscsi_eqe_data iscsi_info; + union rdma_eqe_data rdma_data; struct malicious_vf_eqe_data malicious_vf; struct initial_cleanup_eqe_data vf_init_cleanup; - struct regpair roce_handle; }; /* Event Ring Entry */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 4b402fb0eaad5fdf7bd29221d95596f092c0e945..34d93eb5bfba346019ba1d2c9014ab8a2fa5fd8f 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -49,6 +49,9 @@ #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 #define ETH_RX_NUM_NEXT_PAGE_BDS 2 +#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 +#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 + #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 #define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 2e417a45c5f7028ba1fc2b6201fc828f1fef4247..947a635d04bb57ff15a61f1ee82c41ae27c1d4e5 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data { struct regpair terminate_params_addr; }; -struct fcoe_fast_sgl_ctx { - struct regpair sgl_start_addr; - __le32 sgl_byte_offset; - __le16 task_reuse_cnt; - __le16 init_offset_in_first_sge; -}; - struct fcoe_slow_sgl_ctx { struct regpair base_sgl_addr; __le16 curr_sge_off; @@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx { __le16 reserved; }; -struct fcoe_sge { - struct regpair sge_addr; - __le16 size; - __le16 reserved0; - u8 reserved1[3]; - u8 is_valid_sge; -}; - -union fcoe_data_desc_ctx { - struct fcoe_fast_sgl_ctx fast; - struct fcoe_slow_sgl_ctx slow; - struct fcoe_sge single_sge; -}; - union fcoe_dix_desc_ctx { struct fcoe_slow_sgl_ctx dix_sgl; - struct fcoe_sge cached_dix_sge; + struct scsi_sge cached_dix_sge; +}; + +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; }; struct fcoe_fcp_cmd_payload { @@ -172,57 
+158,6 @@ enum fcoe_mode_type { MAX_FCOE_MODE_TYPE }; -struct fcoe_mstorm_fcoe_task_st_ctx_fp { - __le16 flags; -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15 - __le16 difDataResidue; - __le16 parent_id; - __le16 single_sge_saved_offset; - __le32 data_2_trns_rem; - __le32 offset_in_io; - union fcoe_dix_desc_ctx dix_desc; - union fcoe_data_desc_ctx data_desc; -}; - -struct fcoe_mstorm_fcoe_task_st_ctx_non_fp { - __le16 flags; -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15 - u8 tx_rx_sgl_mode; -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3 -#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6 - u8 rsrv2; - __le32 num_prm_zero_read; - struct regpair rsp_buf_addr; -}; - struct fcoe_rx_stat { struct regpair fcoe_rx_byte_cnt; struct regpair fcoe_rx_data_pkt_cnt; @@ -236,16 +171,6 @@ struct fcoe_rx_stat { __le32 rsrv; }; -enum fcoe_sgl_mode { - FCOE_SLOW_SGL, - FCOE_SINGLE_FAST_SGE, - FCOE_2_FAST_SGE, - FCOE_3_FAST_SGE, - FCOE_4_FAST_SGE, - FCOE_MUL_FAST_SGES, - MAX_FCOE_SGL_MODE -}; - struct fcoe_stat_ramrod_data { struct regpair stat_params_addr; }; @@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx { struct ystorm_fcoe_task_st_ctx { u8 task_type; u8 sgl_mode; -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 u8 cached_dix_sge; u8 
expect_first_xfer; __le32 num_pbf_zero_write; union protection_info_union_ctx protection_info_union; __le32 data_2_trns_rem; + struct scsi_sgl_params sgl_params; + u8 reserved1[12]; union fcoe_tx_info_union_ctx tx_info_union; union fcoe_dix_desc_ctx dix_desc; - union fcoe_data_desc_ctx data_desc; + struct scsi_cached_sges data_desc; __le16 ox_id; __le16 rx_id; __le32 task_rety_identifier; - __le32 reserved1[2]; + u8 reserved2[8]; }; struct ystorm_fcoe_task_ag_ctx { @@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_write { union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; __le16 flags; -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 __le16 seq_cnt; u8 seq_id; u8 ooo_rx_seq_id; @@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx { }; struct mstorm_fcoe_task_st_ctx { - struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp; - struct fcoe_mstorm_fcoe_task_st_ctx_fp fp; + struct regpair rsp_buf_addr; + __le32 rsrv[2]; + struct scsi_sgl_params sgl_params; + __le32 data_2_trns_rem; + __le32 data_buffer_offset; + __le16 parent_id; + __le16 flags; +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 +#define 
MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 + struct scsi_cached_sges data_desc; }; struct ustorm_fcoe_task_ag_ctx { @@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx { struct fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; struct tdif_task_context tdif_context; struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; @@ -668,20 +622,20 @@ struct fcoe_tx_stat { struct fcoe_wqe { __le16 task_id; __le16 flags; -#define FCOE_WQE_REQ_TYPE_MASK 0xF -#define FCOE_WQE_REQ_TYPE_SHIFT 0 -#define FCOE_WQE_SGL_MODE_MASK 0x7 -#define FCOE_WQE_SGL_MODE_SHIFT 4 -#define FCOE_WQE_CONTINUATION_MASK 0x1 -#define FCOE_WQE_CONTINUATION_SHIFT 7 -#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1 -#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8 -#define FCOE_WQE_SUPER_IO_MASK 0x1 -#define FCOE_WQE_SUPER_IO_SHIFT 9 -#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 -#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10 -#define FCOE_WQE_RESERVED0_MASK 0x1F -#define FCOE_WQE_RESERVED0_SHIFT 11 +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x1 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 5 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 +#define FCOE_WQE_RESERVED_MASK 0x1 +#define FCOE_WQE_RESERVED_SHIFT 7 +#define FCOE_WQE_NUM_SGES_MASK 0xF +#define FCOE_WQE_NUM_SGES_SHIFT 8 +#define FCOE_WQE_RESERVED1_MASK 0xF +#define FCOE_WQE_RESERVED1_SHIFT 12 union fcoe_additional_info_union additional_info_union; }; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 4c5747babcf63ff32b8db664146df40545d986d2..69949f8e354b0447c7950884bd205622a4653ab2 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -39,17 +39,9 @@ /* iSCSI HSI constants */ #define ISCSI_DEFAULT_MTU (1500) -/* Current iSCSI HSI version number composed of two fields (16 bit) */ -#define ISCSI_HSI_MAJOR_VERSION (0) -#define ISCSI_HSI_MINOR_VERSION (0) - /* KWQ (kernel work queue) layer codes */ #define ISCSI_SLOW_PATH_LAYER_CODE (6) -/* CQE completion status */ -#define ISCSI_EQE_COMPLETION_SUCCESS (0x0) -#define ISCSI_EQE_RST_CONN_RCVD (0x1) - /* iSCSI parameter defaults */ #define ISCSI_DEFAULT_HEADER_DIGEST (0) #define ISCSI_DEFAULT_DATA_DIGEST (0) @@ -68,6 +60,10 @@ #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) +#define ISCSI_AHS_CNTL_SIZE 4 + +#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) + /* iSCSI reserved params */ #define ISCSI_ITT_ALL_ONES (0xffffffff) #define ISCSI_TTT_ALL_ONES (0xffffffff) @@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr { __le32 reserved7; }; -struct iscsi_sge { - struct regpair sge_addr; - __le16 sge_len; - __le16 reserved0; - __le32 reserved1; -}; - -struct iscsi_cached_sge_ctx { - struct iscsi_sge sge; - struct regpair reserved; - __le32 dsgl_curr_offset[2]; -}; - struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; @@ -229,8 +212,13 @@ struct iscsi_common_hdr { #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 
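These HSI headers describe every packed flags word with _MASK/_SHIFT define pairs like the ISCSI_COMMON_HDR ones here. A minimal sketch of the access pattern; the HSI_-prefixed helpers are illustrative stand-ins for the GET_FIELD/SET_FIELD style helpers the qed code uses, and are not defined by this patch:

/* illustrative helpers, not part of the patch */
#define HSI_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define HSI_SET_FIELD(value, name, v)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= (((v) & (name##_MASK)) << (name##_SHIFT));	\
	} while (0)

/* e.g.: len = HSI_GET_FIELD(le32_to_cpu(hdr_dword),
 *                           ISCSI_COMMON_HDR_DATA_SEG_LEN);
 */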
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 - __le32 lun_reserved[4]; - __le32 data[6]; + struct regpair lun_reserved; + __le32 itt; + __le32 ttt; + __le32 cmdstat_sn; + __le32 exp_statcmd_sn; + __le32 max_cmd_sn; + __le32 data[3]; }; struct iscsi_conn_offload_params { @@ -246,8 +234,10 @@ struct iscsi_conn_offload_params { #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 u8 pbl_page_size_log; u8 pbe_page_size_log; u8 default_cq; @@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params { #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 u8 reserved0[3]; __le32 max_seq_size; __le32 max_send_pdu_length; @@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr { __le32 expected_transfer_length; __le32 cmd_sn; __le32 exp_stat_sn; - struct iscsi_sge cdb_sge; + struct scsi_sge cdb_sge; }; struct iscsi_login_req_hdr { @@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr { __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 reserved4; - __le16 time2retain; - __le16 time2wait; + __le16 time_2_retain; + __le16 time_2_wait; __le32 reserved5[1]; }; @@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr { #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 itt; - __le32 rtt; + __le32 reserved1; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; @@ -641,7 +635,7 @@ struct iscsi_reject_hdr { #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; - __le32 reserved1; + __le32 all_ones; __le32 reserved2; __le32 stat_sn; __le32 exp_cmd_sn; @@ -688,7 +682,9 @@ struct iscsi_cqe_solicited { __le16 itid; u8 task_type; u8 fw_dbg_field; - __le32 reserved1[2]; + u8 caused_conn_err; + u8 reserved0[3]; + __le32 reserved1[1]; union iscsi_task_hdr iscsi_hdr; }; @@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type { MAX_ISCSI_CQE_UNSOLICITED_TYPE }; -struct iscsi_virt_sgl_ctx { - struct regpair sgl_base; - struct regpair dsgl_base; - __le32 sgl_initial_offset; - __le32 dsgl_initial_offset; - __le32 dsgl_curr_offset[2]; -}; - -struct iscsi_sgl_var_params { - u8 sgl_ptr; - u8 dsgl_ptr; - __le16 sge_offset; - __le16 dsge_offset; -}; - -struct iscsi_phys_sgl_ctx { - struct regpair sgl_base; - struct regpair dsgl_base; - u8 sgl_size; - u8 dsgl_size; - __le16 reserved; - struct iscsi_sgl_var_params var_params[2]; -}; - -union iscsi_data_desc_ctx { - struct iscsi_virt_sgl_ctx virt_sgl; - struct iscsi_phys_sgl_ctx phys_sgl; 
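The per-protocol SGE and SGL context structures removed in this union are superseded by the common scsi_sge, scsi_sgl_params and scsi_cached_sges types added to include/linux/qed/storage_common.h further down. One way a driver might fill the shared descriptor, sketched with hypothetical names and not taken from this patch:

static void my_fill_sgl_params(struct scsi_sgl_params *p,
			       dma_addr_t sgl_phys, u32 total_len, u16 nsge)
{
	/* hypothetical helper; regpair holds little-endian lo/hi halves */
	p->sgl_addr.lo = cpu_to_le32(lower_32_bits(sgl_phys));
	p->sgl_addr.hi = cpu_to_le32(upper_32_bits(sgl_phys));
	p->sgl_total_length = cpu_to_le32(total_len);
	p->sge_offset = 0;		/* start at the first byte of SGE 0 */
	p->sgl_num_sges = cpu_to_le16(nsge);
	p->sgl_index = 0;
}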
- struct iscsi_cached_sge_ctx cached_sge; -}; struct iscsi_debug_modes { u8 flags; @@ -771,8 +738,10 @@ struct iscsi_debug_modes { #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 -#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3 -#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 }; struct iscsi_dif_flags { @@ -806,7 +775,6 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, ISCSI_EVENT_TYPE_TCP_CONN_ERROR, - ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES, MAX_ISCSI_EQE_OPCODE }; @@ -856,31 +824,11 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, + ISCSI_CONN_ERROR_INVALID_ITT, ISCSI_ERROR_UNKNOWN, MAX_ISCSI_ERROR_TYPES }; -struct iscsi_mflags { - u8 mflags; -#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1 -#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0 -#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1 -#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1 -#define ISCSI_MFLAGS_RESERVED_MASK 0x3F -#define ISCSI_MFLAGS_RESERVED_SHIFT 2 -}; - -struct iscsi_sgl { - struct regpair sgl_addr; - __le16 updated_sge_size; - __le16 updated_sge_offset; - __le32 byte_offset; -}; - -union iscsi_mstorm_sgl { - struct iscsi_sgl sgl_struct; - struct iscsi_sge single_sge; -}; enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UNUSED = 0, @@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id { struct iscsi_reg1 { __le32 reg1_map; -#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7 -#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0 -#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF -#define ISCSI_REG1_RESERVED1_SHIFT 3 +#define ISCSI_REG1_NUM_SGES_MASK 0xF +#define ISCSI_REG1_NUM_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 4 }; union iscsi_seq_num { @@ -967,22 +915,33 @@ struct iscsi_spe_func_init { }; struct ystorm_iscsi_task_state { - union iscsi_data_desc_ctx sgl_ctx_union; - __le32 buffer_offset[2]; - __le16 bytes_nxt_dif; - __le16 rxmit_bytes_nxt_dif; - union iscsi_seq_num seq_num_union; - u8 dif_bytes_leftover; - u8 rxmit_dif_bytes_leftover; - __le16 reuse_count; - struct iscsi_dif_flags dif_flags; - u8 local_comp; + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; __le32 exp_r2t_sn; - __le32 sgl_offset[2]; + __le32 buffer_offset; + union iscsi_seq_num seq_num; + struct iscsi_dif_flags dif_flags; + u8 flags; +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 +}; + +struct ystorm_iscsi_task_rxmit_opt { + __le32 fast_rxmit_sge_offset; + __le32 scan_start_buffer_offset; + __le32 fast_rxmit_buffer_offset; + u8 scan_start_sgl_index; + u8 fast_rxmit_sgl_index; + __le16 reserved; }; struct ystorm_iscsi_task_st_ctx { struct ystorm_iscsi_task_state state; + struct ystorm_iscsi_task_rxmit_opt rxmit_opt; union iscsi_task_hdr pdu_hdr; }; @@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx { }; struct mstorm_iscsi_task_st_ctx { - union iscsi_mstorm_sgl sgl_union; - struct iscsi_dif_flags 
dif_flags; - struct iscsi_mflags flags; - u8 sgl_size; - u8 host_sge_index; - __le16 dix_cur_sge_offset; - __le16 dix_cur_sge_size; - __le32 data_offset_rtid; - u8 dif_offset; - u8 dix_sgl_size; - u8 dix_sge_index; + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; u8 task_type; + struct iscsi_dif_flags dif_flags; + u8 reserved0[2]; struct regpair sense_db; - struct regpair dix_sgl_cur_sge; - __le32 rem_task_size; - __le16 reuse_count; - __le16 dif_data_residue; - u8 reserved0[4]; - __le32 reserved1[1]; + __le32 expected_itt; + __le32 reserved1; }; struct ustorm_iscsi_task_st_ctx { @@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx { #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 - u8 reserved2; + struct iscsi_dif_flags dif_flags; __le16 reserved3; __le32 reserved4; __le32 reserved5; @@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx { #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 -#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4 -#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 @@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx { struct iscsi_task_context { struct ystorm_iscsi_task_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2]; struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; struct regpair ystorm_ag_padding[2]; struct tdif_task_context tdif_context; @@ -1272,32 +1221,22 @@ struct iscsi_uhqe { #define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 }; -struct iscsi_wqe_field { - __le32 contlen_cdbsize_field; -#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF -#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0 -#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF -#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24 -}; - -union iscsi_wqe_field_union { - struct iscsi_wqe_field cont_field; - __le32 prev_tid; -}; struct iscsi_wqe { __le16 task_id; u8 flags; #define ISCSI_WQE_WQE_TYPE_MASK 0x7 #define ISCSI_WQE_WQE_TYPE_SHIFT 0 -#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7 -#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3 -#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1 -#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6 +#define ISCSI_WQE_NUM_SGES_MASK 0xF +#define ISCSI_WQE_NUM_SGES_SHIFT 3 #define ISCSI_WQE_RESPONSE_MASK 0x1 #define ISCSI_WQE_RESPONSE_SHIFT 7 struct iscsi_dif_flags prot_flags; - union iscsi_wqe_field_union cont_prevtid_union; + __le32 contlen_cdbsize; +#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_CDB_SIZE_SHIFT 24 }; enum iscsi_wqe_type { @@ -1318,17 +1257,15 @@ struct iscsi_xhqe { u8 total_ahs_length; u8 opcode; u8 flags; -#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7 -#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0 -#define ISCSI_XHQE_FINAL_MASK 0x1 -#define ISCSI_XHQE_FINAL_SHIFT 3 -#define 
ISCSI_XHQE_SUPER_IO_MASK 0x1 -#define ISCSI_XHQE_SUPER_IO_SHIFT 4 -#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 -#define ISCSI_XHQE_STATUS_BIT_SHIFT 5 -#define ISCSI_XHQE_RESERVED_MASK 0x3 -#define ISCSI_XHQE_RESERVED_SHIFT 6 - union iscsi_seq_num seq_num_union; +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 0 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 +#define ISCSI_XHQE_NUM_SGES_MASK 0xF +#define ISCSI_XHQE_NUM_SGES_SHIFT 2 +#define ISCSI_XHQE_RESERVED0_MASK 0x3 +#define ISCSI_XHQE_RESERVED0_SHIFT 6 + union iscsi_seq_num seq_num; __le16 reserved1; }; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 4cd1f0ccfa367a5215dd121ca22efa6267e7c51e..d66d16a559e19e37516abe08939fa9e0a712aa60 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -158,15 +158,27 @@ struct qed_tunn_params { struct qed_eth_cb_ops { struct qed_common_cb_ops common; void (*force_mac) (void *dev, u8 *mac, bool forced); + void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port); }; #define QED_MAX_PHC_DRIFT_PPB 291666666 enum qed_ptp_filter_type { - QED_PTP_FILTER_L2, - QED_PTP_FILTER_IPV4, - QED_PTP_FILTER_IPV4_IPV6, - QED_PTP_FILTER_L2_IPV4_IPV6 + QED_PTP_FILTER_NONE, + QED_PTP_FILTER_ALL, + QED_PTP_FILTER_V1_L4_EVENT, + QED_PTP_FILTER_V1_L4_GEN, + QED_PTP_FILTER_V2_L4_EVENT, + QED_PTP_FILTER_V2_L4_GEN, + QED_PTP_FILTER_V2_L2_EVENT, + QED_PTP_FILTER_V2_L2_GEN, + QED_PTP_FILTER_V2_EVENT, + QED_PTP_FILTER_V2_GEN +}; + +enum qed_ptp_hwtstamp_tx_type { + QED_PTP_HWTSTAMP_TX_OFF, + QED_PTP_HWTSTAMP_TX_ON, }; #ifdef CONFIG_DCB @@ -229,8 +241,8 @@ struct qed_eth_dcbnl_ops { #endif struct qed_eth_ptp_ops { - int (*hwtstamp_tx_on)(struct qed_dev *); - int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type); + int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type, + enum qed_ptp_hwtstamp_tx_type); int (*read_rx_ts)(struct qed_dev *, u64 *); int (*read_tx_ts)(struct qed_dev *, u64 *); int (*read_cc)(struct qed_dev *, u64 *); @@ -301,6 +313,14 @@ struct qed_eth_ops { int (*tunn_config)(struct qed_dev *cdev, struct qed_tunn_params *params); + + int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, + dma_addr_t mapping, u16 length, + u16 vport_id, u16 rx_queue_id, + bool add_filter); + + int (*configure_arfs_searcher)(struct qed_dev *cdev, + bool en_searcher); }; const struct qed_eth_ops *qed_get_eth_ops(void); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index fde56c436f7177e27173656f4fa4480752351c6d..5544d7b2f2bb257387e6f1eb9d5907e9ce89c08b 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -144,6 +144,7 @@ struct qed_dcbx_operational_params { bool enabled; bool ieee; bool cee; + bool local; u32 err; }; @@ -178,6 +179,12 @@ struct qed_eth_pf_params { * to update_pf_params routine invoked before slowpath start */ u16 num_cons; + + /* To enable arfs, previous to HW-init a positive number needs to be + * set [as filters require allocated searcher ILT memory]. + * This will set the maximal number of configured steering-filters. + */ + u32 num_arfs_filters; }; struct qed_fcoe_pf_params { @@ -263,7 +270,6 @@ struct qed_rdma_pf_params { * the doorbell BAR). 
*/ u32 min_dpis; /* number of requested DPIs */ - u32 num_mrs; /* number of requested memory regions */ u32 num_qps; /* number of requested Queue Pairs */ u32 num_srqs; /* number of requested SRQ */ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ @@ -300,6 +306,11 @@ struct qed_sb_info { struct qed_dev *cdev; }; +enum qed_dev_type { + QED_DEV_TYPE_BB, + QED_DEV_TYPE_AH, +}; + struct qed_dev_info { unsigned long pci_mem_start; unsigned long pci_mem_end; @@ -325,6 +336,13 @@ struct qed_dev_info { u16 mtu; bool wol_support; + + enum qed_dev_type dev_type; + + /* Output parameters for qede */ + bool vxlan_enable; + bool gre_enable; + bool geneve_enable; }; enum qed_sb_type { @@ -421,6 +439,7 @@ struct qed_int_info { }; struct qed_common_cb_ops { + void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); void (*link_update)(void *dev, struct qed_link_output *link); void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); @@ -752,7 +771,7 @@ enum qed_mf_mode { QED_MF_NPAR, }; -struct qed_eth_stats { +struct qed_eth_stats_common { u64 no_buff_discards; u64 packet_too_big_discard; u64 ttl0_discard; @@ -784,11 +803,6 @@ struct qed_eth_stats { u64 rx_256_to_511_byte_packets; u64 rx_512_to_1023_byte_packets; u64 rx_1024_to_1518_byte_packets; - u64 rx_1519_to_1522_byte_packets; - u64 rx_1519_to_2047_byte_packets; - u64 rx_2048_to_4095_byte_packets; - u64 rx_4096_to_9216_byte_packets; - u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; @@ -805,14 +819,8 @@ struct qed_eth_stats { u64 tx_256_to_511_byte_packets; u64 tx_512_to_1023_byte_packets; u64 tx_1024_to_1518_byte_packets; - u64 tx_1519_to_2047_byte_packets; - u64 tx_2048_to_4095_byte_packets; - u64 tx_4096_to_9216_byte_packets; - u64 tx_9217_to_16383_byte_packets; u64 tx_pause_frames; u64 tx_pfc_frames; - u64 tx_lpi_entry_count; - u64 tx_total_collisions; u64 brb_truncates; u64 brb_discards; u64 rx_mac_bytes; @@ -827,6 +835,34 @@ struct qed_eth_stats { u64 tx_mac_ctrl_frames; }; +struct qed_eth_stats_bb { + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; +}; + +struct qed_eth_stats_ah { + u64 rx_1519_to_max_byte_packets; + u64 tx_1519_to_max_byte_packets; +}; + +struct qed_eth_stats { + struct qed_eth_stats_common common; + + union { + struct qed_eth_stats_bb bb; + struct qed_eth_stats_ah ah; + }; +}; + #define QED_SB_IDX 0x0002 #define RX_PI 0 diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index f70bb81b8b6acfda1701ffcd073163db05a82770..3414649133d2e44fe36b9c12e12f42ba6970ed9d 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -67,6 +67,8 @@ struct qed_dev_iscsi_info { void __iomem *primary_dbq_rq_addr; void __iomem *secondary_bdq_rq_addr; + + u8 num_cqs; }; struct qed_iscsi_id_params { diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h index f742d4312c9d96f1498554edee09654e065e9e74..cbb2ff0ce4bc34c5179a5ad77167ac0b721f6a30 100644 --- a/include/linux/qed/qed_roce_if.h +++ b/include/linux/qed/qed_roce_if.h @@ -240,6 +240,7 @@ struct qed_rdma_add_user_out_params { u64 dpi_addr; u64 dpi_phys_addr; u32 dpi_size; + u16 wid_count; }; enum roce_mode { @@ -533,6 +534,7 
@@ enum qed_rdma_type { struct qed_dev_rdma_info { struct qed_dev_info common; enum qed_rdma_type rdma_type; + u8 user_dpm_enabled; }; struct qed_rdma_ops { diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index f773aa5e746ff47bb886aa19f568a24108dd0d1e..72c770f9f6669a5169f1780f8cae524bc6c3e0b4 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -52,7 +52,8 @@ #define RDMA_MAX_PDS (64 * 1024) #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB +#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index bad02df213dfccd11cd25fa1b1e0decaf17f92c6..866f063026dedc6540d87d595bcacf9071f14681 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -38,4 +38,21 @@ #define ROCE_MAX_QPS (32 * 1024) +enum roce_async_events_type { + ROCE_ASYNC_EVENT_NONE = 0, + ROCE_ASYNC_EVENT_COMM_EST = 1, + ROCE_ASYNC_EVENT_SQ_DRAINED, + ROCE_ASYNC_EVENT_SRQ_LIMIT, + ROCE_ASYNC_EVENT_LAST_WQE_REACHED, + ROCE_ASYNC_EVENT_CQ_ERR, + ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, + ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, + ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, + ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, + ROCE_ASYNC_EVENT_SRQ_EMPTY, + ROCE_ASYNC_EVENT_DESTROY_QP_DONE, + MAX_ROCE_ASYNC_EVENTS_TYPE +}; + #endif /* __ROCE_COMMON__ */ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 03f3e37ab059d5e4b48aa2b7f80366016b27fdf5..08df82a096b62de80e51f34122f4809f0eb6d27e 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -40,6 +40,8 @@ #define BDQ_ID_IMM_DATA (1) #define BDQ_NUM_IDS (2) +#define SCSI_NUM_SGES_SLOW_SGL_THR 8 + #define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) struct scsi_bd { @@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data { __le16 reserved0[3]; }; +struct scsi_sge { + struct regpair sge_addr; + __le32 sge_len; + __le32 reserved; +}; + +struct scsi_cached_sges { + struct scsi_sge sge[4]; +}; + struct scsi_drv_cmdq { __le16 cmdq_cons; __le16 reserved0; @@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data { struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; }; -struct scsi_sge { - struct regpair sge_addr; - __le16 sge_len; - __le16 reserved0; - __le32 reserved1; +enum scsi_sgl_mode { + SCSI_TX_SLOW_SGL, + SCSI_FAST_SGL, + MAX_SCSI_SGL_MODE +}; + +struct scsi_sgl_params { + struct regpair sgl_addr; + __le32 sgl_total_length; + __le32 sge_offset; + __le16 sgl_num_sges; + u8 sgl_index; + u8 reserved; }; struct scsi_terminate_extra_params { diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 46fe7856f1b22c828474257ceedf03c182958ec5..a5e843268f0e9431eacd07ad5cc74e10be690b0d 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -173,6 +173,7 @@ enum tcp_seg_placement_event { TCP_EVENT_ADD_ISLE_RIGHT, TCP_EVENT_ADD_ISLE_LEFT, TCP_EVENT_JOIN, + TCP_EVENT_DELETE_ISLES, TCP_EVENT_NOP, MAX_TCP_SEG_PLACEMENT_EVENT }; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 092292b6675e2cf08b1138410a8488bc87986495..7d56a7ea2b2e8cdcf471da6aaa6ac0be3c51acaa 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -49,6 +49,21 @@ /* Base bits plus 1 bit for nulls marker */ #define 
RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) +/* Maximum chain length before rehash + * + * The maximum (not average) chain length grows with the size of the hash + * table, at a rate of (log N)/(log log N). + * + * The value of 16 is selected so that even if the hash table grew to + * 2^32 you would not expect the maximum chain length to exceed it + * unless we are under attack (or extremely unlucky). + * + * As this limit is only to detect attacks, we don't need to set it to a + * lower value as you'd need the chain length to vastly exceed 16 to have + * any real effect on the system. + */ +#define RHT_ELASTICITY 16u + struct rhash_head { struct rhash_head __rcu *next; }; @@ -110,29 +125,25 @@ struct rhashtable; * @key_len: Length of key * @key_offset: Offset of key in struct to be hashed * @head_offset: Offset of rhash_head in struct to be hashed - * @insecure_max_entries: Maximum number of entries (may be exceeded) * @max_size: Maximum size while expanding * @min_size: Minimum size while shrinking - * @nulls_base: Base value to generate nulls marker - * @insecure_elasticity: Set to true to disable chain length checks - * @automatic_shrinking: Enable automatic shrinking of tables * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) + * @automatic_shrinking: Enable automatic shrinking of tables + * @nulls_base: Base value to generate nulls marker * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @obj_hashfn: Function to hash object * @obj_cmpfn: Function to compare key with object */ struct rhashtable_params { - size_t nelem_hint; - size_t key_len; - size_t key_offset; - size_t head_offset; - unsigned int insecure_max_entries; + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; unsigned int max_size; - unsigned int min_size; - u32 nulls_base; - bool insecure_elasticity; + u16 min_size; bool automatic_shrinking; - size_t locks_mul; + u8 locks_mul; + u32 nulls_base; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; rht_obj_cmpfn_t obj_cmpfn; @@ -143,8 +154,8 @@ struct rhashtable_params { * @tbl: Bucket table * @nelems: Number of elements in table * @key_len: Key length for hashfn - * @elasticity: Maximum chain length before rehash * @p: Configuration parameters + * @max_elems: Maximum number of elements in table * @rhlist: True if this is an rhltable * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping @@ -154,8 +165,8 @@ struct rhashtable { struct bucket_table __rcu *tbl; atomic_t nelems; unsigned int key_len; - unsigned int elasticity; struct rhashtable_params p; + unsigned int max_elems; bool rhlist; struct work_struct run_work; struct mutex mutex; @@ -318,8 +329,7 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht, static inline bool rht_grow_above_max(const struct rhashtable *ht, const struct bucket_table *tbl) { - return ht->p.insecure_max_entries && - atomic_read(&ht->nelems) >= ht->p.insecure_max_entries; + return atomic_read(&ht->nelems) >= ht->max_elems; } /* The bucket lock is selected based on the hash and protects mutations @@ -726,7 +736,7 @@ static inline void *__rhashtable_insert_fast( return rhashtable_insert_slow(ht, key, obj); } - elasticity = ht->elasticity; + elasticity = RHT_ELASTICITY; pprev = rht_bucket_insert(ht, tbl, hash); data = ERR_PTR(-ENOMEM); if (!pprev) @@ -915,6 +925,28 @@ static inline int rhashtable_lookup_insert_fast( return ret == NULL ? 
0 : -EEXIST; } +/** + * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Just like rhashtable_lookup_insert_fast(), but this function returns the + * object if it exists, NULL if it did not exist and the insertion was + * successful, and an ERR_PTR otherwise. + */ +static inline void *rhashtable_lookup_get_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + + BUG_ON(ht->p.obj_hashfn); + + return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, + false); +} +
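A minimal caller-side sketch (illustration only, not part of the patch; the demo_entry structure, its table and its params are hypothetical) of how the three-way return value documented above is consumed:

#include <linux/err.h>
#include <linux/rhashtable.h>

struct demo_entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params demo_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct demo_entry, key),
	.head_offset = offsetof(struct demo_entry, node),
};

/* Returns the existing entry for new->key, or 'new' after inserting it. */
static struct demo_entry *demo_get_or_insert(struct rhashtable *ht,
					     struct demo_entry *new)
{
	struct demo_entry *old;

	old = rhashtable_lookup_get_insert_fast(ht, &new->node, demo_params);
	if (IS_ERR(old))
		return old;	/* insertion failed, e.g. -ENOMEM */
	if (old)
		return old;	/* key already present, 'new' was not inserted */
	return new;		/* 'new' was inserted */
}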
/** * rhashtable_lookup_insert_key - search and insert object to hash table * with explicit key diff --git a/include/linux/rpmsg/qcom_smd.h index 8ec8b6439b25edb956bb06bcf116c190a46ce62c..f27917e0a10114f9c72e228c42b6f60efd51b541 100644 --- a/include/linux/rpmsg/qcom_smd.h +++ b/include/linux/rpmsg/qcom_smd.h @@ -6,7 +6,7 @@ struct qcom_smd_edge; -#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD) +#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, struct device_node *node); diff --git a/include/linux/serdev.h index 9519da6253a83bac113219c41ecafda95be3a029..37395b8eb8f1e39fb8166c6d215d6abe8e056690 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -15,6 +15,8 @@ #include #include +#include +#include struct serdev_controller; struct serdev_device; @@ -81,6 +83,9 @@ struct serdev_controller_ops { void (*close)(struct serdev_controller *); void (*set_flow_control)(struct serdev_controller *, bool); unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); + void (*wait_until_sent)(struct serdev_controller *, long); + int (*get_tiocm)(struct serdev_controller *); + int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); }; /** @@ -186,6 +191,9 @@ int serdev_device_open(struct serdev_device *); void serdev_device_close(struct serdev_device *); unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); void serdev_device_set_flow_control(struct serdev_device *, bool); +void serdev_device_wait_until_sent(struct serdev_device *, long); +int serdev_device_get_tiocm(struct serdev_device *); +int serdev_device_set_tiocm(struct serdev_device *, int, int); int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); void serdev_device_write_flush(struct serdev_device *); int serdev_device_write_room(struct serdev_device *); @@ -223,6 +231,15 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev return 0; } static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} +static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} +static inline int serdev_device_get_tiocm(struct serdev_device *serdev) +{ + return -ENOTSUPP; +} +static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear) +{ + return -ENOTSUPP; +} static inline int serdev_device_write_buf(struct serdev_device *sdev, const unsigned char *buf, size_t count) { return -ENODEV; @@ -238,6 +255,36 @@ static inline int serdev_device_write_room(struct serdev_device *sdev) #endif /* CONFIG_SERIAL_DEV_BUS */ +static inline bool serdev_device_get_cts(struct serdev_device *serdev) +{ + int status = serdev_device_get_tiocm(serdev); + return !!(status & TIOCM_CTS); +} + +static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms) +{ + unsigned long timeout; + bool signal; + + timeout = jiffies + msecs_to_jiffies(timeout_ms); + while (time_is_after_jiffies(timeout)) { + signal = serdev_device_get_cts(serdev); + if (signal == state) + return 0; + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable) +{ + if (enable) + return serdev_device_set_tiocm(serdev, TIOCM_RTS, 0); + else + return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); +} + /* * serdev hooks into TTY core */ diff --git a/include/linux/skbuff.h index 53383bce27f126e88c607e6fce901f5f6a241070..a098d95b3d84d0246adc88651af9978d6810fb01 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -413,14 +413,15 @@ struct ubuf_info { * the end of the header data, ie. at skb->end. */ struct skb_shared_info { + unsigned short _unused; unsigned char nr_frags; __u8 tx_flags; unsigned short gso_size; /* Warning: this field is not always filled in (UFO)! */ unsigned short gso_segs; - unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; + unsigned int gso_type; u32 tskey; __be32 ip6_frag_id; @@ -491,6 +492,8 @@ enum { SKB_GSO_TUNNEL_REMCSUM = 1 << 14, SKB_GSO_SCTP = 1 << 15, + + SKB_GSO_ESP = 1 << 16, }; #if BITS_PER_LONG > 32 diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h deleted file mode 100644 index f148e0ffbec75f443af5bdb23e8ca318b5743786..0000000000000000000000000000000000000000 --- a/include/linux/soc/qcom/smd.h +++ /dev/null @@ -1,139 +0,0 @@ -#ifndef __QCOM_SMD_H__ -#define __QCOM_SMD_H__ - -#include -#include - -struct qcom_smd; -struct qcom_smd_channel; -struct qcom_smd_lookup; - -/** - * struct qcom_smd_id - struct used for matching a smd device - * @name: name of the channel - */ -struct qcom_smd_id { - char name[20]; }; - -/** - * struct qcom_smd_device - smd device struct - * @dev: the device struct - * @channel: handle to the smd channel for this device - */ -struct qcom_smd_device { - struct device dev; - struct qcom_smd_channel *channel; -}; - -typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t); - -/** - * struct qcom_smd_driver - smd driver struct - * @driver: underlying device driver - * @smd_match_table: static channel match table - * @probe: invoked when the smd channel is found - * @remove: invoked when the smd channel is closed - * @callback: invoked when an inbound message is received on the channel, - * should return 0 on success or -EBUSY if the data cannot be - * consumed at this time - */ -struct qcom_smd_driver { - struct device_driver driver; - const struct qcom_smd_id *smd_match_table; - - int (*probe)(struct qcom_smd_device *dev); - void (*remove)(struct qcom_smd_device *dev); - qcom_smd_cb_t callback; -}; - -#if IS_ENABLED(CONFIG_QCOM_SMD) - -int qcom_smd_driver_register(struct qcom_smd_driver *drv); -void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); - -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel, - const char *name, - qcom_smd_cb_t cb); -void qcom_smd_close_channel(struct qcom_smd_channel *channel); -void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel); -void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data); -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
int len); - - -struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, - struct device_node *node); -int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); - -#else - -static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv) -{ - return -ENXIO; -} - -static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline struct qcom_smd_channel * -qcom_smd_open_channel(struct qcom_smd_channel *channel, - const char *name, - qcom_smd_cb_t cb) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return NULL; -} - -static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return NULL; -} - -static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline int qcom_smd_send(struct qcom_smd_channel *channel, - const void *data, int len) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return -ENXIO; -} - -static inline struct qcom_smd_edge * -qcom_smd_register_edge(struct device *parent, - struct device_node *node) -{ - return ERR_PTR(-ENXIO); -} - -static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return -ENXIO; -} - -#endif - -#define module_qcom_smd_driver(__smd_driver) \ - module_driver(__smd_driver, qcom_smd_driver_register, \ - qcom_smd_driver_unregister) - - -#endif diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h index eab64976a73b0e1aa2c15de6a06ae65e1333de99..a4dd4d7c711dc000fc3f1fe93a794dbff245b418 100644 --- a/include/linux/soc/qcom/wcnss_ctrl.h +++ b/include/linux/soc/qcom/wcnss_ctrl.h @@ -1,16 +1,19 @@ #ifndef __WCNSS_CTRL_H__ #define __WCNSS_CTRL_H__ -#include +#include #if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) -struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); +struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, + rpmsg_rx_cb_t cb, void *priv); #else -static inline struct qcom_smd_channel* -qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) +static struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, + const char *name, + rpmsg_rx_cb_t cb, + void *priv) { WARN_ON(1); return ERR_PTR(-ENXIO); diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index a0596ca0e80ac77aeb0afa29648532ef51a5deae..a2f8109bb215751427e99b3c026badfe7113b875 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -24,6 +24,7 @@ void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); +u64 sock_gen_cookie(struct sock *sk); int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index fc273e9d5f67625b9ddf611746d1a09ff555e4ab..3921cb9dfadb9b4a9bbe30854ccada75725b8005 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -28,6 +28,9 @@ #include +#define MTL_MAX_RX_QUEUES 8 +#define MTL_MAX_TX_QUEUES 8 + #define STMMAC_RX_COE_NONE 0 #define 
STMMAC_RX_COE_TYPE1 1 #define STMMAC_RX_COE_TYPE2 2 @@ -44,6 +47,18 @@ #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */ #define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */ +/* MTL algorithms identifiers */ +#define MTL_TX_ALGORITHM_WRR 0x0 +#define MTL_TX_ALGORITHM_WFQ 0x1 +#define MTL_TX_ALGORITHM_DWRR 0x2 +#define MTL_TX_ALGORITHM_SP 0x3 +#define MTL_RX_ALGORITHM_SP 0x4 +#define MTL_RX_ALGORITHM_WSP 0x5 + +/* RX/TX Queue Mode */ +#define MTL_QUEUE_AVB 0x0 +#define MTL_QUEUE_DCB 0x1 + /* The MDC clock could be set higher than the IEEE 802.3 * specified frequency limit 0f 2.5 MHz, by programming a clock divider * of value different than the above defined values. The resultant MDIO @@ -109,6 +124,26 @@ struct stmmac_axi { bool axi_rb; }; +struct stmmac_rxq_cfg { + u8 mode_to_use; + u8 chan; + u8 pkt_route; + bool use_prio; + u32 prio; +}; + +struct stmmac_txq_cfg { + u8 weight; + u8 mode_to_use; + /* Credit Base Shaper parameters */ + u32 send_slope; + u32 idle_slope; + u32 high_credit; + u32 low_credit; + bool use_prio; + u32 prio; +}; + struct plat_stmmacenet_data { int bus_id; int phy_addr; @@ -133,6 +168,12 @@ struct plat_stmmacenet_data { int unicast_filter_entries; int tx_fifo_size; int rx_fifo_size; + u8 rx_queues_to_use; + u8 tx_queues_to_use; + u8 rx_sched_algorithm; + u8 tx_sched_algorithm; + struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; + struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; void (*fix_mac_speed)(void *priv, unsigned int speed); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index cfc2d9506ce8077af1ec92eb7086fd52ce4fe1ac..b6d5adcee8fcb611de202993623cc80274d262e4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -233,12 +233,14 @@ struct tcp_sock { u8 syn_data:1, /* SYN includes data */ syn_fastopen:1, /* SYN includes Fast Open option */ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ + syn_fastopen_ch:1, /* Active TFO re-enabling probe */ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ /* RTT measurement */ + struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ @@ -331,16 +333,16 @@ struct tcp_sock { /* Receiver side RTT estimation */ struct { - u32 rtt; - u32 seq; - u32 time; + u32 rtt_us; + u32 seq; + struct skb_mstamp time; } rcv_rtt_est; /* Receiver queue space */ struct { - int space; - u32 seq; - u32 time; + int space; + u32 seq; + struct skb_mstamp time; } rcvq_space; /* TCP-specific MTU probe information. 
*/ diff --git a/include/linux/udp.h b/include/linux/udp.h index c0f530809d1f3db7323e51a52224eb49d8f97da0..6cb4061a720d2df5e5f9467de8269529195ce827 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -115,6 +115,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) -#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag) +#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) #endif /* _LINUX_UDP_H */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 6e0ce8c7b8cb5a9fcb985a5a5078f82267d03092..7dffa5624ea62bee992e547c44ed4a86a577b0a7 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -64,6 +64,8 @@ struct usbnet { struct usb_anchor deferred; struct tasklet_struct bh; + struct pcpu_sw_netstats __percpu *stats64; + struct work_struct kevent; unsigned long flags; # define EVENT_TX_HALT 0 @@ -261,10 +263,10 @@ extern void usbnet_pause_rx(struct usbnet *); extern void usbnet_resume_rx(struct usbnet *); extern void usbnet_purge_paused_rxq(struct usbnet *); -extern int usbnet_get_settings(struct net_device *net, - struct ethtool_cmd *cmd); -extern int usbnet_set_settings(struct net_device *net, - struct ethtool_cmd *cmd); +extern int usbnet_get_link_ksettings(struct net_device *net, + struct ethtool_link_ksettings *cmd); +extern int usbnet_set_link_ksettings(struct net_device *net, + const struct ethtool_link_ksettings *cmd); extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); @@ -278,5 +280,7 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags); extern void usbnet_status_stop(struct usbnet *dev); extern void usbnet_update_max_qlen(struct usbnet *dev); +extern void usbnet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); #endif /* __LINUX_USB_USBNET_H */ diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 584f9a647ad4acca191ff6116a47c14da1385fa3..ab13f0743da8ed095edb3be854f5c4430e828728 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -153,5 +153,6 @@ void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt); void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt); u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit); +void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt); #endif /* _LINUX_VIRTIO_VSOCK_H */ diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index 5ab4c9901ccc12979e32d4788c35050717a00d7f..a71378007e61dc94e7f43bf671b275c291376db8 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -198,6 +198,21 @@ static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr, ipaddr->s6_addr[8] ^= 0x02; } +static inline void lowpan_iphc_uncompress_eui48_lladdr(struct in6_addr *ipaddr, + const void *lladdr) +{ + /* fe:80::XXXX:XXff:feXX:XXXX + * \_________________/ + * hwaddr + */ + ipaddr->s6_addr[0] = 0xFE; + ipaddr->s6_addr[1] = 0x80; + memcpy(&ipaddr->s6_addr[8], lladdr, 3); + ipaddr->s6_addr[11] = 0xFF; + ipaddr->s6_addr[12] = 0xFE; + memcpy(&ipaddr->s6_addr[13], lladdr + 3, 3); +} + #ifdef DEBUG /* print data in line */ static inline void raw_dump_inline(const char *caller, char *msg, diff --git 
a/include/net/addrconf.h b/include/net/addrconf.h index 17c6fd84e287808eca08503b87a56a5d6fc0cc5c..1aeb25dd42a7d07282308f1995888ecdf01f99b8 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -262,8 +262,8 @@ int register_inet6addr_notifier(struct notifier_block *nb); int unregister_inet6addr_notifier(struct notifier_block *nb); int inet6addr_notifier_call_chain(unsigned long val, void *v); -void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, - struct ipv6_devconf *devconf); +void inet6_netconf_notify_devconf(struct net *net, int event, int type, + int ifindex, struct ipv6_devconf *devconf); /** * __in6_dev_get - get inet6_dev pointer from netdevice diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1061a472a3e35b88a7df587fdeefcdb310c793bc..b5f5187f488cc1e663912ec1e12811d49ecb3fd5 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -39,7 +39,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t); int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, void *, size_t, size_t *, bool, u32 *); -void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, +bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, const char *); void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index f32ed9ac181a47c00757596fc3b8c5733426c468..f9fb566e75cfd8ca1531dd733d443f9a6c929c62 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -188,4 +188,17 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, void vsock_remove_sock(struct vsock_sock *vsk); void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); +/**** TAP ****/ + +struct vsock_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +int vsock_init_tap(void); +int vsock_add_tap(struct vsock_tap *vt); +int vsock_remove_tap(struct vsock_tap *vt); +void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque); + #endif /* __AF_VSOCK_H__ */ diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 5ee3c689c86370b4b325309d337611c9b51af5d1..0697fd41308777a4801dcf8728f2ff5aa0210804 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -282,7 +282,7 @@ struct l2cap_conn_rsp { #define L2CAP_CR_BAD_KEY_SIZE 0x0007 #define L2CAP_CR_ENCRYPTION 0x0008 #define L2CAP_CR_INVALID_SCID 0x0009 -#define L2CAP_CR_SCID_IN_USE 0x0010 +#define L2CAP_CR_SCID_IN_USE 0x000A /* connect/create channel status */ #define L2CAP_CS_NO_INFO 0x0000 diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 4190af53a46ad1a5f7868f4add65363e5ada9b20..da4acefe39c81abbc11af58c88eb283159d1de26 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -21,6 +21,8 @@ SOFTWARE IS DISCLAIMED. 
*/ +#include + #ifndef __RFCOMM_H #define __RFCOMM_H @@ -174,7 +176,7 @@ struct rfcomm_dlc { struct mutex lock; unsigned long state; unsigned long flags; - atomic_t refcnt; + refcount_t refcnt; u8 dlci; u8 addr; u8 priority; @@ -247,12 +249,12 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel); static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d) { - atomic_inc(&d->refcnt); + refcount_inc(&d->refcnt); } static inline void rfcomm_dlc_put(struct rfcomm_dlc *d) { - if (atomic_dec_and_test(&d->refcnt)) + if (refcount_dec_and_test(&d->refcnt)) rfcomm_dlc_free(d); } diff --git a/include/net/bonding.h b/include/net/bonding.h index 3c857778a6ca6870f7e7d5604adcd263380e4708..b00508d22e0a6adc37ee4e69187377fea3bc102d 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -153,7 +153,8 @@ struct slave { unsigned long last_link_up; unsigned long last_rx; unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; - s8 link; /* one of BOND_LINK_XXXX */ + s8 link; /* one of BOND_LINK_XXXX */ + s8 link_new_state; /* one of BOND_LINK_XXXX */ s8 new_link; u8 backup:1, /* indicates backup slave. Value corresponds with BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ @@ -165,7 +166,7 @@ struct slave { u32 link_failure_count; u32 speed; u16 queue_id; - u8 perm_hwaddr[ETH_ALEN]; + u8 perm_hwaddr[MAX_ADDR_LEN]; struct ad_slave_info *ad_info; struct tlb_slave_info tlb_info; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -401,6 +402,16 @@ static inline bool bond_slave_can_tx(struct slave *slave) bond_is_active_slave(slave); } +static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) +{ + if (len == ETH_ALEN) { + ether_addr_copy(dst, src); + return; + } + + memcpy(dst, src, len); +} + #define BOND_PRI_RESELECT_ALWAYS 0 #define BOND_PRI_RESELECT_BETTER 1 #define BOND_PRI_RESELECT_FAILURE 2 @@ -504,13 +515,17 @@ static inline bool bond_is_slave_inactive(struct slave *slave) return slave->inactive; } -static inline void bond_set_slave_link_state(struct slave *slave, int state, - bool notify) +static inline void bond_propose_link_state(struct slave *slave, int state) { - if (slave->link == state) + slave->link_new_state = state; +} + +static inline void bond_commit_link_state(struct slave *slave, bool notify) +{ + if (slave->link == slave->link_new_state) return; - slave->link = state; + slave->link = slave->link_new_state; if (notify) { bond_queue_slave_event(slave); bond_lower_state_changed(slave); @@ -523,6 +538,13 @@ static inline void bond_set_slave_link_state(struct slave *slave, int state, } } +static inline void bond_set_slave_link_state(struct slave *slave, int state, + bool notify) +{ + bond_propose_link_state(slave, state); + bond_commit_link_state(slave, notify); +} + static inline void bond_slave_link_notify(struct bonding *bond) { struct list_head *iter; @@ -592,6 +614,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, int level); int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave); void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); +void bond_work_init_all(struct bonding *bond); #ifdef CONFIG_PROC_FS void bond_create_proc_entry(struct bonding *bond); diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index c0452de83086e51738a249bea5fa86d6fb121760..8ffd434676b7a270af73534f3dbcc26f370fe215 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -35,83 +35,101 @@ struct napi_struct; extern unsigned int sysctl_net_busy_read __read_mostly; extern unsigned int 
sysctl_net_busy_poll __read_mostly; +/* 0 - Reserved to indicate value not set + * 1..NR_CPUS - Reserved for sender_cpu + * NR_CPUS+1..~0 - Region available for NAPI IDs + */ +#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1)) + static inline bool net_busy_loop_on(void) { return sysctl_net_busy_poll; } -static inline u64 busy_loop_us_clock(void) +static inline bool sk_can_busy_loop(const struct sock *sk) { - return local_clock() >> 10; + return sk->sk_ll_usec && !signal_pending(current); } -static inline unsigned long sk_busy_loop_end_time(struct sock *sk) -{ - return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); -} +bool sk_busy_loop_end(void *p, unsigned long start_time); -/* in poll/select we use the global sysctl_net_ll_poll value */ -static inline unsigned long busy_loop_end_time(void) +void napi_busy_loop(unsigned int napi_id, + bool (*loop_end)(void *, unsigned long), + void *loop_end_arg); + +#else /* CONFIG_NET_RX_BUSY_POLL */ +static inline unsigned long net_busy_loop_on(void) { - return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll); + return 0; } -static inline bool sk_can_busy_loop(const struct sock *sk) +static inline bool sk_can_busy_loop(struct sock *sk) { - return sk->sk_ll_usec && sk->sk_napi_id && !signal_pending(current); + return false; } +#endif /* CONFIG_NET_RX_BUSY_POLL */ -static inline bool busy_loop_timeout(unsigned long end_time) +static inline unsigned long busy_loop_current_time(void) { - unsigned long now = busy_loop_us_clock(); - - return time_after(now, end_time); +#ifdef CONFIG_NET_RX_BUSY_POLL + return (unsigned long)(local_clock() >> 10); +#else + return 0; +#endif } -bool sk_busy_loop(struct sock *sk, int nonblock); - -/* used in the NIC receive handler to mark the skb */ -static inline void skb_mark_napi_id(struct sk_buff *skb, - struct napi_struct *napi) +/* in poll/select we use the global sysctl_net_ll_poll value */ +static inline bool busy_loop_timeout(unsigned long start_time) { - skb->napi_id = napi->napi_id; -} +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll); + if (bp_usec) { + unsigned long end_time = start_time + bp_usec; + unsigned long now = busy_loop_current_time(); -#else /* CONFIG_NET_RX_BUSY_POLL */ -static inline unsigned long net_busy_loop_on(void) -{ - return 0; + return time_after(now, end_time); + } +#endif + return true; } -static inline unsigned long busy_loop_end_time(void) +static inline bool sk_busy_loop_timeout(struct sock *sk, + unsigned long start_time) { - return 0; -} +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec); -static inline bool sk_can_busy_loop(struct sock *sk) -{ - return false; -} + if (bp_usec) { + unsigned long end_time = start_time + bp_usec; + unsigned long now = busy_loop_current_time(); -static inline void skb_mark_napi_id(struct sk_buff *skb, - struct napi_struct *napi) -{ + return time_after(now, end_time); + } +#endif + return true; } -static inline bool busy_loop_timeout(unsigned long end_time) +static inline void sk_busy_loop(struct sock *sk, int nonblock) { - return true; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int napi_id = READ_ONCE(sk->sk_napi_id); + + if (napi_id >= MIN_NAPI_ID) + napi_busy_loop(napi_id, nonblock ? 
NULL : sk_busy_loop_end, sk); +#endif } -static inline bool sk_busy_loop(struct sock *sk, int nonblock) +/* used in the NIC receive handler to mark the skb */ +static inline void skb_mark_napi_id(struct sk_buff *skb, + struct napi_struct *napi) { - return false; +#ifdef CONFIG_NET_RX_BUSY_POLL + skb->napi_id = napi->napi_id; +#endif } -#endif /* CONFIG_NET_RX_BUSY_POLL */ - /* used in the protocol hanlder to propagate the napi_id to the socket */ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ead1aa6d003ef97b09847e8175b704da31c341d1..6e90f1a4950f1dfa333478d9d0104210307006af 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -363,6 +363,8 @@ static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy) /** * struct vif_params - describes virtual interface parameters + * @flags: monitor interface flags, unchanged if 0, otherwise + * %MONITOR_FLAG_CHANGED will be set * @use_4addr: use 4-address frames * @macaddr: address to use for this virtual interface. * If this parameter is set to zero address the driver may @@ -370,13 +372,17 @@ static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy) * This feature is only fully supported by drivers that enable the * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating ** only p2p devices with specified MAC. - * @vht_mumimo_groups: MU-MIMO groupID. used for monitoring only - * packets belonging to that MU-MIMO groupID. + * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets + * belonging to that MU-MIMO groupID; %NULL if not changed + * @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring + * MU-MIMO packets going to the specified station; %NULL if not changed */ struct vif_params { + u32 flags; int use_4addr; u8 macaddr[ETH_ALEN]; - u8 vht_mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN]; + const u8 *vht_mumimo_groups; + const u8 *vht_mumimo_follow_addr; }; /** @@ -1211,6 +1217,7 @@ static inline int cfg80211_get_station(struct net_device *dev, * Monitor interface configuration flags. Note that these must be the bits * according to the nl80211 flags. * + * @MONITOR_FLAG_CHANGED: set if the flags were changed * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP * @MONITOR_FLAG_CONTROL: pass control frames @@ -1219,6 +1226,7 @@ static inline int cfg80211_get_station(struct net_device *dev, * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address */ enum monitor_flags { + MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID, MONITOR_FLAG_FCSFAIL = 1<= 0). + */ +struct cfg80211_connect_resp_params { + int status; + const u8 *bssid; + struct cfg80211_bss *bss; + const u8 *req_ie; + size_t req_ie_len; + const u8 *resp_ie; + size_t resp_ie_len; + const u8 *fils_kek; + size_t fils_kek_len; + bool update_erp_next_seq_num; + u16 fils_erp_next_seq_num; + const u8 *pmk; + size_t pmk_len; + const u8 *pmkid; + enum nl80211_timeout_reason timeout_reason; +}; + +/** + * cfg80211_connect_done - notify cfg80211 of connection result + * + * @dev: network device + * @params: connection response parameters + * @gfp: allocation flags + * + * It should be called by the underlying driver once execution of the connection + * request from connect() has been completed. This is similar to + * cfg80211_connect_bss(), but takes a structure pointer for connection response + * parameters. 
Only one of the functions among cfg80211_connect_bss(), + * cfg80211_connect_result(), cfg80211_connect_timeout(), + * and cfg80211_connect_done() should be called. + */ +void cfg80211_connect_done(struct net_device *dev, + struct cfg80211_connect_resp_params *params, + gfp_t gfp); + /** * cfg80211_connect_bss - notify cfg80211 of connection result * @@ -5152,13 +5309,31 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_result(), but with the option of identifying the exact bss - * entry for the connection. Only one of these functions should be called. + * entry for the connection. Only one of the functions among + * cfg80211_connect_bss(), cfg80211_connect_result(), + * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. */ -void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, - struct cfg80211_bss *bss, const u8 *req_ie, - size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, int status, gfp_t gfp, - enum nl80211_timeout_reason timeout_reason); +static inline void +cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, + struct cfg80211_bss *bss, const u8 *req_ie, + size_t req_ie_len, const u8 *resp_ie, + size_t resp_ie_len, int status, gfp_t gfp, + enum nl80211_timeout_reason timeout_reason) +{ + struct cfg80211_connect_resp_params params; + + memset(¶ms, 0, sizeof(params)); + params.status = status; + params.bssid = bssid; + params.bss = bss; + params.req_ie = req_ie; + params.req_ie_len = req_ie_len; + params.resp_ie = resp_ie; + params.resp_ie_len = resp_ie_len; + params.timeout_reason = timeout_reason; + + cfg80211_connect_done(dev, ¶ms, gfp); +} /** * cfg80211_connect_result - notify cfg80211 of connection result @@ -5177,7 +5352,8 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, * It should be called by the underlying driver once execution of the connection * request from connect() has been completed. This is similar to * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only - * one of these functions should be called. + * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(), + * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. */ static inline void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, @@ -5204,7 +5380,9 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid, * in a sequence where no explicit authentication/association rejection was * received from the AP. This could happen, e.g., due to not being able to send * out the Authentication or Association Request frame or timing out while - * waiting for the response. + * waiting for the response. Only one of the functions among + * cfg80211_connect_bss(), cfg80211_connect_result(), + * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called. 
*/ static inline void cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, @@ -5216,51 +5394,46 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, } /** - * cfg80211_roamed - notify cfg80211 of roaming + * struct cfg80211_roam_info - driver initiated roaming information * - * @dev: network device * @channel: the channel of the new AP - * @bssid: the BSSID of the new AP + * @bss: entry of bss to which STA got roamed (may be %NULL if %bssid is set) + * @bssid: the BSSID of the new AP (may be %NULL if %bss is set) * @req_ie: association request IEs (maybe be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length - * @gfp: allocation flags - * - * It should be called by the underlying driver whenever it roamed - * from one AP to another while connected. */ -void cfg80211_roamed(struct net_device *dev, - struct ieee80211_channel *channel, - const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); +struct cfg80211_roam_info { + struct ieee80211_channel *channel; + struct cfg80211_bss *bss; + const u8 *bssid; + const u8 *req_ie; + size_t req_ie_len; + const u8 *resp_ie; + size_t resp_ie_len; +}; /** - * cfg80211_roamed_bss - notify cfg80211 of roaming + * cfg80211_roamed - notify cfg80211 of roaming * * @dev: network device - * @bss: entry of bss to which STA got roamed - * @req_ie: association request IEs (maybe be %NULL) - * @req_ie_len: association request IEs length - * @resp_ie: association response IEs (may be %NULL) - * @resp_ie_len: assoc response IEs length + * @info: information about the new BSS. &struct cfg80211_roam_info. * @gfp: allocation flags * - * This is just a wrapper to notify cfg80211 of roaming event with driver - * passing bss to avoid a race in timeout of the bss entry. It should be - * called by the underlying driver whenever it roamed from one AP to another - * while connected. Drivers which have roaming implemented in firmware - * may use this function to avoid a race in bss entry timeout where the bss - * entry of the new AP is seen in the driver, but gets timed out by the time - * it is accessed in __cfg80211_roamed() due to delay in scheduling + * This function may be called with the driver passing either the BSSID of the + * new AP or passing the bss entry to avoid a race in timeout of the bss entry. + * It should be called by the underlying driver whenever it roamed from one AP + * to another while connected. Drivers which have roaming implemented in + * firmware should pass the bss entry to avoid a race in bss entry timeout where + * the bss entry of the new AP is seen in the driver, but gets timed out by the + * time it is accessed in __cfg80211_roamed() due to delay in scheduling * rdev->event_work. In case of any failures, the reference is released - either in cfg80211_roamed_bss() or in __cfg80211_romed(), Otherwise, - it will be released while diconneting from the current bss. + * either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be + * released while disconnecting from the current bss. */ -void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); +void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, + gfp_t gfp);
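A minimal driver-side sketch (illustration only, not part of the patch; the mydrv_* names are hypothetical) of reporting a firmware-initiated roam through the consolidated API, passing the bss entry as the documentation above recommends:

static void mydrv_report_roam(struct net_device *dev,
			      struct cfg80211_bss *bss,
			      const u8 *req_ie, size_t req_ie_len,
			      const u8 *resp_ie, size_t resp_ie_len)
{
	struct cfg80211_roam_info info = {
		.bss         = bss,	/* bssid/channel may stay NULL when bss is set */
		.req_ie      = req_ie,
		.req_ie_len  = req_ie_len,
		.resp_ie     = resp_ie,
		.resp_ie_len = resp_ie_len,
	};

	cfg80211_roamed(dev, &info, GFP_KERNEL);
}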
/** * cfg80211_disconnected - notify cfg80211 that connection was dropped diff --git a/include/net/devlink.h index d29e5fc8258216b9d79604bc99b69b66cce3e443..ed7687bbf5d08f1bb5327f3f8c9e3e543e98e7c2 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -25,6 +25,8 @@ struct devlink { struct list_head list; struct list_head port_list; struct list_head sb_list; + struct list_head dpipe_table_list; + struct devlink_dpipe_headers *dpipe_headers; const struct devlink_ops *ops; struct device *dev; possible_net_t _net; @@ -49,6 +51,178 @@ struct devlink_sb_pool_info { enum devlink_sb_threshold_type threshold_type; }; +/** + * struct devlink_dpipe_field - dpipe field object + * @name: field name + * @id: index inside the headers field array + * @bitwidth: bitwidth + * @mapping_type: mapping type + */ +struct devlink_dpipe_field { + const char *name; + unsigned int id; + unsigned int bitwidth; + enum devlink_dpipe_field_mapping_type mapping_type; +}; + +/** + * struct devlink_dpipe_header - dpipe header object + * @name: header name + * @id: index, global/local determined by global bit + * @fields: fields + * @fields_count: number of fields + * @global: indicates if header is shared like most protocol headers + * or is driver specific + */ +struct devlink_dpipe_header { + const char *name; + unsigned int id; + struct devlink_dpipe_field *fields; + unsigned int fields_count; + bool global; +}; + +/** + * struct devlink_dpipe_match - represents match operation + * @type: type of match + * @header_index: header index (packets can have several headers of same + * type like in case of tunnels) + * @header: header + * @field_id: field index + */ +struct devlink_dpipe_match { + enum devlink_dpipe_match_type type; + unsigned int header_index; + struct devlink_dpipe_header *header; + unsigned int field_id; +}; + +/** + * struct devlink_dpipe_action - represents action operation + * @type: type of action + * @header_index: header index (packets can have several headers of same + * type like in case of tunnels) + * @header: header + * @field_id: field index + */ +struct devlink_dpipe_action { + enum devlink_dpipe_action_type type; + unsigned int header_index; + struct devlink_dpipe_header *header; + unsigned int field_id; }; + +/** + * struct devlink_dpipe_value - represents value of match/action + * @action: action + * @match: match + * @mapping_value: in case the field has some mapping this value + * specifies the mapping value + * @mapping_valid: specify if mapping value is valid + * @value_size: value size + * @value: value + * @mask: bit mask + */ +struct devlink_dpipe_value { + union { + struct devlink_dpipe_action *action; + struct devlink_dpipe_match *match; + }; + unsigned int mapping_value; + bool mapping_valid; + unsigned int value_size; + void *value; + void *mask; +}; + +/** + * struct devlink_dpipe_entry - table entry object + * @index: index of the entry in the table + * @match_values: match values + * @match_values_count: count of match value tuples + * @action_values: actions values + * @action_values_count: count of actions values + * @counter: value of counter + * @counter_valid: specify if the counter value read from hardware is valid + */ +struct devlink_dpipe_entry { + u64 index; + struct devlink_dpipe_value
*match_values; + unsigned int match_values_count; + struct devlink_dpipe_value *action_values; + unsigned int action_values_count; + u64 counter; + bool counter_valid; +}; + +/** + * struct devlink_dpipe_dump_ctx - context provided to driver in order + * to dump + * @info: info + * @cmd: devlink command + * @skb: skb + * @nest: top attribute + * @hdr: hdr + */ +struct devlink_dpipe_dump_ctx { + struct genl_info *info; + enum devlink_command cmd; + struct sk_buff *skb; + struct nlattr *nest; + void *hdr; +}; + +struct devlink_dpipe_table_ops; + +/** + * struct devlink_dpipe_table - table object + * @priv: private + * @name: table name + * @size: maximum number of entries + * @counters_enabled: indicates if counters are active + * @counter_control_extern: indicates if counter control is in dpipe or + * external tool + * @table_ops: table operations + * @rcu: rcu + */ +struct devlink_dpipe_table { + void *priv; + struct list_head list; + const char *name; + u64 size; + bool counters_enabled; + bool counter_control_extern; + struct devlink_dpipe_table_ops *table_ops; + struct rcu_head rcu; +}; + +/** + * struct devlink_dpipe_table_ops - dpipe_table ops + * @actions_dump - dumps all table actions + * @matches_dump - dumps all table matches + * @entries_dump - dumps all active entries in the table + * @counters_set_update - when changing the counter status hardware sync + * may be needed to allocate/free counter related + * resources + */ +struct devlink_dpipe_table_ops { + int (*actions_dump)(void *priv, struct sk_buff *skb); + int (*matches_dump)(void *priv, struct sk_buff *skb); + int (*entries_dump)(void *priv, bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx); + int (*counters_set_update)(void *priv, bool enable); +}; + +/** + * struct devlink_dpipe_headers - dpipe headers + * @headers - header array, can be shared (global bit) or driver specific + * @headers_count - count of headers + */ +struct devlink_dpipe_headers { + struct devlink_dpipe_header **headers; + unsigned int headers_count; +}; + struct devlink_ops { int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); @@ -94,6 +268,8 @@ struct devlink_ops { int (*eswitch_mode_set)(struct devlink *devlink, u16 mode); int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode); int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode); + int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode); + int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode); }; static inline void *devlink_priv(struct devlink *devlink) @@ -132,6 +308,26 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u16 egress_pools_count, u16 ingress_tc_count, u16 egress_tc_count); void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index); +int devlink_dpipe_table_register(struct devlink *devlink, + const char *table_name, + struct devlink_dpipe_table_ops *table_ops, + void *priv, u64 size, + bool counter_control_extern); +void devlink_dpipe_table_unregister(struct devlink *devlink, + const char *table_name); +int devlink_dpipe_headers_register(struct devlink *devlink, + struct devlink_dpipe_headers *dpipe_headers); +void devlink_dpipe_headers_unregister(struct devlink *devlink); +bool devlink_dpipe_table_counter_enabled(struct devlink *devlink, + const char *table_name); +int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx); +int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx, + struct devlink_dpipe_entry *entry); +int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx); +int devlink_dpipe_action_put(struct sk_buff *skb, + struct devlink_dpipe_action *action); +int devlink_dpipe_match_put(struct sk_buff *skb, + struct devlink_dpipe_match *match); #else @@ -200,6 +396,71 @@ static inline void devlink_sb_unregister(struct devlink *devlink, { } +static inline int +devlink_dpipe_table_register(struct devlink *devlink, + const char *table_name, + struct devlink_dpipe_table_ops *table_ops, + void *priv, u64 size, + bool counter_control_extern) +{ + return 0; +} + +static inline void devlink_dpipe_table_unregister(struct devlink *devlink, + const char *table_name) +{ +} + +static inline int devlink_dpipe_headers_register(struct devlink *devlink, + struct devlink_dpipe_headers * + dpipe_headers) +{ + return 0; +} + +static inline void devlink_dpipe_headers_unregister(struct devlink *devlink) +{ +} + +static inline bool devlink_dpipe_table_counter_enabled(struct devlink *devlink, + const char *table_name) +{ + return false; +} + +static inline int +devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx) +{ + return 0; +} + +static inline int +devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx, + struct devlink_dpipe_entry *entry) +{ + return 0; +} + +static inline int +devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx) +{ + return 0; +} + +static inline int +devlink_dpipe_action_put(struct sk_buff *skb, + struct devlink_dpipe_action *action) +{ + return 0; +} + +static inline int +devlink_dpipe_match_put(struct sk_buff *skb, + struct devlink_dpipe_match *match) +{ + return 0; +} + #endif #endif /* _NET_DEVLINK_H_ */
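A minimal registration sketch (illustration only, not part of the patch; the mydrv_* callbacks, headers array and table name are hypothetical) tying the dpipe API above together: headers are registered first, then the table with its ops:

static int mydrv_erif_actions_dump(void *priv, struct sk_buff *skb);
static int mydrv_erif_matches_dump(void *priv, struct sk_buff *skb);
static int mydrv_erif_entries_dump(void *priv, bool counters_enabled,
				   struct devlink_dpipe_dump_ctx *dump_ctx);
static int mydrv_erif_counters_update(void *priv, bool enable);

static struct devlink_dpipe_table_ops mydrv_erif_ops = {
	.actions_dump	     = mydrv_erif_actions_dump,
	.matches_dump	     = mydrv_erif_matches_dump,
	.entries_dump	     = mydrv_erif_entries_dump,
	.counters_set_update = mydrv_erif_counters_update,
};

static struct devlink_dpipe_headers mydrv_dpipe_headers; /* filled elsewhere */

static int mydrv_dpipe_init(struct devlink *devlink, u64 table_size)
{
	int err;

	err = devlink_dpipe_headers_register(devlink, &mydrv_dpipe_headers);
	if (err)
		return err;

	/* false: counters are controlled through dpipe, not an external tool */
	err = devlink_dpipe_table_register(devlink, "mydrv_erif",
					   &mydrv_erif_ops, NULL, table_size,
					   false);
	if (err)
		devlink_dpipe_headers_unregister(devlink);
	return err;
}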
diff --git a/include/net/dsa.h index 4e13e695f0251d5c762c3089065f4eeb429033eb..8e24677b1c62a5842f81496d7d18c071d2cf0af6 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -19,6 +19,7 @@ #include #include #include +#include struct tc_action; struct phy_device; @@ -31,6 +32,8 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_EDSA, DSA_TAG_PROTO_BRCM, DSA_TAG_PROTO_QCA, + DSA_TAG_PROTO_MTK, + DSA_TAG_PROTO_LAN9303, DSA_TAG_LAST, /* MUST BE LAST */ }; @@ -122,7 +125,7 @@ struct dsa_switch_tree { * protocol to use. */ struct net_device *master_netdev; - int (*rcv)(struct sk_buff *skb, + struct sk_buff * (*rcv)(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); @@ -182,6 +185,7 @@ struct dsa_port { unsigned int ageing_time; u8 stp_state; struct net_device *bridge_dev; + struct devlink_port devlink_port; }; struct dsa_switch { @@ -233,6 +237,13 @@ struct dsa_switch { u32 phys_mii_mask; struct mii_bus *slave_mii_bus; + /* Ageing Time limits in msecs */ + unsigned int ageing_time_min; + unsigned int ageing_time_max; + + /* devlink used to represent this switch device */ + struct devlink *devlink; + /* Dynamically allocated ports, keep last */ size_t num_ports; struct dsa_port ports[]; @@ -248,6 +259,11 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) return !!((ds->dsa_port_mask) & (1 << p)); } +static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p) +{ + return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p); +} + static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) { return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev; @@ -287,7 +303,7 @@ struct dsa_notifier_bridge_info { struct dsa_switch_ops { /* - * Probing and setup.
*/ const char *(*probe)(struct device *dsa_dev, struct device *host_dev, int sw_addr, @@ -442,6 +458,14 @@ struct dsa_switch_ops { bool ingress); void (*port_mirror_del)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); + + /* + * Cross-chip operations + */ + int (*crosschip_bridge_join)(struct dsa_switch *ds, int sw_index, + int port, struct net_device *br); + void (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index, + int port, struct net_device *br); }; struct dsa_switch_driver { @@ -449,9 +473,11 @@ struct dsa_switch_driver { const struct dsa_switch_ops *ops; }; +/* Legacy driver registration */ void register_switch_driver(struct dsa_switch_driver *type); void unregister_switch_driver(struct dsa_switch_driver *type); struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); + struct net_device *dsa_dev_to_net_device(struct device *dev); static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) @@ -459,6 +485,15 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) return dst->rcv != NULL; } +static inline bool netdev_uses_dsa(struct net_device *dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + if (dev->dsa_ptr != NULL) + return dsa_uses_tagged_protocol(dev->dsa_ptr); +#endif + return false; +} + struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n); void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds, struct device *dev); diff --git a/include/net/esp.h b/include/net/esp.h index a43be85aedc44594265695e98452ae491b852d83..c41994d1bfef0bce761227828638d466a01d6ebc 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -10,4 +10,23 @@ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) return (struct ip_esp_hdr *)skb_transport_header(skb); } +struct esp_info { + struct ip_esp_hdr *esph; + __be64 seqno; + int tfclen; + int tailen; + int plen; + int clen; + int len; + int nfrags; + __u8 proto; + bool inplace; +}; + +int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp); +int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp); +int esp_input_done2(struct sk_buff *skb, int err); +int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp); +int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp); +int esp6_input_done2(struct sk_buff *skb, int err); #endif diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 8dbfdf728cd8ce901b3b05f0e58b4eeee25051fe..76c7300626d675bd70374efe45e5608c65f44f84 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -141,7 +141,10 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, struct fib_lookup_arg *); int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, u32 flags); +bool fib_rule_matchall(const struct fib_rule *rule); -int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh); -int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh); +int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack); +int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack); #endif diff --git a/include/net/flow.h b/include/net/flow.h index 6984f1913dc124ab97627a4a5ecb2aba9f93e901..bae198b3039e6e66829c2717c1baf2baebbec6a2 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -202,7 +202,7 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) typedef 
unsigned long flow_compare_t; -static inline size_t flow_key_size(u16 family) +static inline unsigned int flow_key_size(u16 family) { switch (family) { case AF_INET: diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index ac9703018a3a63bd392aa37f31b1afdf54d69aa0..8d21d448daa9d19ad58a753e373035f2a1be59bb 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -41,6 +41,13 @@ struct flow_dissector_key_vlan { u16 padding; }; +struct flow_dissector_key_mpls { + u32 mpls_ttl:8, + mpls_bos:1, + mpls_tc:3, + mpls_label:20; +}; + struct flow_dissector_key_keyid { __be32 keyid; }; @@ -169,6 +176,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */ + FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/include/net/flowcache.h b/include/net/flowcache.h index 9caf3bfc8d2dafcc89064ec54e78682244d71fb7..51eb971e897378e25329b06f80678f522eec2ea6 100644 --- a/include/net/flowcache.h +++ b/include/net/flowcache.h @@ -8,7 +8,7 @@ struct flow_cache_percpu { struct hlist_head *hash_table; - int hash_count; + unsigned int hash_count; u32 hash_rnd; int hash_rnd_recalc; struct tasklet_struct flush_tasklet; @@ -18,8 +18,8 @@ struct flow_cache { u32 hash_shift; struct flow_cache_percpu __percpu *percpu; struct hlist_node node; - int low_watermark; - int high_watermark; + unsigned int low_watermark; + unsigned int high_watermark; struct timer_list rnd_timer; }; #endif /* _NET_FLOWCACHE_H */ diff --git a/include/net/genetlink.h b/include/net/genetlink.h index a34275be360001e89740fe4325a53905683e9778..68b88192b00c6200aa0b8b2628554952d0d79a1c 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -84,6 +84,7 @@ struct nlattr **genl_family_attrbuf(const struct genl_family *family); * @attrs: netlink attributes * @_net: network namespace * @user_ptr: user pointers + * @extack: extended ACK report struct */ struct genl_info { u32 snd_seq; @@ -94,6 +95,7 @@ struct genl_info { struct nlattr ** attrs; possible_net_t _net; void * user_ptr[2]; + struct netlink_ext_ack *extack; }; static inline struct net *genl_info_net(struct genl_info *info) @@ -106,6 +108,16 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) write_pnet(&info->_net, net); } +#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg) + +static inline int genl_err_attr(struct genl_info *info, int err, + struct nlattr *attr) +{ + info->extack->bad_attr = attr; + + return err; +} + /** * struct genl_ops - generic netlink operations * @cmd: command identifier @@ -162,14 +174,16 @@ genlmsg_nlhdr(void *user_hdr, const struct genl_family *family) * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @policy: validation policy - * */ + * @extack: extended ACK report struct + */ static inline int genlmsg_parse(const struct nlmsghdr *nlh, const struct genl_family *family, struct nlattr *tb[], int maxtype, - const struct nla_policy *policy) + const struct nla_policy *policy, + struct netlink_ext_ack *extack) { return nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype, - policy); + policy, extack); } /** diff --git a/include/net/ip.h b/include/net/ip.h index bf264a8db1ce3b1b29b9a5cc9732df323154e7e5..821cedcc8e73b68af72d1918242c25d757c63d24 100644 --- a/include/net/ip.h 
+++ b/include/net/ip.h @@ -33,6 +33,8 @@ #include #include +#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ + struct sock; struct inet_skb_parm { diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 1b1cf33cbfb02eaf4eb1eeb92be474076fdeebe4..08fbc7f7d8d70ffc43f93881b0fbeab5b3fe8abb 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -33,6 +33,8 @@ struct __ip6_tnl_parm { __be16 o_flags; __be32 i_key; __be32 o_key; + + __u32 fwmark; }; /* IPv6 tunnel */ diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 368bb4024b78c411d02a842f340f9fa11a9b5f7e..6692c5758b332d468f1e0611ecc4f3e03ae03b2b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -213,6 +213,11 @@ struct fib_entry_notifier_info { u32 tb_id; }; +struct fib_rule_notifier_info { + struct fib_notifier_info info; /* must be first */ + struct fib_rule *rule; +}; + struct fib_nh_notifier_info { struct fib_notifier_info info; /* must be first */ struct fib_nh *fib_nh; @@ -232,9 +237,21 @@ enum fib_event_type { int register_fib_notifier(struct notifier_block *nb, void (*cb)(struct notifier_block *nb)); int unregister_fib_notifier(struct notifier_block *nb); +int call_fib_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info); int call_fib_notifiers(struct net *net, enum fib_event_type event_type, struct fib_notifier_info *info); +void fib_notify(struct net *net, struct notifier_block *nb); +#ifdef CONFIG_IP_MULTIPLE_TABLES +void fib_rules_notify(struct net *net, struct notifier_block *nb); +#else +static inline void fib_rules_notify(struct net *net, struct notifier_block *nb) +{ +} +#endif + struct fib_table { struct hlist_node tb_hlist; u32 tb_id; @@ -299,6 +316,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, return err; } +static inline bool fib4_rule_default(const struct fib_rule *rule) +{ + return true; +} + #else /* CONFIG_IP_MULTIPLE_TABLES */ int __net_init fib4_rules_init(struct net *net); void __net_exit fib4_rules_exit(struct net *net); @@ -343,6 +365,8 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, return err; } +bool fib4_rule_default(const struct fib_rule *rule); + #endif /* CONFIG_IP_MULTIPLE_TABLES */ /* Exported by fib_frontend.c */ @@ -371,17 +395,13 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); int fib_sync_down_addr(struct net_device *dev, __be32 local); int fib_sync_up(struct net_device *dev, unsigned int nh_flags); -extern u32 fib_multipath_secret __read_mostly; - -static inline int fib_multipath_hash(__be32 saddr, __be32 daddr) -{ - return jhash_2words((__force u32)saddr, (__force u32)daddr, - fib_multipath_secret) >> 1; -} - +#ifdef CONFIG_IP_ROUTE_MULTIPATH +int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4, + const struct sk_buff *skb); +#endif void fib_select_multipath(struct fib_result *res, int hash); void fib_select_path(struct net *net, struct fib_result *res, - struct flowi4 *fl4, int mp_hash); + struct flowi4 *fl4, const struct sk_buff *skb); /* Exported by fib_trie.c */ void fib_trie_init(void); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 95056796657cf9b11c24ae1497f1d5c01a9fd178..520809912f0392e84b12cc8ce99d82e3e27f5d78 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -132,6 +132,7 @@ struct ip_tunnel { unsigned int prl_count; /* # of entries in PRL */ unsigned int ip_tnl_net_id; struct gro_cells gro_cells; + __u32 
fwmark;
 	bool			collect_md;
 	bool			ignore_df;
 };
@@ -273,9 +274,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
 		  bool log_ecn_error);
 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
-			 struct ip_tunnel_parm *p);
+			 struct ip_tunnel_parm *p, __u32 fwmark);
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
-		      struct ip_tunnel_parm *p);
+		      struct ip_tunnel_parm *p, __u32 fwmark);
 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
 
 struct ip_tunnel_encap_ops {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7bdfa7d783639d8b65c18bd7f5a6ea5fa4fbb7da..4f4f786255efe16725cbdea7bb29362ea6023185 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -12,6 +12,8 @@
 #include <linux/list.h>			/* for struct list_head */
 #include <linux/spinlock.h>		/* for struct rwlock_t */
 #include <linux/atomic.h>		/* for struct atomic_t */
+#include <linux/refcount.h>		/* for struct refcount_t */
+
 #include <linux/compiler.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -525,7 +527,7 @@ struct ip_vs_conn {
 	struct netns_ipvs	*ipvs;
 
 	/* counter and timer */
-	atomic_t		refcnt;		/* reference count */
+	refcount_t		refcnt;		/* reference count */
 	struct timer_list	timer;		/* Expiration timer */
 	volatile unsigned long	timeout;	/* timeout */
@@ -667,7 +669,7 @@ struct ip_vs_dest {
 	atomic_t		conn_flags;	/* flags to copy to conn */
 	atomic_t		weight;		/* server weight */
 
-	atomic_t		refcnt;		/* reference counter */
+	refcount_t		refcnt;		/* reference counter */
 	struct ip_vs_stats	stats;		/* statistics */
 	unsigned long		idle_start;	/* start time, jiffies */
@@ -1211,14 +1213,14 @@ struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
  */
 static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
 {
-	return atomic_inc_not_zero(&cp->refcnt);
+	return refcount_inc_not_zero(&cp->refcnt);
 }
 
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
 {
 	smp_mb__before_atomic();
-	atomic_dec(&cp->refcnt);
+	refcount_dec(&cp->refcnt);
 }
 void ip_vs_conn_put(struct ip_vs_conn *cp);
 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
@@ -1347,8 +1349,6 @@ int ip_vs_protocol_init(void);
 void ip_vs_protocol_cleanup(void);
 void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
 int *ip_vs_create_timeout_table(int *table, int size);
-int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
-			    const char *name, int to);
 void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
 			       const struct sk_buff *skb, int offset,
 			       const char *msg);
@@ -1410,18 +1410,18 @@ void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 {
-	atomic_inc(&dest->refcnt);
+	refcount_inc(&dest->refcnt);
 }
 
 static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
 {
 	smp_mb__before_atomic();
-	atomic_dec(&dest->refcnt);
+	refcount_dec(&dest->refcnt);
 }
 
 static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
 {
-	if (atomic_dec_and_test(&dest->refcnt))
+	if (refcount_dec_and_test(&dest->refcnt))
 		kfree(dest);
 }
 
@@ -1553,13 +1553,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-	if (!ct || !nf_ct_is_untracked(ct)) {
-		struct nf_conn *untracked;
-
+	if (ct) {
 		nf_conntrack_put(&ct->ct_general);
-		untracked = nf_ct_untracked_get();
-		nf_conntrack_get(&untracked->ct_general);
-		nf_ct_set(skb, untracked, IP_CT_NEW);
+		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 	}
 #endif
 }
@@ -1618,7 +1614,7 @@ static
inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, if (!(cp->flags & IP_VS_CONN_F_NFCT)) return false; ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct)) + if (ct) return true; #endif return false; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a3bab3c5ecfb302e5df2d9e5d5ec36e2c368ef9b..4d05a9443344a54499e666ae2732fc320052d02b 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -299,6 +299,8 @@ struct ieee80211_vif_chanctx_switch { * context had been assigned. * @BSS_CHANGED_OCB: OCB join status changed * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed + * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected + * keep alive) changed. */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -325,6 +327,7 @@ enum ieee80211_bss_change { BSS_CHANGED_BANDWIDTH = 1<<21, BSS_CHANGED_OCB = 1<<22, BSS_CHANGED_MU_GROUPS = 1<<23, + BSS_CHANGED_KEEP_ALIVE = 1<<24, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -501,6 +504,10 @@ struct ieee80211_mu_group_data { * implies disabled. As with the cfg80211 callback, a change here should * cause an event to be sent indicating where the current value is in * relation to the newly configured threshold. + * @cqm_rssi_low: Connection quality monitor RSSI lower threshold, a zero value + * implies disabled. This is an alternative mechanism to the single + * threshold event and can't be enabled simultaneously with it. + * @cqm_rssi_high: Connection quality monitor RSSI upper threshold. * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The * may filter ARP queries targeted for other addresses than listed here. @@ -529,6 +536,13 @@ struct ieee80211_mu_group_data { * @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed * to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS * if it has associated clients without P2P PS support. + * @max_idle_period: the time period during which the station can refrain from + * transmitting frames to its associated AP without being disassociated. + * In units of 1000 TUs. Zero value indicates that the AP did not include + * a (valid) BSS Max Idle Period Element. + * @protected_keep_alive: if set, indicates that the station should send an RSN + * protected frame to the AP to reset the idle timer at the AP for the + * station. 
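/*
 * (Editor's illustration, not part of the patch.)  A minimal sketch of how a
 * driver's bss_info_changed() callback might consume the keep-alive fields
 * documented above; example_set_keep_alive() is a made-up stand-in for
 * whatever firmware command a real driver would issue.
 */
#include <net/mac80211.h>

static void example_set_keep_alive(struct ieee80211_hw *hw, u16 period,
				   bool protected);	/* hypothetical */

static void example_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *info,
				     u32 changed)
{
	if (!(changed & BSS_CHANGED_KEEP_ALIVE))
		return;

	/* max_idle_period is in units of 1000 TUs; zero means the AP did not
	 * advertise a (valid) BSS Max Idle Period element.
	 */
	if (info->max_idle_period)
		example_set_keep_alive(hw, info->max_idle_period,
				       info->protected_keep_alive);
}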
 */
 struct ieee80211_bss_conf {
 	const u8 *bssid;
@@ -553,6 +567,8 @@ struct ieee80211_bss_conf {
 	u16 ht_operation_mode;
 	s32 cqm_rssi_thold;
 	u32 cqm_rssi_hyst;
+	s32 cqm_rssi_low;
+	s32 cqm_rssi_high;
 	struct cfg80211_chan_def chandef;
 	struct ieee80211_mu_group_data mu_group;
 	__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
@@ -567,6 +583,8 @@ struct ieee80211_bss_conf {
 	enum nl80211_tx_power_setting txpower_type;
 	struct ieee80211_p2p_noa_attr p2p_noa_attr;
 	bool allow_p2p_go_ps;
+	u16 max_idle_period;
+	bool protected_keep_alive;
 };
 
 /**
@@ -942,6 +960,19 @@ struct ieee80211_tx_info {
 	};
 };
 
+/**
+ * struct ieee80211_tx_status - extended tx status info for rate control
+ *
+ * @sta: Station that the packet was transmitted for
+ * @info: Basic tx status information
+ * @skb: Packet skb (can be NULL if not provided by the driver)
+ */
+struct ieee80211_tx_status {
+	struct ieee80211_sta *sta;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+};
+
 /**
  * struct ieee80211_scan_ies - descriptors for different blocks of IEs
  *
@@ -1039,16 +1070,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  *	(including FCS) was received.
  * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
  *	field) is valid and contains the time the SYNC preamble was received.
- * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
- * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
- * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
- * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
- * @RX_FLAG_SHORT_GI: Short guard interval was used
 * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
 *	Valid only for data frames (mainly A-MPDU)
- * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
- *	the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
- *	to hw.radiotap_mcs_details to advertise that fact
 * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
 *	number (@ampdu_reference) must be populated and be a distinct number for
 *	each A-MPDU
@@ -1061,7 +1084,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
 *	is stored in the @ampdu_delimiter_crc field)
 * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
 *	done by the hardware
- * @RX_FLAG_LDPC: LDPC was used
 * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
 *	processing it in any regular way.
 *	This is useful if drivers offload some frames but still want to report
@@ -1070,9 +1092,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
 *	monitor interfaces.
 *	This is useful if drivers offload some frames but still want to report
 *	them for sniffing purposes.
- * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
- * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
- * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
 * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
 *	subframes instead of a one huge frame for performance reasons.
 *	All, but the last MSDU from an A-MSDU should have this flag set. E.g.
@@ -1100,50 +1119,54 @@ enum mac80211_rx_flags { RX_FLAG_FAILED_FCS_CRC = BIT(5), RX_FLAG_FAILED_PLCP_CRC = BIT(6), RX_FLAG_MACTIME_START = BIT(7), - RX_FLAG_SHORTPRE = BIT(8), - RX_FLAG_HT = BIT(9), - RX_FLAG_40MHZ = BIT(10), - RX_FLAG_SHORT_GI = BIT(11), - RX_FLAG_NO_SIGNAL_VAL = BIT(12), - RX_FLAG_HT_GF = BIT(13), - RX_FLAG_AMPDU_DETAILS = BIT(14), - RX_FLAG_PN_VALIDATED = BIT(15), - RX_FLAG_DUP_VALIDATED = BIT(16), - RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), - RX_FLAG_AMPDU_IS_LAST = BIT(18), - RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), - RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20), - RX_FLAG_MACTIME_END = BIT(21), - RX_FLAG_VHT = BIT(22), - RX_FLAG_LDPC = BIT(23), - RX_FLAG_ONLY_MONITOR = BIT(24), - RX_FLAG_SKIP_MONITOR = BIT(25), - RX_FLAG_STBC_MASK = BIT(26) | BIT(27), - RX_FLAG_10MHZ = BIT(28), - RX_FLAG_5MHZ = BIT(29), - RX_FLAG_AMSDU_MORE = BIT(30), - RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), - RX_FLAG_MIC_STRIPPED = BIT_ULL(32), - RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33), - RX_FLAG_ICV_STRIPPED = BIT_ULL(34), + RX_FLAG_NO_SIGNAL_VAL = BIT(8), + RX_FLAG_AMPDU_DETAILS = BIT(9), + RX_FLAG_PN_VALIDATED = BIT(10), + RX_FLAG_DUP_VALIDATED = BIT(11), + RX_FLAG_AMPDU_LAST_KNOWN = BIT(12), + RX_FLAG_AMPDU_IS_LAST = BIT(13), + RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14), + RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15), + RX_FLAG_MACTIME_END = BIT(16), + RX_FLAG_ONLY_MONITOR = BIT(17), + RX_FLAG_SKIP_MONITOR = BIT(18), + RX_FLAG_AMSDU_MORE = BIT(19), + RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(20), + RX_FLAG_MIC_STRIPPED = BIT(21), + RX_FLAG_ALLOW_SAME_PN = BIT(22), + RX_FLAG_ICV_STRIPPED = BIT(23), }; -#define RX_FLAG_STBC_SHIFT 26 - /** - * enum mac80211_rx_vht_flags - receive VHT flags + * enum mac80211_rx_encoding_flags - MCS & bandwidth flags * - * These flags are used with the @vht_flag member of - * &struct ieee80211_rx_status. - * @RX_VHT_FLAG_80MHZ: 80 MHz was used - * @RX_VHT_FLAG_160MHZ: 160 MHz was used - * @RX_VHT_FLAG_BF: packet was beamformed - */ + * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame + * @RX_ENC_FLAG_40MHZ: HT40 (40 MHz) was used + * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used + * @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, + * if the driver fills this value it should add + * %IEEE80211_RADIOTAP_MCS_HAVE_FMT + * to hw.radiotap_mcs_details to advertise that fact + * @RX_ENC_FLAG_LDPC: LDPC was used + * @RX_ENC_FLAG_STBC_MASK: STBC 2 bit bitmask. 
1 - Nss=1, 2 - Nss=2, 3 - Nss=3 + * @RX_ENC_FLAG_BF: packet was beamformed + */ +enum mac80211_rx_encoding_flags { + RX_ENC_FLAG_SHORTPRE = BIT(0), + RX_ENC_FLAG_40MHZ = BIT(1), + RX_ENC_FLAG_SHORT_GI = BIT(2), + RX_ENC_FLAG_HT_GF = BIT(3), + RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5), + RX_ENC_FLAG_LDPC = BIT(6), + RX_ENC_FLAG_BF = BIT(7), +}; -enum mac80211_rx_vht_flags { - RX_VHT_FLAG_80MHZ = BIT(0), - RX_VHT_FLAG_160MHZ = BIT(1), - RX_VHT_FLAG_BF = BIT(2), +#define RX_ENC_FLAG_STBC_SHIFT 4 + +enum mac80211_rx_encoding { + RX_ENC_LEGACY = 0, + RX_ENC_HT, + RX_ENC_VHT, }; /** @@ -1173,9 +1196,11 @@ enum mac80211_rx_vht_flags { * @antenna: antenna used * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) - * @vht_nss: number of streams (VHT only) + * @nss: number of streams (VHT and HE only) * @flag: %RX_FLAG_\* - * @vht_flag: %RX_VHT_FLAG_\* + * @encoding: &enum mac80211_rx_encoding + * @bw: &enum rate_info_bw + * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU @@ -1186,11 +1211,12 @@ struct ieee80211_rx_status { u64 boottime_ns; u32 device_timestamp; u32 ampdu_reference; - u64 flag; + u32 flag; u16 freq; - u8 vht_flag; + u8 enc_flags; + u8 encoding:2, bw:3; u8 rate_idx; - u8 vht_nss; + u8 nss; u8 rx_flags; u8 band; u8 antenna; @@ -4199,6 +4225,23 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); +/** + * ieee80211_tx_status_ext - extended transmit status callback + * + * This function can be used as a replacement for ieee80211_tx_status + * in drivers that may want to provide extra information that does not + * fit into &struct ieee80211_tx_info. + * + * Calls to this function for a single hardware must be synchronized + * against each other. Calls to this function, ieee80211_tx_status_ni() + * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware. 
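/*
 * (Editor's illustration, not part of the patch.)  With the extended status
 * structure introduced here, a driver that has the station, tx info and skb
 * at hand reports them in a single call; the diff itself turns
 * ieee80211_tx_status_noskb() into exactly this kind of wrapper.
 */
#include <net/mac80211.h>

static void example_report_tx_status(struct ieee80211_hw *hw,
				     struct ieee80211_sta *sta,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_status status = {
		.sta = sta,			/* NULL for multicast */
		.info = IEEE80211_SKB_CB(skb),	/* basic status lives here */
		.skb = skb,			/* may be NULL */
	};

	ieee80211_tx_status_ext(hw, &status);
}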
+ * + * @hw: the hardware the frame was transmitted by + * @status: tx status information + */ +void ieee80211_tx_status_ext(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status); + /** * ieee80211_tx_status_noskb - transmit status callback without skb * @@ -4215,9 +4258,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, * (NULL for multicast packets) * @info: tx status information */ -void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - struct ieee80211_tx_info *info); +static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_status status = { + .sta = sta, + .info = info, + }; + + ieee80211_tx_status_ext(hw, &status); +} /** * ieee80211_tx_status_ni - transmit status callback (in process context) @@ -5438,9 +5489,6 @@ void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif, * RTS threshold * @short_preamble: whether mac80211 will request short-preamble transmission * if the selected rate supports it - * @max_rate_idx: user-requested maximum (legacy) rate - * (deprecated; this will be removed once drivers get updated to use - * rate_idx_mask) * @rate_idx_mask: user-requested (legacy) rate mask * @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use) * @bss: whether this frame is sent out in AP or IBSS mode @@ -5452,7 +5500,6 @@ struct ieee80211_tx_rate_control { struct sk_buff *skb; struct ieee80211_tx_rate reported_rate; bool rts, short_preamble; - u8 max_rate_idx; u32 rate_idx_mask; u8 *rate_idx_mcs_mask; bool bss; @@ -5474,10 +5521,9 @@ struct rate_control_ops { void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); - void (*tx_status_noskb)(void *priv, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct ieee80211_tx_info *info); + void (*tx_status_ext)(void *priv, + struct ieee80211_supported_band *sband, + void *priv_sta, struct ieee80211_tx_status *st); void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb); diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h index 179253f9dcfd986ef806331044bc4973f1cc7d6e..9d22bf67ac86623eaaea2c49fbe1140747ce67e8 100644 --- a/include/net/mpls_iptunnel.h +++ b/include/net/mpls_iptunnel.h @@ -14,11 +14,12 @@ #ifndef _NET_MPLS_IPTUNNEL_H #define _NET_MPLS_IPTUNNEL_H 1 -#define MAX_NEW_LABELS 2 - struct mpls_iptunnel_encap { - u32 label[MAX_NEW_LABELS]; u8 labels; + u8 ttl_propagate; + u8 default_ttl; + u8 reserved1; + u32 label[0]; }; static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate) diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 8a0214654b6b10bc480d7e6dc8195555ca58dc9a..1036c902d2c9904ed084fcd5a4d8dc70b7902cbe 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -439,8 +439,10 @@ void ndisc_update(const struct net_device *dev, struct neighbour *neigh, * IGMP */ int igmp6_init(void); +int igmp6_late_init(void); void igmp6_cleanup(void); +void igmp6_late_cleanup(void); int igmp6_event_query(struct sk_buff *skb); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 5ebf6949116097f60e668b0c2c4c48dd1639e5e8..e4dd3a2140341049fabfbff1ec55406fbe11d5b2 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -314,7 +314,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl, } void neigh_destroy(struct neighbour 
*neigh); int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); -int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags); +int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, + u32 nlmsg_pid); void __neigh_set_probe_once(struct neighbour *neigh); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); @@ -449,7 +450,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq; - int hh_len; + unsigned int hh_len; do { seq = read_seqbegin(&hh->hh_lock); @@ -458,7 +459,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb /* this is inlined by gcc */ memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); } else { - int hh_alen = HH_DATA_ALIGN(hh_len); + unsigned int hh_alen = HH_DATA_ALIGN(hh_len); memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); } diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index af8fe8a909dc0ca62e54056cf4be9d0d4ea82477..fe80bb48ab1f0c7b1665a10a9bf2388b32eb5013 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -140,6 +141,9 @@ struct net { #endif #if IS_ENABLED(CONFIG_MPLS) struct netns_mpls mpls; +#endif +#if IS_ENABLED(CONFIG_CAN) + struct netns_can can; #endif struct sock *diag_nlsk; atomic_t fnhe_genid; diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 6ff32815641b21ba0d3c20214806c4f16aa217ca..919e4e8af3272b3d66580e4c60c79bdc39b47616 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -14,7 +14,6 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index c59b82456f89cd421fde702f13c7cfe59f73266a..eaea968f86570db7010011b1af4a3608f2dee8f9 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -5,7 +5,6 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; #ifdef CONFIG_NF_CT_PROTO_DCCP extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 19605878da4739d04f0642b20f8641ed8601d2eb..8ece3612d0cd6bf8fe4ae6c347c2cb30da2f553f 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -50,25 +50,6 @@ union nf_conntrack_expect_proto { #define NF_CT_ASSERT(x) #endif -struct nf_conntrack_helper; - -/* Must be kept in sync with the classes defined by helpers */ -#define NF_CT_MAX_EXPECT_CLASSES 4 - -/* 
nf_conn feature for connections that have a helper */ -struct nf_conn_help { - /* Helper. if any */ - struct nf_conntrack_helper __rcu *helper; - - struct hlist_head expectations; - - /* Current number of expected connections */ - u8 expecting[NF_CT_MAX_EXPECT_CLASSES]; - - /* private helper information. */ - char data[]; -}; - #include #include @@ -243,14 +224,6 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq); -/* Fake conntrack entry for untracked connections */ -DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); -static inline struct nf_conn *nf_ct_untracked_get(void) -{ - return raw_cpu_ptr(&nf_conntrack_untracked); -} -void nf_ct_untracked_status_or(unsigned long bits); - /* Iterate over all conntracks: if iter returns true, it's deleted. */ void nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), @@ -281,11 +254,6 @@ static inline int nf_ct_is_dying(const struct nf_conn *ct) return test_bit(IPS_DYING_BIT, &ct->status); } -static inline int nf_ct_is_untracked(const struct nf_conn *ct) -{ - return test_bit(IPS_UNTRACKED_BIT, &ct->status); -} - /* Packet is received from loopback */ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) { diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 84ec7ca5f195db411b7e07c6dce641a286b5ea0a..81d7f8a3094557c3cd518be809c524b4448bbe1b 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -65,7 +65,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb); int ret = NF_ACCEPT; - if (ct && !nf_ct_is_untracked(ct)) { + if (ct) { if (!nf_ct_is_confirmed(ct)) ret = __nf_conntrack_confirm(skb); if (likely(ret == NF_ACCEPT)) diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 12d967b587264af71c84d62faca95a4e7ace096f..2a10c6570fccfcd633351a3b881bff4d51e2f45c 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -20,11 +20,11 @@ enum nf_ct_ecache_state { struct nf_conntrack_ecache { unsigned long cache; /* bitops want long */ - unsigned long missed; /* missed events */ + u16 missed; /* missed events */ u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ + enum nf_ct_ecache_state state:8;/* ecache state */ u32 portid; /* netlink portid of destroyer */ - enum nf_ct_ecache_state state; /* ecache state */ }; static inline struct nf_conntrack_ecache * diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 5ed33ea4718ef5a101689432825ebb1a48fcc0d9..2ba54feaccd8d4a6ebccc84fd5e21f45bff665ca 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -5,6 +5,8 @@ #ifndef _NF_CONNTRACK_EXPECT_H #define _NF_CONNTRACK_EXPECT_H +#include + #include #include @@ -37,7 +39,7 @@ struct nf_conntrack_expect { struct timer_list timeout; /* Usage count. 
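/*
 * (Editor's illustration, not part of the patch.)  With the per-cpu
 * "untracked" conntrack object removed above, untracked is now encoded as a
 * NULL ct pointer plus the IP_CT_UNTRACKED ctinfo, so the old
 * nf_ct_is_untracked() test collapses into a plain pointer check:
 */
#include <net/netfilter/nf_conntrack.h>

static bool example_skb_has_conntrack(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	/* NULL now covers both "never tracked" and "untracked" */
	return ct != NULL;
}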
*/ - atomic_t use; + refcount_t use; /* Flags */ unsigned int flags; @@ -71,6 +73,7 @@ struct nf_conntrack_expect_policy { }; #define NF_CT_EXPECT_CLASS_DEFAULT 0 +#define NF_CT_EXPECT_MAX_CNT 255 int nf_conntrack_expect_pernet_init(struct net *net); void nf_conntrack_expect_pernet_fini(struct net *net); @@ -102,6 +105,7 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) void nf_ct_remove_expectations(struct nf_conn *ct); void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); +bool nf_ct_remove_expect(struct nf_conntrack_expect *exp); /* Allocate space for an expectation: this is mandatory before calling nf_ct_expect_related. You will have to call put afterwards. */ diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 1c3035dda31f66a33d50cf2617d3a70288812383..4944bc9153cf5d307c4168079d9eb4b6a2c09f21 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -43,8 +43,8 @@ enum nf_ct_ext_id { /* Extensions: optional stuff which isn't permanently in struct. */ struct nf_ct_ext { struct rcu_head rcu; - u16 offset[NF_CT_EXT_NUM]; - u16 len; + u8 offset[NF_CT_EXT_NUM]; + u8 len; char data[0]; }; @@ -69,12 +69,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) ((id##_TYPE *)__nf_ct_ext_find((ext), (id))) /* Destroy all relationships */ -void __nf_ct_ext_destroy(struct nf_conn *ct); -static inline void nf_ct_ext_destroy(struct nf_conn *ct) -{ - if (ct->ext) - __nf_ct_ext_destroy(ct); -} +void nf_ct_ext_destroy(struct nf_conn *ct); /* Free operation. If you want to free a object referred from private area, * please implement __nf_ct_ext_free() and call it. @@ -86,15 +81,7 @@ static inline void nf_ct_ext_free(struct nf_conn *ct) } /* Add this type, returns pointer to data or NULL. */ -void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, - size_t var_alloc_len, gfp_t gfp); - -#define nf_ct_ext_add(ct, id, gfp) \ - ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), 0, (gfp))) -#define nf_ct_ext_add_length(ct, id, len, gfp) \ - ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), (len), (gfp))) - -#define NF_CT_EXT_F_PREALLOC 0x0001 +void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp); struct nf_ct_ext_type { /* Destroys relationships (can be NULL). */ @@ -102,15 +89,11 @@ struct nf_ct_ext_type { enum nf_ct_ext_id id; - unsigned int flags; - /* Length and min alignment. */ u8 len; u8 align; - /* initial size of nf_ct_ext. */ - u8 alloc_size; }; -int nf_ct_extend_register(struct nf_ct_ext_type *type); -void nf_ct_extend_unregister(struct nf_ct_ext_type *type); +int nf_ct_extend_register(const struct nf_ct_ext_type *type); +void nf_ct_extend_unregister(const struct nf_ct_ext_type *type); #endif /* _NF_CONNTRACK_EXTEND_H */ diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 1eaac1f4cd6aef2b12cfe51621a5ce6da5c4ba13..e04fa7691e5d6873cd04e94816227d4a41275fa2 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -29,9 +29,6 @@ struct nf_conntrack_helper { struct module *me; /* pointer to self */ const struct nf_conntrack_expect_policy *expect_policy; - /* length of internal data, ie. 
sizeof(struct nf_ct_*_master) */ - size_t data_len; - /* Tuple of things we will help (compared against server response) */ struct nf_conntrack_tuple tuple; @@ -49,9 +46,33 @@ struct nf_conntrack_helper { unsigned int expect_class_max; unsigned int flags; - unsigned int queue_num; /* For user-space helpers. */ + + /* For user-space helpers: */ + unsigned int queue_num; + /* length of userspace private data stored in nf_conn_help->data */ + u16 data_len; }; +/* Must be kept in sync with the classes defined by helpers */ +#define NF_CT_MAX_EXPECT_CLASSES 4 + +/* nf_conn feature for connections that have a helper */ +struct nf_conn_help { + /* Helper. if any */ + struct nf_conntrack_helper __rcu *helper; + + struct hlist_head expectations; + + /* Current number of expected connections */ + u8 expecting[NF_CT_MAX_EXPECT_CLASSES]; + + /* private helper information. */ + char data[32] __aligned(8); +}; + +#define NF_CT_HELPER_BUILD_BUG_ON(structsize) \ + BUILD_BUG_ON((structsize) > FIELD_SIZEOF(struct nf_conn_help, data)) + struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum); @@ -62,7 +83,7 @@ void nf_ct_helper_init(struct nf_conntrack_helper *helper, u16 l3num, u16 protonum, const char *name, u16 default_port, u16 spec_port, u32 id, const struct nf_conntrack_expect_policy *exp_pol, - u32 expect_class_max, u32 data_len, + u32 expect_class_max, int (*help)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo), diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 85e993e278d5e1e7a886e772dd69f5031214410d..7032e044bbe2a364ae0cfdb629f2e0c20ac81e8a 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -58,6 +58,9 @@ struct nf_conntrack_l4proto { unsigned int dataoff, u_int8_t pf, unsigned int hooknum); + /* called by gc worker if table is full */ + bool (*can_early_drop)(const struct nf_conn *ct); + /* Print out the per-protocol part of the tuple. 
Return like seq_* */ void (*print_tuple)(struct seq_file *s, const struct nf_conntrack_tuple *); diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h index b0ca402c1f72e20bae492c634f663b8846e788a5..a2fcb5271726d3f5c9e20ccfb521385b59951610 100644 --- a/include/net/netfilter/nf_conntrack_synproxy.h +++ b/include/net/netfilter/nf_conntrack_synproxy.h @@ -52,6 +52,8 @@ struct synproxy_stats { struct synproxy_net { struct nf_conn *tmpl; struct synproxy_stats __percpu *stats; + unsigned int hook_ref4; + unsigned int hook_ref6; }; extern unsigned int synproxy_net_id; diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 5cc5e9e6171a03db471407c708578b4794c22b92..d40b89355fdd345617cf21593922753613190a11 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -12,7 +13,7 @@ struct ctnl_timeout { struct list_head head; struct rcu_head rcu_head; - atomic_t refcnt; + refcount_t refcnt; char name[CTNL_TIMEOUT_NAME_MAX]; __u16 l3num; struct nf_conntrack_l4proto *l4proto; diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index c327a431a6f38103598b82182d463dd728f70443..05c82a1a42679cf336c4ecc2addc30bdaceecbbe 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -67,7 +67,7 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum, { #if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \ IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6) - return nat->masq_index && hooknum == NF_INET_POST_ROUTING && + return nat && nat->masq_index && hooknum == NF_INET_POST_ROUTING && CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL && nat->masq_index != out->ifindex; #else diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 01bcc6bfbcc9034e399a938a6a6dd83b2bed6ef7..fbfa5acf4f14628f6334a36943bd1ab01844bb6f 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -7,31 +7,31 @@ struct sk_buff; /* These return true or false. 
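/*
 * (Editor's note, not part of the patch.)  Several reference counts in this
 * series (ip_vs_conn, ip_vs_dest, nf_conntrack_expect, ctnl_timeout) move
 * from atomic_t to refcount_t; the call sites keep their shape, but overflow
 * and increment-from-zero now saturate and WARN.  Sketch with a made-up
 * object:
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
	refcount_t ref;
};

static bool example_get(struct example_obj *obj)
{
	/* refuses to resurrect an object whose count already hit zero */
	return refcount_inc_not_zero(&obj->ref);
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);
}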
*/ -int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, unsigned int match_offset, - unsigned int match_len, const char *rep_buffer, - unsigned int rep_len, bool adjust); +bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned int match_offset, + unsigned int match_len, const char *rep_buffer, + unsigned int rep_len, bool adjust); -static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len) +static inline bool nf_nat_mangle_tcp_packet(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned int match_offset, + unsigned int match_len, + const char *rep_buffer, + unsigned int rep_len) { return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, match_offset, match_len, rep_buffer, rep_len, true); } -int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, unsigned int match_offset, - unsigned int match_len, const char *rep_buffer, - unsigned int rep_len); +bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned int match_offset, + unsigned int match_len, const char *rep_buffer, + unsigned int rep_len); /* Setup NAT on this expected conntrack so it follows master, but goes * to port ct->master->saved_proto. */ diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 09948d10e38e0b939b9764caafd7f3b9a9be41d5..4454719ff849fde065f64eba042d7f58cd971078 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -24,8 +24,7 @@ struct nf_queue_entry { struct nf_queue_handler { int (*outfn)(struct nf_queue_entry *entry, unsigned int queuenum); - void (*nf_hook_drop)(struct net *net, - const struct nf_hook_entry *hooks); + unsigned int (*nf_hook_drop)(struct net *net); }; void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 0136028652bdb8b3c20813b01b2fa8cfb16ba012..028faec8fc2799b0c176ac88b54fc6700e89f896 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -413,10 +413,11 @@ static inline struct nft_set *nft_set_container_of(const void *priv) return (void *)priv - offsetof(struct nft_set, data); } -struct nft_set *nf_tables_set_lookup(const struct nft_table *table, - const struct nlattr *nla, u8 genmask); -struct nft_set *nf_tables_set_lookup_byid(const struct net *net, - const struct nlattr *nla, u8 genmask); +struct nft_set *nft_set_lookup(const struct net *net, + const struct nft_table *table, + const struct nlattr *nla_set_name, + const struct nlattr *nla_set_id, + u8 genmask); static inline unsigned long nft_set_gc_interval(const struct nft_set *set) { @@ -910,6 +911,11 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai return container_of(chain, struct nft_base_chain, chain); } +static inline bool nft_is_base_chain(const struct nft_chain *chain) +{ + return chain->flags & NFT_BASE_CHAIN; +} + int __nft_release_basechain(struct nft_ctx *ctx); unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); @@ 
-1044,7 +1050,8 @@ struct nft_object_type {
 	unsigned int			maxattr;
 	struct module			*owner;
 	const struct nla_policy		*policy;
-	int				(*init)(const struct nlattr * const tb[],
+	int				(*init)(const struct nft_ctx *ctx,
+						const struct nlattr *const tb[],
 						struct nft_object *obj);
 	void				(*destroy)(struct nft_object *obj);
 	int				(*dump)(struct sk_buff *skb,
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 5ceb2205e4e3ed93461ed4a3956b227f99ac9494..381af9469e6ada01e4acffd4efc3ebc88e66a019 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -32,6 +32,6 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
 void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 		   const struct nft_pktinfo *pkt);
 
-void nft_fib_store_result(void *reg, enum nft_fib_result r,
+void nft_fib_store_result(void *reg, const struct nft_fib *priv,
 			  const struct nft_pktinfo *pkt, int index);
 #endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index b239fcd33d8091268fd793fd45cc755264fa592f..01709172b3d38455e7e5510228d6b4a0ad6e94a6 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -233,14 +233,17 @@ struct nl_info {
 };
 
 int netlink_rcv_skb(struct sk_buff *skb,
-		    int (*cb)(struct sk_buff *, struct nlmsghdr *));
+		    int (*cb)(struct sk_buff *, struct nlmsghdr *,
+			      struct netlink_ext_ack *));
 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
 		 unsigned int group, int report, gfp_t flags);
 
 int nla_validate(const struct nlattr *head, int len, int maxtype,
-		 const struct nla_policy *policy);
+		 const struct nla_policy *policy,
+		 struct netlink_ext_ack *extack);
 int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
-	      int len, const struct nla_policy *policy);
+	      int len, const struct nla_policy *policy,
+	      struct netlink_ext_ack *extack);
 int nla_policy_len(const struct nla_policy *, int);
 struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
 size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
@@ -374,18 +377,20 @@ nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
  * @tb: destination array with maxtype+1 elements
  * @maxtype: maximum attribute type to be expected
  * @policy: validation policy
+ * @extack: extended ACK report struct
  *
  * See nla_parse()
  */
 static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
 			      struct nlattr *tb[], int maxtype,
-			      const struct nla_policy *policy)
+			      const struct nla_policy *policy,
+			      struct netlink_ext_ack *extack)
 {
 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
 		return -EINVAL;
 
 	return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
-			 nlmsg_attrlen(nlh, hdrlen), policy);
+			 nlmsg_attrlen(nlh, hdrlen), policy, extack);
 }
 
 /**
@@ -409,16 +414,19 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
  * @hdrlen: length of family specific header
  * @maxtype: maximum attribute type to be expected
  * @policy: validation policy
+ * @extack: extended ACK report struct
  */
 static inline int nlmsg_validate(const struct nlmsghdr *nlh,
 				 int hdrlen, int maxtype,
-				 const struct nla_policy *policy)
+				 const struct nla_policy *policy,
+				 struct netlink_ext_ack *extack)
 {
 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
 		return -EINVAL;
 
 	return nla_validate(nlmsg_attrdata(nlh, hdrlen),
-			    nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
+			    nlmsg_attrlen(nlh, hdrlen), maxtype, policy,
+			    extack);
 }
 
 /**
@@ -739,14 +747,17 @@ nla_find_nested(const struct nlattr *nla, int attrtype)
 * @maxtype: maximum attribute type
to be expected * @nla: attribute containing the nested attributes * @policy: validation policy + * @extack: extended ACK report struct * * See nla_parse() */ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype, const struct nlattr *nla, - const struct nla_policy *policy) + const struct nla_policy *policy, + struct netlink_ext_ack *extack) { - return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy); + return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy, + extack); } /** @@ -1252,6 +1263,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) * @start: container attribute * @maxtype: maximum attribute type to be expected * @policy: validation policy + * @extack: extended ACK report struct * * Validates all attributes in the nested attribute stream against the * specified policy. Attributes with a type exceeding maxtype will be @@ -1260,9 +1272,11 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) * Returns 0 on success or a negative error code. */ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, - const struct nla_policy *policy) + const struct nla_policy *policy, + struct netlink_ext_ack *extack) { - return nla_validate(nla_data(start), nla_len(start), maxtype, policy); + return nla_validate(nla_data(start), nla_len(start), maxtype, policy, + extack); } /** diff --git a/include/net/netns/can.h b/include/net/netns/can.h new file mode 100644 index 0000000000000000000000000000000000000000..b106e6ae2e5b46dc119a322ebf22437e88158962 --- /dev/null +++ b/include/net/netns/can.h @@ -0,0 +1,40 @@ +/* + * can in net namespaces + */ + +#ifndef __NETNS_CAN_H__ +#define __NETNS_CAN_H__ + +#include + +struct dev_rcv_lists; +struct s_stats; +struct s_pstats; + +struct netns_can { +#if IS_ENABLED(CONFIG_PROC_FS) + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde_version; + struct proc_dir_entry *pde_stats; + struct proc_dir_entry *pde_reset_stats; + struct proc_dir_entry *pde_rcvlist_all; + struct proc_dir_entry *pde_rcvlist_fil; + struct proc_dir_entry *pde_rcvlist_inv; + struct proc_dir_entry *pde_rcvlist_sff; + struct proc_dir_entry *pde_rcvlist_eff; + struct proc_dir_entry *pde_rcvlist_err; + struct proc_dir_entry *bcmproc_dir; +#endif + + /* receive filters subscribed for 'all' CAN devices */ + struct dev_rcv_lists *can_rx_alldev_list; + spinlock_t can_rcvlists_lock; + struct timer_list can_stattimer;/* timer for statistics update */ + struct s_stats *can_stats; /* packet statistics */ + struct s_pstats *can_pstats; /* receive list statistics */ + + /* CAN GW per-net gateway jobs */ + struct hlist_head cgw_list; +}; + +#endif /* __NETNS_CAN_H__ */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 622d2da27135586d164c228b81e71afb922d5d8c..cd686c4fb32dc5409a08f818d48228bffa6f6778 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -33,7 +33,6 @@ struct inet_timewait_death_row { atomic_t tw_count; struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; - int sysctl_tw_recycle; int sysctl_max_tw_buckets; }; @@ -96,6 +95,8 @@ struct netns_ipv4 { /* Shall we try to damage output packets if routing dev changes? 
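/*
 * (Editor's illustration, not part of the patch.)  A netlink doit() handler
 * built on the extack-aware parsers above; the EXAMPLE_* attribute ids and
 * policy are made up for the sketch.
 */
#include <net/netlink.h>

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_NAME,
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_NAME] = { .type = NLA_NUL_STRING },
};

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
	int err;

	/* on failure the parser has already pointed extack at the bad attr */
	err = nlmsg_parse(nlh, 0, tb, EXAMPLE_ATTR_MAX, example_policy,
			  extack);
	if (err < 0)
		return err;

	if (!tb[EXAMPLE_ATTR_NAME]) {
		NL_SET_ERR_MSG(extack, "missing name attribute");
		return -EINVAL;
	}

	return 0;
}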
*/ int sysctl_ip_dynaddr; int sysctl_ip_early_demux; + int sysctl_tcp_early_demux; + int sysctl_udp_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; @@ -152,6 +153,7 @@ struct netns_ipv4 { #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH int sysctl_fib_multipath_use_neigh; + int sysctl_fib_multipath_hash_policy; #endif unsigned int fib_seq; /* protected by rtnl_mutex */ diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h index d29203651c01700d9406157ef8dd016ea55a4cbb..6608b3693385e771147f78da13afa87cc6355d83 100644 --- a/include/net/netns/mpls.h +++ b/include/net/netns/mpls.h @@ -9,8 +9,11 @@ struct mpls_route; struct ctl_table_header; struct netns_mpls { + int ip_ttl_propagate; + int default_ttl; size_t platform_labels; struct mpls_route __rcu * __rcu *platform_label; + struct ctl_table_header *ctl; }; diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 1a3de8b34ad27a8f3f8cf74ddd3eb6af61ced72e..bbdc73a3239dffbcafe8cf19f5997ddbeb7ada04 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -27,6 +27,7 @@ #include #include +#define nfc_dbg(dev, fmt, ...) dev_dbg((dev), "NFC: " fmt, ##__VA_ARGS__) #define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__) #define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index f1b76b8e6d2d296177116d0ef0f254d175551cbe..bec46f63f10ced844f8aec2b19bebf8b3dc01167 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -92,7 +92,7 @@ int unregister_qdisc(struct Qdisc_ops *qops); void qdisc_get_default(char *id, size_t len); int qdisc_set_default(const char *id); -void qdisc_hash_add(struct Qdisc *q); +void qdisc_hash_add(struct Qdisc *q, bool invisible); void qdisc_hash_del(struct Qdisc *q); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); diff --git a/include/net/protocol.h b/include/net/protocol.h index bf36ca34af7ad255b9eb821cbed0a70abad993f5..65ba335b0e7e66bb7f1b4bd279d31e616e0dd31e 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -40,6 +40,7 @@ /* This is used to register protocols. */ struct net_protocol { void (*early_demux)(struct sk_buff *skb); + void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, u32 info); unsigned int no_policy:1, @@ -54,7 +55,7 @@ struct net_protocol { #if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { void (*early_demux)(struct sk_buff *skb); - + void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, @@ -92,12 +93,12 @@ struct inet_protosw { #define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */ #define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? 
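/*
 * (Editor's note, not part of the patch.)  The new early_demux_handler
 * field keeps the protocol's real demux function, while early_demux is the
 * pointer the input path actually calls; dropping const from inet_protos
 * lets the new per-netns tcp/udp early-demux sysctls flip it at runtime,
 * roughly like this sketch:
 */
#include <net/protocol.h>

static void example_set_early_demux(struct net_protocol *proto, bool enable)
{
	proto->early_demux = enable ? proto->early_demux_handler : NULL;
}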
*/ -extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS]; +extern struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS]; extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS]; extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS]; #if IS_ENABLED(CONFIG_IPV6) -extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS]; +extern struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS]; #endif int inet_add_protocol(const struct net_protocol *prot, unsigned char num); diff --git a/include/net/route.h b/include/net/route.h index c0874c87c173717f2c13c8af06d2482a76190243..2cc0e14c63598ce3d3be88bb04d2fd433d676129 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -113,13 +113,13 @@ struct in_device; int ip_rt_init(void); void rt_cache_flush(struct net *net); void rt_flush_dev(struct net_device *dev); -struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp, - int mp_hash); +struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp, + const struct sk_buff *skb); static inline struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp) { - return __ip_route_output_key_hash(net, flp, -1); + return __ip_route_output_key_hash(net, flp, NULL); } struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 106de5f7bf0675e29114cde65b7ea8a4e557c1ce..78fa5fe32947d590351917c6353caa243eb0cf1b 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -4,7 +4,8 @@ #include #include -typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *); +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, + struct netlink_ext_ack *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *); @@ -158,7 +159,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, int rtnl_delete_link(struct net_device *dev); int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len); +int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, + struct netlink_ext_ack *exterr); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index aeec4086afb2446dadb1fb8c54ad54a909634380..22e52093bfda7fe651f4c0aafc95d18e22b5f6fd 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -66,6 +66,7 @@ struct Qdisc { #define TCQ_F_NOPARENT 0x40 /* root of its hierarchy : * qdisc_tree_decrease_qlen() should stop. 
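/*
 * (Editor's illustration, not part of the patch.)  qdisc_hash_add() grows
 * an "invisible" argument: qdiscs the kernel creates implicitly can still
 * be hashed for lookup but are flagged TCQ_F_INVISIBLE, introduced just
 * below, so default dumps skip them.
 */
#include <net/pkt_sched.h>

static void example_hash_qdisc(struct Qdisc *q, bool user_created)
{
	/* invisible == true for implicitly created default qdiscs */
	qdisc_hash_add(q, !user_created);
}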
*/ +#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; @@ -203,14 +204,14 @@ struct tcf_proto_ops { const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto*); - bool (*destroy)(struct tcf_proto*, bool); + void (*destroy)(struct tcf_proto*); unsigned long (*get)(struct tcf_proto*, u32 handle); int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, unsigned long *, bool); - int (*delete)(struct tcf_proto*, unsigned long); + int (*delete)(struct tcf_proto*, unsigned long, bool*); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); /* rtnetlink specific */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index b6f682ec184a62a1e7b9d2a0ea159293cca12a76..47113f2c4b0a2b6c596d2f28018a1e1941cb6ede 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -293,6 +293,22 @@ struct sctp_chunk *sctp_process_strreset_inreq( struct sctp_association *asoc, union sctp_params param, struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_tsnreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_addstrm_out( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_addstrm_in( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_resp( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); /* Prototypes for statetable processing. */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 138f8615acf0993d8015f6c1d2eee32966dccad0..a8b38e123f9744368cb6c0fa96d28556d44aee14 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1315,6 +1315,8 @@ struct sctp_inithdr_host { struct sctp_stream_out { __u16 ssn; __u8 state; + __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; + __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; }; struct sctp_stream_in { @@ -1887,6 +1889,7 @@ struct sctp_association { __u32 strreset_outseq; /* Update after receiving response */ __u32 strreset_inseq; /* Update after receiving request */ + __u32 strreset_result[2]; /* save the results of last 2 responses */ struct sctp_chunk *strreset_chunk; /* save request chunk */ diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 324b5965fc4de505ca98fbbb9aedc0d3a0039742..1060494ac230b80caca57f6963dca2692ae10b9d 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -132,6 +132,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( const struct sctp_association *asoc, __u16 flags, __u16 stream_num, __u16 *stream_list, gfp_t gfp); +struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event( + const struct sctp_association *asoc, __u16 flags, + __u32 local_tsn, __u32 remote_tsn, gfp_t gfp); + +struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event( + const struct sctp_association *asoc, __u16 flags, + __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index 0caee631a8364fe6e49ab8cacba864d019be8b47..fe236b3429f0d8caeb1adc367b5b4a20591c848b 100644 --- 
a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -6,10 +6,10 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); -u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, +u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport, u32 *tsoff); +u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr, __be16 sport, __be16 dport, u32 *tsoff); -u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, - __be16 sport, __be16 dport, u32 *tsoff); u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport); u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, diff --git a/include/net/sock.h b/include/net/sock.h index 03252d53975de7ad0da66d35802738830b0e3367..66349e49d468646ce724485bb8e74952825f0d6c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1783,11 +1783,8 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) sk_tx_queue_clear(sk); sk->sk_dst_pending_confirm = 0; - /* - * This can be called while sk is owned by the caller only, - * with no state that can be checked in a rcu_dereference_check() cond - */ - old_dst = rcu_dereference_raw(sk->sk_dst_cache); + old_dst = rcu_dereference_protected(sk->sk_dst_cache, + lockdep_sock_is_held(sk)); rcu_assign_pointer(sk->sk_dst_cache, dst); dst_release(old_dst); } @@ -2242,6 +2239,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); +#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC) static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { @@ -2252,8 +2250,10 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) __sock_recv_ts_and_drops(msg, sk, skb); - else + else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) sk->sk_stamp = skb->tstamp; + else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP)) + sk->sk_stamp = 0; } void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); @@ -2365,6 +2365,8 @@ bool sk_ns_capable(const struct sock *sk, bool sk_capable(const struct sock *sk, int cap); bool sk_net_capable(const struct sock *sk, int cap); +void sk_get_meminfo(const struct sock *sk, u32 *meminfo); + extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index dfbd6ee0bc7cd196c052e700da43deddb8d1dfef..a46c3f2ace702932dc95c3021e7b13d72a2a4777 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -2,6 +2,7 @@ #define __NET_TC_PED_H #include +#include struct tcf_pedit_key_ex { enum pedit_header_type htype; @@ -17,4 +18,48 @@ struct tcf_pedit { }; #define to_pedit(a) ((struct tcf_pedit *)a) +static inline bool is_tcf_pedit(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_PEDIT) + return true; +#endif + return false; +} + +static inline int tcf_pedit_nkeys(const struct tc_action *a) +{ + return to_pedit(a)->tcfp_nkeys; +} + +static inline u32 tcf_pedit_htype(const struct tc_action *a, int index) +{ + if (to_pedit(a)->tcfp_keys_ex) + return to_pedit(a)->tcfp_keys_ex[index].htype; + + return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; +} + +static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index) +{ + if 
(to_pedit(a)->tcfp_keys_ex) + return to_pedit(a)->tcfp_keys_ex[index].cmd; + + return __PEDIT_CMD_MAX; +} + +static inline u32 tcf_pedit_mask(const struct tc_action *a, int index) +{ + return to_pedit(a)->tcfp_keys[index].mask; +} + +static inline u32 tcf_pedit_val(const struct tc_action *a, int index) +{ + return to_pedit(a)->tcfp_keys[index].val; +} + +static inline u32 tcf_pedit_offset(const struct tc_action *a, int index) +{ + return to_pedit(a)->tcfp_keys[index].off; +} #endif /* __NET_TC_PED_H */ diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index 48cca321ee6c4f3e44f8f546d05d5f32b470ccc1..c2090df944ff53224ef0500c05f62672f0d5e0b3 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -13,9 +13,6 @@ #include #include -#define VLAN_F_POP 0x1 -#define VLAN_F_PUSH 0x2 - struct tcf_vlan { struct tc_action common; int tcfv_action; @@ -49,4 +46,9 @@ static inline __be16 tcf_vlan_push_proto(const struct tc_action *a) return to_vlan(a)->tcfv_push_proto; } +static inline u8 tcf_vlan_push_prio(const struct tc_action *a) +{ + return to_vlan(a)->tcfv_push_prio; +} + #endif /* __NET_TC_VLAN_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 6ec4ea652f3f55e53675dbe09f29599af179c41a..270e5cc43c99e7030e95af218095cf9f283950bc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -78,6 +78,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* Maximal number of ACKs sent quickly to accelerate slow-start. */ #define TCP_MAX_QUICKACKS 16U +/* Maximal number of window scale according to RFC1323 */ +#define TCP_MAX_WSCALE 14U + /* urg_data states */ #define TCP_URG_VALID 0x0100 #define TCP_URG_NOTYET 0x0200 @@ -406,11 +409,7 @@ void tcp_clear_retrans(struct tcp_sock *tp); void tcp_update_metrics(struct sock *sk); void tcp_init_metrics(struct sock *sk); void tcp_metrics_init(void); -bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, - bool paws_check, bool timestamps); -bool tcp_remember_stamp(struct sock *sk); -bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw); -void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); +bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst); void tcp_disable_fack(struct tcp_sock *tp); void tcp_close(struct sock *sk, long timeout); void tcp_init_sock(struct sock *sk); @@ -1005,7 +1004,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb); void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, struct rate_sample *rs); void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, - struct skb_mstamp *now, struct rate_sample *rs); + struct rate_sample *rs); void tcp_rate_check_app_limited(struct sock *sk); /* These functions determine how the current flow behaves in respect of SACK @@ -1252,9 +1251,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, static inline int tcp_win_from_space(int space) { - return sysctl_tcp_adv_win_scale<=0 ? - (space>>(-sysctl_tcp_adv_win_scale)) : - space - (space>>sysctl_tcp_adv_win_scale); + int tcp_adv_win_scale = sysctl_tcp_adv_win_scale; + + return tcp_adv_win_scale <= 0 ? 
+ (space>>(-tcp_adv_win_scale)) : + space - (space>>tcp_adv_win_scale); } /* Note: caller must be prepared to deal with negative returns */ @@ -1505,6 +1506,12 @@ struct tcp_fastopen_context { struct rcu_head rcu; }; +extern unsigned int sysctl_tcp_fastopen_blackhole_timeout; +void tcp_fastopen_active_disable(struct sock *sk); +bool tcp_fastopen_active_should_disable(struct sock *sk); +void tcp_fastopen_active_disable_ofo_check(struct sock *sk); +void tcp_fastopen_active_timeout_reset(void); + /* Latencies incurred by various limits for a sender. They are * chronograph-like stats that are mutually exclusive. */ @@ -1814,9 +1821,8 @@ struct tcp_request_sock_ops { __u16 *mss); #endif struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, - const struct request_sock *req, - bool *strict); - __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff); + const struct request_sock *req); + __u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff); int (*send_synack)(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, @@ -1847,10 +1853,9 @@ void tcp_v4_init(void); void tcp_init(void); /* tcp_recovery.c */ -extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now); +extern void tcp_rack_mark_lost(struct sock *sk); extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, - const struct skb_mstamp *xmit_time, - const struct skb_mstamp *ack_time); + const struct skb_mstamp *xmit_time); extern void tcp_rack_reo_timeout(struct sock *sk); /* diff --git a/include/net/udp.h b/include/net/udp.h index c9d8b8e848e05c2e7228f287f88ccdb57b2e10c2..3391dbd739595a76150453c28468ce8bb55530f8 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -372,4 +372,5 @@ void udp_encap_enable(void); #if IS_ENABLED(CONFIG_IPV6) void udpv6_encap_enable(void); #endif + #endif /* _UDP_H */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 14d82bf16692a6d0ed66721b1548ba152dbc18de..6793a30c66b1f054516a27229c29037fcdfa2f6f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -120,6 +120,13 @@ struct xfrm_state_walk { struct xfrm_address_filter *filter; }; +struct xfrm_state_offload { + struct net_device *dev; + unsigned long offload_handle; + unsigned int num_exthdrs; + u8 flags; +}; + /* Full description of state of transformer. 
*/ struct xfrm_state { possible_net_t xs_net; @@ -207,6 +214,8 @@ struct xfrm_state { struct xfrm_lifetime_cur curlft; struct tasklet_hrtimer mtimer; + struct xfrm_state_offload xso; + /* used to fix curlft->add_time when changing date */ long saved_tmo; @@ -222,6 +231,8 @@ struct xfrm_state { struct xfrm_mode *inner_mode_iaf; struct xfrm_mode *outer_mode; + const struct xfrm_type_offload *type_offload; + /* Security context */ struct xfrm_sec_ctx *security; @@ -314,12 +325,14 @@ void km_state_expired(struct xfrm_state *x, int hard, u32 portid); int __xfrm_state_delete(struct xfrm_state *x); struct xfrm_state_afinfo { - unsigned int family; - unsigned int proto; - __be16 eth_proto; - struct module *owner; - const struct xfrm_type *type_map[IPPROTO_MAX]; - struct xfrm_mode *mode_map[XFRM_MODE_MAX]; + unsigned int family; + unsigned int proto; + __be16 eth_proto; + struct module *owner; + const struct xfrm_type *type_map[IPPROTO_MAX]; + const struct xfrm_type_offload *type_offload_map[IPPROTO_MAX]; + struct xfrm_mode *mode_map[XFRM_MODE_MAX]; + int (*init_flags)(struct xfrm_state *x); void (*init_tempsel)(struct xfrm_selector *sel, const struct flowi *fl); @@ -380,6 +393,18 @@ struct xfrm_type { int xfrm_register_type(const struct xfrm_type *type, unsigned short family); int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); +struct xfrm_type_offload { + char *description; + struct module *owner; + u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *pskb); + int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb); + int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features); +}; + +int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); +int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); + struct xfrm_mode { /* * Remove encapsulation header. @@ -428,6 +453,16 @@ struct xfrm_mode { */ int (*output)(struct xfrm_state *x, struct sk_buff *skb); + /* + * Adjust pointers into the packet and do GSO segmentation. + */ + struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features); + + /* + * Adjust pointers into the packet when IPsec is done at layer2. 
+ */ + void (*xmit)(struct xfrm_state *x, struct sk_buff *skb); + struct xfrm_state_afinfo *afinfo; struct module *owner; unsigned int encap; @@ -586,7 +621,6 @@ struct xfrm_migrate { struct xfrm_mgr { struct list_head list; - char *id; int (*notify)(struct xfrm_state *x, const struct km_event *c); int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp); struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir); @@ -817,12 +851,12 @@ static inline void xfrm_state_hold(struct xfrm_state *x) } static inline bool addr_match(const void *token1, const void *token2, - int prefixlen) + unsigned int prefixlen) { const __be32 *a1 = token1; const __be32 *a2 = token2; - int pdw; - int pbi; + unsigned int pdw; + unsigned int pbi; pdw = prefixlen >> 5; /* num of whole u32 in prefix */ pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */ @@ -846,9 +880,9 @@ static inline bool addr_match(const void *token1, const void *token2, static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen) { /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */ - if (prefixlen == 0) + if (sizeof(long) == 4 && prefixlen == 0) return true; - return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen))); + return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen))); } static __inline__ @@ -1533,6 +1567,7 @@ struct xfrmk_spdinfo { struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); int xfrm_state_delete(struct xfrm_state *x); int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); +int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); @@ -1615,6 +1650,11 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) } #endif +struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, + const xfrm_address_t *saddr, + const xfrm_address_t *daddr, + int family); + struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp); void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type); @@ -1820,6 +1860,61 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) } #endif +#ifdef CONFIG_XFRM_OFFLOAD +void __net_init xfrm_dev_init(void); +int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); +int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, + struct xfrm_user_offload *xuo); +bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); + +static inline void xfrm_dev_state_delete(struct xfrm_state *x) +{ + struct xfrm_state_offload *xso = &x->xso; + + if (xso->dev) + xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); +} + +static inline void xfrm_dev_state_free(struct xfrm_state *x) +{ + struct xfrm_state_offload *xso = &x->xso; + struct net_device *dev = xso->dev; + + if (dev && dev->xfrmdev_ops) { + dev->xfrmdev_ops->xdo_dev_state_free(x); + xso->dev = NULL; + dev_put(dev); + } +} +#else +static inline void __net_init xfrm_dev_init(void) +{ +} + +static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) +{ + return 0; +} + +static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo) +{ + return 0; +} + +static inline void xfrm_dev_state_delete(struct xfrm_state *x) +{ +} + +static inline void xfrm_dev_state_free(struct xfrm_state *x) +{ +} + 
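
For CONFIG_XFRM_OFFLOAD builds, the hooks above are backed by a per-netdevice callback table (struct xfrmdev_ops, added on the netdevice side of this same series). A minimal sketch of what a NIC driver might plug in; the mydrv_* names and the two hardware helpers are hypothetical, not part of this patch:

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Hypothetical hardware helpers. */
extern unsigned long mydrv_hw_install_sa(struct xfrm_state *x);
extern void mydrv_hw_remove_sa(unsigned long handle);

static int mydrv_xdo_dev_state_add(struct xfrm_state *x)
{
	/* Decline anything the hardware cannot do; the stack then
	 * falls back to software crypto for this SA. */
	if (x->props.family != AF_INET || !x->aead)
		return -EINVAL;

	x->xso.offload_handle = mydrv_hw_install_sa(x);
	return x->xso.offload_handle ? 0 : -EBUSY;
}

static void mydrv_xdo_dev_state_delete(struct xfrm_state *x)
{
	mydrv_hw_remove_sa(x->xso.offload_handle);
}

static void mydrv_xdo_dev_state_free(struct xfrm_state *x)
{
	/* Last chance to drop driver-private per-SA state. */
}

static bool mydrv_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	/* Per-packet veto: packets the hardware cannot parse take the
	 * software path instead. */
	return ip_hdr(skb)->ihl == 5;
}

static const struct xfrmdev_ops mydrv_xfrmdev_ops = {
	.xdo_dev_state_add	= mydrv_xdo_dev_state_add,
	.xdo_dev_state_delete	= mydrv_xdo_dev_state_delete,
	.xdo_dev_state_free	= mydrv_xdo_dev_state_free,
	.xdo_dev_offload_ok	= mydrv_xdo_dev_offload_ok,
};

The driver's probe path would then set netdev->xfrmdev_ops = &mydrv_xfrmdev_ops (and advertise ESP offload in its feature flags) so that xfrm_dev_state_add() and friends can reach the hardware.
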
+static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + return false; +} +#endif + static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) { if (attrs[XFRMA_MARK]) diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h index c3a53fd47ff1a20a322c71e1f9a4fd89c3462ed6..52c8425d144b349988a16366ab69d0366bd0c065 100644 --- a/include/trace/events/bpf.h +++ b/include/trace/events/bpf.h @@ -321,11 +321,14 @@ TRACE_EVENT(bpf_map_next_key, __dynamic_array(u8, key, map->key_size) __dynamic_array(u8, nxt, map->key_size) __field(bool, key_trunc) + __field(bool, key_null) __field(int, ufd) ), TP_fast_assign( - memcpy(__get_dynamic_array(key), key, map->key_size); + if (key) + memcpy(__get_dynamic_array(key), key, map->key_size); + __entry->key_null = !key; memcpy(__get_dynamic_array(nxt), key_next, map->key_size); __entry->type = map->map_type; __entry->key_len = min(map->key_size, 16U); @@ -336,8 +339,9 @@ TRACE_EVENT(bpf_map_next_key, TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]", __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), __entry->ufd, - __print_hex(__get_dynamic_array(key), __entry->key_len), - __entry->key_trunc ? " ..." : "", + __entry->key_null ? "NULL" : __print_hex(__get_dynamic_array(key), + __entry->key_len), + __entry->key_trunc && !__entry->key_null ? " ..." : "", __print_hex(__get_dynamic_array(nxt), __entry->key_len), __entry->key_trunc ? " ..." : "") ); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 39123c06a5661316a80dd677b43b1e581a17e2e7..29a3d53a401535ef1e602a2dd82d79d66bc68bea 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -683,6 +683,57 @@ TRACE_EVENT(rxrpc_rx_ack, __entry->n_acks) ); +TRACE_EVENT(rxrpc_rx_abort, + TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, + u32 abort_code), + + TP_ARGS(call, serial, abort_code), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(rxrpc_serial_t, serial ) + __field(u32, abort_code ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->serial = serial; + __entry->abort_code = abort_code; + ), + + TP_printk("c=%p ABORT %08x ac=%d", + __entry->call, + __entry->serial, + __entry->abort_code) + ); + +TRACE_EVENT(rxrpc_rx_rwind_change, + TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, + u32 rwind, bool wake), + + TP_ARGS(call, serial, rwind, wake), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(rxrpc_serial_t, serial ) + __field(u32, rwind ) + __field(bool, wake ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->serial = serial; + __entry->rwind = rwind; + __entry->wake = wake; + ), + + TP_printk("c=%p %08x rw=%u%s", + __entry->call, + __entry->serial, + __entry->rwind, + __entry->wake ? 
" wake" : "") + ); + TRACE_EVENT(rxrpc_tx_data, TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_serial_t serial, u8 flags, bool retrans, bool lose), @@ -1087,6 +1138,56 @@ TRACE_EVENT(rxrpc_improper_term, __entry->abort_code) ); +TRACE_EVENT(rxrpc_rx_eproto, + TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, + const char *why), + + TP_ARGS(call, serial, why), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(rxrpc_serial_t, serial ) + __field(const char *, why ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->serial = serial; + __entry->why = why; + ), + + TP_printk("c=%p EPROTO %08x %s", + __entry->call, + __entry->serial, + __entry->why) + ); + +TRACE_EVENT(rxrpc_connect_call, + TP_PROTO(struct rxrpc_call *call), + + TP_ARGS(call), + + TP_STRUCT__entry( + __field(struct rxrpc_call *, call ) + __field(unsigned long, user_call_ID ) + __field(u32, cid ) + __field(u32, call_id ) + ), + + TP_fast_assign( + __entry->call = call; + __entry->user_call_ID = call->user_call_ID; + __entry->cid = call->cid; + __entry->call_id = call->call_id; + ), + + TP_printk("c=%p u=%p %08x:%08x", + __entry->call, + (void *)__entry->user_call_ID, + __entry->cid, + __entry->call_id) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 2c748ddad5f875711ed66f91eae9bc69b9a41fe0..2b488565599daf73b8942fe6482890830af94dff 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -94,4 +94,10 @@ #define SCM_TIMESTAMPING_OPT_STATS 54 +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index f8d9fed17ba99418d858ceb57a864b31a00d078a..6b0e2758585f83b186a253c9f7b7ca492754c6d0 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -477,6 +477,7 @@ header-y += virtio_types.h header-y += virtio_vsock.h header-y += virtio_crypto.h header-y += vm_sockets.h +header-y += vsockmon.h header-y += vt.h header-y += vtpm_proxy.h header-y += wait.h diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0539a0ceef38155835552360667070552ebce641..945a1f5f63c546c5521da5254a80664344ebd0ad 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -81,6 +81,7 @@ enum bpf_cmd { BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, + BPF_PROG_TEST_RUN, }; enum bpf_map_type { @@ -96,6 +97,8 @@ enum bpf_map_type { BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, + BPF_MAP_TYPE_ARRAY_OF_MAPS, + BPF_MAP_TYPE_HASH_OF_MAPS, }; enum bpf_prog_type { @@ -152,6 +155,7 @@ union bpf_attr { __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* prealloc or not */ + __u32 inner_map_fd; /* fd pointing to the inner map */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -186,6 +190,17 @@ union bpf_attr { __u32 attach_type; __u32 attach_flags; }; + + struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __aligned_u64 data_in; + __aligned_u64 data_out; + __u32 repeat; + __u32 duration; + } test; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -456,6 +471,17 @@ union bpf_attr { * Return: * > 0 length of the string including the trailing NUL on success * < 0 error + * + * u64 
bpf_get_socket_cookie(skb)
+ *     Get the cookie for the socket stored inside sk_buff.
+ *     @skb: pointer to skb
+ *     Return: 8-byte non-decreasing number on success, or 0 if the socket
+ *     field is missing from the sk_buff
+ *
+ * u32 bpf_get_socket_uid(skb)
+ *     Get the owner uid of the socket stored inside sk_buff.
+ *     @skb: pointer to skb
+ *     Return: uid of the socket owner on success, or overflowuid on failure.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
@@ -503,7 +529,9 @@ union bpf_attr {
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
-	FN(probe_read_str),
+	FN(probe_read_str),		\
+	FN(get_socket_cookie),		\
+	FN(get_socket_uid),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
@@ -574,6 +602,7 @@ struct __sk_buff {
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
+	__u32 napi_id;
};

struct bpf_tunnel_key {
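
The new BPF_PROG_TEST_RUN command plus the anonymous "test" struct above are all userspace needs to exercise a loaded program against a canned packet. A minimal sketch using the raw syscall; the wrapper name is mine, and the caller is assumed to pass an out buffer large enough for the (possibly rewritten) packet, since the kernel reports the produced length back through data_size_out:

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint64_t ptr_to_u64(const void *p)
{
	return (uint64_t)(unsigned long)p;
}

/* Run prog_fd against one test packet; returns the program's return
 * code, or -1 if the syscall fails. */
static int bpf_prog_test_run(int prog_fd,
			     const void *pkt, uint32_t pkt_len,
			     void *out, uint32_t *out_len,
			     uint32_t *duration_ns)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(pkt);
	attr.test.data_size_in = pkt_len;
	attr.test.data_out = ptr_to_u64(out);
	attr.test.repeat = 1000;	/* average duration over 1000 runs */

	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0)
		return -1;

	*out_len = attr.test.data_size_out;	/* bytes written to 'out' */
	*duration_ns = attr.test.duration;	/* mean ns per run */
	return attr.test.retval;
}

retval carries the program's return code, and duration comes back as the mean runtime in nanoseconds over the requested repeat count.
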
diff --git a/include/uapi/linux/can/vxcan.h b/include/uapi/linux/can/vxcan.h
new file mode 100644
index 0000000000000000000000000000000000000000..ffb0b7156f7e030b9c65a0b5e96f931fa8187eea
--- /dev/null
+++ b/include/uapi/linux/can/vxcan.h
@@ -0,0 +1,12 @@
+#ifndef _UAPI_CAN_VXCAN_H
+#define _UAPI_CAN_VXCAN_H
+
+enum {
+	VXCAN_INFO_UNSPEC,
+	VXCAN_INFO_PEER,
+
+	__VXCAN_INFO_MAX
+#define VXCAN_INFO_MAX	(__VXCAN_INFO_MAX - 1)
+};
+
+#endif
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 0f1f3a12e23c30e511cdb332059e30ee8d3d5efb..b0e807ac53bbfd3b088533381ecbf25a689633b0 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -65,8 +65,12 @@ enum devlink_command {
#define DEVLINK_CMD_ESWITCH_MODE_SET /* obsolete, never use this! */ \
	DEVLINK_CMD_ESWITCH_SET

-	/* add new commands above here */
+	DEVLINK_CMD_DPIPE_TABLE_GET,
+	DEVLINK_CMD_DPIPE_ENTRIES_GET,
+	DEVLINK_CMD_DPIPE_HEADERS_GET,
+	DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,

+	/* add new commands above here */
	__DEVLINK_CMD_MAX,
	DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
};
@@ -115,6 +119,11 @@ enum devlink_eswitch_inline_mode {
	DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT,
};

+enum devlink_eswitch_encap_mode {
+	DEVLINK_ESWITCH_ENCAP_MODE_NONE,
+	DEVLINK_ESWITCH_ENCAP_MODE_BASIC,
+};
+
enum devlink_attr {
	/* don't change the order or add anything between, this is ABI! */
	DEVLINK_ATTR_UNSPEC,
@@ -148,10 +157,73 @@ enum devlink_attr {
	DEVLINK_ATTR_ESWITCH_MODE,		/* u16 */
	DEVLINK_ATTR_ESWITCH_INLINE_MODE,	/* u8 */

+	DEVLINK_ATTR_DPIPE_TABLES,		/* nested */
+	DEVLINK_ATTR_DPIPE_TABLE,		/* nested */
+	DEVLINK_ATTR_DPIPE_TABLE_NAME,		/* string */
+	DEVLINK_ATTR_DPIPE_TABLE_SIZE,		/* u64 */
+	DEVLINK_ATTR_DPIPE_TABLE_MATCHES,	/* nested */
+	DEVLINK_ATTR_DPIPE_TABLE_ACTIONS,	/* nested */
+	DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,	/* u8 */
+
+	DEVLINK_ATTR_DPIPE_ENTRIES,		/* nested */
+	DEVLINK_ATTR_DPIPE_ENTRY,		/* nested */
+	DEVLINK_ATTR_DPIPE_ENTRY_INDEX,		/* u64 */
+	DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES,	/* nested */
+	DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES,	/* nested */
+	DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,	/* u64 */
+
+	DEVLINK_ATTR_DPIPE_MATCH,		/* nested */
+	DEVLINK_ATTR_DPIPE_MATCH_VALUE,		/* nested */
+	DEVLINK_ATTR_DPIPE_MATCH_TYPE,		/* u32 */
+
+	DEVLINK_ATTR_DPIPE_ACTION,		/* nested */
+	DEVLINK_ATTR_DPIPE_ACTION_VALUE,	/* nested */
+	DEVLINK_ATTR_DPIPE_ACTION_TYPE,		/* u32 */
+
+	DEVLINK_ATTR_DPIPE_VALUE,
+	DEVLINK_ATTR_DPIPE_VALUE_MASK,
+	DEVLINK_ATTR_DPIPE_VALUE_MAPPING,	/* u32 */
+
+	DEVLINK_ATTR_DPIPE_HEADERS,		/* nested */
+	DEVLINK_ATTR_DPIPE_HEADER,		/* nested */
+	DEVLINK_ATTR_DPIPE_HEADER_NAME,		/* string */
+	DEVLINK_ATTR_DPIPE_HEADER_ID,		/* u32 */
+	DEVLINK_ATTR_DPIPE_HEADER_FIELDS,	/* nested */
+	DEVLINK_ATTR_DPIPE_HEADER_GLOBAL,	/* u8 */
+	DEVLINK_ATTR_DPIPE_HEADER_INDEX,	/* u32 */
+
+	DEVLINK_ATTR_DPIPE_FIELD,		/* nested */
+	DEVLINK_ATTR_DPIPE_FIELD_NAME,		/* string */
+	DEVLINK_ATTR_DPIPE_FIELD_ID,		/* u32 */
+	DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH,	/* u32 */
+	DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE,	/* u32 */
+
+	DEVLINK_ATTR_PAD,
+
+	DEVLINK_ATTR_ESWITCH_ENCAP_MODE,	/* u8 */
+
	/* add new attributes above here, update the policy in devlink.c */
	__DEVLINK_ATTR_MAX,
	DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
};

+/* Mapping between internal resource described by the field and system
+ * structure
+ */
+enum devlink_dpipe_field_mapping_type {
+	DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE,
+	DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+};
+
+/* Match type - specify the type of the match */
+enum devlink_dpipe_match_type {
+	DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
+};
+
+/* Action type - specify the action type */
+enum devlink_dpipe_action_type {
+	DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 3dc91a46e8b8da0b243a12a168bbf205e5a87916..5f4ea28eabe4bf65dcd7f2a5501cf89a7e1c5760 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1487,6 +1487,7 @@ enum ethtool_link_mode_bit_indices {
 */

/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal.
*/ +/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index 72a04a0e8ccef1e5560378af9177a6c47954daaf..57d1edb8efd9bae86d0b11779f59de9d18f35f1a 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -19,7 +19,8 @@ enum gtp_attrs { GTPA_LINK, GTPA_VERSION, GTPA_TID, /* for GTPv0 only */ - GTPA_SGSN_ADDRESS, + GTPA_PEER_ADDRESS, /* Remote GSN peer, either SGSN or GGSN */ +#define GTPA_SGSN_ADDRESS GTPA_PEER_ADDRESS /* maintain legacy attr name */ GTPA_MS_ADDRESS, GTPA_FLOW, GTPA_NET_NS_FD, diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h index 4d024d75d64ba8b7831cdb1d61384e47ac14ef33..cf73510b923864fd1b0c20bd3de47ba874a9b855 100644 --- a/include/uapi/linux/if_arp.h +++ b/include/uapi/linux/if_arp.h @@ -95,6 +95,7 @@ #define ARPHRD_IP6GRE 823 /* GRE over IPv6 */ #define ARPHRD_NETLINK 824 /* Netlink header */ #define ARPHRD_6LOWPAN 825 /* IPv6 over LoWPAN */ +#define ARPHRD_VSOCKMON 826 /* Vsock monitor header */ #define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ #define ARPHRD_NONE 0xFFFE /* zero header length */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 320fc1e747ee9623db56fbaf26b2a514b5d5a3d1..8e56ac70e0d1a3536a43aff83370e5b3bd5bb0b4 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -323,6 +323,7 @@ enum { IFLA_BRPORT_MCAST_FLOOD, IFLA_BRPORT_MCAST_TO_UCAST, IFLA_BRPORT_VLAN_TUNNEL, + IFLA_BRPORT_BCAST_FLOOD, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -538,11 +539,18 @@ enum { #define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1) /* GTP section */ + +enum ifla_gtp_role { + GTP_ROLE_GGSN = 0, + GTP_ROLE_SGSN, +}; + enum { IFLA_GTP_UNSPEC, IFLA_GTP_FD0, IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, + IFLA_GTP_ROLE, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -880,7 +888,9 @@ enum { /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) -#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST) +#define XDP_FLAGS_SKB_MODE (2U << 0) +#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ + XDP_FLAGS_SKB_MODE) enum { IFLA_XDP_UNSPEC, diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index 9e7edfd8141e5dea69129807f46b89b2b637ead9..4df96a7dd4fae591d834735046911337221c60d3 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -66,6 +66,7 @@ struct sockaddr_ll { #define PACKET_FANOUT_CBPF 6 #define PACKET_FANOUT_EBPF 7 #define PACKET_FANOUT_FLAG_ROLLOVER 0x1000 +#define PACKET_FANOUT_FLAG_UNIQUEID 0x2000 #define PACKET_FANOUT_FLAG_DEFRAG 0x8000 struct tpacket_stats { diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 92f3c8677523237ea0db55b0269aa0b73b9c2009..6792d1967d3145f3f2092d3b1bf052bf13ce04fb 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -75,6 +75,7 @@ enum { IFLA_IPTUN_ENCAP_SPORT, IFLA_IPTUN_ENCAP_DPORT, IFLA_IPTUN_COLLECT_METADATA, + IFLA_IPTUN_FWMARK, __IFLA_IPTUN_MAX, }; #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) @@ -132,6 +133,7 @@ enum { IFLA_GRE_ENCAP_DPORT, IFLA_GRE_COLLECT_METADATA, IFLA_GRE_IGNORE_DF, + IFLA_GRE_FWMARK, __IFLA_GRE_MAX, }; @@ -147,6 +149,7 @@ enum { IFLA_VTI_OKEY, IFLA_VTI_LOCAL, IFLA_VTI_REMOTE, + IFLA_VTI_FWMARK, __IFLA_VTI_MAX, }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 
8ef9e75e004ebbf951cb50b4b4b17f2c5b907dd4..2ae59178189d7983fb9ed99ae1cd5f40197cfb04 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -183,6 +183,8 @@ enum { DEVCONF_SEG6_REQUIRE_HMAC, DEVCONF_ENHANCED_DAD, DEVCONF_ADDR_GEN_MODE, + DEVCONF_DISABLE_POLICY, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN, DEVCONF_MAX }; diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h index d80a0498f77ed2d4dac3b61a6eef1906eb8b0626..f5e45095b0bb5c17af6515012587d6d805a7d79c 100644 --- a/include/uapi/linux/mpls_iptunnel.h +++ b/include/uapi/linux/mpls_iptunnel.h @@ -16,11 +16,13 @@ /* MPLS tunnel attributes * [RTA_ENCAP] = { * [MPLS_IPTUNNEL_DST] + * [MPLS_IPTUNNEL_TTL] * } */ enum { MPLS_IPTUNNEL_UNSPEC, MPLS_IPTUNNEL_DST, + MPLS_IPTUNNEL_TTL, __MPLS_IPTUNNEL_MAX, }; #define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1) diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 6a8e33dd4ecbd83b0d254d4cf2486dfe5cacfd34..a8072cc7fa0ba6e4b4acedca37a25a4316a24159 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -28,12 +28,14 @@ enum ip_conntrack_info { /* only for userspace compatibility */ #ifndef __KERNEL__ IP_CT_NEW_REPLY = IP_CT_NUMBER, +#else + IP_CT_UNTRACKED = 7, #endif }; #define NF_CT_STATE_INVALID_BIT (1 << 0) #define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1)) -#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_NUMBER + 1)) +#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1)) /* Bitset representing status of connection. */ enum ip_conntrack_status { @@ -94,7 +96,7 @@ enum ip_conntrack_status { IPS_TEMPLATE_BIT = 11, IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT), - /* Conntrack is a fake untracked entry */ + /* Conntrack is a fake untracked entry. 
Obsolete and not used anymore */ IPS_UNTRACKED_BIT = 12, IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), @@ -117,6 +119,9 @@ enum ip_conntrack_events { IPCT_NATSEQADJ = IPCT_SEQADJ, IPCT_SECMARK, /* new security mark has been set */ IPCT_LABEL, /* new connlabel has been set */ +#ifdef __KERNEL__ + __IPCT_MAX +#endif }; enum ip_conntrack_expect_events { diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 05215d30fe5c9853b7871e799ccdce4878a04ef1..683f6f88fcacefa0898e3898cd75f31422fc0f9a 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -815,6 +815,17 @@ enum nft_rt_keys { NFT_RT_NEXTHOP6, }; +/** + * enum nft_hash_types - nf_tables hash expression types + * + * @NFT_HASH_JENKINS: Jenkins Hash + * @NFT_HASH_SYM: Symmetric Hash + */ +enum nft_hash_types { + NFT_HASH_JENKINS, + NFT_HASH_SYM, +}; + /** * enum nft_hash_attributes - nf_tables hash expression netlink attributes * @@ -824,6 +835,7 @@ enum nft_rt_keys { * @NFTA_HASH_MODULUS: modulus value (NLA_U32) * @NFTA_HASH_SEED: seed value (NLA_U32) * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32) + * @NFTA_HASH_TYPE: hash operation (NLA_U32: nft_hash_types) */ enum nft_hash_attributes { NFTA_HASH_UNSPEC, @@ -833,6 +845,7 @@ enum nft_hash_attributes { NFTA_HASH_MODULUS, NFTA_HASH_SEED, NFTA_HASH_OFFSET, + NFTA_HASH_TYPE, __NFTA_HASH_MAX, }; #define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1) @@ -888,6 +901,7 @@ enum nft_rt_attributes { * @NFT_CT_BYTES: conntrack bytes * @NFT_CT_AVGPKT: conntrack average bytes per packet * @NFT_CT_ZONE: conntrack zone + * @NFT_CT_EVENTMASK: ctnetlink events to be generated for this conntrack */ enum nft_ct_keys { NFT_CT_STATE, @@ -908,6 +922,7 @@ enum nft_ct_keys { NFT_CT_BYTES, NFT_CT_AVGPKT, NFT_CT_ZONE, + NFT_CT_EVENTMASK, }; /** @@ -1244,12 +1259,23 @@ enum nft_fib_flags { NFTA_FIB_F_MARK = 1 << 2, /* use skb->mark */ NFTA_FIB_F_IIF = 1 << 3, /* restrict to iif */ NFTA_FIB_F_OIF = 1 << 4, /* restrict to oif */ + NFTA_FIB_F_PRESENT = 1 << 5, /* check existence only */ +}; + +enum nft_ct_helper_attributes { + NFTA_CT_HELPER_UNSPEC, + NFTA_CT_HELPER_NAME, + NFTA_CT_HELPER_L3PROTO, + NFTA_CT_HELPER_L4PROTO, + __NFTA_CT_HELPER_MAX, }; +#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1) #define NFT_OBJECT_UNSPEC 0 #define NFT_OBJECT_COUNTER 1 #define NFT_OBJECT_QUOTA 2 -#define __NFT_OBJECT_MAX 3 +#define NFT_OBJECT_CT_HELPER 3 +#define __NFT_OBJECT_MAX 4 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index f3946a27bd07d5164fac6964785c86044f031790..f86127a46cfc80697df711c847d6a07b5a722347 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -50,12 +50,12 @@ struct nlmsghdr { /* Flags values */ -#define NLM_F_REQUEST 1 /* It is request message. */ -#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */ -#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */ -#define NLM_F_ECHO 8 /* Echo this request */ -#define NLM_F_DUMP_INTR 16 /* Dump was inconsistent due to sequence change */ -#define NLM_F_DUMP_FILTERED 32 /* Dump was filtered as requested */ +#define NLM_F_REQUEST 0x01 /* It is request message. 
*/
+#define NLM_F_MULTI		0x02	/* Multipart message, terminated by NLMSG_DONE */
+#define NLM_F_ACK		0x04	/* Reply with ack, with zero or error code */
+#define NLM_F_ECHO		0x08	/* Echo this request */
+#define NLM_F_DUMP_INTR		0x10	/* Dump was inconsistent due to sequence change */
+#define NLM_F_DUMP_FILTERED	0x20	/* Dump was filtered as requested */

/* Modifiers to GET request */
#define NLM_F_ROOT	0x100	/* specify tree root */
@@ -69,6 +69,10 @@ struct nlmsghdr {
#define NLM_F_CREATE	0x400	/* Create, if it does not exist */
#define NLM_F_APPEND	0x800	/* Add to end of list */

+/* Flags for ACK message */
+#define NLM_F_CAPPED	0x100	/* request was capped */
+#define NLM_F_ACK_TLVS	0x200	/* extended ACK TLVs were included */
+
/*
   4.4BSD ADD		NLM_F_CREATE|NLM_F_EXCL
   4.4BSD CHANGE	NLM_F_REPLACE
@@ -101,6 +105,37 @@ struct nlmsghdr {
struct nlmsgerr {
	int		error;
	struct nlmsghdr msg;
+	/*
+	 * followed by the message contents unless NETLINK_CAP_ACK was set
+	 * or the ACK indicates success (error == 0)
+	 * message length is aligned with NLMSG_ALIGN()
+	 */
+	/*
+	 * followed by TLVs defined in enum nlmsgerr_attrs
+	 * if NETLINK_EXT_ACK was set
+	 */
+};
+
+/**
+ * enum nlmsgerr_attrs - nlmsgerr attributes
+ * @NLMSGERR_ATTR_UNUSED: unused
+ * @NLMSGERR_ATTR_MSG: error message string (string)
+ * @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original
+ *	message, counting from the beginning of the header (u32)
+ * @NLMSGERR_ATTR_COOKIE: arbitrary subsystem-specific cookie to
+ *	be used - in the success case - to identify a created
+ *	object or operation or similar (binary)
+ * @__NLMSGERR_ATTR_MAX: number of attributes
+ * @NLMSGERR_ATTR_MAX: highest attribute number
+ */
+enum nlmsgerr_attrs {
+	NLMSGERR_ATTR_UNUSED,
+	NLMSGERR_ATTR_MSG,
+	NLMSGERR_ATTR_OFFS,
+	NLMSGERR_ATTR_COOKIE,
+
+	__NLMSGERR_ATTR_MAX,
+	NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
};

#define NETLINK_ADD_MEMBERSHIP	1
@@ -115,6 +150,7 @@ struct nlmsgerr {
#define NETLINK_LISTEN_ALL_NSID	8
#define NETLINK_LIST_MEMBERSHIPS	9
#define NETLINK_CAP_ACK		10
+#define NETLINK_EXT_ACK		11

struct nl_pktinfo {
	__u32	group;
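
Userspace consumes these TLVs by first opting in with the new NETLINK_EXT_ACK socket option. A short sketch, assuming NETLINK_CAP_ACK is also set so that the attributes begin immediately after the (aligned) struct nlmsgerr; without CAP_ACK, the capped or full copy of the offending message has to be skipped first, as the comments above spell out. The helper names are illustrative:

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void enable_ext_ack(int fd)
{
	int one = 1;

	/* Suppress the echoed request and opt into extended ACK TLVs. */
	setsockopt(fd, SOL_NETLINK, NETLINK_CAP_ACK, &one, sizeof(one));
	setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one));
}

static void print_ext_ack(const struct nlmsghdr *nlh)
{
	const struct nlmsgerr *err = NLMSG_DATA(nlh);
	int hlen = NLMSG_ALIGN(sizeof(*err));
	int rem = NLMSG_PAYLOAD(nlh, 0) - hlen;
	const struct nlattr *nla = (const void *)((const char *)err + hlen);

	if (nlh->nlmsg_type != NLMSG_ERROR ||
	    !(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
		return;

	for (; rem >= (int)sizeof(*nla);
	     rem -= NLA_ALIGN(nla->nla_len),
	     nla = (const void *)((const char *)nla + NLA_ALIGN(nla->nla_len))) {
		/* The message string is null-terminated by the kernel. */
		if ((nla->nla_type & NLA_TYPE_MASK) == NLMSGERR_ATTR_MSG)
			printf("kernel: %s\n", (const char *)(nla + 1));
	}
}
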
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index 76b4d87c83a863dc7b0e809f5ab318b9656d1dfb..6dcd4de3397b393eb64c9da83e898484af65fda9 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -38,6 +38,7 @@ enum {
	NETLINK_DIAG_GROUPS,
	NETLINK_DIAG_RX_RING,
	NETLINK_DIAG_TX_RING,
+	NETLINK_DIAG_FLAGS,

	__NETLINK_DIAG_MAX,
};
@@ -52,5 +53,14 @@ enum {
/* deprecated since 4.6 */
#define NDIAG_SHOW_RING_CFG	0x00000004	/* show ring configuration */
#endif
+#define NDIAG_SHOW_FLAGS	0x00000008	/* show flags of a netlink socket */
+
+/* flags */
+#define NDIAG_FLAG_CB_RUNNING		0x00000001
+#define NDIAG_FLAG_PKTINFO		0x00000002
+#define NDIAG_FLAG_BROADCAST_ERROR	0x00000004
+#define NDIAG_FLAG_NO_ENOBUFS		0x00000008
+#define NDIAG_FLAG_LISTEN_ALL_NSID	0x00000010
+#define NDIAG_FLAG_CAP_ACK		0x00000020

#endif
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5ed257c4cd4eaad8fcbd8dbde4ca88f385629271..b8c44b98f12d4c42d4f88a73049da962be2678b2 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -172,6 +172,42 @@
 * Multiple such rules can be created.
 */

+/**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertised by drivers by
+ * setting the @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. Drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication
+ * and eventually do a key derivation as per IEEE 802.11ai. The following
+ * additional parameters should be given to the driver in %NL80211_CMD_CONNECT:
+ *	%NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct the ERP message
+ *	%NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK and rMSK should be generated, and keyname_nai and the sequence number
+ * used, as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, the driver needs to
+ * provide the following additional parameters to userspace:
+ *	%NL80211_ATTR_FILS_KEK - used for key renewal
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ *	%NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ *	%NL80211_ATTR_PMK - used to update the PMKSA cache in userspace
+ * The PMKSA can be maintained in userspace persistently so that it can be
+ * used later, e.g. across reboots or Wi-Fi off/on cycles.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a
+ * FILS-capable AP supporting PMK caching. It specifies the scope within which
+ * the PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching
+ * based on the FILS cache identifier. Additionally, %NL80211_ATTR_PMK is used
+ * with %NL80211_CMD_SET_PMKSA to specify the PMK corresponding to a PMKSA for
+ * the driver to use in a FILS shared key connection with PMKSA caching.
+ */
+
/**
 * enum nl80211_commands - supported nl80211 commands
 *
@@ -351,7 +387,9 @@
 *	are used. Extra IEs can also be passed from the userspace by
 *	using the %NL80211_ATTR_IE attribute. The first cycle of the
 *	scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY
- *	is supplied.
+ *	is supplied. If the device supports multiple concurrent scheduled
+ *	scans, starting more than one is allowed when the caller provides
+ *	the flag attribute %NL80211_ATTR_SCHED_SCAN_MULTI to indicate
+ *	user-space support for it.
 * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
 *	scheduled scan is not running. The caller may assume that as soon
 *	as the call returns, it is safe to start a new scheduled scan again.
@@ -370,10 +408,18 @@
 * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
 *	NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
 *
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ *	(for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ *	(PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ *	using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ *	%NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ *	authentication, where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ *	advertised by a FILS-capable AP identifying the scope of the PMKSA in
+ *	an ESS.
 * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ *	(for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ *	%NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ *	authentication.
 * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
 *
 * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -2012,6 +2058,36 @@ enum nl80211_commands {
 *	u32 attribute with an &enum nl80211_timeout_reason value. This is used,
 *	e.g., with %NL80211_CMD_CONNECT event.
 *
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ *	username part of NAI used to refer to keys rRK and rIK. This is used
+ *	with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm
+ *	part of NAI specifying the domain name of the ER server. This is used
+ *	with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence
+ *	number to use in ERP messages. This is used in generating the FILS
+ *	wrapped data for FILS authentication and is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ *	NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ *	%NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ *	from successful FILS authentication and is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
+ *	identifying the scope of PMKSAs. This is used with
+ *	@NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ *	This is used with @NL80211_CMD_SET_PMKSA.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to
+ *	indicate that it supports multiple active scheduled scan requests.
+ * @NL80211_ATTR_SCHED_SCAN_MAX_REQS: indicates the maximum number of scheduled
+ *	scan requests that may be active for the device (u32).
+ *
 * @NUM_NL80211_ATTR: total number of nl80211_attrs available
 * @NL80211_ATTR_MAX: highest attribute number currently defined
 * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2423,6 +2499,17 @@ enum nl80211_attrs {
	NL80211_ATTR_TIMEOUT_REASON,

+	NL80211_ATTR_FILS_ERP_USERNAME,
+	NL80211_ATTR_FILS_ERP_REALM,
+	NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+	NL80211_ATTR_FILS_ERP_RRK,
+	NL80211_ATTR_FILS_CACHE_ID,
+
+	NL80211_ATTR_PMK,
+
+	NL80211_ATTR_SCHED_SCAN_MULTI,
+	NL80211_ATTR_SCHED_SCAN_MAX_REQS,
+
	/* add attributes here, update the policy in nl80211.c */

	__NL80211_ATTR_AFTER_LAST,
@@ -3107,6 +3194,7 @@ enum nl80211_reg_rule_attr {
 * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
 * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
 *	only report BSS with matching SSID.
+ *	(This cannot be used together with BSSID.)
 * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
 *	BSS in scan results. Filtering is turned off if not specified. Note that
 *	if this attribute is in a match set of its own, then it is treated as
@@ -3122,6 +3210,8 @@ enum nl80211_reg_rule_attr {
 *	BSS-es in the specified band is to be adjusted before doing
 *	RSSI-based BSS selection. The attribute value is a packed structure
 *	value as specified by &struct nl80211_bss_select_rssi_adjust.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching
+ *	(this cannot be used together with SSID).
 * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
 *	attribute number currently defined
 * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3133,6 +3223,7 @@ enum nl80211_sched_scan_match_attr {
	NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
	NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
	NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
+	NL80211_SCHED_SCAN_MATCH_ATTR_BSSID,

	/* keep last */
	__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3942,7 +4033,10 @@ enum nl80211_ps_state {
 * @__NL80211_ATTR_CQM_INVALID: invalid
 * @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
 *	the threshold for the RSSI level at which an event will be sent. Zero
- *	to disable.
+ *	to disable. Alternatively, if %NL80211_EXT_FEATURE_CQM_RSSI_LIST is
+ *	set, multiple values can be supplied as a low-to-high sorted array of
+ *	threshold values in dBm. Events will be sent when the RSSI value
+ *	crosses any of the thresholds.
 * @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
 *	the minimum amount the RSSI level must change after an event before a
 *	new event may be issued (to reduce effects of RSSI oscillation).
@@ -4753,6 +4847,11 @@ enum nl80211_feature_flags {
 * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
 *	for reporting BSSs with better RSSI than the current connected BSS
 *	(%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ *	%NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ *	RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ *	authentication with %NL80211_CMD_CONNECT.
 *
 * @NUM_NL80211_EXT_FEATURES: number of extended features.
 * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4771,6 +4870,8 @@ enum nl80211_ext_feature_index {
	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
	NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+	NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+	NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,

	/* add new features before the definition below */
	NUM_NL80211_EXT_FEATURES,
@@ -4906,12 +5007,17 @@ enum nl80211_smps_mode {
 *	change to the channel status.
 * @NL80211_RADAR_NOP_FINISHED: The Non-Occupancy Period for this channel is
 *	over, channel becomes usable.
+ * @NL80211_RADAR_PRE_CAC_EXPIRED: The Channel Availability Check done on this
+ *	non-operating channel has expired and is no longer valid. A new CAC
+ *	must be done on this channel before starting operation. This is not
+ *	applicable for the ETSI DFS domain, where pre-CAC is valid forever.
 */
enum nl80211_radar_event {
	NL80211_RADAR_DETECTED,
	NL80211_RADAR_CAC_FINISHED,
	NL80211_RADAR_CAC_ABORTED,
	NL80211_RADAR_NOP_FINISHED,
+	NL80211_RADAR_PRE_CAC_EXPIRED,
};

/**
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 7f41f7d0000f9f0ee36c274d88ad0d330fa8f5d6..61b7d36dfe34394f7cbed64217a8a9a1e7f55cfe 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -578,10 +578,25 @@ enum ovs_sample_attr {
	OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
	OVS_SAMPLE_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
	__OVS_SAMPLE_ATTR_MAX,
+
+#ifdef __KERNEL__
+	OVS_SAMPLE_ATTR_ARG          /* struct sample_arg */
+#endif
};

#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)

+#ifdef __KERNEL__
+struct sample_arg {
+	bool exec;		/* When true, actions in sample will not
+				 * change flow keys. False otherwise.
+				 */
+	u32  probability;	/* Same value as
+				 * 'OVS_SAMPLE_ATTR_PROBABILITY'.
+				 */
+};
+#endif
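
OVS_SAMPLE_ATTR_PROBABILITY is not a percentage but a fixed-point fraction of UINT32_MAX (0 never samples, UINT32_MAX always does). A small userspace-side sketch of the usual "1 in N" encoding; the helper name is illustrative, and the exact comparison the datapath performs is left to the kernel:

#include <stdint.h>

/* Encode "sample 1 packet in N" for OVS_SAMPLE_ATTR_PROBABILITY. */
static uint32_t ovs_sample_probability(uint32_t one_in_n)
{
	if (one_in_n <= 1)
		return UINT32_MAX;	/* always sample */
	return UINT32_MAX / one_in_n;
}
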
/**
 * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
 * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
@@ -678,6 +693,17 @@ struct ovs_action_hash {
 * nothing if the connection is already committed will check that the current
 * packet is in conntrack entry's original direction. If directionality does
 * not match, will delete the existing conntrack entry and commit a new one.
+ * @OVS_CT_ATTR_EVENTMASK: Mask of bits indicating which conntrack event types
+ * (enum ip_conntrack_events IPCT_*) should be reported. For any bit set to
+ * zero, the corresponding event type is not generated. Default behavior
+ * depends on system configuration, but typically all event types are
+ * generated, so a listener on NFNLGRP_CONNTRACK_UPDATE may receive a lot
+ * of events. Explicitly passing this attribute allows limiting the updates
+ * received to the events of interest. The bits 1 << IPCT_NEW, 1 <<
+ * IPCT_RELATED, and 1 << IPCT_DESTROY must be set for those events to be
+ * received on the NFNLGRP_CONNTRACK_NEW and NFNLGRP_CONNTRACK_DESTROY groups,
+ * respectively. The remaining bits control the changes for which an event is
+ * delivered on the NFNLGRP_CONNTRACK_UPDATE group.
 */
enum ovs_ct_attr {
	OVS_CT_ATTR_UNSPEC,
@@ -689,6 +715,7 @@ enum ovs_ct_attr {
				   related connections. */
	OVS_CT_ATTR_NAT,        /* Nested OVS_NAT_ATTR_* */
	OVS_CT_ATTR_FORCE_COMMIT,  /* No argument */
+	OVS_CT_ATTR_EVENTMASK,  /* u32 mask of IPCT_* events. */

	__OVS_CT_ATTR_MAX
};
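
The eventmask itself is just a u32 built from the IPCT_* bit positions, carried as the payload of OVS_CT_ATTR_EVENTMASK nested inside OVS_ACTION_ATTR_CT. A sketch of the "new and destroy only" policy described above; the function name is illustrative:

#include <stdint.h>
#include <linux/netfilter/nf_conntrack_common.h>	/* enum ip_conntrack_events */

/* Deliver only NFNLGRP_CONNTRACK_NEW and NFNLGRP_CONNTRACK_DESTROY
 * notifications; suppress the per-update noise. */
static uint32_t ct_eventmask_new_destroy(void)
{
	return (UINT32_C(1) << IPCT_NEW) | (UINT32_C(1) << IPCT_DESTROY);
}
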
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 7a69f2a4ca0c06a68487ff382c6b84f8acab323b..d613be3b3239e476d728ffa323347ef42e97eaec 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -37,7 +37,20 @@ enum {
#define TC_ACT_QUEUED		5
#define TC_ACT_REPEAT		6
#define TC_ACT_REDIRECT		7
-#define TC_ACT_JUMP		0x10000000
+
+/* There is a special kind of action called an "extended action",
+ * which needs a value parameter. These have a local opcode located in
+ * the highest nibble, starting from 1. The rest of the bits
+ * are used to carry the value. These two parts together make
+ * a combined opcode.
+ */
+#define __TC_ACT_EXT_SHIFT	28
+#define __TC_ACT_EXT(local)	((local) << __TC_ACT_EXT_SHIFT)
+#define TC_ACT_EXT_VAL_MASK	((1 << __TC_ACT_EXT_SHIFT) - 1)
+#define TC_ACT_EXT_CMP(combined, opcode) \
+	(((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
+
+#define TC_ACT_JUMP	__TC_ACT_EXT(1)

/* Action type identifiers */
enum {
@@ -432,6 +445,11 @@ enum {
	TCA_FLOWER_KEY_ARP_THA,		/* ETH_ALEN */
	TCA_FLOWER_KEY_ARP_THA_MASK,	/* ETH_ALEN */

+	TCA_FLOWER_KEY_MPLS_TTL,	/* u8 - 8 bits */
+	TCA_FLOWER_KEY_MPLS_BOS,	/* u8 - 1 bit */
+	TCA_FLOWER_KEY_MPLS_TC,		/* u8 - 3 bits */
+	TCA_FLOWER_KEY_MPLS_LABEL,	/* be32 - 20 bits */
+
	__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index df7451d351311cd915fc96752fcaeb0a43b5d112..099bf5528fed30008bfbde3529315be35c0411f9 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -617,6 +617,14 @@ struct tc_drr_stats {
#define TC_QOPT_BITMASK		15
#define TC_QOPT_MAX_QUEUE	16

+enum {
+	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
+	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
+	__TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
struct tc_mqprio_qopt {
	__u8	num_tc;
	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 6546917d605a916bfd5a905e30eb05d68fd6ad6b..cce061382e4073d4fe8296a00f22eba42cf13818 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -122,6 +122,8 @@ enum {
	RTM_NEWNETCONF = 80,
#define RTM_NEWNETCONF RTM_NEWNETCONF
+	RTM_DELNETCONF,
+#define RTM_DELNETCONF RTM_DELNETCONF
	RTM_GETNETCONF = 82,
#define RTM_GETNETCONF RTM_GETNETCONF
@@ -319,6 +321,7 @@ enum rtattr_type_t {
	RTA_EXPIRES,
	RTA_PAD,
	RTA_UID,
+	RTA_TTL_PROPAGATE,
	__RTA_MAX
};
@@ -545,6 +548,7 @@ enum {
	TCA_STATS2,
	TCA_STAB,
	TCA_PAD,
+	TCA_DUMP_INVISIBLE,
	__TCA_MAX
};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d3ae381fcf3327489c82e2f47eb39a363ec030c7..ced9d8b974268ed270661c3e2da77165e3a24784 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -115,6 +115,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_PR_SUPPORTED	113
#define SCTP_DEFAULT_PRINFO	114
#define SCTP_PR_ASSOC_STATUS	115
+#define SCTP_PR_STREAM_STATUS	116
+#define SCTP_RECONFIG_SUPPORTED	117
#define SCTP_ENABLE_STREAM_RESET	118
#define SCTP_RESET_STREAMS	119
#define SCTP_RESET_ASSOC	120
@@ -502,6 +504,28 @@ struct sctp_stream_reset_event {
	__u16 strreset_stream_list[];
};

+#define SCTP_ASSOC_RESET_DENIED		0x0004
+#define SCTP_ASSOC_RESET_FAILED		0x0008
+struct sctp_assoc_reset_event {
+	__u16 assocreset_type;
+	__u16 assocreset_flags;
+	__u32 assocreset_length;
+	sctp_assoc_t assocreset_assoc_id;
+	__u32 assocreset_local_tsn;
+	__u32 assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_CHANGE_DENIED	0x0004
+#define SCTP_ASSOC_CHANGE_FAILED	0x0008
+struct sctp_stream_change_event {
+	__u16 strchange_type;
+	__u16 strchange_flags;
+	__u32 strchange_length;
+	sctp_assoc_t strchange_assoc_id;
+	__u16 strchange_instrms;
+	__u16 strchange_outstrms;
+};
+
/*
 * Described in Section 7.3
 *   Ancillary Data and Notification Interest Options
@@ -518,6 +542,8 @@ struct sctp_event_subscribe {
	__u8 sctp_authentication_event;
	__u8 sctp_sender_dry_event;
	__u8 sctp_stream_reset_event;
+	__u8 sctp_assoc_reset_event;
+	__u8 sctp_stream_change_event;
};

/*
@@ -543,6 +569,8 @@ union sctp_notification {
	struct sctp_authkey_event sn_authkey_event;
	struct
sctp_sender_dry_event sn_sender_dry_event; struct sctp_stream_reset_event sn_strreset_event; + struct sctp_assoc_reset_event sn_assocreset_event; + struct sctp_stream_change_event sn_strchange_event; }; /* Section 5.3.1 @@ -572,6 +600,10 @@ enum sctp_sn_type { #define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT SCTP_STREAM_RESET_EVENT, #define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT + SCTP_ASSOC_RESET_EVENT, +#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT + SCTP_STREAM_CHANGE_EVENT, +#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT }; /* Notification error codes used to fill up the error fields in some diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 3b2bed7ca9a4d92c5671e614f2bc598668805f75..95cffcb21dfdba7c974706131d0f43e21435e82d 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -177,7 +177,6 @@ enum LINUX_MIB_TIMEWAITED, /* TimeWaited */ LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */ LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */ - LINUX_MIB_PAWSPASSIVEREJECTED, /* PAWSPassiveRejected */ LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */ LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */ LINUX_MIB_DELAYEDACKS, /* DelayedACKs */ @@ -260,6 +259,7 @@ enum LINUX_MIB_TCPFASTOPENPASSIVEFAIL, /* TCPFastOpenPassiveFail */ LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ + LINUX_MIB_TCPFASTOPENBLACKHOLE, /* TCPFastOpenBlackholeDetect */ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */ LINUX_MIB_TCPAUTOCORKING, /* TCPAutoCorking */ diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index d2b12152e358f14e791ef3e842cb6eac7cc8ceec..e13d48058b8d0e5cf36e458e68e257d73a9a1e8f 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -568,6 +568,7 @@ enum { NET_IPV6_PROXY_NDP=23, NET_IPV6_ACCEPT_SOURCE_ROUTE=25, NET_IPV6_ACCEPT_RA_FROM_LOCAL=26, + NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27, __NET_IPV6_MAX }; diff --git a/include/uapi/linux/vsockmon.h b/include/uapi/linux/vsockmon.h new file mode 100644 index 0000000000000000000000000000000000000000..a08b522ef597894b092ecc0fbfe6f35a2580fc1c --- /dev/null +++ b/include/uapi/linux/vsockmon.h @@ -0,0 +1,60 @@ +#ifndef _UAPI_VSOCKMON_H +#define _UAPI_VSOCKMON_H + +#include + +/* + * vsockmon is the AF_VSOCK packet capture device. Packets captured have the + * following layout: + * + * +-----------------------------------+ + * | vsockmon header | + * | (struct af_vsockmon_hdr) | + * +-----------------------------------+ + * | transport header | + * | (af_vsockmon_hdr->len bytes long) | + * +-----------------------------------+ + * | payload | + * | (until end of packet) | + * +-----------------------------------+ + * + * The vsockmon header is a transport-independent description of the packet. + * It duplicates some of the information from the transport header so that + * no transport-specific knowledge is necessary to process packets. + * + * The transport header is useful for low-level transport-specific packet + * analysis. Transport type is given in af_vsockmon_hdr->transport and + * transport header length is given in af_vsockmon_hdr->len. + * + * If af_vsockmon_hdr->op is AF_VSOCK_OP_PAYLOAD then the payload follows the + * transport header. Other ops do not have a payload. 
+ */ + +struct af_vsockmon_hdr { + __le64 src_cid; + __le64 dst_cid; + __le32 src_port; + __le32 dst_port; + __le16 op; /* enum af_vsockmon_op */ + __le16 transport; /* enum af_vsockmon_transport */ + __le16 len; /* Transport header length */ + __u8 reserved[2]; +}; + +enum af_vsockmon_op { + AF_VSOCK_OP_UNKNOWN = 0, + AF_VSOCK_OP_CONNECT = 1, + AF_VSOCK_OP_DISCONNECT = 2, + AF_VSOCK_OP_CONTROL = 3, + AF_VSOCK_OP_PAYLOAD = 4, +}; + +enum af_vsockmon_transport { + AF_VSOCK_TRANSPORT_UNKNOWN = 0, + AF_VSOCK_TRANSPORT_NO_INFO = 1, /* No transport information */ + + /* Transport header type: struct virtio_vsock_hdr */ + AF_VSOCK_TRANSPORT_VIRTIO = 2, +}; + +#endif diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 1fc62b239f1b7ea9f6da74740f3e35894bbc5a32..2b384ff09fa05411bdffdaa53472591554aa2be6 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -303,6 +303,7 @@ enum xfrm_attr_type_t { XFRMA_PROTO, /* __u8 */ XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ XFRMA_PAD, + XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ __XFRMA_MAX #define XFRMA_MAX (__XFRMA_MAX - 1) @@ -494,6 +495,13 @@ struct xfrm_address_filter { __u8 dplen; }; +struct xfrm_user_offload { + int ifindex; + __u8 flags; +}; +#define XFRM_OFFLOAD_IPV6 1 +#define XFRM_OFFLOAD_INBOUND 2 + #ifndef __KERNEL__ /* backwards compatibility for userspace */ #define XFRMGRP_ACQUIRE 1 diff --git a/kernel/audit.c b/kernel/audit.c index a871bf80fde1adc79040936475b7045971e72d76..dc202d582aa16ce59b3cee61cd9b5854e237a193 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1399,7 +1399,7 @@ static void audit_receive_skb(struct sk_buff *skb) err = audit_receive_msg(skb, nlh); /* if err or if this message says it wants a response */ if (err || (nlh->nlmsg_flags & NLM_F_ACK)) - netlink_ack(skb, nlh, err); + netlink_ack(skb, nlh, err, NULL); nlh = nlmsg_next(nlh, &len); } diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index e1ce4f4fd7fd47fda2c18776573c0f65479c6728..e1e5e658f2dbf887be70fbfc9492d4365aef5064 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,7 +1,7 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o -obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o +obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 6b6f41f0b21164a3cc2c26bb71be2b89098bbdbd..5e00b2333c26e3256a6f4dc51a2a58011a2c9db6 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -1,4 +1,5 @@ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * Copyright (c) 2016,2017 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -16,6 +17,8 @@ #include #include +#include "map_in_map.h" + static void bpf_array_free_percpu(struct bpf_array *array) { int i; @@ -113,6 +116,30 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key) return array->value + array->elem_size * index; } +/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ +static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) +{ + struct bpf_insn *insn = insn_buf; + u32 elem_size = round_up(map->value_size, 8); + const int ret = BPF_REG_0; + const int map_ptr = BPF_REG_1; + const int index = BPF_REG_2; + + *insn++ = 
BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); + *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); + + if (is_power_of_2(elem_size)) { + *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); + } else { + *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); + } + *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + *insn++ = BPF_MOV64_IMM(ret, 0); + return insn - insn_buf; +} + /* Called from eBPF program */ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) { @@ -155,7 +182,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) { struct bpf_array *array = container_of(map, struct bpf_array, map); - u32 index = *(u32 *)key; + u32 index = key ? *(u32 *)key : U32_MAX; u32 *next = (u32 *)next_key; if (index >= array->map.max_entries) { @@ -260,21 +287,17 @@ static void array_map_free(struct bpf_map *map) bpf_map_area_free(array); } -static const struct bpf_map_ops array_ops = { +const struct bpf_map_ops array_map_ops = { .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, .map_lookup_elem = array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, + .map_gen_lookup = array_map_gen_lookup, }; -static struct bpf_map_type_list array_type __ro_after_init = { - .ops = &array_ops, - .type = BPF_MAP_TYPE_ARRAY, -}; - -static const struct bpf_map_ops percpu_array_ops = { +const struct bpf_map_ops percpu_array_map_ops = { .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, @@ -283,19 +306,6 @@ static const struct bpf_map_ops percpu_array_ops = { .map_delete_elem = array_map_delete_elem, }; -static struct bpf_map_type_list percpu_array_type __ro_after_init = { - .ops = &percpu_array_ops, - .type = BPF_MAP_TYPE_PERCPU_ARRAY, -}; - -static int __init register_array_map(void) -{ - bpf_register_map_type(&array_type); - bpf_register_map_type(&percpu_array_type); - return 0; -} -late_initcall(register_array_map); - static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr) { /* only file descriptors can be stored in this type of map */ @@ -399,7 +409,7 @@ void bpf_fd_array_map_clear(struct bpf_map *map) fd_array_map_delete_elem(map, &i); } -static const struct bpf_map_ops prog_array_ops = { +const struct bpf_map_ops prog_array_map_ops = { .map_alloc = fd_array_map_alloc, .map_free = fd_array_map_free, .map_get_next_key = array_map_get_next_key, @@ -409,18 +419,6 @@ static const struct bpf_map_ops prog_array_ops = { .map_fd_put_ptr = prog_fd_array_put_ptr, }; -static struct bpf_map_type_list prog_array_type __ro_after_init = { - .ops = &prog_array_ops, - .type = BPF_MAP_TYPE_PROG_ARRAY, -}; - -static int __init register_prog_array_map(void) -{ - bpf_register_map_type(&prog_array_type); - return 0; -} -late_initcall(register_prog_array_map); - static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, struct file *map_file) { @@ -511,7 +509,7 @@ static void perf_event_fd_array_release(struct bpf_map *map, rcu_read_unlock(); } -static const struct bpf_map_ops perf_event_array_ops = { +const struct bpf_map_ops perf_event_array_map_ops = { .map_alloc = fd_array_map_alloc, .map_free = fd_array_map_free, .map_get_next_key = array_map_get_next_key, @@ -522,18 +520,6 @@ static const struct bpf_map_ops 
perf_event_array_ops = { .map_release = perf_event_fd_array_release, }; -static struct bpf_map_type_list perf_event_array_type __ro_after_init = { - .ops = &perf_event_array_ops, - .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, -}; - -static int __init register_perf_event_array_map(void) -{ - bpf_register_map_type(&perf_event_array_type); - return 0; -} -late_initcall(register_perf_event_array_map); - #ifdef CONFIG_CGROUPS static void *cgroup_fd_array_get_ptr(struct bpf_map *map, struct file *map_file /* not used */, @@ -554,7 +540,7 @@ static void cgroup_fd_array_free(struct bpf_map *map) fd_array_map_free(map); } -static const struct bpf_map_ops cgroup_array_ops = { +const struct bpf_map_ops cgroup_array_map_ops = { .map_alloc = fd_array_map_alloc, .map_free = cgroup_fd_array_free, .map_get_next_key = array_map_get_next_key, @@ -563,16 +549,53 @@ static const struct bpf_map_ops cgroup_array_ops = { .map_fd_get_ptr = cgroup_fd_array_get_ptr, .map_fd_put_ptr = cgroup_fd_array_put_ptr, }; +#endif -static struct bpf_map_type_list cgroup_array_type __ro_after_init = { - .ops = &cgroup_array_ops, - .type = BPF_MAP_TYPE_CGROUP_ARRAY, -}; +static struct bpf_map *array_of_map_alloc(union bpf_attr *attr) +{ + struct bpf_map *map, *inner_map_meta; + + inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); + if (IS_ERR(inner_map_meta)) + return inner_map_meta; -static int __init register_cgroup_array_map(void) + map = fd_array_map_alloc(attr); + if (IS_ERR(map)) { + bpf_map_meta_free(inner_map_meta); + return map; + } + + map->inner_map_meta = inner_map_meta; + + return map; +} + +static void array_of_map_free(struct bpf_map *map) { - bpf_register_map_type(&cgroup_array_type); - return 0; + /* map->inner_map_meta is only accessed by syscall which + * is protected by fdget/fdput. 
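
[Editor's aside] bpf_map_meta_alloc() only copies the template map's geometry, so the userspace flow for the new array-of-maps type is: create any map with the key/value shape the inner maps will share, hand its fd to BPF_MAP_CREATE via the new inner_map_fd attribute, then close it. A hedged sketch using the raw bpf(2) syscall, assuming uapi headers from a kernel with this series applied; make_outer_map() is an illustrative name and error handling is trimmed:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int sys_bpf(int cmd, union bpf_attr *attr)
    {
        return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
    }

    int make_outer_map(void)
    {
        union bpf_attr attr;
        int inner_fd, outer_fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_ARRAY;   /* geometry template */
        attr.key_size    = 4;
        attr.value_size  = 8;
        attr.max_entries = 16;
        inner_fd = sys_bpf(BPF_MAP_CREATE, &attr);

        memset(&attr, 0, sizeof(attr));
        attr.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS;
        attr.key_size     = 4;
        attr.value_size   = 4;                   /* values are map fds */
        attr.max_entries  = 8;
        attr.inner_map_fd = inner_fd;
        outer_fd = sys_bpf(BPF_MAP_CREATE, &attr);

        close(inner_fd);    /* the meta was copied; the template isn't pinned */
        return outer_fd;
    }
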
+ */ + bpf_map_meta_free(map->inner_map_meta); + bpf_fd_array_map_clear(map); + fd_array_map_free(map); } -late_initcall(register_cgroup_array_map); -#endif + +static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_map **inner_map = array_map_lookup_elem(map, key); + + if (!inner_map) + return NULL; + + return READ_ONCE(*inner_map); +} + +const struct bpf_map_ops array_of_maps_map_ops = { + .map_alloc = array_of_map_alloc, + .map_free = array_of_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = array_of_map_lookup_elem, + .map_delete_elem = fd_array_map_delete_elem, + .map_fd_get_ptr = bpf_map_fd_get_ptr, + .map_fd_put_ptr = bpf_map_fd_put_ptr, +}; diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c index f62d1d56f41d0c5f3e3ef2c12b7dea3eb6578a10..e6ef4401a13804b6a6604bf22671e0c9dc4d39b2 100644 --- a/kernel/bpf/bpf_lru_list.c +++ b/kernel/bpf/bpf_lru_list.c @@ -13,7 +13,7 @@ #define LOCAL_FREE_TARGET (128) #define LOCAL_NR_SCANS LOCAL_FREE_TARGET -#define PERCPU_FREE_TARGET (16) +#define PERCPU_FREE_TARGET (4) #define PERCPU_NR_SCANS PERCPU_FREE_TARGET /* Helpers to get the local list index */ diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index da0f53690295610ff5ae447ed98fb6e70c99c4f1..ea6033cba94721fd8ca080354c825771d93620fb 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -154,7 +154,7 @@ int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent, /** * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering - * @sk: The socken sending or receiving traffic + * @sk: The socket sending or receiving traffic * @skb: The skb that is being sent or received * @type: The type of program to be exectuted * @@ -189,10 +189,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, prog = rcu_dereference(cgrp->bpf.effective[type]); if (prog) { unsigned int offset = skb->data - skb_network_header(skb); + struct sock *save_sk = skb->sk; + skb->sk = sk; __skb_push(skb, offset); ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 
0 : -EPERM; __skb_pull(skb, offset); + skb->sk = save_sk; } rcu_read_unlock(); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index b4f1cb0c5ac7104c3f12f9ed5c1e3fe159c57824..6f81e0f5a0faa2f7df85ae1771f9dfde48e59651 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -394,27 +394,23 @@ static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) void bpf_prog_kallsyms_add(struct bpf_prog *fp) { - unsigned long flags; - if (!bpf_prog_kallsyms_candidate(fp) || !capable(CAP_SYS_ADMIN)) return; - spin_lock_irqsave(&bpf_lock, flags); + spin_lock_bh(&bpf_lock); bpf_prog_ksym_node_add(fp->aux); - spin_unlock_irqrestore(&bpf_lock, flags); + spin_unlock_bh(&bpf_lock); } void bpf_prog_kallsyms_del(struct bpf_prog *fp) { - unsigned long flags; - if (!bpf_prog_kallsyms_candidate(fp)) return; - spin_lock_irqsave(&bpf_lock, flags); + spin_lock_bh(&bpf_lock); bpf_prog_ksym_node_del(fp->aux); - spin_unlock_irqrestore(&bpf_lock, flags); + spin_unlock_bh(&bpf_lock); } static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 361a69dfe5434d6afb554477b899e91db90fb29f..004334ea13ba3f56f10b33e9a1a50a97906000e4 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -16,6 +16,7 @@ #include #include "percpu_freelist.h" #include "bpf_lru_list.h" +#include "map_in_map.h" struct bucket { struct hlist_nulls_head head; @@ -86,6 +87,11 @@ static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size return *(void __percpu **)(l->key + key_size); } +static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) +{ + return *(void **)(l->key + roundup(map->key_size, 8)); +} + static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) { return (struct htab_elem *) (htab->elems + i * htab->elem_size); @@ -426,7 +432,11 @@ static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, return NULL; } -/* Called from syscall or from eBPF program */ +/* Called from syscall or from eBPF program directly, so + * arguments have to match bpf_map_lookup_elem() exactly. + * The return value is adjusted by BPF instructions + * in htab_map_gen_lookup(). + */ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); @@ -458,6 +468,30 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key) return NULL; } +/* inline bpf_map_lookup_elem() call. 
+ * Instead of: + * bpf_prog + * bpf_map_lookup_elem + * map->ops->map_lookup_elem + * htab_map_lookup_elem + * __htab_map_lookup_elem + * do: + * bpf_prog + * __htab_map_lookup_elem + */ +static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) +{ + struct bpf_insn *insn = insn_buf; + const int ret = BPF_REG_0; + + *insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem); + *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); + *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, + offsetof(struct htab_elem, key) + + round_up(map->key_size, 8)); + return insn - insn_buf; +} + static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) { struct htab_elem *l = __htab_map_lookup_elem(map, key); @@ -506,12 +540,15 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) struct hlist_nulls_head *head; struct htab_elem *l, *next_l; u32 hash, key_size; - int i; + int i = 0; WARN_ON_ONCE(!rcu_read_lock_held()); key_size = map->key_size; + if (!key) + goto find_first_elem; + hash = htab_map_hash(key, key_size); head = select_bucket(htab, hash); @@ -519,10 +556,8 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) /* lookup the key */ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); - if (!l) { - i = 0; + if (!l) goto find_first_elem; - } /* key was found, get next key in the same bucket */ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), @@ -582,6 +617,14 @@ static void htab_elem_free_rcu(struct rcu_head *head) static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) { + struct bpf_map *map = &htab->map; + + if (map->ops->map_fd_put_ptr) { + void *ptr = fd_htab_map_get_ptr(map, l); + + map->ops->map_fd_put_ptr(ptr); + } + if (htab_is_prealloc(htab)) { pcpu_freelist_push(&htab->freelist, &l->fnode); } else { @@ -1027,6 +1070,7 @@ static void delete_all_elements(struct bpf_htab *htab) } } } + /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ static void htab_map_free(struct bpf_map *map) { @@ -1053,21 +1097,17 @@ static void htab_map_free(struct bpf_map *map) kfree(htab); } -static const struct bpf_map_ops htab_ops = { +const struct bpf_map_ops htab_map_ops = { .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, .map_lookup_elem = htab_map_lookup_elem, .map_update_elem = htab_map_update_elem, .map_delete_elem = htab_map_delete_elem, + .map_gen_lookup = htab_map_gen_lookup, }; -static struct bpf_map_type_list htab_type __ro_after_init = { - .ops = &htab_ops, - .type = BPF_MAP_TYPE_HASH, -}; - -static const struct bpf_map_ops htab_lru_ops = { +const struct bpf_map_ops htab_lru_map_ops = { .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, @@ -1076,11 +1116,6 @@ static const struct bpf_map_ops htab_lru_ops = { .map_delete_elem = htab_lru_map_delete_elem, }; -static struct bpf_map_type_list htab_lru_type __ro_after_init = { - .ops = &htab_lru_ops, - .type = BPF_MAP_TYPE_LRU_HASH, -}; - /* Called from eBPF program */ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) { @@ -1154,7 +1189,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, return ret; } -static const struct bpf_map_ops htab_percpu_ops = { +const struct bpf_map_ops htab_percpu_map_ops = { .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, @@ -1163,12 +1198,7 
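
[Editor's aside] At runtime, the three instructions htab_map_gen_lookup() emits above behave like this at the original helper-call site (an illustrative pseudo-assembly rendering, not literal verifier output):

    call __htab_map_lookup_elem    /* R1 = map, R2 = key, unchanged */
    if R0 == 0 goto +1             /* miss: R0 stays NULL */
    R0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
                                   /* hit: htab_elem ptr -> value ptr */

The final add is what converts the internal htab_elem pointer returned by __htab_map_lookup_elem() into the value pointer the program expects from bpf_map_lookup_elem(), which is why the helper's return value is described above as "adjusted by BPF instructions".
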
@@ static const struct bpf_map_ops htab_percpu_ops = { .map_delete_elem = htab_map_delete_elem, }; -static struct bpf_map_type_list htab_percpu_type __ro_after_init = { - .ops = &htab_percpu_ops, - .type = BPF_MAP_TYPE_PERCPU_HASH, -}; - -static const struct bpf_map_ops htab_lru_percpu_ops = { +const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, @@ -1177,17 +1207,102 @@ static const struct bpf_map_ops htab_lru_percpu_ops = { .map_delete_elem = htab_lru_map_delete_elem, }; -static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = { - .ops = &htab_lru_percpu_ops, - .type = BPF_MAP_TYPE_LRU_PERCPU_HASH, -}; +static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr) +{ + struct bpf_map *map; + + if (attr->value_size != sizeof(u32)) + return ERR_PTR(-EINVAL); + + /* pointer is stored internally */ + attr->value_size = sizeof(void *); + map = htab_map_alloc(attr); + attr->value_size = sizeof(u32); -static int __init register_htab_map(void) + return map; +} + +static void fd_htab_map_free(struct bpf_map *map) { - bpf_register_map_type(&htab_type); - bpf_register_map_type(&htab_percpu_type); - bpf_register_map_type(&htab_lru_type); - bpf_register_map_type(&htab_lru_percpu_type); - return 0; + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct hlist_nulls_node *n; + struct hlist_nulls_head *head; + struct htab_elem *l; + int i; + + for (i = 0; i < htab->n_buckets; i++) { + head = select_bucket(htab, i); + + hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { + void *ptr = fd_htab_map_get_ptr(map, l); + + map->ops->map_fd_put_ptr(ptr); + } + } + + htab_map_free(map); +} + +/* only called from syscall */ +int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags) +{ + void *ptr; + int ret; + u32 ufd = *(u32 *)value; + + ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + ret = htab_map_update_elem(map, key, &ptr, map_flags); + if (ret) + map->ops->map_fd_put_ptr(ptr); + + return ret; +} + +static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr) +{ + struct bpf_map *map, *inner_map_meta; + + inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); + if (IS_ERR(inner_map_meta)) + return inner_map_meta; + + map = fd_htab_map_alloc(attr); + if (IS_ERR(map)) { + bpf_map_meta_free(inner_map_meta); + return map; + } + + map->inner_map_meta = inner_map_meta; + + return map; } -late_initcall(register_htab_map); + +static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_map **inner_map = htab_map_lookup_elem(map, key); + + if (!inner_map) + return NULL; + + return READ_ONCE(*inner_map); +} + +static void htab_of_map_free(struct bpf_map *map) +{ + bpf_map_meta_free(map->inner_map_meta); + fd_htab_map_free(map); +} + +const struct bpf_map_ops htab_of_maps_map_ops = { + .map_alloc = htab_of_map_alloc, + .map_free = htab_of_map_free, + .map_get_next_key = htab_map_get_next_key, + .map_lookup_elem = htab_of_map_lookup_elem, + .map_delete_elem = htab_map_delete_elem, + .map_fd_get_ptr = bpf_map_fd_get_ptr, + .map_fd_put_ptr = bpf_map_fd_put_ptr, +}; diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index b37bd9ab7f574242722c1d9b0503b896019488b2..39cfafd895b80d2f0fb5054626af001949a8ad98 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -505,7 +505,7 @@ static int trie_get_next_key(struct bpf_map *map, void 
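
[Editor's aside] As with the array flavour, the value userspace writes into a hash-of-maps is a map *fd*; bpf_fd_htab_map_update_elem() above resolves it to a map pointer before insertion. A hedged sketch (put_inner_map() is an illustrative name; the outer map is assumed to have been created with value_size == 4 and a matching inner-map template):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int put_inner_map(int outer_fd, __u32 key, int inner_fd)
    {
        union bpf_attr attr;
        __u32 fd = inner_fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = outer_fd;
        attr.key    = (__u64)(unsigned long)&key;
        attr.value  = (__u64)(unsigned long)&fd;   /* the fd, not a pointer */
        attr.flags  = BPF_ANY;
        return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
    }
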
*key, void *next_key) return -ENOTSUPP; } -static const struct bpf_map_ops trie_ops = { +const struct bpf_map_ops trie_map_ops = { .map_alloc = trie_alloc, .map_free = trie_free, .map_get_next_key = trie_get_next_key, @@ -513,15 +513,3 @@ static const struct bpf_map_ops trie_ops = { .map_update_elem = trie_update_elem, .map_delete_elem = trie_delete_elem, }; - -static struct bpf_map_type_list trie_type __ro_after_init = { - .ops = &trie_ops, - .type = BPF_MAP_TYPE_LPM_TRIE, -}; - -static int __init register_trie_map(void) -{ - bpf_register_map_type(&trie_type); - return 0; -} -late_initcall(register_trie_map); diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c new file mode 100644 index 0000000000000000000000000000000000000000..59bcdf821ae47477f2a058af3e6918f0ce222325 --- /dev/null +++ b/kernel/bpf/map_in_map.c @@ -0,0 +1,97 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include + +#include "map_in_map.h" + +struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) +{ + struct bpf_map *inner_map, *inner_map_meta; + struct fd f; + + f = fdget(inner_map_ufd); + inner_map = __bpf_map_get(f); + if (IS_ERR(inner_map)) + return inner_map; + + /* prog_array->owner_prog_type and owner_jited + * is a runtime binding. Doing static check alone + * in the verifier is not enough. + */ + if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { + fdput(f); + return ERR_PTR(-ENOTSUPP); + } + + /* Does not support >1 level map-in-map */ + if (inner_map->inner_map_meta) { + fdput(f); + return ERR_PTR(-EINVAL); + } + + inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); + if (!inner_map_meta) { + fdput(f); + return ERR_PTR(-ENOMEM); + } + + inner_map_meta->map_type = inner_map->map_type; + inner_map_meta->key_size = inner_map->key_size; + inner_map_meta->value_size = inner_map->value_size; + inner_map_meta->map_flags = inner_map->map_flags; + inner_map_meta->ops = inner_map->ops; + inner_map_meta->max_entries = inner_map->max_entries; + + fdput(f); + return inner_map_meta; +} + +void bpf_map_meta_free(struct bpf_map *map_meta) +{ + kfree(map_meta); +} + +bool bpf_map_meta_equal(const struct bpf_map *meta0, + const struct bpf_map *meta1) +{ + /* No need to compare ops because it is covered by map_type */ + return meta0->map_type == meta1->map_type && + meta0->key_size == meta1->key_size && + meta0->value_size == meta1->value_size && + meta0->map_flags == meta1->map_flags && + meta0->max_entries == meta1->max_entries; +} + +void *bpf_map_fd_get_ptr(struct bpf_map *map, + struct file *map_file /* not used */, + int ufd) +{ + struct bpf_map *inner_map; + struct fd f; + + f = fdget(ufd); + inner_map = __bpf_map_get(f); + if (IS_ERR(inner_map)) + return inner_map; + + if (bpf_map_meta_equal(map->inner_map_meta, inner_map)) + inner_map = bpf_map_inc(inner_map, false); + else + inner_map = ERR_PTR(-EINVAL); + + fdput(f); + return inner_map; +} + +void bpf_map_fd_put_ptr(void *ptr) +{ + /* ptr->ops->map_free() has to go through one + * rcu grace period by itself. 
+ */ + bpf_map_put(ptr); +} diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h new file mode 100644 index 0000000000000000000000000000000000000000..177fadb689dca9075ff9c43055d392821908ba6d --- /dev/null +++ b/kernel/bpf/map_in_map.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef __MAP_IN_MAP_H__ +#define __MAP_IN_MAP_H__ + +#include + +struct file; +struct bpf_map; + +struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd); +void bpf_map_meta_free(struct bpf_map *map_meta); +bool bpf_map_meta_equal(const struct bpf_map *meta0, + const struct bpf_map *meta1); +void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file, + int ufd); +void bpf_map_fd_put_ptr(void *ptr); + +#endif diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 22aa45cd0324e320b9cd8b0f89bf9befdaae74b6..4dfd6f2ec2f9725681f490971e60acc8033c64b6 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -264,7 +264,7 @@ static void stack_map_free(struct bpf_map *map) put_callchain_buffers(); } -static const struct bpf_map_ops stack_map_ops = { +const struct bpf_map_ops stack_map_ops = { .map_alloc = stack_map_alloc, .map_free = stack_map_free, .map_get_next_key = stack_map_get_next_key, @@ -272,15 +272,3 @@ static const struct bpf_map_ops stack_map_ops = { .map_update_elem = stack_map_update_elem, .map_delete_elem = stack_map_delete_elem, }; - -static struct bpf_map_type_list stack_map_type __ro_after_init = { - .ops = &stack_map_ops, - .type = BPF_MAP_TYPE_STACK_TRACE, -}; - -static int __init register_stack_map(void) -{ - bpf_register_map_type(&stack_map_type); - return 0; -} -late_initcall(register_stack_map); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 821f9e807de5705d5b4d65e502fb13a06d3215bb..13642c73dca0b4c4461099f3a2774d8ec733160e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -27,30 +27,29 @@ DEFINE_PER_CPU(int, bpf_prog_active); int sysctl_unprivileged_bpf_disabled __read_mostly; -static LIST_HEAD(bpf_map_types); +static const struct bpf_map_ops * const bpf_map_types[] = { +#define BPF_PROG_TYPE(_id, _ops) +#define BPF_MAP_TYPE(_id, _ops) \ + [_id] = &_ops, +#include +#undef BPF_PROG_TYPE +#undef BPF_MAP_TYPE +}; static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) { - struct bpf_map_type_list *tl; struct bpf_map *map; - list_for_each_entry(tl, &bpf_map_types, list_node) { - if (tl->type == attr->map_type) { - map = tl->ops->map_alloc(attr); - if (IS_ERR(map)) - return map; - map->ops = tl->ops; - map->map_type = attr->map_type; - return map; - } - } - return ERR_PTR(-EINVAL); -} + if (attr->map_type >= ARRAY_SIZE(bpf_map_types) || + !bpf_map_types[attr->map_type]) + return ERR_PTR(-EINVAL); -/* boot time registration of different map implementations */ -void bpf_register_map_type(struct bpf_map_type_list *tl) -{ - list_add(&tl->list_node, &bpf_map_types); + map = bpf_map_types[attr->map_type]->map_alloc(attr); + if (IS_ERR(map)) + return map; + map->ops = bpf_map_types[attr->map_type]; + map->map_type = attr->map_type; + return map; } void *bpf_map_area_alloc(size_t size) @@ -215,7 +214,7 @@ int bpf_map_new_fd(struct bpf_map *map) offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ sizeof(attr->CMD##_LAST_FIELD)) != NULL -#define BPF_MAP_CREATE_LAST_FIELD map_flags +#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd /* called via 
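
[Editor's aside] The list_head registries and their late_initcall registration functions are gone: both dispatch tables are now generated at compile time by re-including one header of BPF_PROG_TYPE()/BPF_MAP_TYPE() entries (linux/bpf_types.h in the mainline series) with the macros redefined per consumer. The X-macro pattern, reduced to a self-contained sketch that keeps the list in a macro instead of a separate header:

    #include <stdio.h>

    /* One list, two expansions: an enum of ids and an ops table indexed
     * by id. Adding a type is a one-line change, no runtime registration.
     */
    #define MAP_TYPES(X)           \
        X(MAP_ARRAY, array_ops)    \
        X(MAP_HASH,  hash_ops)

    struct ops { const char *name; };
    static const struct ops array_ops = { "array" };
    static const struct ops hash_ops  = { "hash"  };

    #define AS_ENUM(id, ops)  id,
    enum map_type { MAP_TYPES(AS_ENUM) NR_MAP_TYPES };

    #define AS_ENTRY(id, ops) [id] = &ops,
    static const struct ops *const map_types[NR_MAP_TYPES] = {
        MAP_TYPES(AS_ENTRY)
    };

    int main(void)
    {
        printf("%s\n", map_types[MAP_HASH]->name);   /* -> "hash" */
        return 0;
    }

A bounds-plus-NULL check at lookup time, as in find_and_alloc_map() above, then replaces the old list walk.
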
syscall */ static int map_create(union bpf_attr *attr) { @@ -352,6 +351,9 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_percpu_array_copy(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { err = bpf_stackmap_copy(map, key, value); + } else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || + map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + err = -ENOTSUPP; } else { rcu_read_lock(); ptr = map->ops->map_lookup_elem(map, key); @@ -438,11 +440,17 @@ static int map_update_elem(union bpf_attr *attr) err = bpf_percpu_array_update(map, key, value, attr->flags); } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || map->map_type == BPF_MAP_TYPE_PROG_ARRAY || - map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) { + map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || + map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) { rcu_read_lock(); err = bpf_fd_array_map_update_elem(map, f.file, key, value, attr->flags); rcu_read_unlock(); + } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + rcu_read_lock(); + err = bpf_fd_htab_map_update_elem(map, f.file, key, value, + attr->flags); + rcu_read_unlock(); } else { rcu_read_lock(); err = map->ops->map_update_elem(map, key, value, attr->flags); @@ -528,14 +536,18 @@ static int map_get_next_key(union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); - err = -ENOMEM; - key = kmalloc(map->key_size, GFP_USER); - if (!key) - goto err_put; + if (ukey) { + err = -ENOMEM; + key = kmalloc(map->key_size, GFP_USER); + if (!key) + goto err_put; - err = -EFAULT; - if (copy_from_user(key, ukey, map->key_size) != 0) - goto free_key; + err = -EFAULT; + if (copy_from_user(key, ukey, map->key_size) != 0) + goto free_key; + } else { + key = NULL; + } err = -ENOMEM; next_key = kmalloc(map->key_size, GFP_USER); @@ -564,87 +576,23 @@ static int map_get_next_key(union bpf_attr *attr) return err; } -static LIST_HEAD(bpf_prog_types); +static const struct bpf_verifier_ops * const bpf_prog_types[] = { +#define BPF_PROG_TYPE(_id, _ops) \ + [_id] = &_ops, +#define BPF_MAP_TYPE(_id, _ops) +#include +#undef BPF_PROG_TYPE +#undef BPF_MAP_TYPE +}; static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) { - struct bpf_prog_type_list *tl; - - list_for_each_entry(tl, &bpf_prog_types, list_node) { - if (tl->type == type) { - prog->aux->ops = tl->ops; - prog->type = type; - return 0; - } - } - - return -EINVAL; -} - -void bpf_register_prog_type(struct bpf_prog_type_list *tl) -{ - list_add(&tl->list_node, &bpf_prog_types); -} - -/* fixup insn->imm field of bpf_call instructions: - * if (insn->imm == BPF_FUNC_map_lookup_elem) - * insn->imm = bpf_map_lookup_elem - __bpf_call_base; - * else if (insn->imm == BPF_FUNC_map_update_elem) - * insn->imm = bpf_map_update_elem - __bpf_call_base; - * else ... 
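
[Editor's aside] The NULL-key handling above (together with the matching htab/array get_next_key changes earlier) finally gives userspace a race-free way to find a map's first key; previously you had to probe with a key you hoped was absent. A sketch of a full key walk, assuming 4-byte keys; walk_map()/map_next_key() are illustrative names:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int map_next_key(int fd, const void *key, void *next)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd   = fd;
        attr.key      = (__u64)(unsigned long)key;  /* NULL == start */
        attr.next_key = (__u64)(unsigned long)next;
        return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
    }

    void walk_map(int fd)
    {
        __u32 key, next;
        int err;

        for (err = map_next_key(fd, NULL, &next); !err;
             err = map_next_key(fd, &key, &next))
            key = next;    /* ... process `next` here ... */
    }
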
- * - * this function is called after eBPF program passed verification - */ -static void fixup_bpf_calls(struct bpf_prog *prog) -{ - const struct bpf_func_proto *fn; - int i; + if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type]) + return -EINVAL; - for (i = 0; i < prog->len; i++) { - struct bpf_insn *insn = &prog->insnsi[i]; - - if (insn->code == (BPF_JMP | BPF_CALL)) { - /* we reach here when program has bpf_call instructions - * and it passed bpf_check(), means that - * ops->get_func_proto must have been supplied, check it - */ - BUG_ON(!prog->aux->ops->get_func_proto); - - if (insn->imm == BPF_FUNC_get_route_realm) - prog->dst_needed = 1; - if (insn->imm == BPF_FUNC_get_prandom_u32) - bpf_user_rnd_init_once(); - if (insn->imm == BPF_FUNC_xdp_adjust_head) - prog->xdp_adjust_head = 1; - if (insn->imm == BPF_FUNC_tail_call) { - /* If we tail call into other programs, we - * cannot make any assumptions since they - * can be replaced dynamically during runtime - * in the program array. - */ - prog->cb_access = 1; - prog->xdp_adjust_head = 1; - - /* mark bpf_tail_call as different opcode - * to avoid conditional branch in - * interpeter for every normal call - * and to prevent accidental JITing by - * JIT compiler that doesn't support - * bpf_tail_call yet - */ - insn->imm = 0; - insn->code |= BPF_X; - continue; - } - - fn = prog->aux->ops->get_func_proto(insn->imm); - /* all functions that have prototype and verifier allowed - * programs to call them, must be real in-kernel functions - */ - BUG_ON(!fn->func); - insn->imm = fn->func - __bpf_call_base; - } - } + prog->aux->ops = bpf_prog_types[type]; + prog->type = type; + return 0; } /* drop refcnt on maps used by eBPF program and free auxilary data */ @@ -900,9 +848,6 @@ static int bpf_prog_load(union bpf_attr *attr) if (err < 0) goto free_used_maps; - /* fixup BPF_CALL->imm field */ - fixup_bpf_calls(prog); - /* eBPF program is ready to be JITed */ prog = bpf_prog_select_runtime(prog, &err); if (err < 0) @@ -1028,6 +973,28 @@ static int bpf_prog_detach(const union bpf_attr *attr) } #endif /* CONFIG_CGROUP_BPF */ +#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration + +static int bpf_prog_test_run(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + struct bpf_prog *prog; + int ret = -ENOTSUPP; + + if (CHECK_ATTR(BPF_PROG_TEST_RUN)) + return -EINVAL; + + prog = bpf_prog_get(attr->test.prog_fd); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + if (prog->aux->ops->test_run) + ret = prog->aux->ops->test_run(prog, attr, uattr); + + bpf_prog_put(prog); + return ret; +} + SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { union bpf_attr attr = {}; @@ -1094,7 +1061,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_OBJ_GET: err = bpf_obj_get(&attr); break; - #ifdef CONFIG_CGROUP_BPF case BPF_PROG_ATTACH: err = bpf_prog_attach(&attr); @@ -1103,7 +1069,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_prog_detach(&attr); break; #endif - + case BPF_PROG_TEST_RUN: + err = bpf_prog_test_run(&attr, uattr); + break; default: err = -EINVAL; break; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a834068a400e279f963097489ed165c6ad1301b2..c2ff608c1984ed3f7e5031d5f9fbcfe479f7fc67 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -143,6 +143,8 @@ struct bpf_verifier_stack_elem { #define BPF_COMPLEXITY_LIMIT_INSNS 65536 #define BPF_COMPLEXITY_LIMIT_STACK 1024 +#define BPF_MAP_PTR_POISON 
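
[Editor's aside] From userspace, the new BPF_PROG_TEST_RUN command feeds a synthetic buffer to a loaded program `repeat` times and reports the return value plus mean runtime in nanoseconds, without attaching to a real device. A hedged fragment, with field names per the uapi struct this series adds; bench_prog() is an illustrative name and prog_fd is assumed to be a loaded SKB or XDP program:

    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void bench_prog(int prog_fd)
    {
        union bpf_attr attr;
        char pkt[64] = { 0 };    /* minimal test frame */

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd      = prog_fd;
        attr.test.data_in      = (__u64)(unsigned long)pkt;
        attr.test.data_size_in = sizeof(pkt);
        attr.test.repeat       = 1000;

        if (!syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
            printf("retval=%u avg=%uns\n",
                   attr.test.retval, attr.test.duration);
    }
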
((void *)0xeB9F + POISON_POINTER_DELTA) + struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; @@ -1215,6 +1217,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH_OF_MAPS: + if (func_id != BPF_FUNC_map_lookup_elem) + goto error; default: break; } @@ -1291,7 +1297,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) } } -static int check_call(struct bpf_verifier_env *env, int func_id) +static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { struct bpf_verifier_state *state = &env->cur_state; const struct bpf_func_proto *fn = NULL; @@ -1375,6 +1381,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id) } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { + struct bpf_insn_aux_data *insn_aux; + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0; /* remember map_ptr, so that check_map_access() @@ -1387,6 +1395,11 @@ static int check_call(struct bpf_verifier_env *env, int func_id) } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; + insn_aux = &env->insn_aux_data[insn_idx]; + if (!insn_aux->map_ptr) + insn_aux->map_ptr = meta.map_ptr; + else if (insn_aux->map_ptr != meta.map_ptr) + insn_aux->map_ptr = BPF_MAP_PTR_POISON; } else { verbose("unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); @@ -1909,6 +1922,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) dst_reg->type = PTR_TO_STACK; dst_reg->imm = insn->imm; return 0; + } else if (opcode == BPF_ADD && + BPF_CLASS(insn->code) == BPF_ALU64 && + dst_reg->type == PTR_TO_STACK && + ((BPF_SRC(insn->code) == BPF_X && + regs[insn->src_reg].type == CONST_IMM) || + BPF_SRC(insn->code) == BPF_K)) { + if (BPF_SRC(insn->code) == BPF_X) + dst_reg->imm += regs[insn->src_reg].imm; + else + dst_reg->imm += insn->imm; + return 0; } else if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && (dst_reg->type == PTR_TO_PACKET || @@ -2112,14 +2136,19 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, struct bpf_reg_state *reg = ®s[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { - reg->type = type; + if (type == UNKNOWN_VALUE) { + __mark_reg_unknown_value(regs, regno); + } else if (reg->map_ptr->inner_map_meta) { + reg->type = CONST_PTR_TO_MAP; + reg->map_ptr = reg->map_ptr->inner_map_meta; + } else { + reg->type = type; + } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. 
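
[Editor's aside] The per-instruction map_ptr recorded in insn_aux_data above is what makes the later inlining safe: a lookup can only be inlined if every verification path reaches that instruction with the same map. A fragment in the restricted C accepted by the LLVM BPF backend, with all names illustrative:

    void *v;

    /* Two distinct call sites: each instruction records its own map_ptr
     * and each can be inlined independently.
     */
    if (use_a)
        v = bpf_map_lookup_elem(&map_a, &key);
    else
        v = bpf_map_lookup_elem(&map_b, &key);

    /* If the compiler instead emits a single call instruction fed by a
     * map pointer selected at runtime, e.g.
     *
     *     v = bpf_map_lookup_elem(use_a ? &map_a : &map_b, &key);
     *
     * the verifier sees two different CONST_PTR_TO_MAP values at one
     * insn_idx, sets insn_aux->map_ptr = BPF_MAP_PTR_POISON, and
     * fixup_bpf_calls() keeps the out-of-line helper for that site.
     */

Whether the ternary actually compiles to one call site depends on codegen; the poison value exists precisely so the kernel never has to guess.
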
*/ reg->id = 0; - if (type == UNKNOWN_VALUE) - __mark_reg_unknown_value(regs, regno); } } @@ -2960,7 +2989,7 @@ static int do_check(struct bpf_verifier_env *env) return -EINVAL; } - err = check_call(env, insn->imm); + err = check_call(env, insn->imm, insn_idx); if (err) return err; @@ -3044,16 +3073,33 @@ static int do_check(struct bpf_verifier_env *env) return 0; } +static int check_map_prealloc(struct bpf_map *map) +{ + return (map->map_type != BPF_MAP_TYPE_HASH && + map->map_type != BPF_MAP_TYPE_PERCPU_HASH && + map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || + !(map->map_flags & BPF_F_NO_PREALLOC); +} + static int check_map_prog_compatibility(struct bpf_map *map, struct bpf_prog *prog) { - if (prog->type == BPF_PROG_TYPE_PERF_EVENT && - (map->map_type == BPF_MAP_TYPE_HASH || - map->map_type == BPF_MAP_TYPE_PERCPU_HASH) && - (map->map_flags & BPF_F_NO_PREALLOC)) { - verbose("perf_event programs can only use preallocated hash map\n"); - return -EINVAL; + /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use + * preallocated hash maps, since doing memory allocation + * in overflow_handler can crash depending on where nmi got + * triggered. + */ + if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { + if (!check_map_prealloc(map)) { + verbose("perf_event programs can only use preallocated hash map\n"); + return -EINVAL; + } + if (map->inner_map_meta && + !check_map_prealloc(map->inner_map_meta)) { + verbose("perf_event programs can only use preallocated inner hash map\n"); + return -EINVAL; + } } return 0; } @@ -3182,6 +3228,41 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) insn->src_reg = 0; } +/* single env->prog->insni[off] instruction was replaced with the range + * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying + * [0, off) and [off, end) to new locations, so the patched range stays zero + */ +static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, + u32 off, u32 cnt) +{ + struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + + if (cnt == 1) + return 0; + new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); + if (!new_data) + return -ENOMEM; + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); + memcpy(new_data + off + cnt - 1, old_data + off, + sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); + env->insn_aux_data = new_data; + vfree(old_data); + return 0; +} + +static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, + const struct bpf_insn *patch, u32 len) +{ + struct bpf_prog *new_prog; + + new_prog = bpf_patch_insn_single(env->prog, off, patch, len); + if (!new_prog) + return NULL; + if (adjust_insn_aux_data(env, new_prog->len, off, len)) + return NULL; + return new_prog; +} + /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ @@ -3201,10 +3282,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) verbose("bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { - new_prog = bpf_patch_insn_single(env->prog, 0, - insn_buf, cnt); + new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; + env->prog = new_prog; delta += cnt - 1; } @@ -3229,7 +3310,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) else continue; - if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX) + if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; cnt = ops->convert_ctx_access(type, insn, 
insn_buf, env->prog); @@ -3238,8 +3319,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) return -EINVAL; } - new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf, - cnt); + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; @@ -3253,6 +3333,89 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) return 0; } +/* fixup insn->imm field of bpf_call instructions + * and inline eligible helpers as explicit sequence of BPF instructions + * + * this function is called after eBPF program passed verification + */ +static int fixup_bpf_calls(struct bpf_verifier_env *env) +{ + struct bpf_prog *prog = env->prog; + struct bpf_insn *insn = prog->insnsi; + const struct bpf_func_proto *fn; + const int insn_cnt = prog->len; + struct bpf_insn insn_buf[16]; + struct bpf_prog *new_prog; + struct bpf_map *map_ptr; + int i, cnt, delta = 0; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (insn->code != (BPF_JMP | BPF_CALL)) + continue; + + if (insn->imm == BPF_FUNC_get_route_realm) + prog->dst_needed = 1; + if (insn->imm == BPF_FUNC_get_prandom_u32) + bpf_user_rnd_init_once(); + if (insn->imm == BPF_FUNC_tail_call) { + /* If we tail call into other programs, we + * cannot make any assumptions since they can + * be replaced dynamically during runtime in + * the program array. + */ + prog->cb_access = 1; + + /* mark bpf_tail_call as different opcode to avoid + * conditional branch in the interpeter for every normal + * call and to prevent accidental JITing by JIT compiler + * that doesn't support bpf_tail_call yet + */ + insn->imm = 0; + insn->code |= BPF_X; + continue; + } + + if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) { + map_ptr = env->insn_aux_data[i + delta].map_ptr; + if (map_ptr == BPF_MAP_PTR_POISON || + !map_ptr->ops->map_gen_lookup) + goto patch_call_imm; + + cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); + if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { + verbose("bpf verifier is misconfigured\n"); + return -EINVAL; + } + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, + cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + + /* keep walking new program and skip insns we just inserted */ + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } + +patch_call_imm: + fn = prog->aux->ops->get_func_proto(insn->imm); + /* all functions that have prototype and verifier allowed + * programs to call them, must be real in-kernel functions + */ + if (!fn->func) { + verbose("kernel subsystem misconfigured func %s#%d\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; + } + insn->imm = fn->func - __bpf_call_base; + } + + return 0; +} + static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; @@ -3348,6 +3511,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); + if (ret == 0) + ret = fixup_bpf_calls(env); + if (log_level && log_len >= log_size - 1) { BUG_ON(log_len >= log_size); /* verifier log exceeded user supplied buffer */ diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f806dbd66de9b0445a5caf5ad4af1c24b7ed1b26..460a031c77e592411b83a4071d976706db975687 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -501,16 +501,11 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type return true; } -static const struct bpf_verifier_ops 
kprobe_prog_ops = { +const struct bpf_verifier_ops kprobe_prog_ops = { .get_func_proto = kprobe_prog_func_proto, .is_valid_access = kprobe_prog_is_valid_access, }; -static struct bpf_prog_type_list kprobe_tl __ro_after_init = { - .ops = &kprobe_prog_ops, - .type = BPF_PROG_TYPE_KPROBE, -}; - BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, u64, flags, void *, data, u64, size) { @@ -584,16 +579,11 @@ static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type return true; } -static const struct bpf_verifier_ops tracepoint_prog_ops = { +const struct bpf_verifier_ops tracepoint_prog_ops = { .get_func_proto = tp_prog_func_proto, .is_valid_access = tp_prog_is_valid_access, }; -static struct bpf_prog_type_list tracepoint_tl __ro_after_init = { - .ops = &tracepoint_prog_ops, - .type = BPF_PROG_TYPE_TRACEPOINT, -}; - static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, enum bpf_reg_type *reg_type) { @@ -642,22 +632,8 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } -static const struct bpf_verifier_ops perf_event_prog_ops = { +const struct bpf_verifier_ops perf_event_prog_ops = { .get_func_proto = tp_prog_func_proto, .is_valid_access = pe_prog_is_valid_access, .convert_ctx_access = pe_prog_convert_ctx_access, }; - -static struct bpf_prog_type_list perf_event_tl __ro_after_init = { - .ops = &perf_event_prog_ops, - .type = BPF_PROG_TYPE_PERF_EVENT, -}; - -static int __init register_kprobe_prog_ops(void) -{ - bpf_register_prog_type(&kprobe_tl); - bpf_register_prog_type(&tracepoint_tl); - bpf_register_prog_type(&perf_event_tl); - return 0; -} -late_initcall(register_kprobe_prog_ops); diff --git a/lib/nlattr.c b/lib/nlattr.c index b42b8577fc2346045c4d88ede93960bf1cd8f3e7..a7e0b16078dff5035d1494d15f62622fdfe54b66 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -112,6 +112,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype, * @len: length of attribute stream * @maxtype: maximum attribute type to be expected * @policy: validation policy + * @extack: extended ACK report struct * * Validates all attributes in the specified attribute stream against the * specified policy. Attributes with a type exceeding maxtype will be @@ -120,20 +121,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype, * Returns 0 on success or a negative error code. */ int nla_validate(const struct nlattr *head, int len, int maxtype, - const struct nla_policy *policy) + const struct nla_policy *policy, + struct netlink_ext_ack *extack) { const struct nlattr *nla; - int rem, err; + int rem; nla_for_each_attr(nla, head, len, rem) { - err = validate_nla(nla, maxtype, policy); - if (err < 0) - goto errout; + int err = validate_nla(nla, maxtype, policy); + + if (err < 0) { + if (extack) + extack->bad_attr = nla; + return err; + } } - err = 0; -errout: - return err; + return 0; } EXPORT_SYMBOL(nla_validate); @@ -180,7 +184,8 @@ EXPORT_SYMBOL(nla_policy_len); * Returns 0 on success or a negative error code. 
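
[Editor's aside] With the validation core now recording the offending attribute, a message handler only has to thread the extack through parsing; the ack path (see the netlink_ack() signature change in kernel/audit.c earlier in this series) carries it back to userspace. A hedged sketch of the consumer side, assuming nlmsg_parse() gains the extack argument in this same series; all demo_* names are illustrative:

    static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                         struct netlink_ext_ack *extack)
    {
        struct nlattr *tb[DEMO_ATTR_MAX + 1];
        int err;

        err = nlmsg_parse(nlh, sizeof(struct demo_hdr), tb, DEMO_ATTR_MAX,
                          demo_policy, extack);
        if (err < 0)
            return err;    /* extack->bad_attr already points at the
                            * attribute validate_nla() rejected */

        return 0;
    }
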
*/ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, - int len, const struct nla_policy *policy) + int len, const struct nla_policy *policy, + struct netlink_ext_ack *extack) { const struct nlattr *nla; int rem, err; @@ -193,8 +198,11 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, if (type > 0 && type <= maxtype) { if (policy) { err = validate_nla(nla, maxtype, policy); - if (err < 0) + if (err < 0) { + if (extack) + extack->bad_attr = nla; goto errout; + } } tb[type] = (struct nlattr *)nla; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index f8635fd5744259431305b73599949011d67a78b0..a930e436db5d8ff6b3e4959ae60761607bcd47cc 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -535,7 +535,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, struct rhash_head *head; int elasticity; - elasticity = ht->elasticity; + elasticity = RHT_ELASTICITY; pprev = rht_bucket_var(tbl, hash); rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *list; @@ -958,35 +958,20 @@ int rhashtable_init(struct rhashtable *ht, if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); - if (params->max_size) - ht->p.max_size = rounddown_pow_of_two(params->max_size); + /* Cap total entries at 2^31 to avoid nelems overflow. */ + ht->max_elems = 1u << 31; - if (params->insecure_max_entries) - ht->p.insecure_max_entries = - rounddown_pow_of_two(params->insecure_max_entries); - else - ht->p.insecure_max_entries = ht->p.max_size * 2; + if (params->max_size) { + ht->p.max_size = rounddown_pow_of_two(params->max_size); + if (ht->p.max_size < ht->max_elems / 2) + ht->max_elems = ht->p.max_size * 2; + } - ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); + ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); if (params->nelem_hint) size = rounded_hashtable_size(&ht->p); - /* The maximum (not average) chain length grows with the - * size of the hash table, at a rate of (log N)/(log log N). - * The value of 16 is selected so that even if the hash - * table grew to 2^32 you would not expect the maximum - * chain length to exceed it unless we are under attack - * (or extremely unlucky). - * - * As this limit is only to detect attacks, we don't need - * to set it to a lower value as you'd need the chain - * length to vastly exceed 16 to have any real effect - * on the system. 
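
[Editor's aside] The insecure_max_entries knob is gone; rhashtable now derives a hard element cap itself. Worked through the code above: with params->max_size = 300, rounddown_pow_of_two() gives p.max_size = 256, and since 256 < 2^30 the cap becomes max_elems = 512; with no max_size at all, the cap stays at 2^31 so the u32 nelems counter cannot overflow. A standalone model of that derivation, not kernel code:

    #include <stdint.h>

    static uint32_t model_max_elems(uint32_t max_size_pow2)
    {
        uint32_t max_elems = 1u << 31;    /* nelems overflow cap */

        if (max_size_pow2 && max_size_pow2 < max_elems / 2)
            max_elems = max_size_pow2 * 2;

        return max_elems;    /* model_max_elems(256) == 512 */
    }
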
- */ - if (!params->insecure_elasticity) - ht->elasticity = 16; - if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); else diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 0362da0b66c352e4cb3eb96748fe2db4955d6b11..a0f66280ea502e636d9b9f283ff12b173aa65646 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -434,6 +434,41 @@ static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) return 0; } +static int __bpf_fill_stxdw(struct bpf_test *self, int size) +{ + unsigned int len = BPF_MAXINSNS; + struct bpf_insn *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1); + insn[1] = BPF_ST_MEM(size, R10, -40, 42); + + for (i = 2; i < len - 2; i++) + insn[i] = BPF_STX_XADD(size, R10, R0, -40); + + insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40); + insn[len - 1] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_stxw(struct bpf_test *self) +{ + return __bpf_fill_stxdw(self, BPF_W); +} + +static int bpf_fill_stxdw(struct bpf_test *self) +{ + return __bpf_fill_stxdw(self, BPF_DW); +} + static struct bpf_test tests[] = { { "TAX", @@ -4302,6 +4337,41 @@ static struct bpf_test tests[] = { { }, { { 0, 0x22 } }, }, + { + "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU64_REG(BPF_MOV, R1, R10), + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_W, R10, -40, 0x10), + BPF_STX_XADD(BPF_W, R10, R0, -40), + BPF_ALU64_REG(BPF_MOV, R0, R10), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_W, R10, -40, 0x10), + BPF_STX_XADD(BPF_W, R10, R0, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x12 } }, + }, + { + "STX_XADD_W: X + 1 + 1 + 1 + ...", + { }, + INTERNAL, + { }, + { { 0, 4134 } }, + .fill_helper = bpf_fill_stxw, + }, { "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", .u.insns_int = { @@ -4315,6 +4385,41 @@ static struct bpf_test tests[] = { { }, { { 0, 0x22 } }, }, + { + "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU64_REG(BPF_MOV, R1, R10), + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_DW, R10, -40, 0x10), + BPF_STX_XADD(BPF_DW, R10, R0, -40), + BPF_ALU64_REG(BPF_MOV, R0, R10), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_DW, R10, -40, 0x10), + BPF_STX_XADD(BPF_DW, R10, R0, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x12 } }, + }, + { + "STX_XADD_DW: X + 1 + 1 + 1 + ...", + { }, + INTERNAL, + { }, + { { 0, 4134 } }, + .fill_helper = bpf_fill_stxdw, + }, /* BPF_JMP | BPF_EXIT */ { "JMP_EXIT", @@ -4656,6 +4761,51 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + /* Mainly testing JIT + imm64 here. 
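
[Editor's aside] The expected value 4134 in the two fill_helper tests below falls out of the generator above: len = BPF_MAXINSNS = 4096; slots 0-1 hold the MOV R0,1 and the ST_MEM that seeds [R10-40] with 42; slots 2 through 4093 are 4092 XADD instructions each adding R0 (== 1); slots 4094-4095 load the result back into R0 and exit. So the program returns 42 + 4092 * 1 = 4134.
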
*/ + "JMP_JGE_X: ldimm64 test 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 2), + BPF_LD_IMM64(R0, 0xffffffffffffffffUL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xeeeeeeeeU } }, + }, + { + "JMP_JGE_X: ldimm64 test 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 0), + BPF_LD_IMM64(R0, 0xffffffffffffffffUL), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffffU } }, + }, + { + "JMP_JGE_X: ldimm64 test 3", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 4), + BPF_LD_IMM64(R0, 0xffffffffffffffffUL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JNE | BPF_X */ { "JMP_JNE_X: if (3 != 2) return 1", diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c index 5945f7e19c679cb882a5a5716a1de33bed50a8f1..40d3d72beb5384efee121bd4319a02b6d0821522 100644 --- a/net/6lowpan/core.c +++ b/net/6lowpan/core.c @@ -23,10 +23,18 @@ int lowpan_register_netdevice(struct net_device *dev, { int i, ret; - dev->addr_len = EUI64_ADDR_LEN; + switch (lltype) { + case LOWPAN_LLTYPE_IEEE802154: + dev->addr_len = EUI64_ADDR_LEN; + break; + + case LOWPAN_LLTYPE_BTLE: + dev->addr_len = ETH_ALEN; + break; + } + dev->type = ARPHRD_6LOWPAN; dev->mtu = IPV6_MIN_MTU; - dev->priv_flags |= IFF_NO_QUEUE; lowpan_dev(dev)->lltype = lltype; diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 79f1fa22509a350b35340897c1cb137357734713..6b1042e216565a85ecc1b1e6301172750c07a8ae 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -278,6 +278,23 @@ lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev, return ret; } +static void lowpan_iphc_uncompress_lladdr(const struct net_device *dev, + struct in6_addr *ipaddr, + const void *lladdr) +{ + switch (dev->addr_len) { + case ETH_ALEN: + lowpan_iphc_uncompress_eui48_lladdr(ipaddr, lladdr); + break; + case EUI64_ADDR_LEN: + lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr); + break; + default: + WARN_ON_ONCE(1); + break; + } +} + /* Uncompress address function for source and * destination address(non-multicast). 
* @@ -320,7 +337,7 @@ static int lowpan_iphc_uncompress_addr(struct sk_buff *skb, lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr); break; default: - lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr); + lowpan_iphc_uncompress_lladdr(dev, ipaddr, lladdr); break; } break; @@ -381,7 +398,7 @@ static int lowpan_iphc_uncompress_ctx_addr(struct sk_buff *skb, lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr); break; default: - lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr); + lowpan_iphc_uncompress_lladdr(dev, ipaddr, lladdr); break; } ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen); @@ -666,6 +683,8 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) { case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC: + skb->pkt_type = PACKET_BROADCAST; + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid)); if (!ci) { @@ -681,11 +700,15 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); break; case LOWPAN_IPHC_M: + skb->pkt_type = PACKET_BROADCAST; + /* multicast */ err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr, iphc1 & LOWPAN_IPHC_DAM_MASK); break; case LOWPAN_IPHC_DAC: + skb->pkt_type = PACKET_HOST; + spin_lock_bh(&lowpan_dev(dev)->ctx.lock); ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid)); if (!ci) { @@ -701,6 +724,8 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, spin_unlock_bh(&lowpan_dev(dev)->ctx.lock); break; default: + skb->pkt_type = PACKET_HOST; + err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.daddr, iphc1 & LOWPAN_IPHC_DAM_MASK, daddr); @@ -802,6 +827,21 @@ lowpan_iphc_compress_ctx_802154_lladdr(const struct in6_addr *ipaddr, return lladdr_compress; } +static bool lowpan_iphc_addr_equal(const struct net_device *dev, + const struct lowpan_iphc_ctx *ctx, + const struct in6_addr *ipaddr, + const void *lladdr) +{ + struct in6_addr tmp = {}; + + lowpan_iphc_uncompress_lladdr(dev, &tmp, lladdr); + + if (ctx) + ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen); + + return ipv6_addr_equal(&tmp, ipaddr); +} + static u8 lowpan_compress_ctx_addr(u8 **hc_ptr, const struct net_device *dev, const struct in6_addr *ipaddr, const struct lowpan_iphc_ctx *ctx, @@ -819,13 +859,7 @@ static u8 lowpan_compress_ctx_addr(u8 **hc_ptr, const struct net_device *dev, } break; default: - /* check for SAM/DAM = 11 */ - memcpy(&tmp.s6_addr[8], lladdr, EUI64_ADDR_LEN); - /* second bit-flip (Universe/Local) is done according RFC2464 */ - tmp.s6_addr[8] ^= 0x02; - /* context information are always used */ - ipv6_addr_prefix_copy(&tmp, &ctx->pfx, ctx->plen); - if (ipv6_addr_equal(&tmp, ipaddr)) { + if (lowpan_iphc_addr_equal(dev, ctx, ipaddr, lladdr)) { dam = LOWPAN_IPHC_DAM_11; goto out; } @@ -921,11 +955,12 @@ static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct net_device *dev, } break; default: - if (is_addr_mac_addr_based(ipaddr, lladdr)) { - dam = LOWPAN_IPHC_DAM_11; /* 0-bits */ + if (lowpan_iphc_addr_equal(dev, NULL, ipaddr, lladdr)) { + dam = LOWPAN_IPHC_DAM_11; pr_debug("address compression 0 bits\n"); goto out; } + break; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e97ab824e368cc16f9609acd70d5337866eb2936..9ee5787634e59690d67cb8fa148e03b18d455c99 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -562,8 +562,7 @@ static int vlan_dev_init(struct net_device *dev) NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC | 
NETIF_F_ALL_FCOE; - dev->features |= real_dev->vlan_features | NETIF_F_LLTX | - NETIF_F_GSO_SOFTWARE; + dev->features |= dev->hw_features | NETIF_F_LLTX; dev->gso_max_size = real_dev->gso_max_size; dev->gso_max_segs = real_dev->gso_max_segs; if (dev->features & NETIF_F_VLAN_FEATURES) diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 1270207f3d7c9dd6fde0e1f7839acfe18a4d82c9..9c94aad153b308d4ca3e1fb098f045365895a218 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -35,7 +35,8 @@ static inline int vlan_validate_qos_map(struct nlattr *attr) { if (!attr) return 0; - return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy); + return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy, + NULL); } static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) diff --git a/net/Makefile b/net/Makefile index 9b681550e3a3ea3c6146ac67572b6c97a28c9d2c..9086ffbb508514c1e4fb1a5d2d04d6c6b1cf5bea 100644 --- a/net/Makefile +++ b/net/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_NET) += $(tmp-y) # LLC has to be linked before the files in net/802/ obj-$(CONFIG_LLC) += llc/ -obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ +obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ bpf/ obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_XFRM) += xfrm/ diff --git a/net/atm/clip.c b/net/atm/clip.c index 53b4ac09e7b7d5d6a57f049dc1653c961b052622..ec527b62f79db1a4712d6fd654d2f899255de8bc 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -106,7 +106,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) entry->expires = jiffies - 1; /* force resolution or expiration */ error = neigh_update(entry->neigh, NULL, NUD_NONE, - NEIGH_UPDATE_F_ADMIN); + NEIGH_UPDATE_F_ADMIN, 0); if (error) pr_crit("neigh_update failed with %d\n", error); goto out; @@ -481,7 +481,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) link_vcc(clip_vcc, entry); } error = neigh_update(neigh, llc_oui, NUD_PERMANENT, - NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN); + NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0); neigh_release(neigh); return error; } diff --git a/net/atm/common.c b/net/atm/common.c index 9613381f5db04e28ff66749706d1d61d82f77b88..f06422f4108d209fde356457c453164f2f4d7289 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -62,21 +62,16 @@ static void vcc_remove_socket(struct sock *sk) write_unlock_irq(&vcc_sklist_lock); } -static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size) +static bool vcc_tx_ready(struct atm_vcc *vcc, unsigned int size) { - struct sk_buff *skb; struct sock *sk = sk_atm(vcc); if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", sk_wmem_alloc_get(sk), size, sk->sk_sndbuf); - return NULL; + return false; } - while (!(skb = alloc_skb(size, GFP_KERNEL))) - schedule(); - pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); - atomic_add(skb->truesize, &sk->sk_wmem_alloc); - return skb; + return true; } static void vcc_sock_destruct(struct sock *sk) @@ -606,7 +601,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) eff = (size+3) & ~3; /* align to word boundary */ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); error = 0; - while (!(skb = alloc_tx(vcc, eff))) { + while (!vcc_tx_ready(vcc, eff)) { if (m->msg_flags & MSG_DONTWAIT) { error = -EAGAIN; break; @@ -628,6 +623,15 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) finish_wait(sk_sleep(sk), &wait); if (error) 
goto out; + + skb = alloc_skb(eff, GFP_KERNEL); + if (!skb) { + error = -ENOMEM; + goto out; + } + pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); + atomic_add(skb->truesize, &sk->sk_wmem_alloc); + skb->dev = NULL; /* for paths shared with net_device interfaces */ ATM_SKB(skb)->atm_options = vcc->atm_options; if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) { diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 71343d0fec94b55f7318ec8578abc956148f7791..495ba7cdcb0451c997656116a300a49b7e43a089 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -679,15 +679,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_forw_packet *forw_packet_aggr; + struct sk_buff *skb; unsigned char *skb_buff; unsigned int skb_size; atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left; - forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing, - queue_left, bat_priv); - if (!forw_packet_aggr) - return; - if (atomic_read(&bat_priv->aggregated_ogms) && packet_len < BATADV_MAX_AGGREGATION_BYTES) skb_size = BATADV_MAX_AGGREGATION_BYTES; @@ -696,9 +692,14 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, skb_size += ETH_HLEN; - forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size); - if (!forw_packet_aggr->skb) { - batadv_forw_packet_free(forw_packet_aggr, true); + skb = netdev_alloc_skb_ip_align(NULL, skb_size); + if (!skb) + return; + + forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing, + queue_left, bat_priv, skb); + if (!forw_packet_aggr) { + kfree_skb(skb); return; } diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ba8420d8a992db2c14936f568f761869afd921c0..d07e89ec84677d22a74891406ab6fa5ee6087011 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -395,7 +395,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, ether_addr_copy(ethhdr->h_source, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): CLAIM %pM on vid %d\n", mac, - BATADV_PRINT_VID(vid)); + batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_UNCLAIM: /* unclaim frame @@ -404,7 +404,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, ether_addr_copy(hw_src, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, - BATADV_PRINT_VID(vid)); + batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_ANNOUNCE: /* announcement frame @@ -413,7 +413,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, ether_addr_copy(hw_src, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", - ethhdr->h_source, BATADV_PRINT_VID(vid)); + ethhdr->h_source, batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_REQUEST: /* request frame @@ -425,14 +425,14 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n", ethhdr->h_source, ethhdr->h_dest, - BATADV_PRINT_VID(vid)); + batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_LOOPDETECT: ether_addr_copy(ethhdr->h_source, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n", ethhdr->h_source, ethhdr->h_dest, - BATADV_PRINT_VID(vid)); + 
batadv_print_vid(vid)); break; } @@ -475,9 +475,9 @@ static void batadv_bla_loopdetect_report(struct work_struct *work) batadv_info(bat_priv->soft_iface, "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n", - BATADV_PRINT_VID(backbone_gw->vid)); + batadv_print_vid(backbone_gw->vid)); snprintf(vid_str, sizeof(vid_str), "%d", - BATADV_PRINT_VID(backbone_gw->vid)); + batadv_print_vid(backbone_gw->vid)); vid_str[sizeof(vid_str) - 1] = 0; batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT, @@ -510,7 +510,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig, batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", - orig, BATADV_PRINT_VID(vid)); + orig, batadv_print_vid(vid)); entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) @@ -719,7 +719,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", - mac, BATADV_PRINT_VID(vid)); + mac, batadv_print_vid(vid)); kref_get(&claim->refcount); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, @@ -739,8 +739,8 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, goto claim_free_ref; batadv_dbg(BATADV_DBG_BLA, bat_priv, - "bla_add_claim(): changing ownership for %pM, vid %d\n", - mac, BATADV_PRINT_VID(vid)); + "bla_add_claim(): changing ownership for %pM, vid %d to gw %pM\n", + mac, batadv_print_vid(vid), backbone_gw->orig); remove_crc = true; } @@ -809,7 +809,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, return; batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", - mac, BATADV_PRINT_VID(vid)); + mac, batadv_print_vid(vid)); batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim); @@ -849,7 +849,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_announce(): ANNOUNCE vid %d (sent by %pM)... 
CRC = %#.4x\n", - BATADV_PRINT_VID(vid), backbone_gw->orig, crc); + batadv_print_vid(vid), backbone_gw->orig, crc); spin_lock_bh(&backbone_gw->crc_lock); backbone_crc = backbone_gw->crc; @@ -859,7 +859,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n", backbone_gw->orig, - BATADV_PRINT_VID(backbone_gw->vid), + batadv_print_vid(backbone_gw->vid), backbone_crc, crc); batadv_bla_send_request(backbone_gw); @@ -904,7 +904,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_request(): REQUEST vid %d (sent by %pM)...\n", - BATADV_PRINT_VID(vid), ethhdr->h_source); + batadv_print_vid(vid), ethhdr->h_source); batadv_bla_answer_request(bat_priv, primary_if, vid); return true; @@ -941,7 +941,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, /* this must be an UNCLAIM frame */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", - claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig); + claim_addr, batadv_print_vid(vid), backbone_gw->orig); batadv_bla_del_claim(bat_priv, claim_addr, vid); batadv_backbone_gw_put(backbone_gw); @@ -1161,7 +1161,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv, if (ret == 1) batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", - ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, + ethhdr->h_source, batadv_print_vid(vid), hw_src, hw_dst); if (ret < 2) @@ -1197,7 +1197,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", - ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst); + ethhdr->h_source, batadv_print_vid(vid), hw_src, hw_dst); return true; } @@ -1295,10 +1295,13 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, goto skip; batadv_dbg(BATADV_DBG_BLA, bat_priv, - "bla_purge_claims(): %pM, vid %d, time out\n", - claim->addr, claim->vid); + "bla_purge_claims(): timed out.\n"); purge_now: + batadv_dbg(BATADV_DBG_BLA, bat_priv, + "bla_purge_claims(): %pM, vid %d\n", + claim->addr, claim->vid); + batadv_handle_unclaim(bat_priv, primary_if, backbone_gw->orig, claim->addr, claim->vid); @@ -1846,6 +1849,13 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, /* possible optimization: race for a claim */ /* No claim exists yet, claim it for us! */ + + batadv_dbg(BATADV_DBG_BLA, bat_priv, + "bla_rx(): Unclaimed MAC %pM found. Claim it. Local: %s\n", + ethhdr->h_source, + batadv_is_my_client(bat_priv, + ethhdr->h_source, vid) ? + "yes" : "no"); batadv_handle_claim(bat_priv, primary_if, primary_if->net_dev->dev_addr, ethhdr->h_source, vid); @@ -1963,10 +1973,22 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, /* if yes, the client has roamed and we have * to unclaim it. */ - batadv_handle_unclaim(bat_priv, primary_if, - primary_if->net_dev->dev_addr, - ethhdr->h_source, vid); - goto allow; + if (batadv_has_timed_out(claim->lasttime, 100)) { + /* only unclaim if the last claim entry is + * older than 100 ms to make sure we really + * have a roaming client here. 
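The 100 ms threshold above is evaluated with the driver's jiffies-based timeout helper. A hedged sketch of the test batadv_has_timed_out() is assumed to perform (illustrative, not copied from the tree):

```c
#include <linux/jiffies.h>
#include <linux/types.h>

/* true if 'timestamp' lies more than 'timeout_ms' in the past */
static inline bool has_timed_out(unsigned long timestamp,
				 unsigned int timeout_ms)
{
	return time_is_before_jiffies(timestamp +
				      msecs_to_jiffies(timeout_ms));
}
```

With that test, a claim refreshed within the last 100 ms is treated as a concurrent claim race and the frame is dropped, while an older claim is interpreted as a client that genuinely roamed.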
+ */ + batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n", + ethhdr->h_source); + batadv_handle_unclaim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } else { + batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n", + ethhdr->h_source); + goto handled; + } } /* check if it is a multicast/broadcast frame */ @@ -2042,7 +2064,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) backbone_crc = backbone_gw->crc; spin_unlock_bh(&backbone_gw->crc_lock); seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", - claim->addr, BATADV_PRINT_VID(claim->vid), + claim->addr, batadv_print_vid(claim->vid), backbone_gw->orig, (is_own ? 'x' : ' '), backbone_crc); @@ -2274,7 +2296,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n", backbone_gw->orig, - BATADV_PRINT_VID(backbone_gw->vid), secs, + batadv_print_vid(backbone_gw->vid), secs, msecs, backbone_crc); } rcu_read_unlock(); @@ -2449,3 +2471,52 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) return ret; } + +#ifdef CONFIG_BATMAN_ADV_DAT +/** + * batadv_bla_check_claim - check if address is claimed + * + * @bat_priv: the bat priv with all the soft interface information + * @addr: mac address of which the claim status is checked + * @vid: the VLAN ID + * + * Checks whether the given address is claimed by the local device itself. + * + * Return: true if bla is disabled or the mac is claimed by the device, + * false if the device addr is already claimed by another gateway + */ +bool batadv_bla_check_claim(struct batadv_priv *bat_priv, + u8 *addr, unsigned short vid) +{ + struct batadv_bla_claim search_claim; + struct batadv_bla_claim *claim = NULL; + struct batadv_hard_iface *primary_if = NULL; + bool ret = true; + + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + return ret; + + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if) + return ret; + + /* First look if the mac address is claimed */ + ether_addr_copy(search_claim.addr, addr); + search_claim.vid = vid; + + claim = batadv_claim_hash_find(bat_priv, &search_claim); + + /* If there is a claim and we are not owner of the claim, + * return false.
+ */ + if (claim) { + if (!batadv_compare_eth(claim->backbone_gw->orig, + primary_if->net_dev->dev_addr)) + ret = false; + batadv_claim_put(claim); + } + + batadv_hardif_put(primary_if); + return ret; +} +#endif diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index e157986bd01cf989dc70c93bae7a4fdf17cf3c4f..234775748b8eae9477f802804f004e50d2a4840c 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -69,6 +69,10 @@ void batadv_bla_status_update(struct net_device *net_dev); int batadv_bla_init(struct batadv_priv *bat_priv); void batadv_bla_free(struct batadv_priv *bat_priv); int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb); +#ifdef CONFIG_BATMAN_ADV_DAT +bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, + unsigned short vid); +#endif #define BATADV_BLA_CRC_INIT 0 #else /* ifdef CONFIG_BATMAN_ADV_BLA */ @@ -145,6 +149,13 @@ static inline int batadv_bla_backbone_dump(struct sk_buff *msg, return -EOPNOTSUPP; } +static inline +bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, + unsigned short vid) +{ + return true; +} + #endif /* ifdef CONFIG_BATMAN_ADV_BLA */ #endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */ diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 1bfd1dbc2feba7bf6c16004ea8ca85ed6066c929..013e970eff393e0550aa250f7e72c27301071552 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -43,6 +43,7 @@ #include #include +#include "bridge_loop_avoidance.h" #include "hard-interface.h" #include "hash.h" #include "log.h" @@ -330,7 +331,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip, batadv_dbg(BATADV_DBG_DAT, bat_priv, "Entry updated: %pI4 %pM (vid: %d)\n", &dat_entry->ip, dat_entry->mac_addr, - BATADV_PRINT_VID(vid)); + batadv_print_vid(vid)); goto out; } @@ -356,7 +357,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip, } batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n", - &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid)); + &dat_entry->ip, dat_entry->mac_addr, batadv_print_vid(vid)); out: if (dat_entry) @@ -835,7 +836,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n", &dat_entry->ip, dat_entry->mac_addr, - BATADV_PRINT_VID(dat_entry->vid), + batadv_print_vid(dat_entry->vid), last_seen_mins, last_seen_secs); } rcu_read_unlock(); @@ -1002,6 +1003,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, bool ret = false; struct batadv_dat_entry *dat_entry = NULL; struct sk_buff *skb_new; + struct net_device *soft_iface = bat_priv->soft_iface; int hdr_size = 0; unsigned short vid; @@ -1040,16 +1042,30 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, goto out; } + /* If BLA is enabled, only send ARP replies if we have claimed + * the destination for the ARP request or if no one else of + * the backbone gws belonging to our backbone has claimed the + * destination. + */ + if (!batadv_bla_check_claim(bat_priv, + dat_entry->mac_addr, vid)) { + batadv_dbg(BATADV_DBG_DAT, bat_priv, + "Device %pM claimed by another backbone gw. 
Don't send ARP reply!", + dat_entry->mac_addr); + ret = true; + goto out; + } + skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src, dat_entry->mac_addr, hw_src, vid); if (!skb_new) goto out; - skb_new->protocol = eth_type_trans(skb_new, - bat_priv->soft_iface); - bat_priv->stats.rx_packets++; - bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; + skb_new->protocol = eth_type_trans(skb_new, soft_iface); + + soft_iface->stats.rx_packets++; + soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; netif_rx(skb_new); batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); @@ -1188,6 +1204,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, struct sk_buff *skb, int hdr_size) { + struct batadv_dat_entry *dat_entry = NULL; u16 type; __be32 ip_src, ip_dst; u8 *hw_src, *hw_dst; @@ -1210,12 +1227,41 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, hw_dst = batadv_arp_hw_dst(skb, hdr_size); ip_dst = batadv_arp_ip_dst(skb, hdr_size); + /* If ip_dst is already in cache and has the right mac address, + * drop this frame if this ARP reply is destined for us because it's + * most probably an ARP reply generated by another node of the DHT. + * We have most probably received already a reply earlier. Delivering + * this frame would lead to doubled receive of an ARP reply. + */ + dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_src, vid); + if (dat_entry && batadv_compare_eth(hw_src, dat_entry->mac_addr)) { + batadv_dbg(BATADV_DBG_DAT, bat_priv, "Doubled ARP reply removed: ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]; dat_entry: %pM-%pI4\n", + hw_src, &ip_src, hw_dst, &ip_dst, + dat_entry->mac_addr, &dat_entry->ip); + dropped = true; + goto out; + } + /* Update our internal cache with both the IP addresses the node got * within the ARP reply */ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid); batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid); + /* If BLA is enabled, only forward ARP replies if we have claimed the + * source of the ARP reply or if no one else of the same backbone has + * already claimed that client. This prevents that different gateways + * to the same backbone all forward the ARP reply leading to multiple + * replies in the backbone. + */ + if (!batadv_bla_check_claim(bat_priv, hw_src, vid)) { + batadv_dbg(BATADV_DBG_DAT, bat_priv, + "Device %pM claimed by another backbone gw. 
Drop ARP reply.\n", + hw_src); + dropped = true; + goto out; + } + /* if this REPLY is directed to a client of mine, let's deliver the * packet to the interface */ @@ -1228,6 +1274,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, out: if (dropped) kfree_skb(skb); + if (dat_entry) + batadv_dat_entry_put(dat_entry); /* if dropped == false -> deliver to the interface */ return dropped; } @@ -1256,7 +1304,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv, /* If this packet is an ARP_REQUEST and the node already has the * information that it is going to ask, then the packet can be dropped */ - if (forw_packet->num_packets) + if (batadv_forw_packet_is_rebroadcast(forw_packet)) goto out; vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size); diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index 7a2b9f4da07830103a8f00e4c3b488b0367a9dc2..65ce97efa6b5946175f64d300573a532f28387f8 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@ -73,9 +73,10 @@ __printf(2, 3); /* possibly ratelimited debug output */ #define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \ do { \ - if (atomic_read(&(bat_priv)->log_level) & (type) && \ + struct batadv_priv *__batpriv = (bat_priv); \ + if (atomic_read(&__batpriv->log_level) & (type) && \ (!(ratelimited) || net_ratelimit())) \ - batadv_debug_log(bat_priv, fmt, ## arg); \ + batadv_debug_log(__batpriv, fmt, ## arg); \ } \ while (0) #else /* !CONFIG_BATMAN_ADV_DEBUG */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 5000c540614d0c0a866857e5245ff3f365d14223..fb381fb26a66196942401ec6cb636c66e648a36a 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -516,6 +516,9 @@ static void batadv_recv_handler_init(void) BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12); BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8); + i = FIELD_SIZEOF(struct sk_buff, cb); + BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i); + /* broadcast packet */ batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 57a8103dbce7f00fb58ff5ae252e12a0f6453aec..810f7d026f544027991af1714c169342792e1a68 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2017.0" +#define BATADV_SOURCE_VERSION "2017.1" #endif /* B.A.T.M.A.N. parameters */ @@ -193,6 +193,7 @@ enum batadv_uev_type { #include #include +#include "packet.h" #include "types.h" struct net_device; @@ -200,8 +201,19 @@ struct packet_type; struct seq_file; struct sk_buff; -#define BATADV_PRINT_VID(vid) (((vid) & BATADV_VLAN_HAS_TAG) ? 
\ - (int)((vid) & VLAN_VID_MASK) : -1) +/** + * batadv_print_vid - return printable version of vid information + * @vid: the VLAN identifier + * + * Return: -1 when no VLAN is used, VLAN id otherwise + */ +static inline int batadv_print_vid(unsigned short vid) +{ + if (vid & BATADV_VLAN_HAS_TAG) + return (int)(vid & VLAN_VID_MASK); + else + return -1; +} extern struct list_head batadv_hardif_list; diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 952ba81a565b611ee0a83fc0bbfb719b54187407..d327670641ac336a14f0ecc85dd848d4952e8e6e 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -494,9 +494,8 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv) if (!bridged) goto update; -#if !IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) - pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n"); -#endif + if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)) + pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n"); querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP); querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP); @@ -671,7 +670,6 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv, return 0; } -#if IS_ENABLED(CONFIG_IPV6) /** * batadv_mcast_is_report_ipv6 - check for MLD reports * @skb: the ethernet frame destined for the mesh * @@ -736,7 +734,6 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv, return 0; } -#endif /** * batadv_mcast_forw_mode_check - check for optimized forwarding potential @@ -765,11 +762,12 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv, case ETH_P_IP: return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb, is_unsnoopable); -#if IS_ENABLED(CONFIG_IPV6) case ETH_P_IPV6: + if (!IS_ENABLED(CONFIG_IPV6)) + return -EINVAL; + return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb, is_unsnoopable); -#endif default: return -EINVAL; } diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 7fd740b6e36dfb0e11c67c283cec68a5cfd1b5f6..e1ebe14ee2a6e21cc8d6b4a42552cae4bd15061f 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -941,15 +941,17 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct batadv_unicast_packet *unicast_packet; struct batadv_unicast_4addr_packet *unicast_4addr_packet; - u8 *orig_addr; - struct batadv_orig_node *orig_node = NULL; + u8 *orig_addr, *orig_addr_gw; + struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL; int check, hdr_size = sizeof(*unicast_packet); enum batadv_subtype subtype; - bool is4addr; + struct ethhdr *ethhdr; int ret = NET_RX_DROP; + bool is4addr, is_gw; unicast_packet = (struct batadv_unicast_packet *)skb->data; unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; + ethhdr = eth_hdr(skb); is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR; /* the caller function should have already pulled 2 bytes */ @@ -972,6 +974,23 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, /* packet for me */ if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { + /* If this is a unicast packet from another backbone gw, + * drop it.
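The multicast.c hunks above replace preprocessor conditionals with IS_ENABLED() checks: the condition is a compile-time constant, so the disabled branch is eliminated by the compiler, yet both branches stay visible to type checking, and the IPv6 helper must now compile unconditionally. A condensed sketch of the pattern (check_ipv6_frame() is a hypothetical stand-in for the real check):

```c
#include <linux/kconfig.h>
#include <linux/errno.h>
#include <linux/if_ether.h>

static int check_ipv6_frame(void)
{
	return 0;	/* stub standing in for the real IPv6 check */
}

static int forw_mode_check(unsigned int proto)
{
	switch (proto) {
	case ETH_P_IPV6:
		/* IS_ENABLED() folds to 0 or 1, so this branch
		 * disappears entirely when CONFIG_IPV6 is set
		 */
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EINVAL;
		return check_ipv6_frame();
	default:
		return -EINVAL;
	}
}
```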
+ */ + orig_addr_gw = ethhdr->h_source; + orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw); + if (orig_node_gw) { + is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw, + hdr_size); + batadv_orig_node_put(orig_node_gw); + if (is_gw) { + batadv_dbg(BATADV_DBG_BLA, bat_priv, + "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", + orig_addr_gw); + return NET_RX_DROP; + } + } + if (is4addr) { subtype = unicast_4addr_packet->subtype; batadv_dat_inc_counter(bat_priv, subtype); diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 1489ec27daff5548b072e88648f5cca192f74afa..403df596a73d28afa8b31b46a2c0649052ce3db0 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -482,6 +482,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet, * @if_outgoing: The (optional) if_outgoing to be grabbed * @queue_left: The (optional) queue counter to decrease * @bat_priv: The bat_priv for the mesh of this forw_packet + * @skb: The raw packet this forwarding packet shall contain * * Allocates a forwarding packet and tries to get a reference to the * (optional) if_incoming, if_outgoing and queue_left. If queue_left @@ -493,7 +494,8 @@ struct batadv_forw_packet * batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, atomic_t *queue_left, - struct batadv_priv *bat_priv) + struct batadv_priv *bat_priv, + struct sk_buff *skb) { struct batadv_forw_packet *forw_packet; const char *qname; @@ -525,7 +527,7 @@ batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, INIT_HLIST_NODE(&forw_packet->list); INIT_HLIST_NODE(&forw_packet->cleanup_list); - forw_packet->skb = NULL; + forw_packet->skb = skb; forw_packet->queue_left = queue_left; forw_packet->if_incoming = if_incoming; forw_packet->if_outgoing = if_outgoing; @@ -756,22 +758,23 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, if (!primary_if) goto err; + newskb = skb_copy(skb, GFP_ATOMIC); + if (!newskb) { + batadv_hardif_put(primary_if); + goto err; + } + forw_packet = batadv_forw_packet_alloc(primary_if, NULL, &bat_priv->bcast_queue_left, - bat_priv); + bat_priv, newskb); batadv_hardif_put(primary_if); if (!forw_packet) - goto err; - - newskb = skb_copy(skb, GFP_ATOMIC); - if (!newskb) goto err_packet_free; /* as we have a copy now, it is safe to decrease the TTL */ bcast_packet = (struct batadv_bcast_packet *)newskb->data; bcast_packet->ttl--; - forw_packet->skb = newskb; forw_packet->own = own_packet; INIT_DELAYED_WORK(&forw_packet->delayed_work, @@ -781,11 +784,60 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, return NETDEV_TX_OK; err_packet_free: - batadv_forw_packet_free(forw_packet, true); + kfree_skb(newskb); err: return NETDEV_TX_BUSY; } +/** + * batadv_forw_packet_bcasts_left - check if a retransmission is necessary + * @forw_packet: the forwarding packet to check + * @hard_iface: the interface to check on + * + * Checks whether a given packet has any (re)transmissions left on the provided + * interface. + * + * hard_iface may be NULL: In that case the number of transmissions this skb had + * so far is compared with the maximum amount of retransmissions independent of + * any interface instead. + * + * Return: True if (re)transmissions are left, false otherwise. 
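These helpers move the retransmission counter out of struct batadv_forw_packet and into the skb control block, so the count travels with the skb itself. A minimal sketch of the skb->cb idiom they rely on (the "my_" names are illustrative, mirroring BATADV_SKB_CB):

```c
#include <linux/skbuff.h>

struct my_skb_cb {
	unsigned int num_bcasts;	/* (re)transmissions done so far */
};

#define MY_SKB_CB(skb) ((struct my_skb_cb *)&(skb)->cb[0])

static bool bcasts_left(struct sk_buff *skb, unsigned int max)
{
	return MY_SKB_CB(skb)->num_bcasts < max;
}

static void bcasts_inc(struct sk_buff *skb)
{
	MY_SKB_CB(skb)->num_bcasts++;
}
```

Since skb->cb is only 48 bytes and shared by every layer that handles the skb, the soft-interface hunk further down clears it on transmit and main.c asserts at build time that the private struct fits.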
+ */ +static bool +batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet, + struct batadv_hard_iface *hard_iface) +{ + unsigned int max; + + if (hard_iface) + max = hard_iface->num_bcasts; + else + max = BATADV_NUM_BCASTS_MAX; + + return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max; +} + +/** + * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet + * @forw_packet: the packet to increase the counter for + */ +static void +batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet) +{ + BATADV_SKB_CB(forw_packet->skb)->num_bcasts++; +} + +/** + * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions + * @forw_packet: the packet to check + * + * Return: True if this packet was transmitted before, false otherwise. + */ +bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet) +{ + return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0; +} + static void batadv_send_outstanding_bcast_packet(struct work_struct *work) { struct batadv_hard_iface *hard_iface; @@ -826,7 +878,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) if (hard_iface->soft_iface != soft_iface) continue; - if (forw_packet->num_packets >= hard_iface->num_bcasts) + if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface)) continue; if (forw_packet->own) { @@ -884,10 +936,10 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) } rcu_read_unlock(); - forw_packet->num_packets++; + batadv_forw_packet_bcasts_inc(forw_packet); /* if we still have some more bcasts to send */ - if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) { + if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) { batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time); return; diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index f21166d1032360a1febe3cebdfc9ee0e9958bb9c..a16b34f473ef02e46e642b46d4b31f588d89ea67 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h @@ -34,11 +34,13 @@ struct batadv_forw_packet * batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, atomic_t *queue_left, - struct batadv_priv *bat_priv); + struct batadv_priv *bat_priv, + struct sk_buff *skb); bool batadv_forw_packet_steal(struct batadv_forw_packet *packet, spinlock_t *l); void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv, struct batadv_forw_packet *forw_packet, unsigned long send_time); +bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet); int batadv_send_skb_to_orig(struct sk_buff *skb, struct batadv_orig_node *orig_node, diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index d042c99af028e2083307de1ba8978f2061fee45d..b25789abf7b9e10aec7af1dfc41a5c9ff805284a 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -64,28 +64,6 @@ #include "sysfs.h" #include "translation-table.h" -static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); -static void batadv_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info); -static u32 batadv_get_msglevel(struct net_device *dev); -static void batadv_set_msglevel(struct net_device *dev, u32 value); -static u32 batadv_get_link(struct net_device *dev); -static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data); -static void batadv_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data); -static int batadv_get_sset_count(struct 
net_device *dev, int stringset); - -static const struct ethtool_ops batadv_ethtool_ops = { - .get_settings = batadv_get_settings, - .get_drvinfo = batadv_get_drvinfo, - .get_msglevel = batadv_get_msglevel, - .set_msglevel = batadv_set_msglevel, - .get_link = batadv_get_link, - .get_strings = batadv_get_strings, - .get_ethtool_stats = batadv_get_ethtool_stats, - .get_sset_count = batadv_get_sset_count, -}; - int batadv_skb_head_push(struct sk_buff *skb, unsigned int len) { int result; @@ -140,7 +118,7 @@ static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx) static struct net_device_stats *batadv_interface_stats(struct net_device *dev) { struct batadv_priv *bat_priv = netdev_priv(dev); - struct net_device_stats *stats = &bat_priv->stats; + struct net_device_stats *stats = &dev->stats; stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX); stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES); @@ -230,6 +208,9 @@ static int batadv_interface_tx(struct sk_buff *skb, if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) goto dropped; + /* reset control block to avoid left overs from previous users */ + memset(skb->cb, 0, sizeof(struct batadv_skb_cb)); + netif_trans_update(soft_iface); vid = batadv_get_vid(skb, 0); ethhdr = eth_hdr(skb); @@ -947,6 +928,98 @@ static const struct net_device_ops batadv_netdev_ops = { .ndo_del_slave = batadv_softif_slave_del, }; +static void batadv_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver)); + strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "batman", sizeof(info->bus_info)); +} + +/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702 + * Declare each description string in struct.name[] to get fixed sized buffer + * and compile time checking for strings longer than ETH_GSTRING_LEN. 
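The comment above relies on a small C trick: wrapping each string in a struct with a fixed char name[ETH_GSTRING_LEN] member makes every table entry exactly ETH_GSTRING_LEN bytes, so the whole table can be memcpy'd straight into the ethtool strings buffer, and any literal that would overflow the field is diagnosed by the compiler instead of being truncated at runtime. A minimal sketch:

```c
#include <linux/ethtool.h>	/* ETH_GSTRING_LEN == 32 */

static const struct {
	const char name[ETH_GSTRING_LEN];
} stats_strings[] = {
	{ "tx" },
	{ "rx_bytes" },
	/* a 33+ character literal here would be rejected at
	 * build time rather than silently truncated
	 */
};
```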
+ */ +static const struct { + const char name[ETH_GSTRING_LEN]; +} batadv_counters_strings[] = { + { "tx" }, + { "tx_bytes" }, + { "tx_dropped" }, + { "rx" }, + { "rx_bytes" }, + { "forward" }, + { "forward_bytes" }, + { "mgmt_tx" }, + { "mgmt_tx_bytes" }, + { "mgmt_rx" }, + { "mgmt_rx_bytes" }, + { "frag_tx" }, + { "frag_tx_bytes" }, + { "frag_rx" }, + { "frag_rx_bytes" }, + { "frag_fwd" }, + { "frag_fwd_bytes" }, + { "tt_request_tx" }, + { "tt_request_rx" }, + { "tt_response_tx" }, + { "tt_response_rx" }, + { "tt_roam_adv_tx" }, + { "tt_roam_adv_rx" }, +#ifdef CONFIG_BATMAN_ADV_DAT + { "dat_get_tx" }, + { "dat_get_rx" }, + { "dat_put_tx" }, + { "dat_put_rx" }, + { "dat_cached_reply_tx" }, +#endif +#ifdef CONFIG_BATMAN_ADV_NC + { "nc_code" }, + { "nc_code_bytes" }, + { "nc_recode" }, + { "nc_recode_bytes" }, + { "nc_buffer" }, + { "nc_decode" }, + { "nc_decode_bytes" }, + { "nc_decode_failed" }, + { "nc_sniffed" }, +#endif +}; + +static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, batadv_counters_strings, + sizeof(batadv_counters_strings)); +} + +static void batadv_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct batadv_priv *bat_priv = netdev_priv(dev); + int i; + + for (i = 0; i < BATADV_CNT_NUM; i++) + data[i] = batadv_sum_counter(bat_priv, i); +} + +static int batadv_get_sset_count(struct net_device *dev, int stringset) +{ + if (stringset == ETH_SS_STATS) + return BATADV_CNT_NUM; + + return -EOPNOTSUPP; +} + +static const struct ethtool_ops batadv_ethtool_ops = { + .get_drvinfo = batadv_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = batadv_get_strings, + .get_ethtool_stats = batadv_get_ethtool_stats, + .get_sset_count = batadv_get_sset_count, +}; + /** * batadv_softif_free - Deconstructor of batadv_soft_interface * @dev: Device to cleanup and remove @@ -971,8 +1044,6 @@ static void batadv_softif_free(struct net_device *dev) */ static void batadv_softif_init_early(struct net_device *dev) { - struct batadv_priv *priv = netdev_priv(dev); - ether_setup(dev); dev->netdev_ops = &batadv_netdev_ops; @@ -989,8 +1060,6 @@ static void batadv_softif_init_early(struct net_device *dev) eth_hw_addr_random(dev); dev->ethtool_ops = &batadv_ethtool_ops; - - memset(priv, 0, sizeof(*priv)); } struct net_device *batadv_softif_create(struct net *net, const char *name) @@ -1083,118 +1152,3 @@ struct rtnl_link_ops batadv_link_ops __read_mostly = { .setup = batadv_softif_init_early, .dellink = batadv_softif_destroy_netlink, }; - -/* ethtool */ -static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - cmd->supported = 0; - cmd->advertising = 0; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_FULL; - cmd->port = PORT_TP; - cmd->phy_address = 0; - cmd->transceiver = XCVR_INTERNAL; - cmd->autoneg = AUTONEG_DISABLE; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - - return 0; -} - -static void batadv_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, "B.A.T.M.A.N. 
advanced", sizeof(info->driver)); - strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version)); - strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, "batman", sizeof(info->bus_info)); -} - -static u32 batadv_get_msglevel(struct net_device *dev) -{ - return -EOPNOTSUPP; -} - -static void batadv_set_msglevel(struct net_device *dev, u32 value) -{ -} - -static u32 batadv_get_link(struct net_device *dev) -{ - return 1; -} - -/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702 - * Declare each description string in struct.name[] to get fixed sized buffer - * and compile time checking for strings longer than ETH_GSTRING_LEN. - */ -static const struct { - const char name[ETH_GSTRING_LEN]; -} batadv_counters_strings[] = { - { "tx" }, - { "tx_bytes" }, - { "tx_dropped" }, - { "rx" }, - { "rx_bytes" }, - { "forward" }, - { "forward_bytes" }, - { "mgmt_tx" }, - { "mgmt_tx_bytes" }, - { "mgmt_rx" }, - { "mgmt_rx_bytes" }, - { "frag_tx" }, - { "frag_tx_bytes" }, - { "frag_rx" }, - { "frag_rx_bytes" }, - { "frag_fwd" }, - { "frag_fwd_bytes" }, - { "tt_request_tx" }, - { "tt_request_rx" }, - { "tt_response_tx" }, - { "tt_response_rx" }, - { "tt_roam_adv_tx" }, - { "tt_roam_adv_rx" }, -#ifdef CONFIG_BATMAN_ADV_DAT - { "dat_get_tx" }, - { "dat_get_rx" }, - { "dat_put_tx" }, - { "dat_put_rx" }, - { "dat_cached_reply_tx" }, -#endif -#ifdef CONFIG_BATMAN_ADV_NC - { "nc_code" }, - { "nc_code_bytes" }, - { "nc_recode" }, - { "nc_recode_bytes" }, - { "nc_buffer" }, - { "nc_decode" }, - { "nc_decode_bytes" }, - { "nc_decode_failed" }, - { "nc_sniffed" }, -#endif -}; - -static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - if (stringset == ETH_SS_STATS) - memcpy(data, batadv_counters_strings, - sizeof(batadv_counters_strings)); -} - -static void batadv_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct batadv_priv *bat_priv = netdev_priv(dev); - int i; - - for (i = 0; i < BATADV_CNT_NUM; i++) - data[i] = batadv_sum_counter(bat_priv, i); -} - -static int batadv_get_sset_count(struct net_device *dev, int stringset) -{ - if (stringset == ETH_SS_STATS) - return BATADV_CNT_NUM; - - return -EOPNOTSUPP; -} diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index c94ebdecdc3d123f71c5af89783623e58d2f323d..556f9a865ddfb5e488c65e9edd708fad187f56b1 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -873,8 +873,8 @@ static int batadv_tp_send(void *arg) /* something went wrong during the preparation/transmission */ if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, - "Meter: batadv_tp_send() cannot send packets (%d)\n", - err); + "Meter: %s() cannot send packets (%d)\n", + __func__, err); /* ensure nobody else tries to stop the thread now */ if (atomic_dec_and_test(&tp_vars->sending)) tp_vars->reason = err; @@ -979,7 +979,8 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst, if (!tp_vars) { spin_unlock_bh(&bat_priv->tp_list_lock); batadv_dbg(BATADV_DBG_TP_METER, bat_priv, - "Meter: batadv_tp_start cannot allocate list elements\n"); + "Meter: %s cannot allocate list elements\n", + __func__); batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR, dst, bat_priv, session_cookie); return; diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 6077a87d46f0f781ac72dcc3cc1d9f84c814ad19..e75b4937b497401284bc553182737a2f7af54605 100644 --- 
a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -617,7 +617,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, - BATADV_PRINT_VID(tt_global->common.vid), message); + batadv_print_vid(tt_global->common.vid), message); batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, batadv_choose_tt, &tt_global->common); @@ -671,7 +671,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) { batadv_dbg(BATADV_DBG_TT, bat_priv, "Re-adding pending client %pM (vid: %d)\n", - addr, BATADV_PRINT_VID(vid)); + addr, batadv_print_vid(vid)); /* whatever the reason why the PENDING flag was set, * this is a client which was enqueued to be removed in * this orig_interval. Since it popped up again, the @@ -684,7 +684,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) { batadv_dbg(BATADV_DBG_TT, bat_priv, "Roaming client %pM (vid: %d) came back to its original location\n", - addr, BATADV_PRINT_VID(vid)); + addr, batadv_print_vid(vid)); /* the ROAM flag is set because this client roamed away * and the node got a roaming_advertisement message. Now * that the client popped up again at its original @@ -716,7 +716,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, if (!vlan) { net_ratelimited_function(batadv_info, soft_iface, "adding TT local entry %pM to non-existent VLAN %d\n", - addr, BATADV_PRINT_VID(vid)); + addr, batadv_print_vid(vid)); kmem_cache_free(batadv_tl_cache, tt_local); tt_local = NULL; goto out; @@ -724,7 +724,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", - addr, BATADV_PRINT_VID(vid), + addr, batadv_print_vid(vid), (u8)atomic_read(&bat_priv->tt.vn)); ether_addr_copy(tt_local->common.addr, addr); @@ -1097,7 +1097,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n", tt_common_entry->addr, - BATADV_PRINT_VID(tt_common_entry->vid), + batadv_print_vid(tt_common_entry->vid), ((tt_common_entry->flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'), no_purge ? 
'P' : '.', @@ -1296,7 +1296,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_TT, bat_priv, "Local tt entry (%pM, vid: %d) pending to be removed: %s\n", tt_local_entry->common.addr, - BATADV_PRINT_VID(tt_local_entry->common.vid), message); + batadv_print_vid(tt_local_entry->common.vid), message); } /** @@ -1727,7 +1727,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new global tt entry: %pM (vid: %d, via %pM)\n", - common->addr, BATADV_PRINT_VID(common->vid), + common->addr, batadv_print_vid(common->vid), orig_node->orig); ret = true; @@ -1835,7 +1835,7 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv, if (!vlan) { seq_printf(seq, " * Cannot retrieve VLAN %d for originator %pM\n", - BATADV_PRINT_VID(tt_common_entry->vid), + batadv_print_vid(tt_common_entry->vid), best_entry->orig_node->orig); goto print_list; } @@ -1844,7 +1844,7 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv, seq_printf(seq, " %c %pM %4i (%3u) via %pM (%3u) (%#.8x) [%c%c%c%c]\n", '*', tt_global_entry->common.addr, - BATADV_PRINT_VID(tt_global_entry->common.vid), + batadv_print_vid(tt_global_entry->common.vid), best_entry->ttvn, best_entry->orig_node->orig, last_ttvn, vlan->tt.crc, ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'), @@ -1867,7 +1867,7 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv, if (!vlan) { seq_printf(seq, " + Cannot retrieve VLAN %d for originator %pM\n", - BATADV_PRINT_VID(tt_common_entry->vid), + batadv_print_vid(tt_common_entry->vid), orig_entry->orig_node->orig); continue; } @@ -1876,7 +1876,7 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv, seq_printf(seq, " %c %pM %4d (%3u) via %pM (%3u) (%#.8x) [%c%c%c%c]\n", '+', tt_global_entry->common.addr, - BATADV_PRINT_VID(tt_global_entry->common.vid), + batadv_print_vid(tt_global_entry->common.vid), orig_entry->ttvn, orig_entry->orig_node->orig, last_ttvn, vlan->tt.crc, ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'), @@ -2213,7 +2213,7 @@ batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv, "Deleting %pM from global tt entry %pM (vid: %d): %s\n", orig_node->orig, tt_global_entry->common.addr, - BATADV_PRINT_VID(vid), message); + batadv_print_vid(vid), message); _batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry); } @@ -2253,12 +2253,13 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, /* its the last one, mark for roaming. */ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; - } else + } else { /* there is another entry, we can simply delete this * one and can still use the other one. */ batadv_tt_global_del_orig_node(bat_priv, tt_global_entry, orig_node, message); + } } /** @@ -2314,10 +2315,11 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, /* local entry exists, case 2: client roamed to us. 
*/ batadv_tt_global_del_orig_list(tt_global_entry); batadv_tt_global_free(bat_priv, tt_global_entry, message); - } else + } else { /* no local entry exists, case 1: check for roaming */ batadv_tt_global_del_roaming(bat_priv, tt_global_entry, orig_node, message); + } out: if (tt_global_entry) @@ -2375,7 +2377,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, - BATADV_PRINT_VID(vid), message); + batadv_print_vid(vid), message); hlist_del_rcu(&tt_common_entry->hash_entry); batadv_tt_global_entry_put(tt_global); } @@ -2435,7 +2437,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv) batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, - BATADV_PRINT_VID(tt_global->common.vid), + batadv_print_vid(tt_global->common.vid), msg); hlist_del_rcu(&tt_common->hash_entry); @@ -3650,7 +3652,7 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client, batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n", - orig_node->orig, client, BATADV_PRINT_VID(vid)); + orig_node->orig, client, batadv_print_vid(vid)); batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX); @@ -3773,7 +3775,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting local tt entry (%pM, vid: %d): pending\n", tt_common->addr, - BATADV_PRINT_VID(tt_common->vid)); + batadv_print_vid(tt_common->vid)); batadv_tt_local_size_dec(bat_priv, tt_common->vid); hlist_del_rcu(&tt_common->hash_entry); @@ -4017,7 +4019,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_TT, bat_priv, "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n", - addr, BATADV_PRINT_VID(vid), orig_node->orig); + addr, batadv_print_vid(vid), orig_node->orig); ret = true; out: return ret; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 246f21b4973bc39d0678273ad831da1f5b7e0df3..ea43a64492479809fe6bdf95b436792078f50e9f 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1000,7 +1000,6 @@ struct batadv_priv_bat_v { * struct batadv_priv - per mesh interface data * @mesh_state: current status of the mesh (inactive/active/deactivating) * @soft_iface: net device which holds this struct as private data - * @stats: structure holding the data for the ndo_get_stats() call * @bat_counters: mesh internal traffic statistic counters (see batadv_counters) * @aggregated_ogms: bool indicating whether OGM aggregation is enabled * @bonding: bool indicating whether traffic bonding is enabled @@ -1055,7 +1054,6 @@ struct batadv_priv_bat_v { struct batadv_priv { atomic_t mesh_state; struct net_device *soft_iface; - struct net_device_stats stats; u64 __percpu *bat_counters; /* Per cpu counters */ atomic_t aggregated_ogms; atomic_t bonding; @@ -1377,9 +1375,11 @@ struct batadv_nc_packet { * relevant to batman-adv in the skb->cb buffer in skbs. 
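Because struct batadv_skb_cb documented here now also carries num_bcasts, it must keep fitting inside the fixed 48-byte skb->cb array; the main.c hunk earlier adds a build-time assertion for exactly that. A condensed sketch of the guard (assuming batman-adv's internal "types.h" is in scope):

```c
#include <linux/bug.h>
#include <linux/kernel.h>	/* FIELD_SIZEOF */
#include <linux/skbuff.h>
#include "types.h"		/* struct batadv_skb_cb */

static void check_skb_cb(void)
{
	/* the build fails if the private cb struct outgrows skb->cb */
	BUILD_BUG_ON(sizeof(struct batadv_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
}
```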
* @decoded: Marks a skb as decoded, which is checked when searching for coding * opportunities in network-coding.c + * @num_bcasts: Counter for broadcast packet retransmissions */ struct batadv_skb_cb { bool decoded; + unsigned int num_bcasts; }; /** @@ -1392,7 +1392,7 @@ struct batadv_skb_cb { * @skb: bcast packet's skb buffer * @packet_len: size of aggregated OGM packet inside the skb buffer * @direct_link_flags: direct link flags for aggregated OGM packets - * @num_packets: counter for bcast packet retransmission + * @num_packets: counter for aggregated OGMv1 packets * @delayed_work: work queue callback item for packet sending * @if_incoming: pointer to incoming hard-iface or primary iface if * locally generated packet diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index d491529332f4568b532759331bd8f88653573ba0..608959989f8eddbfc9b97279a7fba61fd7381d04 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -38,7 +39,6 @@ struct skb_cb { struct in6_addr addr; struct in6_addr gw; struct l2cap_chan *chan; - int status; }; #define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb)) @@ -64,7 +64,7 @@ struct lowpan_peer { struct l2cap_chan *chan; /* peer addresses in various formats */ - unsigned char eui64_addr[EUI64_ADDR_LEN]; + unsigned char lladdr[ETH_ALEN]; struct in6_addr peer_addr; }; @@ -270,28 +270,20 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) } static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, - struct l2cap_chan *chan) + struct lowpan_peer *peer) { - const u8 *saddr, *daddr; + const u8 *saddr; struct lowpan_btle_dev *dev; - struct lowpan_peer *peer; dev = lowpan_btle_dev(netdev); - rcu_read_lock(); - peer = __peer_lookup_chan(dev, chan); - rcu_read_unlock(); - if (!peer) - return -EINVAL; - - saddr = peer->eui64_addr; - daddr = dev->netdev->dev_addr; + saddr = peer->lladdr; - return lowpan_header_decompress(skb, netdev, daddr, saddr); + return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr); } static int recv_pkt(struct sk_buff *skb, struct net_device *dev, - struct l2cap_chan *chan) + struct lowpan_peer *peer) { struct sk_buff *local_skb; int ret; @@ -344,8 +336,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, local_skb->dev = dev; - ret = iphc_decompress(local_skb, dev, chan); + ret = iphc_decompress(local_skb, dev, peer); if (ret < 0) { + BT_DBG("iphc_decompress failed: %d", ret); kfree_skb(local_skb); goto drop; } @@ -365,6 +358,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, consume_skb(local_skb); consume_skb(skb); } else { + BT_DBG("unknown packet type"); goto drop; } @@ -390,7 +384,7 @@ static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) if (!dev || !dev->netdev) return -ENOENT; - err = recv_pkt(skb, dev->netdev, chan); + err = recv_pkt(skb, dev->netdev, peer); if (err) { BT_DBG("recv pkt %d", err); err = -EAGAIN; @@ -399,37 +393,6 @@ static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) return err; } -static u8 get_addr_type_from_eui64(u8 byte) -{ - /* Is universal(0) or local(1) bit */ - return ((byte & 0x02) ? 
BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC); -} - -static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr) -{ - u8 *eui64 = ip6_daddr->s6_addr + 8; - - addr->b[0] = eui64[7]; - addr->b[1] = eui64[6]; - addr->b[2] = eui64[5]; - addr->b[3] = eui64[2]; - addr->b[4] = eui64[1]; - addr->b[5] = eui64[0]; -} - -static void convert_dest_bdaddr(struct in6_addr *ip6_daddr, - bdaddr_t *addr, u8 *addr_type) -{ - copy_to_bdaddr(ip6_daddr, addr); - - /* We need to toggle the U/L bit that we got from IPv6 address - * so that we get the proper address and type of the BD address. - */ - addr->b[5] ^= 0x02; - - *addr_type = get_addr_type_from_eui64(addr->b[5]); -} - static int setup_header(struct sk_buff *skb, struct net_device *netdev, bdaddr_t *peer_addr, u8 *peer_addr_type) { @@ -437,8 +400,7 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, struct ipv6hdr *hdr; struct lowpan_btle_dev *dev; struct lowpan_peer *peer; - bdaddr_t addr, *any = BDADDR_ANY; - u8 *daddr = any->b; + u8 *daddr; int err, status = 0; hdr = ipv6_hdr(skb); @@ -449,34 +411,24 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev, if (ipv6_addr_is_multicast(&ipv6_daddr)) { lowpan_cb(skb)->chan = NULL; + daddr = NULL; } else { - u8 addr_type; + BT_DBG("dest IP %pI6c", &ipv6_daddr); - /* Get destination BT device from skb. - * If there is no such peer then discard the packet. + /* The packet might be sent to 6lowpan interface + * because of routing (either via default route + * or user set route) so get peer according to + * the destination address. */ - convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type); - - BT_DBG("dest addr %pMR type %d IP %pI6c", &addr, - addr_type, &ipv6_daddr); - - peer = peer_lookup_ba(dev, &addr, addr_type); + peer = peer_lookup_dst(dev, &ipv6_daddr, skb); if (!peer) { - /* The packet might be sent to 6lowpan interface - * because of routing (either via default route - * or user set route) so get peer according to - * the destination address. - */ - peer = peer_lookup_dst(dev, &ipv6_daddr, skb); - if (!peer) { - BT_DBG("no such peer %pMR found", &addr); - return -ENOENT; - } + BT_DBG("no such peer"); + return -ENOENT; } - daddr = peer->eui64_addr; - *peer_addr = addr; - *peer_addr_type = addr_type; + daddr = peer->lladdr; + *peer_addr = peer->chan->dst; + *peer_addr_type = peer->chan->dst_type; lowpan_cb(skb)->chan = peer->chan; status = 1; @@ -527,15 +479,8 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, return 0; } - if (!err) - err = lowpan_cb(skb)->status; - - if (err < 0) { - if (err == -EAGAIN) - netdev->stats.tx_dropped++; - else - netdev->stats.tx_errors++; - } + if (err < 0) + netdev->stats.tx_errors++; return err; } @@ -647,9 +592,9 @@ static void netdev_setup(struct net_device *dev) { dev->hard_header_len = 0; dev->needed_tailroom = 0; - dev->flags = IFF_RUNNING | IFF_POINTOPOINT | - IFF_MULTICAST; + dev->flags = IFF_RUNNING | IFF_MULTICAST; dev->watchdog_timeo = 0; + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; dev->netdev_ops = &netdev_ops; dev->header_ops = &header_ops; @@ -660,34 +605,6 @@ static struct device_type bt_type = { .name = "bluetooth", }; -static void set_addr(u8 *eui, u8 *addr, u8 addr_type) -{ - /* addr is the BT address in little-endian format */ - eui[0] = addr[5]; - eui[1] = addr[4]; - eui[2] = addr[3]; - eui[3] = 0xFF; - eui[4] = 0xFE; - eui[5] = addr[2]; - eui[6] = addr[1]; - eui[7] = addr[0]; - - /* Universal/local bit set, BT 6lowpan draft ch. 
3.2.1 */ - if (addr_type == BDADDR_LE_PUBLIC) - eui[0] &= ~0x02; - else - eui[0] |= 0x02; - - BT_DBG("type %d addr %*phC", addr_type, 8, eui); -} - -static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr, - u8 addr_type) -{ - netdev->addr_assign_type = NET_ADDR_PERM; - set_addr(netdev->dev_addr, addr->b, addr_type); -} - static void ifup(struct net_device *netdev) { int err; @@ -746,16 +663,9 @@ static struct l2cap_chan *chan_create(void) return chan; } -static void set_ip_addr_bits(u8 addr_type, u8 *addr) -{ - if (addr_type == BDADDR_LE_PUBLIC) - *addr |= 0x02; - else - *addr &= ~0x02; -} - static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, - struct lowpan_btle_dev *dev) + struct lowpan_btle_dev *dev, + bool new_netdev) { struct lowpan_peer *peer; @@ -766,19 +676,9 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, peer->chan = chan; memset(&peer->peer_addr, 0, sizeof(struct in6_addr)); - /* RFC 2464 ch. 5 */ - peer->peer_addr.s6_addr[0] = 0xFE; - peer->peer_addr.s6_addr[1] = 0x80; - set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b, - chan->dst_type); - - memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8, - EUI64_ADDR_LEN); + baswap((void *)peer->lladdr, &chan->dst); - /* IPv6 address needs to have the U/L bit set properly so toggle - * it back here. - */ - set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8); + lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr); spin_lock(&devices_lock); INIT_LIST_HEAD(&peer->list); @@ -786,7 +686,8 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, spin_unlock(&devices_lock); /* Notifying peers about us needs to be done without locks held */ - INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); + if (new_netdev) + INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); return peer->chan; @@ -803,7 +704,8 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) if (!netdev) return -ENOMEM; - set_dev_addr(netdev, &chan->src, chan->src_type); + netdev->addr_assign_type = NET_ADDR_PERM; + baswap((void *)netdev->dev_addr, &chan->src); netdev->netdev_ops = &netdev_ops; SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); @@ -843,6 +745,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) static inline void chan_ready_cb(struct l2cap_chan *chan) { struct lowpan_btle_dev *dev; + bool new_netdev = false; dev = lookup_dev(chan->conn); @@ -853,12 +756,13 @@ static inline void chan_ready_cb(struct l2cap_chan *chan) l2cap_chan_del(chan, -ENOENT); return; } + new_netdev = true; } if (!try_module_get(THIS_MODULE)) return; - add_peer_chan(chan, dev); + add_peer_chan(chan, dev, new_netdev); ifup(dev->netdev); } @@ -964,26 +868,28 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, static void chan_suspend_cb(struct l2cap_chan *chan) { - struct sk_buff *skb = chan->data; + struct lowpan_btle_dev *dev; - BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb); + BT_DBG("chan %p suspend", chan); - if (!skb) + dev = lookup_dev(chan->conn); + if (!dev || !dev->netdev) return; - lowpan_cb(skb)->status = -EAGAIN; + netif_stop_queue(dev->netdev); } static void chan_resume_cb(struct l2cap_chan *chan) { - struct sk_buff *skb = chan->data; + struct lowpan_btle_dev *dev; - BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb); + BT_DBG("chan %p resume", chan); - if (!skb) + dev = lookup_dev(chan->conn); + if (!dev || 
!dev->netdev) return; - lowpan_cb(skb)->status = 0; + netif_wake_queue(dev->netdev); } static long chan_get_sndtimeo_cb(struct l2cap_chan *chan) diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 06c31b9a68b0bce3cc4f28224c3c23864f78e7c9..68f951b3e85aa725485bef77de63f162f0241d97 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -13,6 +13,7 @@ menuconfig BT select CRYPTO_CMAC select CRYPTO_ECB select CRYPTO_SHA256 + select CRYPTO_ECDH help Bluetooth is low-cost, low-power, short-range wireless technology. It was designed as a replacement for cables and other short-range diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 4bfaa19a557382311369b229b0d26846d5a968dc..5d0a113e2e4049e6043099ddcf1fdd50c22ce0ab 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -13,7 +13,7 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ - ecc.o hci_request.o mgmt_util.o + ecdh_helper.o hci_request.o mgmt_util.o bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 69e1f7d362a8b71ed61cc12c66e8b81837712447..42d0997e2fbb78af58970504baf0d855d72a964a 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -159,12 +159,17 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk) BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); + lock_sock(sk); list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; + release_sock(sk); parent->sk_ack_backlog++; } EXPORT_SYMBOL(bt_accept_enqueue); +/* Calling function must hold the sk lock. + * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list. + */ void bt_accept_unlink(struct sock *sk) { BT_DBG("sk %p state %d", sk, sk->sk_state); @@ -183,11 +188,32 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) BT_DBG("parent %p", parent); +restart: list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; + /* Prevent early freeing of sk due to unlink and sock_kill */ + sock_hold(sk); lock_sock(sk); + /* Check sk has not already been unlinked via + * bt_accept_unlink() due to serialisation caused by sk locking + */ + if (!bt_sk(sk)->parent) { + BT_DBG("sk %p, already unlinked", sk); + release_sock(sk); + sock_put(sk); + + /* Restart the loop as sk is no longer in the list + * and also avoid a potential infinite loop because + * list_for_each_entry_safe() is not thread safe. 
+ */ + goto restart; + } + + /* sk is safely in the parent list so reduce reference count */ + sock_put(sk); + /* FIXME: Is this check still needed */ if (sk->sk_state == BT_CLOSED) { bt_accept_unlink(sk); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index 02a4ccc04e1ebba5301f8d2bfec55229d428ffd8..ebcab5bbadd7eb174f8bb2aff10b4e0d5283e182 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -263,7 +263,7 @@ void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle) struct hci_cp_read_local_amp_assoc cp; struct amp_assoc *loc_assoc = &hdev->loc_assoc; struct hci_request req; - int err = 0; + int err; BT_DBG("%s handle %d", hdev->name, phy_handle); @@ -282,7 +282,7 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr) { struct hci_cp_read_local_amp_assoc cp; struct hci_request req; - int err = 0; + int err; memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc)); memset(&cp, 0, sizeof(cp)); @@ -292,7 +292,7 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr) set_bit(READ_LOC_AMP_ASSOC, &mgr->state); hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); - hci_req_run_skb(&req, read_local_amp_assoc_complete); + err = hci_req_run_skb(&req, read_local_amp_assoc_complete); if (err < 0) a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); } @@ -303,7 +303,7 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev, struct hci_cp_read_local_amp_assoc cp; struct amp_mgr *mgr = hcon->amp_mgr; struct hci_request req; - int err = 0; + int err; cp.phy_handle = hcon->handle; cp.len_so_far = cpu_to_le16(0); @@ -314,7 +314,7 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev, /* Read Local AMP Assoc final link information data */ hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); - hci_req_run_skb(&req, read_local_amp_assoc_complete); + err = hci_req_run_skb(&req, read_local_amp_assoc_complete); if (err < 0) a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); } diff --git a/net/bluetooth/ecc.c b/net/bluetooth/ecc.c deleted file mode 100644 index e1709f8467acacb216241ec03f845a0043464c3b..0000000000000000000000000000000000000000 --- a/net/bluetooth/ecc.c +++ /dev/null @@ -1,816 +0,0 @@ -/* - * Copyright (c) 2013, Kenneth MacKay - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include "ecc.h" - -/* 256-bit curve */ -#define ECC_BYTES 32 - -#define MAX_TRIES 16 - -/* Number of u64's needed */ -#define NUM_ECC_DIGITS (ECC_BYTES / 8) - -struct ecc_point { - u64 x[NUM_ECC_DIGITS]; - u64 y[NUM_ECC_DIGITS]; -}; - -typedef struct { - u64 m_low; - u64 m_high; -} uint128_t; - -#define CURVE_P_32 { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull, \ - 0x0000000000000000ull, 0xFFFFFFFF00000001ull } - -#define CURVE_G_32 { \ - { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull, \ - 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull }, \ - { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull, \ - 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull } \ -} - -#define CURVE_N_32 { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull, \ - 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull } - -static u64 curve_p[NUM_ECC_DIGITS] = CURVE_P_32; -static struct ecc_point curve_g = CURVE_G_32; -static u64 curve_n[NUM_ECC_DIGITS] = CURVE_N_32; - -static void vli_clear(u64 *vli) -{ - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) - vli[i] = 0; -} - -/* Returns true if vli == 0, false otherwise. */ -static bool vli_is_zero(const u64 *vli) -{ - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) { - if (vli[i]) - return false; - } - - return true; -} - -/* Returns nonzero if bit bit of vli is set. */ -static u64 vli_test_bit(const u64 *vli, unsigned int bit) -{ - return (vli[bit / 64] & ((u64) 1 << (bit % 64))); -} - -/* Counts the number of 64-bit "digits" in vli. */ -static unsigned int vli_num_digits(const u64 *vli) -{ - int i; - - /* Search from the end until we find a non-zero digit. - * We do it in reverse because we expect that most digits will - * be nonzero. - */ - for (i = NUM_ECC_DIGITS - 1; i >= 0 && vli[i] == 0; i--); - - return (i + 1); -} - -/* Counts the number of bits required for vli. */ -static unsigned int vli_num_bits(const u64 *vli) -{ - unsigned int i, num_digits; - u64 digit; - - num_digits = vli_num_digits(vli); - if (num_digits == 0) - return 0; - - digit = vli[num_digits - 1]; - for (i = 0; digit; i++) - digit >>= 1; - - return ((num_digits - 1) * 64 + i); -} - -/* Sets dest = src. */ -static void vli_set(u64 *dest, const u64 *src) -{ - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) - dest[i] = src[i]; -} - -/* Returns sign of left - right. */ -static int vli_cmp(const u64 *left, const u64 *right) -{ - int i; - - for (i = NUM_ECC_DIGITS - 1; i >= 0; i--) { - if (left[i] > right[i]) - return 1; - else if (left[i] < right[i]) - return -1; - } - - return 0; -} - -/* Computes result = in << c, returning carry. Can modify in place - * (if result == in). 0 < shift < 64. - */ -static u64 vli_lshift(u64 *result, const u64 *in, - unsigned int shift) -{ - u64 carry = 0; - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) { - u64 temp = in[i]; - - result[i] = (temp << shift) | carry; - carry = temp >> (64 - shift); - } - - return carry; -} - -/* Computes vli = vli >> 1. 
*/ -static void vli_rshift1(u64 *vli) -{ - u64 *end = vli; - u64 carry = 0; - - vli += NUM_ECC_DIGITS; - - while (vli-- > end) { - u64 temp = *vli; - *vli = (temp >> 1) | carry; - carry = temp << 63; - } -} - -/* Computes result = left + right, returning carry. Can modify in place. */ -static u64 vli_add(u64 *result, const u64 *left, - const u64 *right) -{ - u64 carry = 0; - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) { - u64 sum; - - sum = left[i] + right[i] + carry; - if (sum != left[i]) - carry = (sum < left[i]); - - result[i] = sum; - } - - return carry; -} - -/* Computes result = left - right, returning borrow. Can modify in place. */ -static u64 vli_sub(u64 *result, const u64 *left, const u64 *right) -{ - u64 borrow = 0; - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) { - u64 diff; - - diff = left[i] - right[i] - borrow; - if (diff != left[i]) - borrow = (diff > left[i]); - - result[i] = diff; - } - - return borrow; -} - -static uint128_t mul_64_64(u64 left, u64 right) -{ - u64 a0 = left & 0xffffffffull; - u64 a1 = left >> 32; - u64 b0 = right & 0xffffffffull; - u64 b1 = right >> 32; - u64 m0 = a0 * b0; - u64 m1 = a0 * b1; - u64 m2 = a1 * b0; - u64 m3 = a1 * b1; - uint128_t result; - - m2 += (m0 >> 32); - m2 += m1; - - /* Overflow */ - if (m2 < m1) - m3 += 0x100000000ull; - - result.m_low = (m0 & 0xffffffffull) | (m2 << 32); - result.m_high = m3 + (m2 >> 32); - - return result; -} - -static uint128_t add_128_128(uint128_t a, uint128_t b) -{ - uint128_t result; - - result.m_low = a.m_low + b.m_low; - result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low); - - return result; -} - -static void vli_mult(u64 *result, const u64 *left, const u64 *right) -{ - uint128_t r01 = { 0, 0 }; - u64 r2 = 0; - unsigned int i, k; - - /* Compute each digit of result in sequence, maintaining the - * carries. - */ - for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; k++) { - unsigned int min; - - if (k < NUM_ECC_DIGITS) - min = 0; - else - min = (k + 1) - NUM_ECC_DIGITS; - - for (i = min; i <= k && i < NUM_ECC_DIGITS; i++) { - uint128_t product; - - product = mul_64_64(left[i], right[k - i]); - - r01 = add_128_128(r01, product); - r2 += (r01.m_high < product.m_high); - } - - result[k] = r01.m_low; - r01.m_low = r01.m_high; - r01.m_high = r2; - r2 = 0; - } - - result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low; -} - -static void vli_square(u64 *result, const u64 *left) -{ - uint128_t r01 = { 0, 0 }; - u64 r2 = 0; - int i, k; - - for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; k++) { - unsigned int min; - - if (k < NUM_ECC_DIGITS) - min = 0; - else - min = (k + 1) - NUM_ECC_DIGITS; - - for (i = min; i <= k && i <= k - i; i++) { - uint128_t product; - - product = mul_64_64(left[i], left[k - i]); - - if (i < k - i) { - r2 += product.m_high >> 63; - product.m_high = (product.m_high << 1) | - (product.m_low >> 63); - product.m_low <<= 1; - } - - r01 = add_128_128(r01, product); - r2 += (r01.m_high < product.m_high); - } - - result[k] = r01.m_low; - r01.m_low = r01.m_high; - r01.m_high = r2; - r2 = 0; - } - - result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low; -} - -/* Computes result = (left + right) % mod. - * Assumes that left < mod and right < mod, result != mod. - */ -static void vli_mod_add(u64 *result, const u64 *left, const u64 *right, - const u64 *mod) -{ - u64 carry; - - carry = vli_add(result, left, right); - - /* result > mod (result = mod + remainder), so subtract mod to - * get remainder. - */ - if (carry || vli_cmp(result, mod) >= 0) - vli_sub(result, result, mod); -} - -/* Computes result = (left - right) % mod. 
- * Assumes that left < mod and right < mod, result != mod. - */ -static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right, - const u64 *mod) -{ - u64 borrow = vli_sub(result, left, right); - - /* In this case, p_result == -diff == (max int) - diff. - * Since -x % d == d - x, we can get the correct result from - * result + mod (with overflow). - */ - if (borrow) - vli_add(result, result, mod); -} - -/* Computes result = product % curve_p - from http://www.nsa.gov/ia/_files/nist-routines.pdf */ -static void vli_mmod_fast(u64 *result, const u64 *product) -{ - u64 tmp[NUM_ECC_DIGITS]; - int carry; - - /* t */ - vli_set(result, product); - - /* s1 */ - tmp[0] = 0; - tmp[1] = product[5] & 0xffffffff00000000ull; - tmp[2] = product[6]; - tmp[3] = product[7]; - carry = vli_lshift(tmp, tmp, 1); - carry += vli_add(result, result, tmp); - - /* s2 */ - tmp[1] = product[6] << 32; - tmp[2] = (product[6] >> 32) | (product[7] << 32); - tmp[3] = product[7] >> 32; - carry += vli_lshift(tmp, tmp, 1); - carry += vli_add(result, result, tmp); - - /* s3 */ - tmp[0] = product[4]; - tmp[1] = product[5] & 0xffffffff; - tmp[2] = 0; - tmp[3] = product[7]; - carry += vli_add(result, result, tmp); - - /* s4 */ - tmp[0] = (product[4] >> 32) | (product[5] << 32); - tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull); - tmp[2] = product[7]; - tmp[3] = (product[6] >> 32) | (product[4] << 32); - carry += vli_add(result, result, tmp); - - /* d1 */ - tmp[0] = (product[5] >> 32) | (product[6] << 32); - tmp[1] = (product[6] >> 32); - tmp[2] = 0; - tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32); - carry -= vli_sub(result, result, tmp); - - /* d2 */ - tmp[0] = product[6]; - tmp[1] = product[7]; - tmp[2] = 0; - tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull); - carry -= vli_sub(result, result, tmp); - - /* d3 */ - tmp[0] = (product[6] >> 32) | (product[7] << 32); - tmp[1] = (product[7] >> 32) | (product[4] << 32); - tmp[2] = (product[4] >> 32) | (product[5] << 32); - tmp[3] = (product[6] << 32); - carry -= vli_sub(result, result, tmp); - - /* d4 */ - tmp[0] = product[7]; - tmp[1] = product[4] & 0xffffffff00000000ull; - tmp[2] = product[5]; - tmp[3] = product[6] & 0xffffffff00000000ull; - carry -= vli_sub(result, result, tmp); - - if (carry < 0) { - do { - carry += vli_add(result, result, curve_p); - } while (carry < 0); - } else { - while (carry || vli_cmp(curve_p, result) != 1) - carry -= vli_sub(result, result, curve_p); - } -} - -/* Computes result = (left * right) % curve_p. */ -static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right) -{ - u64 product[2 * NUM_ECC_DIGITS]; - - vli_mult(product, left, right); - vli_mmod_fast(result, product); -} - -/* Computes result = left^2 % curve_p. */ -static void vli_mod_square_fast(u64 *result, const u64 *left) -{ - u64 product[2 * NUM_ECC_DIGITS]; - - vli_square(product, left); - vli_mmod_fast(result, product); -} - -#define EVEN(vli) (!(vli[0] & 1)) -/* Computes result = (1 / p_input) % mod. All VLIs are the same size. 
- * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide" - * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf - */ -static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod) -{ - u64 a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS]; - u64 u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS]; - u64 carry; - int cmp_result; - - if (vli_is_zero(input)) { - vli_clear(result); - return; - } - - vli_set(a, input); - vli_set(b, mod); - vli_clear(u); - u[0] = 1; - vli_clear(v); - - while ((cmp_result = vli_cmp(a, b)) != 0) { - carry = 0; - - if (EVEN(a)) { - vli_rshift1(a); - - if (!EVEN(u)) - carry = vli_add(u, u, mod); - - vli_rshift1(u); - if (carry) - u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; - } else if (EVEN(b)) { - vli_rshift1(b); - - if (!EVEN(v)) - carry = vli_add(v, v, mod); - - vli_rshift1(v); - if (carry) - v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; - } else if (cmp_result > 0) { - vli_sub(a, a, b); - vli_rshift1(a); - - if (vli_cmp(u, v) < 0) - vli_add(u, u, mod); - - vli_sub(u, u, v); - if (!EVEN(u)) - carry = vli_add(u, u, mod); - - vli_rshift1(u); - if (carry) - u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; - } else { - vli_sub(b, b, a); - vli_rshift1(b); - - if (vli_cmp(v, u) < 0) - vli_add(v, v, mod); - - vli_sub(v, v, u); - if (!EVEN(v)) - carry = vli_add(v, v, mod); - - vli_rshift1(v); - if (carry) - v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull; - } - } - - vli_set(result, u); -} - -/* ------ Point operations ------ */ - -/* Returns true if p_point is the point at infinity, false otherwise. */ -static bool ecc_point_is_zero(const struct ecc_point *point) -{ - return (vli_is_zero(point->x) && vli_is_zero(point->y)); -} - -/* Point multiplication algorithm using Montgomery's ladder with co-Z - * coordinates. From http://eprint.iacr.org/2011/338.pdf - */ - -/* Double in place */ -static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1) -{ - /* t1 = x, t2 = y, t3 = z */ - u64 t4[NUM_ECC_DIGITS]; - u64 t5[NUM_ECC_DIGITS]; - - if (vli_is_zero(z1)) - return; - - vli_mod_square_fast(t4, y1); /* t4 = y1^2 */ - vli_mod_mult_fast(t5, x1, t4); /* t5 = x1*y1^2 = A */ - vli_mod_square_fast(t4, t4); /* t4 = y1^4 */ - vli_mod_mult_fast(y1, y1, z1); /* t2 = y1*z1 = z3 */ - vli_mod_square_fast(z1, z1); /* t3 = z1^2 */ - - vli_mod_add(x1, x1, z1, curve_p); /* t1 = x1 + z1^2 */ - vli_mod_add(z1, z1, z1, curve_p); /* t3 = 2*z1^2 */ - vli_mod_sub(z1, x1, z1, curve_p); /* t3 = x1 - z1^2 */ - vli_mod_mult_fast(x1, x1, z1); /* t1 = x1^2 - z1^4 */ - - vli_mod_add(z1, x1, x1, curve_p); /* t3 = 2*(x1^2 - z1^4) */ - vli_mod_add(x1, x1, z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */ - if (vli_test_bit(x1, 0)) { - u64 carry = vli_add(x1, x1, curve_p); - vli_rshift1(x1); - x1[NUM_ECC_DIGITS - 1] |= carry << 63; - } else { - vli_rshift1(x1); - } - /* t1 = 3/2*(x1^2 - z1^4) = B */ - - vli_mod_square_fast(z1, x1); /* t3 = B^2 */ - vli_mod_sub(z1, z1, t5, curve_p); /* t3 = B^2 - A */ - vli_mod_sub(z1, z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */ - vli_mod_sub(t5, t5, z1, curve_p); /* t5 = A - x3 */ - vli_mod_mult_fast(x1, x1, t5); /* t1 = B * (A - x3) */ - vli_mod_sub(t4, x1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */ - - vli_set(x1, z1); - vli_set(z1, y1); - vli_set(y1, t4); -} - -/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */ -static void apply_z(u64 *x1, u64 *y1, u64 *z) -{ - u64 t1[NUM_ECC_DIGITS]; - - vli_mod_square_fast(t1, z); /* z^2 */ - vli_mod_mult_fast(x1, x1, t1); /* x1 * z^2 */ - vli_mod_mult_fast(t1, t1, z); /* z^3 */ - vli_mod_mult_fast(y1, y1, t1); /* y1 * z^3 */ 
-} - -/* P = (x1, y1) => 2P, (x2, y2) => P' */ -static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, - u64 *p_initial_z) -{ - u64 z[NUM_ECC_DIGITS]; - - vli_set(x2, x1); - vli_set(y2, y1); - - vli_clear(z); - z[0] = 1; - - if (p_initial_z) - vli_set(z, p_initial_z); - - apply_z(x1, y1, z); - - ecc_point_double_jacobian(x1, y1, z); - - apply_z(x2, y2, z); -} - -/* Input P = (x1, y1, Z), Q = (x2, y2, Z) - * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) - * or P => P', Q => P + Q - */ -static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2) -{ - /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ - u64 t5[NUM_ECC_DIGITS]; - - vli_mod_sub(t5, x2, x1, curve_p); /* t5 = x2 - x1 */ - vli_mod_square_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */ - vli_mod_mult_fast(x1, x1, t5); /* t1 = x1*A = B */ - vli_mod_mult_fast(x2, x2, t5); /* t3 = x2*A = C */ - vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y2 - y1 */ - vli_mod_square_fast(t5, y2); /* t5 = (y2 - y1)^2 = D */ - - vli_mod_sub(t5, t5, x1, curve_p); /* t5 = D - B */ - vli_mod_sub(t5, t5, x2, curve_p); /* t5 = D - B - C = x3 */ - vli_mod_sub(x2, x2, x1, curve_p); /* t3 = C - B */ - vli_mod_mult_fast(y1, y1, x2); /* t2 = y1*(C - B) */ - vli_mod_sub(x2, x1, t5, curve_p); /* t3 = B - x3 */ - vli_mod_mult_fast(y2, y2, x2); /* t4 = (y2 - y1)*(B - x3) */ - vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y3 */ - - vli_set(x2, t5); -} - -/* Input P = (x1, y1, Z), Q = (x2, y2, Z) - * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3) - * or P => P - Q, Q => P + Q - */ -static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2) -{ - /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ - u64 t5[NUM_ECC_DIGITS]; - u64 t6[NUM_ECC_DIGITS]; - u64 t7[NUM_ECC_DIGITS]; - - vli_mod_sub(t5, x2, x1, curve_p); /* t5 = x2 - x1 */ - vli_mod_square_fast(t5, t5); /* t5 = (x2 - x1)^2 = A */ - vli_mod_mult_fast(x1, x1, t5); /* t1 = x1*A = B */ - vli_mod_mult_fast(x2, x2, t5); /* t3 = x2*A = C */ - vli_mod_add(t5, y2, y1, curve_p); /* t4 = y2 + y1 */ - vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y2 - y1 */ - - vli_mod_sub(t6, x2, x1, curve_p); /* t6 = C - B */ - vli_mod_mult_fast(y1, y1, t6); /* t2 = y1 * (C - B) */ - vli_mod_add(t6, x1, x2, curve_p); /* t6 = B + C */ - vli_mod_square_fast(x2, y2); /* t3 = (y2 - y1)^2 */ - vli_mod_sub(x2, x2, t6, curve_p); /* t3 = x3 */ - - vli_mod_sub(t7, x1, x2, curve_p); /* t7 = B - x3 */ - vli_mod_mult_fast(y2, y2, t7); /* t4 = (y2 - y1)*(B - x3) */ - vli_mod_sub(y2, y2, y1, curve_p); /* t4 = y3 */ - - vli_mod_square_fast(t7, t5); /* t7 = (y2 + y1)^2 = F */ - vli_mod_sub(t7, t7, t6, curve_p); /* t7 = x3' */ - vli_mod_sub(t6, t7, x1, curve_p); /* t6 = x3' - B */ - vli_mod_mult_fast(t6, t6, t5); /* t6 = (y2 + y1)*(x3' - B) */ - vli_mod_sub(y1, t6, y1, curve_p); /* t2 = y3' */ - - vli_set(x1, t7); -} - -static void ecc_point_mult(struct ecc_point *result, - const struct ecc_point *point, u64 *scalar, - u64 *initial_z, int num_bits) -{ - /* R0 and R1 */ - u64 rx[2][NUM_ECC_DIGITS]; - u64 ry[2][NUM_ECC_DIGITS]; - u64 z[NUM_ECC_DIGITS]; - int i, nb; - - vli_set(rx[1], point->x); - vli_set(ry[1], point->y); - - xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z); - - for (i = num_bits - 2; i > 0; i--) { - nb = !vli_test_bit(scalar, i); - xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]); - xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]); - } - - nb = !vli_test_bit(scalar, 0); - xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb]); - - /* Find final 1/Z value. 
*/ - vli_mod_sub(z, rx[1], rx[0], curve_p); /* X1 - X0 */ - vli_mod_mult_fast(z, z, ry[1 - nb]); /* Yb * (X1 - X0) */ - vli_mod_mult_fast(z, z, point->x); /* xP * Yb * (X1 - X0) */ - vli_mod_inv(z, z, curve_p); /* 1 / (xP * Yb * (X1 - X0)) */ - vli_mod_mult_fast(z, z, point->y); /* yP / (xP * Yb * (X1 - X0)) */ - vli_mod_mult_fast(z, z, rx[1 - nb]); /* Xb * yP / (xP * Yb * (X1 - X0)) */ - /* End 1/Z calculation */ - - xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb]); - - apply_z(rx[0], ry[0], z); - - vli_set(result->x, rx[0]); - vli_set(result->y, ry[0]); -} - -static void ecc_bytes2native(const u8 bytes[ECC_BYTES], - u64 native[NUM_ECC_DIGITS]) -{ - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) { - const u8 *digit = bytes + 8 * (NUM_ECC_DIGITS - 1 - i); - - native[NUM_ECC_DIGITS - 1 - i] = - ((u64) digit[0] << 0) | - ((u64) digit[1] << 8) | - ((u64) digit[2] << 16) | - ((u64) digit[3] << 24) | - ((u64) digit[4] << 32) | - ((u64) digit[5] << 40) | - ((u64) digit[6] << 48) | - ((u64) digit[7] << 56); - } -} - -static void ecc_native2bytes(const u64 native[NUM_ECC_DIGITS], - u8 bytes[ECC_BYTES]) -{ - int i; - - for (i = 0; i < NUM_ECC_DIGITS; i++) { - u8 *digit = bytes + 8 * (NUM_ECC_DIGITS - 1 - i); - - digit[0] = native[NUM_ECC_DIGITS - 1 - i] >> 0; - digit[1] = native[NUM_ECC_DIGITS - 1 - i] >> 8; - digit[2] = native[NUM_ECC_DIGITS - 1 - i] >> 16; - digit[3] = native[NUM_ECC_DIGITS - 1 - i] >> 24; - digit[4] = native[NUM_ECC_DIGITS - 1 - i] >> 32; - digit[5] = native[NUM_ECC_DIGITS - 1 - i] >> 40; - digit[6] = native[NUM_ECC_DIGITS - 1 - i] >> 48; - digit[7] = native[NUM_ECC_DIGITS - 1 - i] >> 56; - } -} - -bool ecc_make_key(u8 public_key[64], u8 private_key[32]) -{ - struct ecc_point pk; - u64 priv[NUM_ECC_DIGITS]; - unsigned int tries = 0; - - do { - if (tries++ >= MAX_TRIES) - return false; - - get_random_bytes(priv, ECC_BYTES); - - if (vli_is_zero(priv)) - continue; - - /* Make sure the private key is in the range [1, n-1]. */ - if (vli_cmp(curve_n, priv) != 1) - continue; - - ecc_point_mult(&pk, &curve_g, priv, NULL, vli_num_bits(priv)); - } while (ecc_point_is_zero(&pk)); - - ecc_native2bytes(priv, private_key); - ecc_native2bytes(pk.x, public_key); - ecc_native2bytes(pk.y, &public_key[32]); - - return true; -} - -bool ecdh_shared_secret(const u8 public_key[64], const u8 private_key[32], - u8 secret[32]) -{ - u64 priv[NUM_ECC_DIGITS]; - u64 rand[NUM_ECC_DIGITS]; - struct ecc_point product, pk; - - get_random_bytes(rand, ECC_BYTES); - - ecc_bytes2native(public_key, pk.x); - ecc_bytes2native(&public_key[32], pk.y); - ecc_bytes2native(private_key, priv); - - ecc_point_mult(&product, &pk, priv, rand, vli_num_bits(priv)); - - ecc_native2bytes(product.x, secret); - - return !ecc_point_is_zero(&product); -} diff --git a/net/bluetooth/ecc.h b/net/bluetooth/ecc.h deleted file mode 100644 index 8d6a2f4d1905f33df35e1d2380f7e49cfe0f6c5f..0000000000000000000000000000000000000000 --- a/net/bluetooth/ecc.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2013, Kenneth MacKay - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* Create a public/private key pair. - * Outputs: - * public_key - Will be filled in with the public key. - * private_key - Will be filled in with the private key. - * - * Returns true if the key pair was generated successfully, false - * if an error occurred. The keys are with the LSB first. - */ -bool ecc_make_key(u8 public_key[64], u8 private_key[32]); - -/* Compute a shared secret given your secret key and someone else's - * public key. - * Note: It is recommended that you hash the result of ecdh_shared_secret - * before using it for symmetric encryption or HMAC. - * - * Inputs: - * public_key - The public key of the remote party - * private_key - Your private key. - * - * Outputs: - * secret - Will be filled in with the shared secret value. - * - * Returns true if the shared secret was generated successfully, false - * if an error occurred. Both input and output parameters are with the - * LSB first. - */ -bool ecdh_shared_secret(const u8 public_key[64], const u8 private_key[32], - u8 secret[32]); diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..24d4e60f8c48b8c02532558f7a90bf273f586274 --- /dev/null +++ b/net/bluetooth/ecdh_helper.c @@ -0,0 +1,231 @@ +/* + * ECDH helper functions - KPP wrappings + * + * Copyright (C) 2017 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + * SOFTWARE IS DISCLAIMED. 
+ */ +#include "ecdh_helper.h" + +#include <linux/scatterlist.h> +#include <crypto/kpp.h> +#include <crypto/ecdh.h> + +struct ecdh_completion { + struct completion completion; + int err; +}; + +static void ecdh_complete(struct crypto_async_request *req, int err) +{ + struct ecdh_completion *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + +static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) + out[i] = __swab64(in[ndigits - 1 - i]); +} + +bool compute_ecdh_secret(const u8 public_key[64], const u8 private_key[32], + u8 secret[32]) +{ + struct crypto_kpp *tfm; + struct kpp_request *req; + struct ecdh p; + struct ecdh_completion result; + struct scatterlist src, dst; + u8 *tmp, *buf; + unsigned int buf_len; + int err = -ENOMEM; + + tmp = kmalloc(64, GFP_KERNEL); + if (!tmp) + return false; + + tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); + if (IS_ERR(tfm)) { + pr_err("alg: kpp: Failed to load tfm for kpp: %ld\n", + PTR_ERR(tfm)); + goto free_tmp; + } + + req = kpp_request_alloc(tfm, GFP_KERNEL); + if (!req) + goto free_kpp; + + init_completion(&result.completion); + + /* Security Manager Protocol holds digits in little-endian order + * while the ECC API expects big-endian data + */ + swap_digits((u64 *)private_key, (u64 *)tmp, 4); + p.key = (char *)tmp; + p.key_size = 32; + /* Set curve_id */ + p.curve_id = ECC_CURVE_NIST_P256; + buf_len = crypto_ecdh_key_len(&p); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) { + pr_err("alg: kpp: Failed to allocate %d bytes for buf\n", + buf_len); + goto free_req; + } + crypto_ecdh_encode_key(buf, buf_len, &p); + + /* Set A private Key */ + err = crypto_kpp_set_secret(tfm, (void *)buf, buf_len); + if (err) + goto free_all; + + swap_digits((u64 *)public_key, (u64 *)tmp, 4); /* x */ + swap_digits((u64 *)&public_key[32], (u64 *)&tmp[32], 4); /* y */ + + sg_init_one(&src, tmp, 64); + sg_init_one(&dst, secret, 32); + kpp_request_set_input(req, &src, 64); + kpp_request_set_output(req, &dst, 32); + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ecdh_complete, &result); + err = crypto_kpp_compute_shared_secret(req); + if (err == -EINPROGRESS) { + wait_for_completion(&result.completion); + err = result.err; + } + if (err < 0) { + pr_err("alg: ecdh: compute shared secret failed. 
err %d\n", + err); + goto free_all; + } + + swap_digits((u64 *)secret, (u64 *)tmp, 4); + memcpy(secret, tmp, 32); + +free_all: + kzfree(buf); +free_req: + kpp_request_free(req); +free_kpp: + crypto_free_kpp(tfm); +free_tmp: + kfree(tmp); + return (err == 0); +} + +bool generate_ecdh_keys(u8 public_key[64], u8 private_key[32]) +{ + struct crypto_kpp *tfm; + struct kpp_request *req; + struct ecdh p; + struct ecdh_completion result; + struct scatterlist dst; + u8 *tmp, *buf; + unsigned int buf_len; + int err = -ENOMEM; + const unsigned short max_tries = 16; + unsigned short tries = 0; + + tmp = kmalloc(64, GFP_KERNEL); + if (!tmp) + return false; + + tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); + if (IS_ERR(tfm)) { + pr_err("alg: kpp: Failed to load tfm for kpp: %ld\n", + PTR_ERR(tfm)); + goto free_tmp; + } + + req = kpp_request_alloc(tfm, GFP_KERNEL); + if (!req) + goto free_kpp; + + init_completion(&result.completion); + + /* Set curve_id */ + p.curve_id = ECC_CURVE_NIST_P256; + p.key_size = 32; + buf_len = crypto_ecdh_key_len(&p); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) { + pr_err("alg: kpp: Failed to allocate %d bytes for buf\n", + buf_len); + goto free_req; + } + + do { + if (tries++ >= max_tries) + goto free_all; + + /* Set private Key */ + p.key = (char *)private_key; + crypto_ecdh_encode_key(buf, buf_len, &p); + err = crypto_kpp_set_secret(tfm, buf, buf_len); + if (err) + goto free_all; + + sg_init_one(&dst, tmp, 64); + kpp_request_set_input(req, NULL, 0); + kpp_request_set_output(req, &dst, 64); + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ecdh_complete, &result); + + err = crypto_kpp_generate_public_key(req); + + if (err == -EINPROGRESS) { + wait_for_completion(&result.completion); + err = result.err; + } + + /* Private key is not valid. Regenerate */ + if (err == -EINVAL) + continue; + + if (err < 0) + goto free_all; + else + break; + + } while (true); + + /* Keys are handed back in little endian as expected by Security + * Manager Protocol + */ + swap_digits((u64 *)tmp, (u64 *)public_key, 4); /* x */ + swap_digits((u64 *)&tmp[32], (u64 *)&public_key[32], 4); /* y */ + swap_digits((u64 *)private_key, (u64 *)tmp, 4); + memcpy(private_key, tmp, 32); + +free_all: + kzfree(buf); +free_req: + kpp_request_free(req); +free_kpp: + crypto_free_kpp(tfm); +free_tmp: + kfree(tmp); + return (err == 0); +} diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..7a423faf76e5657c6a4c860031138f59eef0dcc0 --- /dev/null +++ b/net/bluetooth/ecdh_helper.h @@ -0,0 +1,27 @@ +/* + * ECDH helper functions - KPP wrappings + * + * Copyright (C) 2017 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + * SOFTWARE IS DISCLAIMED. + */ +#include + +bool compute_ecdh_secret(const u8 pub_a[64], const u8 priv_b[32], + u8 secret[32]); +bool generate_ecdh_keys(u8 public_key[64], u8 private_key[32]); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 3ac89e9ace71556c66804b19e8dde8d397eb308f..05686776a5fb78d67dd38b22632ccfd1355b857e 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2950,8 +2950,8 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = 0x0060; hdev->le_scan_window = 0x0030; - hdev->le_conn_min_interval = 0x0028; - hdev->le_conn_max_interval = 0x0038; + hdev->le_conn_min_interval = 0x0018; + hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; hdev->le_supv_timeout = 0x002a; hdev->le_def_tx_len = 0x001b; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index f64d6566021fccab9208518a1c53934b2da7f708..638bf0e1a2e390133fc7bcfa2041fc41a94d413f 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1680,7 +1680,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) + if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE| + MSG_CMSG_COMPAT)) return -EINVAL; if (len < 4 || len > HCI_MAX_FRAME_SIZE) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index fc7f321a382369f0d5097ce785921b1298a237f0..f88ac99528ce448d8e0f8ea5eb06ee2d7719afab 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -2425,6 +2425,22 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan, return 0; } +static void l2cap_le_flowctl_send(struct l2cap_chan *chan) +{ + int sent = 0; + + BT_DBG("chan %p", chan); + + while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { + l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); + chan->tx_credits--; + sent++; + } + + BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits, + skb_queue_len(&chan->tx_q)); +} + int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct sk_buff *skb; @@ -2458,9 +2474,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (len > chan->omtu) return -EMSGSIZE; - if (!chan->tx_credits) - return -EAGAIN; - __skb_queue_head_init(&seg_queue); err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len); @@ -2475,10 +2488,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); - while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { - l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); - chan->tx_credits--; - } + l2cap_le_flowctl_send(chan); if (!chan->tx_credits) chan->ops->suspend(chan); @@ -5570,10 +5580,8 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, chan->tx_credits += credits; - while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { - l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); - chan->tx_credits--; - } + /* Resume sending */ + l2cap_le_flowctl_send(chan); if (chan->tx_credits) chan->ops->resume(chan); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index f7eb02f09b54055c9b8c17b6fdcf53ec5041aa11..8ebca9033d60621ba686656242864dcf6a721dd7 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c 
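The l2cap_core.c hunks above are the interesting part for LE flow control: l2cap_chan_send() no longer fails with -EAGAIN when the channel has no credits; frames are queued unconditionally and the new l2cap_le_flowctl_send() helper drains the queue from both the send path and the LE Flow Control Credit handler. A minimal stand-alone sketch of that credit discipline follows (plain C with illustrative names such as toy_chan; this is not the kernel API):

	#include <stdio.h>

	#define QSIZE 8

	struct toy_chan {
		unsigned int tx_credits;	/* credits granted by the peer */
		int q[QSIZE];			/* queued frames */
		unsigned int head, tail;	/* ring indices */
	};

	/* Drain the queue only while credits remain, one credit per PDU,
	 * mirroring what l2cap_le_flowctl_send() does for tx_q.
	 */
	static void toy_flowctl_send(struct toy_chan *c)
	{
		while (c->tx_credits && c->head != c->tail) {
			printf("tx frame %d\n", c->q[c->head++ % QSIZE]);
			c->tx_credits--;
		}
	}

	int main(void)
	{
		struct toy_chan c = { .tx_credits = 2 };
		int i;

		for (i = 1; i <= 3; i++)	/* queue three frames */
			c.q[c.tail++ % QSIZE] = i;

		toy_flowctl_send(&c);	/* frames 1 and 2 go out, 3 stays queued */
		c.tx_credits += 5;	/* peer grants credits (LE credits PDU) */
		toy_flowctl_send(&c);	/* frame 3 is sent now */
		return 0;
	}

The effect of dropping the early -EAGAIN return is visible here: a frame that arrives while credits are exhausted is not rejected, it simply waits until the peer grants more credits, at which point the same helper resumes transmission.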
@@ -311,7 +311,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) skb_queue_head_init(&d->tx_queue); mutex_init(&d->lock); - atomic_set(&d->refcnt, 1); + refcount_set(&d->refcnt, 1); rfcomm_dlc_clear_state(d); @@ -342,7 +342,7 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) { struct rfcomm_session *s = d->session; - BT_DBG("dlc %p refcnt %d session %p", d, atomic_read(&d->refcnt), s); + BT_DBG("dlc %p refcnt %d session %p", d, refcount_read(&d->refcnt), s); list_del(&d->list); d->session = NULL; diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c index dc688f13e49612cd74decf8853b8c270803942f3..ee92c925ecc5d3ac42bc041f60924da80672e1b8 100644 --- a/net/bluetooth/selftest.c +++ b/net/bluetooth/selftest.c @@ -26,7 +26,7 @@ #include #include -#include "ecc.h" +#include "ecdh_helper.h" #include "smp.h" #include "selftest.h" @@ -142,18 +142,30 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32], const u8 pub_a[64], const u8 pub_b[64], const u8 dhkey[32]) { - u8 dhkey_a[32], dhkey_b[32]; + u8 *tmp, *dhkey_a, *dhkey_b; + int ret = 0; - ecdh_shared_secret(pub_b, priv_a, dhkey_a); - ecdh_shared_secret(pub_a, priv_b, dhkey_b); - - if (memcmp(dhkey_a, dhkey, 32)) + tmp = kmalloc(64, GFP_KERNEL); + if (!tmp) return -EINVAL; + dhkey_a = &tmp[0]; + dhkey_b = &tmp[32]; + + compute_ecdh_secret(pub_b, priv_a, dhkey_a); + compute_ecdh_secret(pub_a, priv_b, dhkey_b); + + if (memcmp(dhkey_a, dhkey, 32)) { + ret = -EINVAL; + goto out; + } + if (memcmp(dhkey_b, dhkey, 32)) - return -EINVAL; + ret = -EINVAL; - return 0; +out: + kfree(dhkey_a); + return ret; } static char test_ecdh_buffer[32]; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index fae391f1871f138c802722d8d8d86d817a97947e..14585edc94397c6f42e5f5fcee2f7f471019819e 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -31,7 +31,7 @@ #include #include -#include "ecc.h" +#include "ecdh_helper.h" #include "smp.h" #define SMP_DEV(hdev) \ @@ -569,8 +569,11 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) smp->debug_key = true; } else { while (true) { + /* Seed private key with random number */ + get_random_bytes(smp->local_sk, 32); + /* Generate local key pair for Secure Connections */ - if (!ecc_make_key(smp->local_pk, smp->local_sk)) + if (!generate_ecdh_keys(smp->local_pk, smp->local_sk)) return -EIO; /* This is unlikely, but we need to check that @@ -1895,8 +1898,11 @@ static u8 sc_send_public_key(struct smp_chan *smp) set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); } else { while (true) { + /* Seed private key with random number */ + get_random_bytes(smp->local_sk, 32); + /* Generate local key pair for Secure Connections */ - if (!ecc_make_key(smp->local_pk, smp->local_sk)) + if (!generate_ecdh_keys(smp->local_pk, smp->local_sk)) return SMP_UNSPECIFIED; /* This is unlikely, but we need to check that @@ -2670,7 +2676,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk); SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32); - if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey)) + if (!compute_ecdh_secret(smp->remote_pk, smp->local_sk, smp->dhkey)) return SMP_UNSPECIFIED; SMP_DBG("DHKey %32phN", smp->dhkey); @@ -3483,6 +3489,32 @@ void smp_unregister(struct hci_dev *hdev) #if IS_ENABLED(CONFIG_BT_SELFTEST_SMP) +static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) + out[i] = __swab64(in[ndigits - 1 - i]); +} + 
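The swap_digits() helper duplicated above for the SMP self-test encodes a simple identity: reversing the order of the four 64-bit digits while byte-swapping each one is the same as reversing all 32 bytes, which converts a scalar between the little-endian order used by the Security Manager Protocol and the big-endian layout the KPP ECDH code consumes. A quick stand-alone check of that identity (illustrative only, not kernel code; __builtin_bswap64 is the GCC/Clang builtin standing in for __swab64):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	union scalar256 {
		uint8_t b[32];
		uint64_t d[4];
	};

	/* Same digit-reversing byte swap as the kernel helper. */
	static void swap_digits(const uint64_t *in, uint64_t *out,
				unsigned int ndigits)
	{
		unsigned int i;

		for (i = 0; i < ndigits; i++)
			out[i] = __builtin_bswap64(in[ndigits - 1 - i]);
	}

	int main(void)
	{
		union scalar256 le, be;
		uint8_t ref[32];
		int i;

		for (i = 0; i < 32; i++) {
			le.b[i] = (uint8_t)i;
			ref[i] = (uint8_t)(31 - i);	/* full byte reversal */
		}

		swap_digits(le.d, be.d, 4);
		puts(memcmp(be.b, ref, 32) == 0 ? "match" : "mismatch");
		return 0;
	}

The test_debug_key() self-test below relies on the same convention: debug_sk is stored little-endian as SMP expects, so it is digit-swapped before being handed to generate_ecdh_keys(), which converts the keys back to little-endian on the way out.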
+static int __init test_debug_key(void) +{ + u8 pk[64], sk[32]; + + swap_digits((u64 *)debug_sk, (u64 *)sk, 4); + + if (!generate_ecdh_keys(pk, sk)) + return -EINVAL; + + if (memcmp(sk, debug_sk, 32)) + return -EINVAL; + + if (memcmp(pk, debug_pk, 64)) + return -EINVAL; + + return 0; +} + static int __init test_ah(struct crypto_cipher *tfm_aes) { const u8 irk[16] = { @@ -3738,6 +3770,12 @@ static int __init run_selftests(struct crypto_cipher *tfm_aes, calltime = ktime_get(); + err = test_debug_key(); + if (err) { + BT_ERR("debug_key test failed"); + goto done; + } + err = test_ah(tfm_aes); if (err) { BT_ERR("smp_ah test failed"); diff --git a/net/bpf/Makefile b/net/bpf/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..27b2992a06925240a8118e8182e8c7f048a73bbf --- /dev/null +++ b/net/bpf/Makefile @@ -0,0 +1 @@ +obj-y := test_run.o diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c new file mode 100644 index 0000000000000000000000000000000000000000..6be41a44d688a44970981fc9c2f1aa35666807fa --- /dev/null +++ b/net/bpf/test_run.c @@ -0,0 +1,173 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include + +static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx) +{ + u32 ret; + + preempt_disable(); + rcu_read_lock(); + ret = BPF_PROG_RUN(prog, ctx); + rcu_read_unlock(); + preempt_enable(); + + return ret; +} + +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) +{ + u64 time_start, time_spent = 0; + u32 ret = 0, i; + + if (!repeat) + repeat = 1; + time_start = ktime_get_ns(); + for (i = 0; i < repeat; i++) { + ret = bpf_test_run_one(prog, ctx); + if (need_resched()) { + if (signal_pending(current)) + break; + time_spent += ktime_get_ns() - time_start; + cond_resched(); + time_start = ktime_get_ns(); + } + } + time_spent += ktime_get_ns() - time_start; + do_div(time_spent, repeat); + *time = time_spent > U32_MAX ? 
U32_MAX : (u32)time_spent; + + return ret; +} + +static int bpf_test_finish(const union bpf_attr *kattr, + union bpf_attr __user *uattr, const void *data, + u32 size, u32 retval, u32 duration) +{ + void __user *data_out = u64_to_user_ptr(kattr->test.data_out); + int err = -EFAULT; + + if (data_out && copy_to_user(data_out, data, size)) + goto out; + if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size))) + goto out; + if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval))) + goto out; + if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration))) + goto out; + err = 0; +out: + return err; +} + +static void *bpf_test_init(const union bpf_attr *kattr, u32 size, + u32 headroom, u32 tailroom) +{ + void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + void *data; + + if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) + return ERR_PTR(-EINVAL); + + data = kzalloc(size + headroom + tailroom, GFP_USER); + if (!data) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(data + headroom, data_in, size)) { + kfree(data); + return ERR_PTR(-EFAULT); + } + return data; +} + +int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + bool is_l2 = false, is_direct_pkt_access = false; + u32 size = kattr->test.data_size_in; + u32 repeat = kattr->test.repeat; + u32 retval, duration; + struct sk_buff *skb; + void *data; + int ret; + + data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN, + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + if (IS_ERR(data)) + return PTR_ERR(data); + + switch (prog->type) { + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + is_l2 = true; + /* fall through */ + case BPF_PROG_TYPE_LWT_IN: + case BPF_PROG_TYPE_LWT_OUT: + case BPF_PROG_TYPE_LWT_XMIT: + is_direct_pkt_access = true; + break; + default: + break; + } + + skb = build_skb(data, 0); + if (!skb) { + kfree(data); + return -ENOMEM; + } + + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + __skb_put(skb, size); + skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev); + skb_reset_network_header(skb); + + if (is_l2) + __skb_push(skb, ETH_HLEN); + if (is_direct_pkt_access) + bpf_compute_data_end(skb); + retval = bpf_test_run(prog, skb, repeat, &duration); + if (!is_l2) + __skb_push(skb, ETH_HLEN); + size = skb->len; + /* bpf program can never convert linear skb to non-linear */ + if (WARN_ON_ONCE(skb_is_nonlinear(skb))) + size = skb_headlen(skb); + ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration); + kfree_skb(skb); + return ret; +} + +int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + u32 size = kattr->test.data_size_in; + u32 repeat = kattr->test.repeat; + struct xdp_buff xdp = {}; + u32 retval, duration; + void *data; + int ret; + + data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0); + if (IS_ERR(data)) + return PTR_ERR(data); + + xdp.data_hard_start = data; + xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN; + xdp.data_end = xdp.data + size; + + retval = bpf_test_run(prog, &xdp, repeat, &duration); + if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN) + size = xdp.data_end - xdp.data; + ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration); + kfree(data); + return ret; +} diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 6e08b7199dd7442acdbd4f85e5ef6315b121ca06..ab0c7cc8448f4824d69b9260e79ede7aac14dd9e 100644 --- a/net/bridge/br_fdb.c +++ 
b/net/bridge/br_fdb.c @@ -589,6 +589,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, if (unlikely(source != fdb->dst)) { fdb->dst = source; fdb_modified = true; + /* Take over HW learned entry */ + if (unlikely(fdb->added_by_external_learn)) + fdb->added_by_external_learn = 0; } if (now != fdb->updated) fdb->updated = now; @@ -854,6 +857,8 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, br_fdb_update(br, p, addr, vid, true); rcu_read_unlock(); local_bh_enable(); + } else if (ndm->ndm_flags & NTF_EXT_LEARNED) { + err = br_fdb_external_learn_add(br, p, addr, vid); } else { spin_lock_bh(&br->hash_lock); err = fdb_add_entry(br, p, addr, ndm->ndm_state, diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 902af6ba481c999f81ed2fba488d91665e03c02e..48fb17417fac3397e74cb712388076ed1ee87865 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -183,13 +183,23 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, struct net_bridge_port *p; list_for_each_entry_rcu(p, &br->port_list, list) { - /* Do not flood unicast traffic to ports that turn it off */ - if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) - continue; - /* Do not flood if mc off, except for traffic we originate */ - if (pkt_type == BR_PKT_MULTICAST && - !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) - continue; + /* Do not flood unicast traffic to ports that turn it off, nor + * other traffic if flood off, except for traffic we originate + */ + switch (pkt_type) { + case BR_PKT_UNICAST: + if (!(p->flags & BR_FLOOD)) + continue; + break; + case BR_PKT_MULTICAST: + if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) + continue; + break; + case BR_PKT_BROADCAST: + if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev) + continue; + break; + } /* Do not flood to ports that enable proxy ARP */ if (p->flags & BR_PROXYARP) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index a8d0ed282a109a1f4075565a0934e742febb387b..7f8d05cf90656e43211d9682657b788f99736722 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -360,7 +361,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; p->port_no = index; - p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD; + p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; br_init_port(p); br_set_state(p, BR_STATE_DISABLED); br_stp_port_timer_init(p); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 056e6ac49d8fc7727fce8fc8320b8db4ac5351d2..b0845480a3ae7f65f347f7ad9399bc6a5d21d806 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -464,7 +464,8 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh, struct net_device *dev; int err; - err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL); + err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL, + NULL); if (err < 0) return err; @@ -568,7 +569,8 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, return ret; } -static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) +static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_bridge_vlan_group *vg; @@ -662,7 +664,8 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) return err; } -static int br_mdb_del(struct sk_buff *skb, 
struct nlmsghdr *nlh) +static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_bridge_vlan_group *vg; diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 1f1e62095464f99eaca8de49a772289f057bd943..067cf03134492a33f10982755105e103666cd1ef 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -997,13 +997,10 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, if (!elem) return okfn(net, sk, skb); - /* We may already have this, but read-locks nest anyway */ - rcu_read_lock(); nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev, sk, net, okfn); ret = nf_hook_slow(skb, &state, elem); - rcu_read_unlock(); if (ret == 1) ret = okfn(net, sk, skb); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 225ef7d5370166baff69080996cf64ff68b055f6..a572db710d4eb68a03ba0d9d958f41f931aff6b9 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -189,6 +189,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, !!(p->flags & BR_FLOOD)) || nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD, !!(p->flags & BR_MCAST_FLOOD)) || + nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD, + !!(p->flags & BR_BCAST_FLOOD)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) || nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI, !!(p->flags & BR_PROXYARP_WIFI)) || @@ -683,6 +685,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST); + br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); @@ -748,8 +751,8 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) if (p && protinfo) { if (protinfo->nla_type & NLA_F_NESTED) { - err = nla_parse_nested(tb, IFLA_BRPORT_MAX, - protinfo, br_port_policy); + err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo, + br_port_policy, NULL); if (err) return err; diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index c913491495ab21ade17dec3512bd1d0a829b8e67..3712c7f0e00cd1addd73cdfe4504b687e425d0ec 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -227,8 +227,8 @@ int br_parse_vlan_tunnel_info(struct nlattr *attr, memset(tinfo, 0, sizeof(*tinfo)); - err = nla_parse_nested(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX, - attr, vlan_tunnel_policy); + err = nla_parse_nested(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX, attr, + vlan_tunnel_policy, NULL); if (err < 0) return err; diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 79aee759aba5906692d0c715c851cd3dca96801b..5d5d413a6cf8a311ddde9e341caa39a8f3640b35 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -173,6 +173,7 @@ BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP); BRPORT_ATTR_FLAG(proxyarp_wifi, BR_PROXYARP_WIFI); BRPORT_ATTR_FLAG(multicast_flood, BR_MCAST_FLOOD); +BRPORT_ATTR_FLAG(broadcast_flood, BR_BCAST_FLOOD); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -221,6 +222,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_proxyarp, &brport_attr_proxyarp_wifi, 
&brport_attr_multicast_flood, + &brport_attr_broadcast_flood, NULL }; diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index 98b9c8e8615ebc6e2ddefd1885a01af3ef58781b..707caea397433b66c887e527f0e4532eaf260880 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c @@ -62,10 +62,10 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset) pptr = skb_header_pointer(skb, offset, sizeof(_ports), &_ports); if (pptr == NULL) { - printk(" INCOMPLETE TCP/UDP header"); + pr_cont(" INCOMPLETE TCP/UDP header"); return; } - printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst)); + pr_cont(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst)); } } @@ -100,11 +100,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (ih == NULL) { - printk(" INCOMPLETE IP header"); + pr_cont(" INCOMPLETE IP header"); goto out; } - printk(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d", - &ih->saddr, &ih->daddr, ih->tos, ih->protocol); + pr_cont(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d", + &ih->saddr, &ih->daddr, ih->tos, ih->protocol); print_ports(skb, ih->protocol, ih->ihl*4); goto out; } @@ -120,11 +120,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (ih == NULL) { - printk(" INCOMPLETE IPv6 header"); + pr_cont(" INCOMPLETE IPv6 header"); goto out; } - printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d", - &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr); + pr_cont(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d", + &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr); nexthdr = ih->nexthdr; offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off); if (offset_ph == -1) @@ -142,12 +142,12 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL) { - printk(" INCOMPLETE ARP header"); + pr_cont(" INCOMPLETE ARP header"); goto out; } - printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d", - ntohs(ah->ar_hrd), ntohs(ah->ar_pro), - ntohs(ah->ar_op)); + pr_cont(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d", + ntohs(ah->ar_hrd), ntohs(ah->ar_pro), + ntohs(ah->ar_op)); /* If it's for Ethernet and the lengths are OK, * then log the ARP payload @@ -161,17 +161,17 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp); if (ap == NULL) { - printk(" INCOMPLETE ARP payload"); + pr_cont(" INCOMPLETE ARP payload"); goto out; } - printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4", - ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); + pr_cont(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4", + ap->mac_src, ap->ip_src, + ap->mac_dst, ap->ip_dst); } } out: - printk("\n"); + pr_cont("\n"); spin_unlock_bh(&ebt_log_lock); - } static unsigned int diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 8fe36dc3aab29ceac90443f229b9d745b6dfe5d6..2585b100ebbbc332a33209c2b0bf3643eb0dd961 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -65,13 +65,13 @@ static int ebt_broute(struct sk_buff *skb) static int __net_init broute_net_init(struct net *net) { - net->xt.broute_table = ebt_register_table(net, &broute_table); + net->xt.broute_table = ebt_register_table(net, 
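Note: the printk() to pr_cont() conversion in ebt_log is not cosmetic: these calls append fragments to a log line opened by an earlier printk, and without the KERN_CONT marker that pr_cont() implies, each fragment would start a new message at the default loglevel. A minimal illustration (the values are made up):

  #include <linux/printk.h>

  static void example_log_line(void)
  {
          pr_info("INPUT:");                    /* opens the line at KERN_INFO */
          pr_cont(" SPT=%u DPT=%u", 5000, 80);  /* appends, no new prefix */
          pr_cont("\n");                        /* terminates the line */
  }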
&broute_table, NULL); return PTR_ERR_OR_ZERO(net->xt.broute_table); } static void __net_exit broute_net_exit(struct net *net) { - ebt_unregister_table(net, net->xt.broute_table); + ebt_unregister_table(net, net->xt.broute_table, NULL); } static struct pernet_operations broute_net_ops = { diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 593a1bdc079e8ee55165bd13dfef23a0689f3362..f22ef7c219137231e13e756b2695b281b90b9dd8 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -93,13 +93,13 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { static int __net_init frame_filter_net_init(struct net *net) { - net->xt.frame_filter = ebt_register_table(net, &frame_filter); + net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter); return PTR_ERR_OR_ZERO(net->xt.frame_filter); } static void __net_exit frame_filter_net_exit(struct net *net) { - ebt_unregister_table(net, net->xt.frame_filter); + ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter); } static struct pernet_operations frame_filter_net_ops = { @@ -109,20 +109,11 @@ static struct pernet_operations frame_filter_net_ops = { static int __init ebtable_filter_init(void) { - int ret; - - ret = register_pernet_subsys(&frame_filter_net_ops); - if (ret < 0) - return ret; - ret = nf_register_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter)); - if (ret < 0) - unregister_pernet_subsys(&frame_filter_net_ops); - return ret; + return register_pernet_subsys(&frame_filter_net_ops); } static void __exit ebtable_filter_fini(void) { - nf_unregister_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter)); unregister_pernet_subsys(&frame_filter_net_ops); } diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index eb33919821ee12e1b74610c894c22d3d0fa05f2a..2f7a4f314406382a655a94d2ce384226ab906bfb 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -93,13 +93,13 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { static int __net_init frame_nat_net_init(struct net *net) { - net->xt.frame_nat = ebt_register_table(net, &frame_nat); + net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat); return PTR_ERR_OR_ZERO(net->xt.frame_nat); } static void __net_exit frame_nat_net_exit(struct net *net) { - ebt_unregister_table(net, net->xt.frame_nat); + ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat); } static struct pernet_operations frame_nat_net_ops = { @@ -109,20 +109,11 @@ static struct pernet_operations frame_nat_net_ops = { static int __init ebtable_nat_init(void) { - int ret; - - ret = register_pernet_subsys(&frame_nat_net_ops); - if (ret < 0) - return ret; - ret = nf_register_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat)); - if (ret < 0) - unregister_pernet_subsys(&frame_nat_net_ops); - return ret; + return register_pernet_subsys(&frame_nat_net_ops); } static void __exit ebtable_nat_fini(void) { - nf_unregister_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat)); unregister_pernet_subsys(&frame_nat_net_ops); } diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 79b69917f5210c0065ab5c9c37a497ede7cc273b..9ec0c9f908fa712b18bfa25c87aa7bce12cee786 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1157,8 +1157,30 @@ static int do_replace(struct net *net, const void __user *user, return ret; } +static void __ebt_unregister_table(struct net *net, struct ebt_table *table) +{ + int i; + + 
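Note: with the new third argument, ebt_register_table() itself registers the netfilter hooks in the right namespace, one nf_hook_ops entry per bit set in table->valid_hooks (hence the hweight32() count in the ebtables.c hunk below), so the module init/exit paths above shrink to bare register/unregister_pernet_subsys() calls. The resulting shape, sketched for a hypothetical table (example_table and example_hook_ops are placeholders; a real table would need its own per-net slot rather than reusing frame_filter):

  static struct ebt_table example_table;         /* hypothetical */
  static struct nf_hook_ops example_hook_ops[1]; /* one entry per valid hook bit */

  static int __net_init example_net_init(struct net *net)
  {
          /* table registration now also wires up the per-net hooks */
          net->xt.frame_filter = ebt_register_table(net, &example_table,
                                                    example_hook_ops);
          return PTR_ERR_OR_ZERO(net->xt.frame_filter);
  }

  static void __net_exit example_net_exit(struct net *net)
  {
          ebt_unregister_table(net, net->xt.frame_filter, example_hook_ops);
  }

  static struct pernet_operations example_net_ops = {
          .init = example_net_init,
          .exit = example_net_exit,
  };

  static int __init example_init(void)
  {
          /* no separate nf_register_hooks() step any more */
          return register_pernet_subsys(&example_net_ops);
  }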
mutex_lock(&ebt_mutex); + list_del(&table->list); + mutex_unlock(&ebt_mutex); + EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, + ebt_cleanup_entry, net, NULL); + if (table->private->nentries) + module_put(table->me); + vfree(table->private->entries); + if (table->private->chainstack) { + for_each_possible_cpu(i) + vfree(table->private->chainstack[i]); + vfree(table->private->chainstack); + } + vfree(table->private); + kfree(table); +} + struct ebt_table * -ebt_register_table(struct net *net, const struct ebt_table *input_table) +ebt_register_table(struct net *net, const struct ebt_table *input_table, + const struct nf_hook_ops *ops) { struct ebt_table_info *newinfo; struct ebt_table *t, *table; @@ -1238,6 +1260,16 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table) } list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); mutex_unlock(&ebt_mutex); + + if (!ops) + return table; + + ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); + if (ret) { + __ebt_unregister_table(net, table); + return ERR_PTR(ret); + } + return table; free_unlock: mutex_unlock(&ebt_mutex); @@ -1256,29 +1288,12 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table) return ERR_PTR(ret); } -void ebt_unregister_table(struct net *net, struct ebt_table *table) +void ebt_unregister_table(struct net *net, struct ebt_table *table, + const struct nf_hook_ops *ops) { - int i; - - if (!table) { - BUGPRINT("Request to unregister NULL table!!!\n"); - return; - } - mutex_lock(&ebt_mutex); - list_del(&table->list); - mutex_unlock(&ebt_mutex); - EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, - ebt_cleanup_entry, net, NULL); - if (table->private->nentries) - module_put(table->me); - vfree(table->private->entries); - if (table->private->chainstack) { - for_each_possible_cpu(i) - vfree(table->private->chainstack[i]); - vfree(table->private->chainstack); - } - vfree(table->private); - kfree(table); + if (ops) + nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); + __ebt_unregister_table(net, table); } /* userspace just supplied us with counters */ @@ -1713,7 +1728,7 @@ static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, if (*size < sizeof(*ce)) return -EINVAL; - ce = (struct ebt_entry __user *)*dstptr; + ce = *dstptr; if (copy_to_user(ce, e, sizeof(*ce))) return -EFAULT; diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index 5974dbc1ea240fa6b8b2d109891ab211a8d556ff..bb63c9aed55d22af0f9d6538267e50900701a351 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -111,7 +111,7 @@ nft_meta_bridge_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_meta_bridge_type __read_mostly = { .family = NFPROTO_BRIDGE, .name = "meta", - .select_ops = &nft_meta_bridge_select_ops, + .select_ops = nft_meta_bridge_select_ops, .policy = nft_meta_policy, .maxattr = NFTA_META_MAX, .owner = THIS_MODULE, diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 206dc266ecd237c2874d25352dd631e3bc31b002..346ef6b00b8f05b62edc911d06c01692624596d9 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -375,11 +375,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_reject *priv = nft_expr_priv(expr); - int icmp_code, err; - - err = 
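Note: the ".select_ops = &nft_meta_bridge_select_ops" to ".select_ops = nft_meta_bridge_select_ops" change is purely stylistic: a function designator in C already decays to a function pointer, so the explicit & is redundant and both forms produce the same pointer value. For example:

  static int my_handler(void)
  {
          return 0;
  }

  static int (*p1)(void) = my_handler;   /* implicit decay */
  static int (*p2)(void) = &my_handler;  /* identical pointer value */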
nft_reject_bridge_validate(ctx, expr, NULL); - if (err < 0) - return err; + int icmp_code; if (tb[NFTA_REJECT_TYPE] == NULL) return -EINVAL; diff --git a/net/can/af_can.c b/net/can/af_can.c index 5488e4a6ccd062e6f6e7e2b841dde5ef055d4337..b6406fe33c76d1e7ca5f439e9b7409ad3a680e6d 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -2,7 +2,7 @@ * af_can.c - Protocol family CAN core module * (used by different CAN protocol modules) * - * Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -75,20 +75,12 @@ static int stats_timer __read_mostly = 1; module_param(stats_timer, int, S_IRUGO); MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); -/* receive filters subscribed for 'all' CAN devices */ -struct dev_rcv_lists can_rx_alldev_list; -static DEFINE_SPINLOCK(can_rcvlists_lock); - static struct kmem_cache *rcv_cache __read_mostly; /* table of registered CAN protocols */ static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; static DEFINE_MUTEX(proto_tab_lock); -struct timer_list can_stattimer; /* timer for statistics update */ -struct s_stats can_stats; /* packet statistics */ -struct s_pstats can_pstats; /* receive list statistics */ - static atomic_t skbcounter = ATOMIC_INIT(0); /* @@ -145,9 +137,6 @@ static int can_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= CAN_NPROTO) return -EINVAL; - if (!net_eq(net, &init_net)) - return -EAFNOSUPPORT; - cp = can_get_proto(protocol); #ifdef CONFIG_MODULES @@ -228,6 +217,7 @@ int can_send(struct sk_buff *skb, int loop) { struct sk_buff *newskb = NULL; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats; int err = -EINVAL; if (skb->len == CAN_MTU) { @@ -316,8 +306,8 @@ int can_send(struct sk_buff *skb, int loop) netif_rx_ni(newskb); /* update statistics */ - can_stats.tx_frames++; - can_stats.tx_frames_delta++; + can_stats->tx_frames++; + can_stats->tx_frames_delta++; return 0; @@ -331,10 +321,11 @@ EXPORT_SYMBOL(can_send); * af_can rx path */ -static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) +static struct dev_rcv_lists *find_dev_rcv_lists(struct net *net, + struct net_device *dev) { if (!dev) - return &can_rx_alldev_list; + return net->can.can_rx_alldev_list; else return (struct dev_rcv_lists *)dev->ml_priv; } @@ -467,13 +458,14 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, * -ENOMEM on missing cache mem to create subscription entry * -ENODEV unknown device */ -int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, - void (*func)(struct sk_buff *, void *), void *data, - char *ident, struct sock *sk) +int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id, + canid_t mask, void (*func)(struct sk_buff *, void *), + void *data, char *ident, struct sock *sk) { struct receiver *r; struct hlist_head *rl; struct dev_rcv_lists *d; + struct s_pstats *can_pstats = net->can.can_pstats; int err = 0; /* insert new receiver (dev,canid,mask) -> (func,data) */ @@ -481,13 +473,16 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, if (dev && dev->type != ARPHRD_CAN) return -ENODEV; + if (dev && !net_eq(net, dev_net(dev))) + return -ENODEV; + r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); if (!r) return -ENOMEM; - 
spin_lock(&can_rcvlists_lock); + spin_lock(&net->can.can_rcvlists_lock); - d = find_dev_rcv_lists(dev); + d = find_dev_rcv_lists(net, dev); if (d) { rl = find_rcv_list(&can_id, &mask, d); @@ -502,15 +497,15 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, hlist_add_head_rcu(&r->list, rl); d->entries++; - can_pstats.rcv_entries++; - if (can_pstats.rcv_entries_max < can_pstats.rcv_entries) - can_pstats.rcv_entries_max = can_pstats.rcv_entries; + can_pstats->rcv_entries++; + if (can_pstats->rcv_entries_max < can_pstats->rcv_entries) + can_pstats->rcv_entries_max = can_pstats->rcv_entries; } else { kmem_cache_free(rcv_cache, r); err = -ENODEV; } - spin_unlock(&can_rcvlists_lock); + spin_unlock(&net->can.can_rcvlists_lock); return err; } @@ -540,19 +535,24 @@ static void can_rx_delete_receiver(struct rcu_head *rp) * Description: * Removes subscription entry depending on given (subscription) values. */ -void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, - void (*func)(struct sk_buff *, void *), void *data) +void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id, + canid_t mask, void (*func)(struct sk_buff *, void *), + void *data) { struct receiver *r = NULL; struct hlist_head *rl; + struct s_pstats *can_pstats = net->can.can_pstats; struct dev_rcv_lists *d; if (dev && dev->type != ARPHRD_CAN) return; - spin_lock(&can_rcvlists_lock); + if (dev && !net_eq(net, dev_net(dev))) + return; + + spin_lock(&net->can.can_rcvlists_lock); - d = find_dev_rcv_lists(dev); + d = find_dev_rcv_lists(net, dev); if (!d) { pr_err("BUG: receive list not found for " "dev %s, id %03X, mask %03X\n", @@ -588,8 +588,8 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, hlist_del_rcu(&r->list); d->entries--; - if (can_pstats.rcv_entries > 0) - can_pstats.rcv_entries--; + if (can_pstats->rcv_entries > 0) + can_pstats->rcv_entries--; /* remove device structure requested by NETDEV_UNREGISTER */ if (d->remove_on_zero_entries && !d->entries) { @@ -598,7 +598,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, } out: - spin_unlock(&can_rcvlists_lock); + spin_unlock(&net->can.can_rcvlists_lock); /* schedule the receiver item for deletion */ if (r) { @@ -683,11 +683,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) static void can_receive(struct sk_buff *skb, struct net_device *dev) { struct dev_rcv_lists *d; + struct net *net = dev_net(dev); + struct s_stats *can_stats = net->can.can_stats; int matches; /* update statistics */ - can_stats.rx_frames++; - can_stats.rx_frames_delta++; + can_stats->rx_frames++; + can_stats->rx_frames_delta++; /* create non-zero unique skb identifier together with *skb */ while (!(can_skb_prv(skb)->skbcnt)) @@ -696,10 +698,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); /* deliver the packet to sockets listening on all devices */ - matches = can_rcv_filter(&can_rx_alldev_list, skb); + matches = can_rcv_filter(net->can.can_rx_alldev_list, skb); /* find receive list for this device */ - d = find_dev_rcv_lists(dev); + d = find_dev_rcv_lists(net, dev); if (d) matches += can_rcv_filter(d, skb); @@ -709,8 +711,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); if (matches > 0) { - can_stats.matches++; - can_stats.matches_delta++; + can_stats->matches++; + can_stats->matches_delta++; } } @@ -719,9 +721,6 @@ static int can_rcv(struct sk_buff *skb, struct net_device 
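Note: can_rx_register() and can_rx_unregister() now take the namespace explicitly and, as the net_eq() checks above show, refuse a device that lives in a namespace other than the one passed in. A sketch of a socket-based caller, following the pattern raw and bcm adopt below (my_rcv and priv are hypothetical):

  #include <linux/can.h>
  #include <linux/can/core.h>
  #include <net/sock.h>

  static void my_rcv(struct sk_buff *skb, void *data)
  {
          /* called for every matching CAN frame in this namespace */
  }

  static int subscribe(struct sock *sk, struct net_device *dev,
                       canid_t can_id, canid_t mask, void *priv)
  {
          return can_rx_register(sock_net(sk), dev, can_id, mask,
                                 my_rcv, priv, "example", sk);
  }

  static void unsubscribe(struct sock *sk, struct net_device *dev,
                          canid_t can_id, canid_t mask, void *priv)
  {
          can_rx_unregister(sock_net(sk), dev, can_id, mask, my_rcv, priv);
  }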
*dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(!net_eq(dev_net(dev), &init_net))) - goto drop; - if (WARN_ONCE(dev->type != ARPHRD_CAN || skb->len != CAN_MTU || cfd->len > CAN_MAX_DLEN, @@ -743,9 +742,6 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(!net_eq(dev_net(dev), &init_net))) - goto drop; - if (WARN_ONCE(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU || cfd->len > CANFD_MAX_DLEN, @@ -835,9 +831,6 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dev_rcv_lists *d; - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; @@ -855,7 +848,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, break; case NETDEV_UNREGISTER: - spin_lock(&can_rcvlists_lock); + spin_lock(&dev_net(dev)->can.can_rcvlists_lock); d = dev->ml_priv; if (d) { @@ -869,7 +862,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, pr_err("can: notifier: receive list not found for dev " "%s\n", dev->name); - spin_unlock(&can_rcvlists_lock); + spin_unlock(&dev_net(dev)->can.can_rcvlists_lock); break; } @@ -877,6 +870,59 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, return NOTIFY_DONE; } +static int can_pernet_init(struct net *net) +{ + net->can.can_rcvlists_lock = + __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock); + net->can.can_rx_alldev_list = + kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); + + net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); + net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); + + if (IS_ENABLED(CONFIG_PROC_FS)) { + /* the statistics are updated every second (timer triggered) */ + if (stats_timer) { + setup_timer(&net->can.can_stattimer, can_stat_update, + (unsigned long)net); + mod_timer(&net->can.can_stattimer, + round_jiffies(jiffies + HZ)); + } + net->can.can_stats->jiffies_init = jiffies; + can_init_proc(net); + } + + return 0; +} + +static void can_pernet_exit(struct net *net) +{ + struct net_device *dev; + + if (IS_ENABLED(CONFIG_PROC_FS)) { + can_remove_proc(net); + if (stats_timer) + del_timer_sync(&net->can.can_stattimer); + } + + /* remove created dev_rcv_lists from still registered CAN devices */ + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (dev->type == ARPHRD_CAN && dev->ml_priv) { + struct dev_rcv_lists *d = dev->ml_priv; + + BUG_ON(d->entries); + kfree(d); + dev->ml_priv = NULL; + } + } + rcu_read_unlock(); + + kfree(net->can.can_rx_alldev_list); + kfree(net->can.can_stats); + kfree(net->can.can_pstats); +} + /* * af_can module init/exit functions */ @@ -902,6 +948,11 @@ static struct notifier_block can_netdev_notifier __read_mostly = { .notifier_call = can_notifier, }; +static struct pernet_operations can_pernet_ops __read_mostly = { + .init = can_pernet_init, + .exit = can_pernet_exit, +}; + static __init int can_init(void) { /* check for correct padding to be able to use the structs similarly */ @@ -912,21 +963,12 @@ static __init int can_init(void) pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n"); - memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list)); - rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), 0, 0, NULL); if (!rcv_cache) return -ENOMEM; - if (IS_ENABLED(CONFIG_PROC_FS)) { - if (stats_timer) { - /* the statistics are updated 
every second (timer triggered) */ - setup_timer(&can_stattimer, can_stat_update, 0); - mod_timer(&can_stattimer, round_jiffies(jiffies + HZ)); - } - can_init_proc(); - } + register_pernet_subsys(&can_pernet_ops); /* protocol register */ sock_register(&can_family_ops); @@ -939,34 +981,13 @@ static __init int can_init(void) static __exit void can_exit(void) { - struct net_device *dev; - - if (IS_ENABLED(CONFIG_PROC_FS)) { - if (stats_timer) - del_timer_sync(&can_stattimer); - - can_remove_proc(); - } - /* protocol unregister */ dev_remove_pack(&canfd_packet); dev_remove_pack(&can_packet); unregister_netdevice_notifier(&can_netdev_notifier); sock_unregister(PF_CAN); - /* remove created dev_rcv_lists from still registered CAN devices */ - rcu_read_lock(); - for_each_netdev_rcu(&init_net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv) { - - struct dev_rcv_lists *d = dev->ml_priv; - - BUG_ON(d->entries); - kfree(d); - dev->ml_priv = NULL; - } - } - rcu_read_unlock(); + unregister_pernet_subsys(&can_pernet_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ diff --git a/net/can/af_can.h b/net/can/af_can.h index b86f5129e8385fe84ef671bb914e8e05c2977ca0..d0ef45bb2a72748e95f4ed0c0156e723ea01b7a3 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h @@ -110,18 +110,9 @@ struct s_pstats { unsigned long rcv_entries_max; }; -/* receive filters subscribed for 'all' CAN devices */ -extern struct dev_rcv_lists can_rx_alldev_list; - /* function prototypes for the CAN networklayer procfs (proc.c) */ -void can_init_proc(void); -void can_remove_proc(void); +void can_init_proc(struct net *net); +void can_remove_proc(struct net *net); void can_stat_update(unsigned long data); -/* structures and variables from af_can.c needed in proc.c for reading */ -extern struct timer_list can_stattimer; /* timer for statistics update */ -extern struct s_stats can_stats; /* packet statistics */ -extern struct s_pstats can_pstats; /* receive list statistics */ -extern struct hlist_head can_rx_dev_list; /* rx dispatcher structures */ - #endif /* AF_CAN_H */ diff --git a/net/can/bcm.c b/net/can/bcm.c index 95d13b233c65161cf3595a8b0036207f5c2892e3..65432633a25000361c720058f8c3e8499fff9529 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1,7 +1,7 @@ /* * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content * - * Copyright (c) 2002-2016 Volkswagen Group Electronic Research + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research * All rights reserved. 
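Note: can_pernet_ops hooks the CAN core into the namespace lifecycle: the .init callback runs for every namespace, including those that already exist when register_pernet_subsys() is called, and .exit runs on namespace teardown, which is why can_exit() no longer needs its own cleanup walk over init_net. The bare mechanism, sketched with hypothetical callbacks:

  #include <linux/module.h>
  #include <net/net_namespace.h>

  static int __net_init my_net_init(struct net *net)
  {
          pr_debug("net %p created\n", net);
          return 0;               /* nonzero aborts namespace creation */
  }

  static void __net_exit my_net_exit(struct net *net)
  {
          pr_debug("net %p going away\n", net);
  }

  static struct pernet_operations my_pernet_ops __read_mostly = {
          .init = my_net_init,
          .exit = my_net_exit,
  };

  static int __init my_module_init(void)
  {
          return register_pernet_subsys(&my_pernet_ops);
  }

  static void __exit my_module_exit(void)
  {
          unregister_pernet_subsys(&my_pernet_ops);
  }

  module_init(my_module_init);
  module_exit(my_module_exit);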
* * Redistribution and use in source and binary forms, with or without @@ -77,7 +77,7 @@ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) -#define CAN_BCM_VERSION "20161123" +#define CAN_BCM_VERSION "20170425" MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); MODULE_LICENSE("Dual BSD/GPL"); @@ -118,8 +118,6 @@ struct bcm_op { struct net_device *rx_reg_dev; }; -static struct proc_dir_entry *proc_dir; - struct bcm_sock { struct sock sk; int bound; @@ -149,7 +147,8 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) /* * procfs functions */ -static char *bcm_proc_getifname(char *result, int ifindex) +#if IS_ENABLED(CONFIG_PROC_FS) +static char *bcm_proc_getifname(struct net *net, char *result, int ifindex) { struct net_device *dev; @@ -157,7 +156,7 @@ static char *bcm_proc_getifname(char *result, int ifindex) return "any"; rcu_read_lock(); - dev = dev_get_by_index_rcu(&init_net, ifindex); + dev = dev_get_by_index_rcu(net, ifindex); if (dev) strcpy(result, dev->name); else @@ -170,7 +169,8 @@ static char *bcm_proc_getifname(char *result, int ifindex) static int bcm_proc_show(struct seq_file *m, void *v) { char ifname[IFNAMSIZ]; - struct sock *sk = (struct sock *)m->private; + struct net *net = m->private; + struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode); struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; @@ -178,7 +178,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, " / sk %pK", sk); seq_printf(m, " / bo %pK", bo); seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); - seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); + seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n"); list_for_each_entry(op, &bo->rx_ops, list) { @@ -190,7 +190,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) continue; seq_printf(m, "rx_op: %03X %-5s ", op->can_id, - bcm_proc_getifname(ifname, op->ifindex)); + bcm_proc_getifname(net, ifname, op->ifindex)); if (op->flags & CAN_FD_FRAME) seq_printf(m, "(%u)", op->nframes); @@ -219,7 +219,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) list_for_each_entry(op, &bo->tx_ops, list) { seq_printf(m, "tx_op: %03X %s ", op->can_id, - bcm_proc_getifname(ifname, op->ifindex)); + bcm_proc_getifname(net, ifname, op->ifindex)); if (op->flags & CAN_FD_FRAME) seq_printf(m, "(%u) ", op->nframes); @@ -242,7 +242,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) static int bcm_proc_open(struct inode *inode, struct file *file) { - return single_open(file, bcm_proc_show, PDE_DATA(inode)); + return single_open_net(inode, file, bcm_proc_show); } static const struct file_operations bcm_proc_fops = { @@ -252,6 +252,7 @@ static const struct file_operations bcm_proc_fops = { .llseek = seq_lseek, .release = single_release, }; +#endif /* CONFIG_PROC_FS */ /* * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface @@ -267,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op) if (!op->ifindex) return; - dev = dev_get_by_index(&init_net, op->ifindex); + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? 
*/ return; @@ -764,8 +765,8 @@ static void bcm_remove_op(struct bcm_op *op) static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) { if (op->rx_reg_dev == dev) { - can_rx_unregister(dev, op->can_id, REGMASK(op->can_id), - bcm_rx_handler, op); + can_rx_unregister(dev_net(dev), dev, op->can_id, + REGMASK(op->can_id), bcm_rx_handler, op); /* mark as removed subscription */ op->rx_reg_dev = NULL; @@ -800,7 +801,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, if (op->rx_reg_dev) { struct net_device *dev; - dev = dev_get_by_index(&init_net, + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (dev) { bcm_rx_unreg(dev, op); @@ -808,7 +809,8 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, } } } else - can_rx_unregister(NULL, op->can_id, + can_rx_unregister(sock_net(op->sk), NULL, + op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); @@ -1220,9 +1222,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (ifindex) { struct net_device *dev; - dev = dev_get_by_index(&init_net, ifindex); + dev = dev_get_by_index(sock_net(sk), ifindex); if (dev) { - err = can_rx_register(dev, op->can_id, + err = can_rx_register(sock_net(sk), dev, + op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm", sk); @@ -1232,7 +1235,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } } else - err = can_rx_register(NULL, op->can_id, + err = can_rx_register(sock_net(sk), NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm", sk); if (err) { @@ -1272,7 +1275,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk, return err; } - dev = dev_get_by_index(&init_net, ifindex); + dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) { kfree_skb(skb); return -ENODEV; @@ -1337,7 +1340,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (ifindex) { struct net_device *dev; - dev = dev_get_by_index(&init_net, ifindex); + dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) return -ENODEV; @@ -1418,7 +1421,7 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg, struct bcm_op *op; int notify_enodev = 0; - if (!net_eq(dev_net(dev), &init_net)) + if (!net_eq(dev_net(dev), sock_net(sk))) return NOTIFY_DONE; if (dev->type != ARPHRD_CAN) @@ -1490,6 +1493,7 @@ static int bcm_init(struct sock *sk) static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; + struct net *net = sock_net(sk); struct bcm_sock *bo; struct bcm_op *op, *next; @@ -1521,23 +1525,25 @@ static int bcm_release(struct socket *sock) if (op->rx_reg_dev) { struct net_device *dev; - dev = dev_get_by_index(&init_net, op->ifindex); + dev = dev_get_by_index(net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else - can_rx_unregister(NULL, op->can_id, + can_rx_unregister(net, NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); bcm_remove_op(op); } +#if IS_ENABLED(CONFIG_PROC_FS) /* remove procfs entry */ - if (proc_dir && bo->bcm_proc_read) - remove_proc_entry(bo->procname, proc_dir); + if (net->can.bcmproc_dir && bo->bcm_proc_read) + remove_proc_entry(bo->procname, net->can.bcmproc_dir); +#endif /* CONFIG_PROC_FS */ /* remove device reference */ if (bo->bound) { @@ -1560,6 +1566,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); + struct net *net = 
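Note: bcm's per-socket proc file illustrates the division of labor after the switch to single_open_net(): the seq_file's private field now carries the network namespace, while the per-entry payload (here the socket pointer passed to proc_create_data()) is fetched via PDE_DATA(). A sketch of the resulting handler pair, mirroring bcm_proc_show()/bcm_proc_open() above:

  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>

  static int example_show(struct seq_file *m, void *v)
  {
          struct net *net = m->private;                 /* from single_open_net() */
          struct sock *sk = PDE_DATA(m->file->f_inode); /* from proc_create_data() */

          seq_printf(m, "sk %p in net %p\n", sk, net);
          return 0;
  }

  static int example_open(struct inode *inode, struct file *file)
  {
          return single_open_net(inode, file, example_show);
  }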
sock_net(sk); int ret = 0; if (len < sizeof(*addr)) @@ -1576,7 +1583,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, if (addr->can_ifindex) { struct net_device *dev; - dev = dev_get_by_index(&init_net, addr->can_ifindex); + dev = dev_get_by_index(net, addr->can_ifindex); if (!dev) { ret = -ENODEV; goto fail; @@ -1595,17 +1602,19 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, bo->ifindex = 0; } - if (proc_dir) { +#if IS_ENABLED(CONFIG_PROC_FS) + if (net->can.bcmproc_dir) { /* unique socket address as filename */ sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_data(bo->procname, 0644, - proc_dir, + net->can.bcmproc_dir, &bcm_proc_fops, sk); if (!bo->bcm_proc_read) { ret = -ENOMEM; goto fail; } } +#endif /* CONFIG_PROC_FS */ bo->bound = 1; @@ -1686,6 +1695,30 @@ static const struct can_proto bcm_can_proto = { .prot = &bcm_proto, }; +static int canbcm_pernet_init(struct net *net) +{ +#if IS_ENABLED(CONFIG_PROC_FS) + /* create /proc/net/can-bcm directory */ + net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net); +#endif /* CONFIG_PROC_FS */ + + return 0; +} + +static void canbcm_pernet_exit(struct net *net) +{ +#if IS_ENABLED(CONFIG_PROC_FS) + /* remove /proc/net/can-bcm directory */ + if (net->can.bcmproc_dir) + remove_proc_entry("can-bcm", net->proc_net); +#endif /* CONFIG_PROC_FS */ +} + +static struct pernet_operations canbcm_pernet_ops __read_mostly = { + .init = canbcm_pernet_init, + .exit = canbcm_pernet_exit, +}; + static int __init bcm_module_init(void) { int err; @@ -1698,17 +1731,14 @@ static int __init bcm_module_init(void) return err; } - /* create /proc/net/can-bcm directory */ - proc_dir = proc_mkdir("can-bcm", init_net.proc_net); + register_pernet_subsys(&canbcm_pernet_ops); return 0; } static void __exit bcm_module_exit(void) { can_proto_unregister(&bcm_can_proto); - - if (proc_dir) - remove_proc_entry("can-bcm", init_net.proc_net); + unregister_pernet_subsys(&canbcm_pernet_ops); } module_init(bcm_module_init); diff --git a/net/can/gw.c b/net/can/gw.c index 7056a1a2bb70098e691ce557f05e5bc1f27cb42f..29748d844c3fac1bda35d494cdc1474fe81849a9 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -1,7 +1,7 @@ /* * gw.c - CAN frame Gateway/Router/Bridge with netlink interface * - * Copyright (c) 2011 Volkswagen Group Electronic Research + * Copyright (c) 2017 Volkswagen Group Electronic Research * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -59,7 +59,7 @@ #include #include -#define CAN_GW_VERSION "20130117" +#define CAN_GW_VERSION "20170425" #define CAN_GW_NAME "can-gw" MODULE_DESCRIPTION("PF_CAN netlink gateway"); @@ -79,9 +79,7 @@ MODULE_PARM_DESC(max_hops, __stringify(CGW_MAX_HOPS) " hops, " "default: " __stringify(CGW_DEFAULT_HOPS) ")"); -static HLIST_HEAD(cgw_list); static struct notifier_block notifier; - static struct kmem_cache *cgw_cache __read_mostly; /* structure that contains the (on-the-fly) CAN frame modifications */ @@ -438,16 +436,16 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) gwj->handled_frames++; } -static inline int cgw_register_filter(struct cgw_job *gwj) +static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) { - return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, + return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id, gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj, "gw", NULL); } -static inline void cgw_unregister_filter(struct cgw_job *gwj) +static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj) { - can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id, + can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id, gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj); } @@ -455,9 +453,8 @@ static int cgw_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; @@ -468,11 +465,11 @@ static int cgw_notifier(struct notifier_block *nb, ASSERT_RTNL(); - hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { if (gwj->src.dev == dev || gwj->dst.dev == dev) { hlist_del(&gwj->list); - cgw_unregister_filter(gwj); + cgw_unregister_filter(net, gwj); kmem_cache_free(cgw_cache, gwj); } } @@ -592,12 +589,13 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, /* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) { + struct net *net = sock_net(skb->sk); struct cgw_job *gwj = NULL; int idx = 0; int s_idx = cb->args[0]; rcu_read_lock(); - hlist_for_each_entry_rcu(gwj, &cgw_list, list) { + hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) { if (idx < s_idx) goto cont; @@ -641,7 +639,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, memset(mod, 0, sizeof(*mod)); err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, - cgw_policy); + cgw_policy, NULL); if (err < 0) return err; @@ -809,8 +807,10 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, return 0; } -static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) +static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { + struct net *net = sock_net(skb->sk); struct rtcanmsg *r; struct cgw_job *gwj; struct cf_mod mod; @@ -841,7 +841,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); /* check for updating an existing job with identical uid */ - hlist_for_each_entry(gwj, &cgw_list, list) { + hlist_for_each_entry(gwj, &net->can.cgw_list, list) { if (gwj->mod.uid != mod.uid) continue; @@ -879,7 +879,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) err 
= -ENODEV; - gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx); + gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx); if (!gwj->src.dev) goto out; @@ -887,7 +887,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) if (gwj->src.dev->type != ARPHRD_CAN) goto out; - gwj->dst.dev = __dev_get_by_index(&init_net, gwj->ccgw.dst_idx); + gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx); if (!gwj->dst.dev) goto out; @@ -897,9 +897,9 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); - err = cgw_register_filter(gwj); + err = cgw_register_filter(net, gwj); if (!err) - hlist_add_head_rcu(&gwj->list, &cgw_list); + hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); out: if (err) kmem_cache_free(cgw_cache, gwj); @@ -907,22 +907,24 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } -static void cgw_remove_all_jobs(void) +static void cgw_remove_all_jobs(struct net *net) { struct cgw_job *gwj = NULL; struct hlist_node *nx; ASSERT_RTNL(); - hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { hlist_del(&gwj->list); - cgw_unregister_filter(gwj); + cgw_unregister_filter(net, gwj); kmem_cache_free(cgw_cache, gwj); } } -static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) +static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { + struct net *net = sock_net(skb->sk); struct cgw_job *gwj = NULL; struct hlist_node *nx; struct rtcanmsg *r; @@ -951,7 +953,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) /* two interface indices both set to 0 => remove all entries */ if (!ccgw.src_idx && !ccgw.dst_idx) { - cgw_remove_all_jobs(); + cgw_remove_all_jobs(net); return 0; } @@ -960,7 +962,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); /* remove only the first matching entry */ - hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { if (gwj->flags != r->flags) continue; @@ -983,7 +985,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) continue; hlist_del(&gwj->list); - cgw_unregister_filter(gwj); + cgw_unregister_filter(net, gwj); kmem_cache_free(cgw_cache, gwj); err = 0; break; @@ -992,6 +994,24 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } +static int __net_init cangw_pernet_init(struct net *net) +{ + INIT_HLIST_HEAD(&net->can.cgw_list); + return 0; +} + +static void __net_exit cangw_pernet_exit(struct net *net) +{ + rtnl_lock(); + cgw_remove_all_jobs(net); + rtnl_unlock(); +} + +static struct pernet_operations cangw_pernet_ops = { + .init = cangw_pernet_init, + .exit = cangw_pernet_exit, +}; + static __init int cgw_module_init(void) { /* sanitize given module parameter */ @@ -1000,6 +1020,7 @@ static __init int cgw_module_init(void) pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n", max_hops); + register_pernet_subsys(&cangw_pernet_ops); cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), 0, 0, NULL); @@ -1029,10 +1050,7 @@ static __exit void cgw_module_exit(void) unregister_netdevice_notifier(¬ifier); - rtnl_lock(); - cgw_remove_all_jobs(); - rtnl_unlock(); - + unregister_pernet_subsys(&cangw_pernet_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ kmem_cache_destroy(cgw_cache); diff --git a/net/can/proc.c b/net/can/proc.c index 
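Note: cgw_remove_all_jobs() drains the per-net list with the _safe variant of the iterator so that entries can be unlinked while walking, and the pernet .exit handler above runs it under RTNL for each dying namespace. The generic pattern (the entry type is hypothetical):

  #include <linux/list.h>
  #include <linux/slab.h>

  struct job {
          struct hlist_node list;
  };

  static void flush_jobs(struct hlist_head *head)
  {
          struct job *j;
          struct hlist_node *nx;

          hlist_for_each_entry_safe(j, nx, head, list) {
                  hlist_del(&j->list);
                  kfree(j);
          }
  }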
85ef7bb0f1768fdc34e41baae297893e4758d16c..83045f00c63c147dd4664c76b17a98a9e040c45e 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -62,17 +62,6 @@ #define CAN_PROC_RCVLIST_EFF "rcvlist_eff" #define CAN_PROC_RCVLIST_ERR "rcvlist_err" -static struct proc_dir_entry *can_dir; -static struct proc_dir_entry *pde_version; -static struct proc_dir_entry *pde_stats; -static struct proc_dir_entry *pde_reset_stats; -static struct proc_dir_entry *pde_rcvlist_all; -static struct proc_dir_entry *pde_rcvlist_fil; -static struct proc_dir_entry *pde_rcvlist_inv; -static struct proc_dir_entry *pde_rcvlist_sff; -static struct proc_dir_entry *pde_rcvlist_eff; -static struct proc_dir_entry *pde_rcvlist_err; - static int user_reset; static const char rx_list_name[][8] = { @@ -86,21 +75,23 @@ static const char rx_list_name[][8] = { * af_can statistics stuff */ -static void can_init_stats(void) +static void can_init_stats(struct net *net) { + struct s_stats *can_stats = net->can.can_stats; + struct s_pstats *can_pstats = net->can.can_pstats; /* * This memset function is called from a timer context (when * can_stattimer is active which is the default) OR in a process * context (reading the proc_fs when can_stattimer is disabled). */ - memset(&can_stats, 0, sizeof(can_stats)); - can_stats.jiffies_init = jiffies; + memset(can_stats, 0, sizeof(struct s_stats)); + can_stats->jiffies_init = jiffies; - can_pstats.stats_reset++; + can_pstats->stats_reset++; if (user_reset) { user_reset = 0; - can_pstats.user_reset++; + can_pstats->user_reset++; } } @@ -126,64 +117,66 @@ static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif, void can_stat_update(unsigned long data) { + struct net *net = (struct net *)data; + struct s_stats *can_stats = net->can.can_stats; unsigned long j = jiffies; /* snapshot */ /* restart counting in timer context on user request */ if (user_reset) - can_init_stats(); + can_init_stats(net); /* restart counting on jiffies overflow */ - if (j < can_stats.jiffies_init) - can_init_stats(); + if (j < can_stats->jiffies_init) + can_init_stats(net); /* prevent overflow in calc_rate() */ - if (can_stats.rx_frames > (ULONG_MAX / HZ)) - can_init_stats(); + if (can_stats->rx_frames > (ULONG_MAX / HZ)) + can_init_stats(net); /* prevent overflow in calc_rate() */ - if (can_stats.tx_frames > (ULONG_MAX / HZ)) - can_init_stats(); + if (can_stats->tx_frames > (ULONG_MAX / HZ)) + can_init_stats(net); /* matches overflow - very improbable */ - if (can_stats.matches > (ULONG_MAX / 100)) - can_init_stats(); + if (can_stats->matches > (ULONG_MAX / 100)) + can_init_stats(net); /* calc total values */ - if (can_stats.rx_frames) - can_stats.total_rx_match_ratio = (can_stats.matches * 100) / - can_stats.rx_frames; + if (can_stats->rx_frames) + can_stats->total_rx_match_ratio = (can_stats->matches * 100) / + can_stats->rx_frames; - can_stats.total_tx_rate = calc_rate(can_stats.jiffies_init, j, - can_stats.tx_frames); - can_stats.total_rx_rate = calc_rate(can_stats.jiffies_init, j, - can_stats.rx_frames); + can_stats->total_tx_rate = calc_rate(can_stats->jiffies_init, j, + can_stats->tx_frames); + can_stats->total_rx_rate = calc_rate(can_stats->jiffies_init, j, + can_stats->rx_frames); /* calc current values */ - if (can_stats.rx_frames_delta) - can_stats.current_rx_match_ratio = - (can_stats.matches_delta * 100) / - can_stats.rx_frames_delta; + if (can_stats->rx_frames_delta) + can_stats->current_rx_match_ratio = + (can_stats->matches_delta * 100) / + can_stats->rx_frames_delta; - can_stats.current_tx_rate 
= calc_rate(0, HZ, can_stats.tx_frames_delta); - can_stats.current_rx_rate = calc_rate(0, HZ, can_stats.rx_frames_delta); + can_stats->current_tx_rate = calc_rate(0, HZ, can_stats->tx_frames_delta); + can_stats->current_rx_rate = calc_rate(0, HZ, can_stats->rx_frames_delta); /* check / update maximum values */ - if (can_stats.max_tx_rate < can_stats.current_tx_rate) - can_stats.max_tx_rate = can_stats.current_tx_rate; + if (can_stats->max_tx_rate < can_stats->current_tx_rate) + can_stats->max_tx_rate = can_stats->current_tx_rate; - if (can_stats.max_rx_rate < can_stats.current_rx_rate) - can_stats.max_rx_rate = can_stats.current_rx_rate; + if (can_stats->max_rx_rate < can_stats->current_rx_rate) + can_stats->max_rx_rate = can_stats->current_rx_rate; - if (can_stats.max_rx_match_ratio < can_stats.current_rx_match_ratio) - can_stats.max_rx_match_ratio = can_stats.current_rx_match_ratio; + if (can_stats->max_rx_match_ratio < can_stats->current_rx_match_ratio) + can_stats->max_rx_match_ratio = can_stats->current_rx_match_ratio; /* clear values for 'current rate' calculation */ - can_stats.tx_frames_delta = 0; - can_stats.rx_frames_delta = 0; - can_stats.matches_delta = 0; + can_stats->tx_frames_delta = 0; + can_stats->rx_frames_delta = 0; + can_stats->matches_delta = 0; /* restart timer (one second) */ - mod_timer(&can_stattimer, round_jiffies(jiffies + HZ)); + mod_timer(&net->can.can_stattimer, round_jiffies(jiffies + HZ)); } /* @@ -217,57 +210,61 @@ static void can_print_recv_banner(struct seq_file *m) static int can_stats_proc_show(struct seq_file *m, void *v) { + struct net *net = m->private; + struct s_stats *can_stats = net->can.can_stats; + struct s_pstats *can_pstats = net->can.can_pstats; + seq_putc(m, '\n'); - seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames); - seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames); - seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches); + seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats->tx_frames); + seq_printf(m, " %8ld received frames (RXF)\n", can_stats->rx_frames); + seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats->matches); seq_putc(m, '\n'); - if (can_stattimer.function == can_stat_update) { + if (net->can.can_stattimer.function == can_stat_update) { seq_printf(m, " %8ld %% total match ratio (RXMR)\n", - can_stats.total_rx_match_ratio); + can_stats->total_rx_match_ratio); seq_printf(m, " %8ld frames/s total tx rate (TXR)\n", - can_stats.total_tx_rate); + can_stats->total_tx_rate); seq_printf(m, " %8ld frames/s total rx rate (RXR)\n", - can_stats.total_rx_rate); + can_stats->total_rx_rate); seq_putc(m, '\n'); seq_printf(m, " %8ld %% current match ratio (CRXMR)\n", - can_stats.current_rx_match_ratio); + can_stats->current_rx_match_ratio); seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n", - can_stats.current_tx_rate); + can_stats->current_tx_rate); seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n", - can_stats.current_rx_rate); + can_stats->current_rx_rate); seq_putc(m, '\n'); seq_printf(m, " %8ld %% max match ratio (MRXMR)\n", - can_stats.max_rx_match_ratio); + can_stats->max_rx_match_ratio); seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n", - can_stats.max_tx_rate); + can_stats->max_tx_rate); seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n", - can_stats.max_rx_rate); + can_stats->max_rx_rate); seq_putc(m, '\n'); } seq_printf(m, " %8ld current receive list entries (CRCV)\n", - can_pstats.rcv_entries); + can_pstats->rcv_entries); seq_printf(m, " %8ld maximum 
receive list entries (MRCV)\n", - can_pstats.rcv_entries_max); + can_pstats->rcv_entries_max); - if (can_pstats.stats_reset) + if (can_pstats->stats_reset) seq_printf(m, "\n %8ld statistic resets (STR)\n", - can_pstats.stats_reset); + can_pstats->stats_reset); - if (can_pstats.user_reset) + if (can_pstats->user_reset) seq_printf(m, " %8ld user statistic resets (USTR)\n", - can_pstats.user_reset); + can_pstats->user_reset); seq_putc(m, '\n'); return 0; @@ -275,7 +272,7 @@ static int can_stats_proc_show(struct seq_file *m, void *v) static int can_stats_proc_open(struct inode *inode, struct file *file) { - return single_open(file, can_stats_proc_show, NULL); + return single_open_net(inode, file, can_stats_proc_show); } static const struct file_operations can_stats_proc_fops = { @@ -288,25 +285,28 @@ static const struct file_operations can_stats_proc_fops = { static int can_reset_stats_proc_show(struct seq_file *m, void *v) { + struct net *net = m->private; + struct s_pstats *can_pstats = net->can.can_pstats; + struct s_stats *can_stats = net->can.can_stats; + user_reset = 1; - if (can_stattimer.function == can_stat_update) { + if (net->can.can_stattimer.function == can_stat_update) { seq_printf(m, "Scheduled statistic reset #%ld.\n", - can_pstats.stats_reset + 1); - + can_pstats->stats_reset + 1); } else { - if (can_stats.jiffies_init != jiffies) - can_init_stats(); + if (can_stats->jiffies_init != jiffies) + can_init_stats(net); seq_printf(m, "Performed statistic reset #%ld.\n", - can_pstats.stats_reset); + can_pstats->stats_reset); } return 0; } static int can_reset_stats_proc_open(struct inode *inode, struct file *file) { - return single_open(file, can_reset_stats_proc_show, NULL); + return single_open_net(inode, file, can_reset_stats_proc_show); } static const struct file_operations can_reset_stats_proc_fops = { @@ -325,7 +325,7 @@ static int can_version_proc_show(struct seq_file *m, void *v) static int can_version_proc_open(struct inode *inode, struct file *file) { - return single_open(file, can_version_proc_show, NULL); + return single_open_net(inode, file, can_version_proc_show); } static const struct file_operations can_version_proc_fops = { @@ -351,20 +351,21 @@ static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx, static int can_rcvlist_proc_show(struct seq_file *m, void *v) { /* double cast to prevent GCC warning */ - int idx = (int)(long)m->private; + int idx = (int)(long)PDE_DATA(m->file->f_inode); struct net_device *dev; struct dev_rcv_lists *d; + struct net *net = m->private; seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); rcu_read_lock(); /* receive list for 'all' CAN devices (dev == NULL) */ - d = &can_rx_alldev_list; + d = net->can.can_rx_alldev_list; can_rcvlist_proc_show_one(m, idx, NULL, d); /* receive list for registered CAN devices */ - for_each_netdev_rcu(&init_net, dev) { + for_each_netdev_rcu(net, dev) { if (dev->type == ARPHRD_CAN && dev->ml_priv) can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv); } @@ -377,7 +378,7 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v) static int can_rcvlist_proc_open(struct inode *inode, struct file *file) { - return single_open(file, can_rcvlist_proc_show, PDE_DATA(inode)); + return single_open_net(inode, file, can_rcvlist_proc_show); } static const struct file_operations can_rcvlist_proc_fops = { @@ -417,6 +418,7 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) { struct net_device *dev; struct dev_rcv_lists *d; + struct net *net = m->private; /* RX_SFF */ 
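Note: each receive-list dumper now walks only the devices of the namespace that owns the proc file, rather than for_each_netdev_rcu(&init_net, ...). The per-device dev_rcv_lists still rides in dev->ml_priv; a sketch of the walk:

  static void walk_can_devices(struct net *net)
  {
          struct net_device *dev;

          rcu_read_lock();
          for_each_netdev_rcu(net, dev) {
                  if (dev->type == ARPHRD_CAN && dev->ml_priv) {
                          struct dev_rcv_lists *d = dev->ml_priv;

                          pr_debug("%s: %d receive list entries\n",
                                   dev->name, d->entries);
                  }
          }
          rcu_read_unlock();
  }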
seq_puts(m, "\nreceive list 'rx_sff':\n"); @@ -424,11 +426,11 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) rcu_read_lock(); /* sff receive list for 'all' CAN devices (dev == NULL) */ - d = &can_rx_alldev_list; + d = net->can.can_rx_alldev_list; can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff)); /* sff receive list for registered CAN devices */ - for_each_netdev_rcu(&init_net, dev) { + for_each_netdev_rcu(net, dev) { if (dev->type == ARPHRD_CAN && dev->ml_priv) { d = dev->ml_priv; can_rcvlist_proc_show_array(m, dev, d->rx_sff, @@ -444,7 +446,7 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file) { - return single_open(file, can_rcvlist_sff_proc_show, NULL); + return single_open_net(inode, file, can_rcvlist_sff_proc_show); } static const struct file_operations can_rcvlist_sff_proc_fops = { @@ -460,6 +462,7 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v) { struct net_device *dev; struct dev_rcv_lists *d; + struct net *net = m->private; /* RX_EFF */ seq_puts(m, "\nreceive list 'rx_eff':\n"); @@ -467,11 +470,11 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v) rcu_read_lock(); /* eff receive list for 'all' CAN devices (dev == NULL) */ - d = &can_rx_alldev_list; + d = net->can.can_rx_alldev_list; can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff)); /* eff receive list for registered CAN devices */ - for_each_netdev_rcu(&init_net, dev) { + for_each_netdev_rcu(net, dev) { if (dev->type == ARPHRD_CAN && dev->ml_priv) { d = dev->ml_priv; can_rcvlist_proc_show_array(m, dev, d->rx_eff, @@ -487,7 +490,7 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v) static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file) { - return single_open(file, can_rcvlist_eff_proc_show, NULL); + return single_open_net(inode, file, can_rcvlist_eff_proc_show); } static const struct file_operations can_rcvlist_eff_proc_fops = { @@ -498,82 +501,86 @@ static const struct file_operations can_rcvlist_eff_proc_fops = { .release = single_release, }; -/* - * proc utility functions - */ - -static void can_remove_proc_readentry(const char *name) -{ - if (can_dir) - remove_proc_entry(name, can_dir); -} - /* * can_init_proc - create main CAN proc directory and procfs entries */ -void can_init_proc(void) +void can_init_proc(struct net *net) { /* create /proc/net/can directory */ - can_dir = proc_mkdir("can", init_net.proc_net); + net->can.proc_dir = proc_net_mkdir(net, "can", net->proc_net); - if (!can_dir) { - pr_info("can: failed to create /proc/net/can.\n"); + if (!net->can.proc_dir) { + printk(KERN_INFO "can: failed to create /proc/net/can . 
" + "CONFIG_PROC_FS missing?\n"); return; } /* own procfs entries from the AF_CAN core */ - pde_version = proc_create(CAN_PROC_VERSION, 0644, can_dir, - &can_version_proc_fops); - pde_stats = proc_create(CAN_PROC_STATS, 0644, can_dir, - &can_stats_proc_fops); - pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir, - &can_reset_stats_proc_fops); - pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir, - &can_rcvlist_proc_fops, (void *)RX_ERR); - pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir, - &can_rcvlist_proc_fops, (void *)RX_ALL); - pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir, - &can_rcvlist_proc_fops, (void *)RX_FIL); - pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir, - &can_rcvlist_proc_fops, (void *)RX_INV); - pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir, - &can_rcvlist_eff_proc_fops); - pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir, - &can_rcvlist_sff_proc_fops); + net->can.pde_version = proc_create(CAN_PROC_VERSION, 0644, + net->can.proc_dir, + &can_version_proc_fops); + net->can.pde_stats = proc_create(CAN_PROC_STATS, 0644, + net->can.proc_dir, + &can_stats_proc_fops); + net->can.pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, + net->can.proc_dir, + &can_reset_stats_proc_fops); + net->can.pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, + net->can.proc_dir, + &can_rcvlist_proc_fops, + (void *)RX_ERR); + net->can.pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, + net->can.proc_dir, + &can_rcvlist_proc_fops, + (void *)RX_ALL); + net->can.pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, + net->can.proc_dir, + &can_rcvlist_proc_fops, + (void *)RX_FIL); + net->can.pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, + net->can.proc_dir, + &can_rcvlist_proc_fops, + (void *)RX_INV); + net->can.pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, + net->can.proc_dir, + &can_rcvlist_eff_proc_fops); + net->can.pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, + net->can.proc_dir, + &can_rcvlist_sff_proc_fops); } /* * can_remove_proc - remove procfs entries and main CAN proc directory */ -void can_remove_proc(void) +void can_remove_proc(struct net *net) { - if (pde_version) - can_remove_proc_readentry(CAN_PROC_VERSION); + if (net->can.pde_version) + remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir); - if (pde_stats) - can_remove_proc_readentry(CAN_PROC_STATS); + if (net->can.pde_stats) + remove_proc_entry(CAN_PROC_STATS, net->can.proc_dir); - if (pde_reset_stats) - can_remove_proc_readentry(CAN_PROC_RESET_STATS); + if (net->can.pde_reset_stats) + remove_proc_entry(CAN_PROC_RESET_STATS, net->can.proc_dir); - if (pde_rcvlist_err) - can_remove_proc_readentry(CAN_PROC_RCVLIST_ERR); + if (net->can.pde_rcvlist_err) + remove_proc_entry(CAN_PROC_RCVLIST_ERR, net->can.proc_dir); - if (pde_rcvlist_all) - can_remove_proc_readentry(CAN_PROC_RCVLIST_ALL); + if (net->can.pde_rcvlist_all) + remove_proc_entry(CAN_PROC_RCVLIST_ALL, net->can.proc_dir); - if (pde_rcvlist_fil) - can_remove_proc_readentry(CAN_PROC_RCVLIST_FIL); + if (net->can.pde_rcvlist_fil) + remove_proc_entry(CAN_PROC_RCVLIST_FIL, net->can.proc_dir); - if (pde_rcvlist_inv) - can_remove_proc_readentry(CAN_PROC_RCVLIST_INV); + if (net->can.pde_rcvlist_inv) + remove_proc_entry(CAN_PROC_RCVLIST_INV, net->can.proc_dir); - if (pde_rcvlist_eff) - can_remove_proc_readentry(CAN_PROC_RCVLIST_EFF); + if (net->can.pde_rcvlist_eff) + 
remove_proc_entry(CAN_PROC_RCVLIST_EFF, net->can.proc_dir); - if (pde_rcvlist_sff) - can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF); + if (net->can.pde_rcvlist_sff) + remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir); - if (can_dir) - remove_proc_entry("can", init_net.proc_net); + if (net->can.proc_dir) + remove_proc_entry("can", net->proc_net); } diff --git a/net/can/raw.c b/net/can/raw.c index 6dc546a06673ff41fc121c546ebd0567bb0da05f..864c80dbdb72d11cdd24183cec3769222b339ccc 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -181,20 +181,21 @@ static void raw_rcv(struct sk_buff *oskb, void *data) kfree_skb(skb); } -static int raw_enable_filters(struct net_device *dev, struct sock *sk, - struct can_filter *filter, int count) +static int raw_enable_filters(struct net *net, struct net_device *dev, + struct sock *sk, struct can_filter *filter, + int count) { int err = 0; int i; for (i = 0; i < count; i++) { - err = can_rx_register(dev, filter[i].can_id, + err = can_rx_register(net, dev, filter[i].can_id, filter[i].can_mask, raw_rcv, sk, "raw", sk); if (err) { /* clean up successfully registered filters */ while (--i >= 0) - can_rx_unregister(dev, filter[i].can_id, + can_rx_unregister(net, dev, filter[i].can_id, filter[i].can_mask, raw_rcv, sk); break; @@ -204,57 +205,62 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk, return err; } -static int raw_enable_errfilter(struct net_device *dev, struct sock *sk, - can_err_mask_t err_mask) +static int raw_enable_errfilter(struct net *net, struct net_device *dev, + struct sock *sk, can_err_mask_t err_mask) { int err = 0; if (err_mask) - err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, + err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG, raw_rcv, sk, "raw", sk); return err; } -static void raw_disable_filters(struct net_device *dev, struct sock *sk, - struct can_filter *filter, int count) +static void raw_disable_filters(struct net *net, struct net_device *dev, + struct sock *sk, struct can_filter *filter, + int count) { int i; for (i = 0; i < count; i++) - can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask, - raw_rcv, sk); + can_rx_unregister(net, dev, filter[i].can_id, + filter[i].can_mask, raw_rcv, sk); } -static inline void raw_disable_errfilter(struct net_device *dev, +static inline void raw_disable_errfilter(struct net *net, + struct net_device *dev, struct sock *sk, can_err_mask_t err_mask) { if (err_mask) - can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG, + can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG, raw_rcv, sk); } -static inline void raw_disable_allfilters(struct net_device *dev, +static inline void raw_disable_allfilters(struct net *net, + struct net_device *dev, struct sock *sk) { struct raw_sock *ro = raw_sk(sk); - raw_disable_filters(dev, sk, ro->filter, ro->count); - raw_disable_errfilter(dev, sk, ro->err_mask); + raw_disable_filters(net, dev, sk, ro->filter, ro->count); + raw_disable_errfilter(net, dev, sk, ro->err_mask); } -static int raw_enable_allfilters(struct net_device *dev, struct sock *sk) +static int raw_enable_allfilters(struct net *net, struct net_device *dev, + struct sock *sk) { struct raw_sock *ro = raw_sk(sk); int err; - err = raw_enable_filters(dev, sk, ro->filter, ro->count); + err = raw_enable_filters(net, dev, sk, ro->filter, ro->count); if (!err) { - err = raw_enable_errfilter(dev, sk, ro->err_mask); + err = raw_enable_errfilter(net, dev, sk, ro->err_mask); if (err) - raw_disable_filters(dev, sk, ro->filter, ro->count); + raw_disable_filters(net, 
dev, sk, ro->filter, + ro->count); } return err; @@ -267,7 +273,7 @@ static int raw_notifier(struct notifier_block *nb, struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); struct sock *sk = &ro->sk; - if (!net_eq(dev_net(dev), &init_net)) + if (!net_eq(dev_net(dev), sock_net(sk))) return NOTIFY_DONE; if (dev->type != ARPHRD_CAN) @@ -282,7 +288,7 @@ static int raw_notifier(struct notifier_block *nb, lock_sock(sk); /* remove current filters & unregister */ if (ro->bound) - raw_disable_allfilters(dev, sk); + raw_disable_allfilters(dev_net(dev), dev, sk); if (ro->count > 1) kfree(ro->filter); @@ -358,13 +364,13 @@ static int raw_release(struct socket *sock) if (ro->ifindex) { struct net_device *dev; - dev = dev_get_by_index(&init_net, ro->ifindex); + dev = dev_get_by_index(sock_net(sk), ro->ifindex); if (dev) { - raw_disable_allfilters(dev, sk); + raw_disable_allfilters(dev_net(dev), dev, sk); dev_put(dev); } } else - raw_disable_allfilters(NULL, sk); + raw_disable_allfilters(sock_net(sk), NULL, sk); } if (ro->count > 1) @@ -404,7 +410,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (addr->can_ifindex) { struct net_device *dev; - dev = dev_get_by_index(&init_net, addr->can_ifindex); + dev = dev_get_by_index(sock_net(sk), addr->can_ifindex); if (!dev) { err = -ENODEV; goto out; @@ -420,13 +426,13 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) ifindex = dev->ifindex; /* filters set by default/setsockopt */ - err = raw_enable_allfilters(dev, sk); + err = raw_enable_allfilters(sock_net(sk), dev, sk); dev_put(dev); } else { ifindex = 0; /* filters set by default/setsockopt */ - err = raw_enable_allfilters(NULL, sk); + err = raw_enable_allfilters(sock_net(sk), NULL, sk); } if (!err) { @@ -435,13 +441,15 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (ro->ifindex) { struct net_device *dev; - dev = dev_get_by_index(&init_net, ro->ifindex); + dev = dev_get_by_index(sock_net(sk), + ro->ifindex); if (dev) { - raw_disable_allfilters(dev, sk); + raw_disable_allfilters(dev_net(dev), + dev, sk); dev_put(dev); } } else - raw_disable_allfilters(NULL, sk); + raw_disable_allfilters(sock_net(sk), NULL, sk); } ro->ifindex = ifindex; ro->bound = 1; @@ -517,15 +525,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, lock_sock(sk); if (ro->bound && ro->ifindex) - dev = dev_get_by_index(&init_net, ro->ifindex); + dev = dev_get_by_index(sock_net(sk), ro->ifindex); if (ro->bound) { /* (try to) register the new filters */ if (count == 1) - err = raw_enable_filters(dev, sk, &sfilter, 1); + err = raw_enable_filters(sock_net(sk), dev, sk, + &sfilter, 1); else - err = raw_enable_filters(dev, sk, filter, - count); + err = raw_enable_filters(sock_net(sk), dev, sk, + filter, count); if (err) { if (count > 1) kfree(filter); @@ -533,7 +542,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, } /* remove old filter registrations */ - raw_disable_filters(dev, sk, ro->filter, ro->count); + raw_disable_filters(sock_net(sk), dev, sk, ro->filter, + ro->count); } /* remove old filter space */ @@ -569,18 +579,20 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, lock_sock(sk); if (ro->bound && ro->ifindex) - dev = dev_get_by_index(&init_net, ro->ifindex); + dev = dev_get_by_index(sock_net(sk), ro->ifindex); /* remove current error mask */ if (ro->bound) { /* (try to) register the new err_mask */ - err = raw_enable_errfilter(dev, sk, err_mask); + err = 
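For context, the filter registrations that these raw_enable_*() changes make namespace-aware are driven from userspace through the standard CAN_RAW socket API, roughly as below (the interface name is an example; error handling is minimal):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
        struct can_filter flt = { .can_id = 0x123, .can_mask = CAN_SFF_MASK };
        struct sockaddr_can addr = { .can_family = AF_CAN };
        struct ifreq ifr;
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        if (s < 0)
                return 1;
        /* lands in raw_setsockopt() -> raw_enable_filters() above */
        setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &flt, sizeof(flt));
        strcpy(ifr.ifr_name, "can0");
        if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
                return 1;
        addr.can_ifindex = ifr.ifr_ifindex;
        /* takes the raw_bind() path patched above; the socket's own
         * namespace (sock_net(sk)) now resolves the ifindex */
        return bind(s, (struct sockaddr *)&addr, sizeof(addr)) ? 1 : 0;
}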
raw_enable_errfilter(sock_net(sk), dev, sk, + err_mask); if (err) goto out_err; /* remove old err_mask registration */ - raw_disable_errfilter(dev, sk, ro->err_mask); + raw_disable_errfilter(sock_net(sk), dev, sk, + ro->err_mask); } /* link new err_mask to the socket */ @@ -741,7 +753,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) return -EINVAL; } - dev = dev_get_by_index(&init_net, ifindex); + dev = dev_get_by_index(sock_net(sk), ifindex); if (!dev) return -ENXIO; diff --git a/net/core/datagram.c b/net/core/datagram.c index d797baa69e430b98d2086fd96505908aba2ff98b..db1866f2ffcf6e3ed505ce5c1c5a4f11e6a908bd 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -256,8 +256,12 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags, } spin_unlock_irqrestore(&queue->lock, cpu_flags); - } while (sk_can_busy_loop(sk) && - sk_busy_loop(sk, flags & MSG_DONTWAIT)); + + if (!sk_can_busy_loop(sk)) + break; + + sk_busy_loop(sk, flags & MSG_DONTWAIT); + } while (!skb_queue_empty(&sk->sk_receive_queue)); error = -EAGAIN; diff --git a/net/core/dev.c b/net/core/dev.c index c57878be09d2e9bc33fb926db2a8eb32f6981726..d07aa5ffb511e408b166624d424130c5105560ca 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -95,6 +95,7 @@ #include #include #include +#include #include #include #include @@ -2975,6 +2976,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device __skb_linearize(skb)) goto out_kfree_skb; + if (validate_xmit_xfrm(skb, features)) + goto out_kfree_skb; + /* If packet is not checksummed and device does not * support checksumming for this protocol, complete * checksumming here. @@ -3444,6 +3448,7 @@ EXPORT_SYMBOL(netdev_max_backlog); int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; +unsigned int __read_mostly netdev_budget_usecs = 2000; int weight_p __read_mostly = 64; /* old backlog weight */ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ @@ -4250,6 +4255,125 @@ static int __netif_receive_skb(struct sk_buff *skb) return ret; } +static struct static_key generic_xdp_needed __read_mostly; + +static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp) +{ + struct bpf_prog *new = xdp->prog; + int ret = 0; + + switch (xdp->command) { + case XDP_SETUP_PROG: { + struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); + + rcu_assign_pointer(dev->xdp_prog, new); + if (old) + bpf_prog_put(old); + + if (old && !new) { + static_key_slow_dec(&generic_xdp_needed); + } else if (new && !old) { + static_key_slow_inc(&generic_xdp_needed); + dev_disable_lro(dev); + } + break; + } + + case XDP_QUERY_PROG: + xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static u32 netif_receive_generic_xdp(struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + struct xdp_buff xdp; + u32 act = XDP_DROP; + void *orig_data; + int hlen, off; + u32 mac_len; + + /* Reinjected packets coming from act_mirred or similar should + * not get XDP generic processing. + */ + if (skb_cloned(skb)) + return XDP_PASS; + + if (skb_linearize(skb)) + goto do_drop; + + /* The XDP program wants to see the packet starting at the MAC + * header. 
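generic_xdp_install() above uses a static key so the receive hot path pays nothing while no generic XDP program is attached anywhere: the branch is patched in on the first attach and patched back out on the last detach. The shape of that idiom, with illustrative names:

static struct static_key my_hook_needed __read_mostly;

static int my_hook(struct sk_buff *skb)
{
        return 0;               /* placeholder for the real work */
}

static void my_hook_attach(void)
{
        static_key_slow_inc(&my_hook_needed);   /* 0 -> 1 patches code in */
}

static void my_hook_detach(void)
{
        static_key_slow_dec(&my_hook_needed);   /* 1 -> 0 patches it out */
}

static int rx_fast_path(struct sk_buff *skb)
{
        if (static_key_false(&my_hook_needed))  /* near-free when off */
                return my_hook(skb);
        return 0;
}

Note the counting in generic_xdp_install(): it only increments on the !old && new transition and decrements on old && !new, so replacing one program with another leaves the key's count untouched.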
+ */ + mac_len = skb->data - skb_mac_header(skb); + hlen = skb_headlen(skb) + mac_len; + xdp.data = skb->data - mac_len; + xdp.data_end = xdp.data + hlen; + xdp.data_hard_start = skb->data - skb_headroom(skb); + orig_data = xdp.data; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + off = xdp.data - orig_data; + if (off > 0) + __skb_pull(skb, off); + else if (off < 0) + __skb_push(skb, -off); + + switch (act) { + case XDP_TX: + __skb_push(skb, mac_len); + /* fall through */ + case XDP_PASS: + break; + + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(skb->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + do_drop: + kfree_skb(skb); + break; + } + + return act; +} + +/* When doing generic XDP we have to bypass the qdisc layer and the + * network taps in order to match in-driver-XDP behavior. + */ +static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) +{ + struct net_device *dev = skb->dev; + struct netdev_queue *txq; + bool free_skb = true; + int cpu, rc; + + txq = netdev_pick_tx(dev, skb, NULL); + cpu = smp_processor_id(); + HARD_TX_LOCK(dev, txq, cpu); + if (!netif_xmit_stopped(txq)) { + rc = netdev_start_xmit(skb, dev, txq, 0); + if (dev_xmit_complete(rc)) + free_skb = false; + } + HARD_TX_UNLOCK(dev, txq); + if (free_skb) { + trace_xdp_exception(dev, xdp_prog, XDP_TX); + kfree_skb(skb); + } +} + static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; @@ -4261,6 +4385,21 @@ static int netif_receive_skb_internal(struct sk_buff *skb) rcu_read_lock(); + if (static_key_false(&generic_xdp_needed)) { + struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog); + + if (xdp_prog) { + u32 act = netif_receive_generic_xdp(skb, xdp_prog); + + if (act != XDP_PASS) { + rcu_read_unlock(); + if (act == XDP_TX) + generic_xdp_tx(skb, xdp_prog); + return NET_RX_DROP; + } + } + } + #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; @@ -4493,7 +4632,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff enum gro_result ret; int grow; - if (!(skb->dev->features & NETIF_F_GRO)) + if (netif_elide_gro(skb->dev)) goto normal; if (skb->csum_bad) @@ -5063,27 +5202,28 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) do_softirq(); } -bool sk_busy_loop(struct sock *sk, int nonblock) +void napi_busy_loop(unsigned int napi_id, + bool (*loop_end)(void *, unsigned long), + void *loop_end_arg) { - unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0; + unsigned long start_time = loop_end ? 
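Any ordinary XDP program can run under this generic hook; nothing about the program itself changes. A minimal example in restricted-C BPF (the section name and clang -target bpf build are the usual conventions; attachment goes through dev_change_xdp_fd() below, e.g. via ip link set dev eth0 xdp obj xdp_drop.o, and XDP_FLAGS_SKB_MODE, exposed by recent iproute2 versions as the xdpgeneric keyword, forces the generic path even where native support exists):

#include <linux/bpf.h>

#ifndef __section
#define __section(NAME) __attribute__((section(NAME), used))
#endif

__section("prog")
int xdp_drop_all(struct xdp_md *ctx)
{
        /* under the generic hook, netif_receive_generic_xdp() above
         * frees the skb when we return XDP_DROP */
        return XDP_DROP;
}

char _license[] __section("license") = "GPL";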
busy_loop_current_time() : 0; int (*napi_poll)(struct napi_struct *napi, int budget); void *have_poll_lock = NULL; struct napi_struct *napi; - int rc; restart: - rc = false; napi_poll = NULL; rcu_read_lock(); - napi = napi_by_id(sk->sk_napi_id); + napi = napi_by_id(napi_id); if (!napi) goto out; preempt_disable(); for (;;) { - rc = 0; + int work = 0; + local_bh_disable(); if (!napi_poll) { unsigned long val = READ_ONCE(napi->state); @@ -5101,16 +5241,15 @@ bool sk_busy_loop(struct sock *sk, int nonblock) have_poll_lock = netpoll_poll_lock(napi); napi_poll = napi->poll; } - rc = napi_poll(napi, BUSY_POLL_BUDGET); - trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); + work = napi_poll(napi, BUSY_POLL_BUDGET); + trace_napi_poll(napi, work, BUSY_POLL_BUDGET); count: - if (rc > 0) - __NET_ADD_STATS(sock_net(sk), - LINUX_MIB_BUSYPOLLRXPACKETS, rc); + if (work > 0) + __NET_ADD_STATS(dev_net(napi->dev), + LINUX_MIB_BUSYPOLLRXPACKETS, work); local_bh_enable(); - if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) || - busy_loop_timeout(end_time)) + if (!loop_end || loop_end(loop_end_arg, start_time)) break; if (unlikely(need_resched())) { @@ -5119,9 +5258,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock) preempt_enable(); rcu_read_unlock(); cond_resched(); - rc = !skb_queue_empty(&sk->sk_receive_queue); - if (rc || busy_loop_timeout(end_time)) - return rc; + if (loop_end(loop_end_arg, start_time)) + return; goto restart; } cpu_relax(); @@ -5129,12 +5267,10 @@ bool sk_busy_loop(struct sock *sk, int nonblock) if (napi_poll) busy_poll_stop(napi, have_poll_lock); preempt_enable(); - rc = !skb_queue_empty(&sk->sk_receive_queue); out: rcu_read_unlock(); - return rc; } -EXPORT_SYMBOL(sk_busy_loop); +EXPORT_SYMBOL(napi_busy_loop); #endif /* CONFIG_NET_RX_BUSY_POLL */ @@ -5146,10 +5282,10 @@ static void napi_hash_add(struct napi_struct *napi) spin_lock(&napi_hash_lock); - /* 0..NR_CPUS+1 range is reserved for sender_cpu use */ + /* 0..NR_CPUS range is reserved for sender_cpu use */ do { - if (unlikely(++napi_gen_id < NR_CPUS + 1)) - napi_gen_id = NR_CPUS + 1; + if (unlikely(++napi_gen_id < MIN_NAPI_ID)) + napi_gen_id = MIN_NAPI_ID; } while (napi_by_id(napi_gen_id)); napi->napi_id = napi_gen_id; @@ -5313,7 +5449,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); - unsigned long time_limit = jiffies + 2; + unsigned long time_limit = jiffies + + usecs_to_jiffies(netdev_budget_usecs); int budget = netdev_budget; LIST_HEAD(list); LIST_HEAD(repoll); @@ -6717,13 +6854,16 @@ EXPORT_SYMBOL(dev_change_proto_down); /** * dev_change_xdp_fd - set or clear a bpf program for a device rx path * @dev: device + * @extack: netlink extended ack * @fd: new program fd or negative value to clear * @flags: xdp-related flags * * Set or clear a bpf program for a device */ -int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags) +int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, + int fd, u32 flags) { + int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp); const struct net_device_ops *ops = dev->netdev_ops; struct bpf_prog *prog = NULL; struct netdev_xdp xdp; @@ -6731,14 +6871,16 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags) ASSERT_RTNL(); - if (!ops->ndo_xdp) - return -EOPNOTSUPP; + xdp_op = ops->ndo_xdp; + if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) + xdp_op = generic_xdp_install; + if (fd >= 0) { if (flags & 
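napi_busy_loop() now delegates its exit condition to a loop_end callback. A sketch of a socket-flavoured caller under these assumptions (it mirrors, but is not necessarily identical to, the sk_busy_loop() wrapper this series layers on top; busy_loop_current_time() returns approximate microseconds and sk_ll_usec is the per-socket busy-poll budget):

static bool sk_loop_end(void *arg, unsigned long start_time)
{
        struct sock *sk = arg;

        /* stop once data is queued or the budget is burned */
        return !skb_queue_empty(&sk->sk_receive_queue) ||
               busy_loop_current_time() - start_time >
                        READ_ONCE(sk->sk_ll_usec);
}

static void my_sk_busy_poll(struct sock *sk)
{
        unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

        /* ids below MIN_NAPI_ID are reserved for sender_cpu, see above */
        if (napi_id >= MIN_NAPI_ID)
                napi_busy_loop(napi_id, sk_loop_end, sk);
}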
XDP_FLAGS_UPDATE_IF_NOEXIST) { memset(&xdp, 0, sizeof(xdp)); xdp.command = XDP_QUERY_PROG; - err = ops->ndo_xdp(dev, &xdp); + err = xdp_op(dev, &xdp); if (err < 0) return err; if (xdp.prog_attached) @@ -6752,9 +6894,10 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags) memset(&xdp, 0, sizeof(xdp)); xdp.command = XDP_SETUP_PROG; + xdp.extack = extack; xdp.prog = prog; - err = ops->ndo_xdp(dev, &xdp); + err = xdp_op(dev, &xdp); if (err < 0 && prog) bpf_prog_put(prog); @@ -7105,13 +7248,10 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev, else netif_dormant_off(dev); - if (netif_carrier_ok(rootdev)) { - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - } else { - if (netif_carrier_ok(dev)) - netif_carrier_off(dev); - } + if (netif_carrier_ok(rootdev)) + netif_carrier_on(dev); + else + netif_carrier_off(dev); } EXPORT_SYMBOL(netif_stacked_transfer_operstate); @@ -7794,6 +7934,7 @@ EXPORT_SYMBOL(alloc_netdev_mqs); void free_netdev(struct net_device *dev) { struct napi_struct *p, *n; + struct bpf_prog *prog; might_sleep(); netif_free_tx_queues(dev); @@ -7812,6 +7953,12 @@ void free_netdev(struct net_device *dev) free_percpu(dev->pcpu_refcnt); dev->pcpu_refcnt = NULL; + prog = rcu_dereference_protected(dev->xdp_prog, 1); + if (prog) { + bpf_prog_put(prog); + static_key_slow_dec(&generic_xdp_needed); + } + /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED) { netdev_freemem(dev); diff --git a/net/core/devlink.c b/net/core/devlink.c index e9c1e6acfb6d196d4373dcc36bcf76577952dd32..b0b87a292e7ccac2221a2425510b3c28e1df97f7 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1397,10 +1397,10 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, u32 seq, int flags) { const struct devlink_ops *ops = devlink->ops; + u8 inline_mode, encap_mode; void *hdr; int err = 0; u16 mode; - u8 inline_mode; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) @@ -1429,6 +1429,15 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, goto nla_put_failure; } + if (ops->eswitch_encap_mode_get) { + err = ops->eswitch_encap_mode_get(devlink, &encap_mode); + if (err) + goto nla_put_failure; + err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode); + if (err) + goto nla_put_failure; + } + genlmsg_end(msg, hdr); return 0; @@ -1468,9 +1477,9 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, { struct devlink *devlink = info->user_ptr[0]; const struct devlink_ops *ops = devlink->ops; - u16 mode; - u8 inline_mode; + u8 inline_mode, encap_mode; int err = 0; + u16 mode; if (!ops) return -EOPNOTSUPP; @@ -1494,7 +1503,695 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, return err; } + if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) { + if (!ops->eswitch_encap_mode_set) + return -EOPNOTSUPP; + encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]); + err = ops->eswitch_encap_mode_set(devlink, encap_mode); + if (err) + return err; + } + + return 0; +} + +int devlink_dpipe_match_put(struct sk_buff *skb, + struct devlink_dpipe_match *match) +{ + struct devlink_dpipe_header *header = match->header; + struct devlink_dpipe_field *field = &header->fields[match->field_id]; + struct nlattr *match_attr; + + match_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_MATCH); + if (!match_attr) + return -EMSGSIZE; + + if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) || + nla_put_u32(skb, 
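On the devlink side, DEVLINK_ATTR_ESWITCH_ENCAP_MODE gives userspace a knob for e-switch encapsulation offload (with a sufficiently recent iproute2, something like devlink dev eswitch set pci/0000:03:00.0 encap enable; exact syntax depends on the tool version). A driver-side sketch of the two new callbacks; my_priv and its field are illustrative, and the DEVLINK_ESWITCH_ENCAP_MODE_* values are assumed to be the uapi enum added alongside this attribute:

struct my_priv {
        bool encap_enabled;
};

static int my_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap_mode)
{
        struct my_priv *priv = devlink_priv(devlink);

        *encap_mode = priv->encap_enabled ? DEVLINK_ESWITCH_ENCAP_MODE_BASIC
                                          : DEVLINK_ESWITCH_ENCAP_MODE_NONE;
        return 0;
}

static int my_eswitch_encap_mode_set(struct devlink *devlink, u8 encap_mode)
{
        struct my_priv *priv = devlink_priv(devlink);

        if (encap_mode != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
            encap_mode != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
                return -EOPNOTSUPP;
        priv->encap_enabled = (encap_mode == DEVLINK_ESWITCH_ENCAP_MODE_BASIC);
        return 0;
}

static const struct devlink_ops my_devlink_ops = {
        .eswitch_encap_mode_get = my_eswitch_encap_mode_get,
        .eswitch_encap_mode_set = my_eswitch_encap_mode_set,
};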
DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) || + nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global)) + goto nla_put_failure; + + nla_nest_end(skb, match_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, match_attr); + return -EMSGSIZE; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_match_put); + +static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table, + struct sk_buff *skb) +{ + struct nlattr *matches_attr; + + matches_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE_MATCHES); + if (!matches_attr) + return -EMSGSIZE; + + if (table->table_ops->matches_dump(table->priv, skb)) + goto nla_put_failure; + + nla_nest_end(skb, matches_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, matches_attr); + return -EMSGSIZE; +} + +int devlink_dpipe_action_put(struct sk_buff *skb, + struct devlink_dpipe_action *action) +{ + struct devlink_dpipe_header *header = action->header; + struct devlink_dpipe_field *field = &header->fields[action->field_id]; + struct nlattr *action_attr; + + action_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_ACTION); + if (!action_attr) + return -EMSGSIZE; + + if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) || + nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global)) + goto nla_put_failure; + + nla_nest_end(skb, action_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, action_attr); + return -EMSGSIZE; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_action_put); + +static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table, + struct sk_buff *skb) +{ + struct nlattr *actions_attr; + + actions_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE_ACTIONS); + if (!actions_attr) + return -EMSGSIZE; + + if (table->table_ops->actions_dump(table->priv, skb)) + goto nla_put_failure; + + nla_nest_end(skb, actions_attr); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, actions_attr); + return -EMSGSIZE; +} + +static int devlink_dpipe_table_put(struct sk_buff *skb, + struct devlink_dpipe_table *table) +{ + struct nlattr *table_attr; + + table_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLE); + if (!table_attr) + return -EMSGSIZE; + + if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) || + nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table->size, + DEVLINK_ATTR_PAD)) + goto nla_put_failure; + if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED, + table->counters_enabled)) + goto nla_put_failure; + + if (devlink_dpipe_matches_put(table, skb)) + goto nla_put_failure; + + if (devlink_dpipe_actions_put(table, skb)) + goto nla_put_failure; + + nla_nest_end(skb, table_attr); return 0; + +nla_put_failure: + nla_nest_cancel(skb, table_attr); + return -EMSGSIZE; +} + +static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb, + struct genl_info *info) +{ + int err; + + if (*pskb) { + err = genlmsg_reply(*pskb, info); + if (err) + return err; + } + *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!*pskb) + return -ENOMEM; + return 0; +} + +static int devlink_dpipe_tables_fill(struct genl_info *info, + enum devlink_command cmd, int flags, + struct list_head *dpipe_tables, + const char *table_name) +{ + struct devlink 
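All of the *_put() helpers above share one netlink idiom: open a nest, emit attributes, and either commit the nest or cancel it so a half-written container never reaches userspace. The bare pattern, with placeholder attribute names:

static int put_nested_blob(struct sk_buff *skb, u32 val)
{
        struct nlattr *nest;

        nest = nla_nest_start(skb, MY_ATTR_NEST);       /* open container */
        if (!nest)
                return -EMSGSIZE;
        if (nla_put_u32(skb, MY_ATTR_VALUE, val))
                goto nla_put_failure;
        nla_nest_end(skb, nest);                        /* commit length */
        return 0;

nla_put_failure:
        nla_nest_cancel(skb, nest);                     /* trim it back off */
        return -EMSGSIZE;
}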
*devlink = info->user_ptr[0]; + struct devlink_dpipe_table *table; + struct nlattr *tables_attr; + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh; + bool incomplete; + void *hdr; + int i; + int err; + + table = list_first_entry(dpipe_tables, + struct devlink_dpipe_table, list); +start_again: + err = devlink_dpipe_send_and_alloc_skb(&skb, info); + if (err) + return err; + + hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, + &devlink_nl_family, NLM_F_MULTI, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(skb, devlink)) + goto nla_put_failure; + tables_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_TABLES); + if (!tables_attr) + goto nla_put_failure; + + i = 0; + incomplete = false; + list_for_each_entry_from(table, dpipe_tables, list) { + if (!table_name) { + err = devlink_dpipe_table_put(skb, table); + if (err) { + if (!i) + goto err_table_put; + incomplete = true; + break; + } + } else { + if (!strcmp(table->name, table_name)) { + err = devlink_dpipe_table_put(skb, table); + if (err) + break; + } + } + i++; + } + + nla_nest_end(skb, tables_attr); + genlmsg_end(skb, hdr); + if (incomplete) + goto start_again; + +send_done: + nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq, + NLMSG_DONE, 0, flags | NLM_F_MULTI); + if (!nlh) { + err = devlink_dpipe_send_and_alloc_skb(&skb, info); + if (err) + goto err_skb_send_alloc; + goto send_done; + } + + return genlmsg_reply(skb, info); + +nla_put_failure: + err = -EMSGSIZE; +err_table_put: +err_skb_send_alloc: + genlmsg_cancel(skb, hdr); + nlmsg_free(skb); + return err; +} + +static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + const char *table_name = NULL; + + if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]) + table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]); + + return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0, + &devlink->dpipe_table_list, + table_name); +} + +static int devlink_dpipe_value_put(struct sk_buff *skb, + struct devlink_dpipe_value *value) +{ + if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE, + value->value_size, value->value)) + return -EMSGSIZE; + if (value->mask) + if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK, + value->value_size, value->mask)) + return -EMSGSIZE; + if (value->mapping_valid) + if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING, + value->mapping_value)) + return -EMSGSIZE; + return 0; +} + +static int devlink_dpipe_action_value_put(struct sk_buff *skb, + struct devlink_dpipe_value *value) +{ + if (!value->action) + return -EINVAL; + if (devlink_dpipe_action_put(skb, value->action)) + return -EMSGSIZE; + if (devlink_dpipe_value_put(skb, value)) + return -EMSGSIZE; + return 0; +} + +static int devlink_dpipe_action_values_put(struct sk_buff *skb, + struct devlink_dpipe_value *values, + unsigned int values_count) +{ + struct nlattr *action_attr; + int i; + int err; + + for (i = 0; i < values_count; i++) { + action_attr = nla_nest_start(skb, + DEVLINK_ATTR_DPIPE_ACTION_VALUE); + if (!action_attr) + return -EMSGSIZE; + err = devlink_dpipe_action_value_put(skb, &values[i]); + if (err) + goto err_action_value_put; + nla_nest_end(skb, action_attr); + } + return 0; + +err_action_value_put: + nla_nest_cancel(skb, action_attr); + return err; +} + +static int devlink_dpipe_match_value_put(struct sk_buff *skb, + struct devlink_dpipe_value *value) +{ + if (!value->match) + return -EINVAL; + if (devlink_dpipe_match_put(skb, value->match)) + return -EMSGSIZE; + if 
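devlink_dpipe_tables_fill() handles tables that overflow a single skb with a resume loop: on overflow it flushes the partially filled message via devlink_dpipe_send_and_alloc_skb() (which replies and allocates a fresh skb) and jumps back to start_again at the first table that did not fit; userspace consumes the result as an NLM_F_MULTI stream closed by NLMSG_DONE. A stripped-down model of that control flow, with illustrative stubs (genl header and NLMSG_DONE handling elided):

struct item {
        u32 val;
};

static int put_item(struct sk_buff *skb, const struct item *it)
{
        return nla_put_u32(skb, 1 /* placeholder attr type */, it->val);
}

static int dump_items(struct genl_info *info, const struct item *items,
                      int count)
{
        struct sk_buff *skb = NULL;
        int i = 0, err;

start_again:
        err = devlink_dpipe_send_and_alloc_skb(&skb, info); /* flush + new */
        if (err)
                return err;
        while (i < count) {
                if (put_item(skb, &items[i]))
                        goto start_again;       /* skb full: resume at i */
                i++;
        }
        return genlmsg_reply(skb, info);
}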
(devlink_dpipe_value_put(skb, value)) + return -EMSGSIZE; + return 0; +} + +static int devlink_dpipe_match_values_put(struct sk_buff *skb, + struct devlink_dpipe_value *values, + unsigned int values_count) +{ + struct nlattr *match_attr; + int i; + int err; + + for (i = 0; i < values_count; i++) { + match_attr = nla_nest_start(skb, + DEVLINK_ATTR_DPIPE_MATCH_VALUE); + if (!match_attr) + return -EMSGSIZE; + err = devlink_dpipe_match_value_put(skb, &values[i]); + if (err) + goto err_match_value_put; + nla_nest_end(skb, match_attr); + } + return 0; + +err_match_value_put: + nla_nest_cancel(skb, match_attr); + return err; +} + +static int devlink_dpipe_entry_put(struct sk_buff *skb, + struct devlink_dpipe_entry *entry) +{ + struct nlattr *entry_attr, *matches_attr, *actions_attr; + int err; + + entry_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_ENTRY); + if (!entry_attr) + return -EMSGSIZE; + + if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index, + DEVLINK_ATTR_PAD)) + goto nla_put_failure; + if (entry->counter_valid) + if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER, + entry->counter, DEVLINK_ATTR_PAD)) + goto nla_put_failure; + + matches_attr = nla_nest_start(skb, + DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES); + if (!matches_attr) + goto nla_put_failure; + + err = devlink_dpipe_match_values_put(skb, entry->match_values, + entry->match_values_count); + if (err) { + nla_nest_cancel(skb, matches_attr); + goto err_match_values_put; + } + nla_nest_end(skb, matches_attr); + + actions_attr = nla_nest_start(skb, + DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES); + if (!actions_attr) + goto nla_put_failure; + + err = devlink_dpipe_action_values_put(skb, entry->action_values, + entry->action_values_count); + if (err) { + nla_nest_cancel(skb, actions_attr); + goto err_action_values_put; + } + nla_nest_end(skb, actions_attr); + + nla_nest_end(skb, entry_attr); + return 0; + +nla_put_failure: + err = -EMSGSIZE; +err_match_values_put: +err_action_values_put: + nla_nest_cancel(skb, entry_attr); + return err; +} + +static struct devlink_dpipe_table * +devlink_dpipe_table_find(struct list_head *dpipe_tables, + const char *table_name) +{ + struct devlink_dpipe_table *table; + + list_for_each_entry_rcu(table, dpipe_tables, list) { + if (!strcmp(table->name, table_name)) + return table; + } + return NULL; +} + +int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx) +{ + struct devlink *devlink; + int err; + + err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb, + dump_ctx->info); + if (err) + return err; + + dump_ctx->hdr = genlmsg_put(dump_ctx->skb, + dump_ctx->info->snd_portid, + dump_ctx->info->snd_seq, + &devlink_nl_family, NLM_F_MULTI, + dump_ctx->cmd); + if (!dump_ctx->hdr) + goto nla_put_failure; + + devlink = dump_ctx->info->user_ptr[0]; + if (devlink_nl_put_handle(dump_ctx->skb, devlink)) + goto nla_put_failure; + dump_ctx->nest = nla_nest_start(dump_ctx->skb, + DEVLINK_ATTR_DPIPE_ENTRIES); + if (!dump_ctx->nest) + goto nla_put_failure; + return 0; + +nla_put_failure: + genlmsg_cancel(dump_ctx->skb, dump_ctx->hdr); + nlmsg_free(dump_ctx->skb); + return -EMSGSIZE; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare); + +int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx, + struct devlink_dpipe_entry *entry) +{ + return devlink_dpipe_entry_put(dump_ctx->skb, entry); +} +EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append); + +int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx) +{ + nla_nest_end(dump_ctx->skb, 
dump_ctx->nest); + genlmsg_end(dump_ctx->skb, dump_ctx->hdr); + return 0; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close); + +static int devlink_dpipe_entries_fill(struct genl_info *info, + enum devlink_command cmd, int flags, + struct devlink_dpipe_table *table) +{ + struct devlink_dpipe_dump_ctx dump_ctx; + struct nlmsghdr *nlh; + int err; + + dump_ctx.skb = NULL; + dump_ctx.cmd = cmd; + dump_ctx.info = info; + + err = table->table_ops->entries_dump(table->priv, + table->counters_enabled, + &dump_ctx); + if (err) + goto err_entries_dump; + +send_done: + nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq, + NLMSG_DONE, 0, flags | NLM_F_MULTI); + if (!nlh) { + err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info); + if (err) + goto err_skb_send_alloc; + goto send_done; + } + return genlmsg_reply(dump_ctx.skb, info); + +err_entries_dump: +err_skb_send_alloc: + genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr); + nlmsg_free(dump_ctx.skb); + return err; +} + +static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + struct devlink_dpipe_table *table; + const char *table_name; + + if (!info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]) + return -EINVAL; + + table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]); + table = devlink_dpipe_table_find(&devlink->dpipe_table_list, + table_name); + if (!table) + return -EINVAL; + + if (!table->table_ops->entries_dump) + return -EINVAL; + + return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET, + 0, table); +} + +static int devlink_dpipe_fields_put(struct sk_buff *skb, + const struct devlink_dpipe_header *header) +{ + struct devlink_dpipe_field *field; + struct nlattr *field_attr; + int i; + + for (i = 0; i < header->fields_count; i++) { + field = &header->fields[i]; + field_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_FIELD); + if (!field_attr) + return -EMSGSIZE; + if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type)) + goto nla_put_failure; + nla_nest_end(skb, field_attr); + } + return 0; + +nla_put_failure: + nla_nest_cancel(skb, field_attr); + return -EMSGSIZE; +} + +static int devlink_dpipe_header_put(struct sk_buff *skb, + struct devlink_dpipe_header *header) +{ + struct nlattr *fields_attr, *header_attr; + int err; + + header_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADER); + if (!header_attr) + return -EMSGSIZE; + + if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) || + nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) || + nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global)) + goto nla_put_failure; + + fields_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADER_FIELDS); + if (!fields_attr) + goto nla_put_failure; + + err = devlink_dpipe_fields_put(skb, header); + if (err) { + nla_nest_cancel(skb, fields_attr); + goto nla_put_failure; + } + nla_nest_end(skb, fields_attr); + nla_nest_end(skb, header_attr); + return 0; + +nla_put_failure: + err = -EMSGSIZE; + nla_nest_cancel(skb, header_attr); + return err; +} + +static int devlink_dpipe_headers_fill(struct genl_info *info, + enum devlink_command cmd, int flags, + struct devlink_dpipe_headers * + dpipe_headers) +{ + struct devlink *devlink = info->user_ptr[0]; + struct nlattr *headers_attr; + struct 
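The ctx helpers give drivers a prepare/append/close lifecycle for entry dumps, keeping the multipart plumbing inside devlink. A sketch of a driver's entries_dump callback built on them; the single hard-coded entry stands in for a walk of the hardware table:

static int my_table_entries_dump(void *priv, bool counters_enabled,
                                 struct devlink_dpipe_dump_ctx *dump_ctx)
{
        struct devlink_dpipe_entry entry = {};
        int err;

        err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
        if (err)
                return err;

        entry.index = 0;
        entry.counter_valid = counters_enabled;
        /* match_values/action_values would be filled from hardware here */
        err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
        if (err)
                return err;

        return devlink_dpipe_entry_ctx_close(dump_ctx);
}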
sk_buff *skb = NULL; + struct nlmsghdr *nlh; + void *hdr; + int i, j; + int err; + + i = 0; +start_again: + err = devlink_dpipe_send_and_alloc_skb(&skb, info); + if (err) + return err; + + hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, + &devlink_nl_family, NLM_F_MULTI, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(skb, devlink)) + goto nla_put_failure; + headers_attr = nla_nest_start(skb, DEVLINK_ATTR_DPIPE_HEADERS); + if (!headers_attr) + goto nla_put_failure; + + j = 0; + for (; i < dpipe_headers->headers_count; i++) { + err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]); + if (err) { + if (!j) + goto err_table_put; + break; + } + j++; + } + nla_nest_end(skb, headers_attr); + genlmsg_end(skb, hdr); + if (i != dpipe_headers->headers_count) + goto start_again; + +send_done: + nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq, + NLMSG_DONE, 0, flags | NLM_F_MULTI); + if (!nlh) { + err = devlink_dpipe_send_and_alloc_skb(&skb, info); + if (err) + goto err_skb_send_alloc; + goto send_done; + } + return genlmsg_reply(skb, info); + +nla_put_failure: + err = -EMSGSIZE; +err_table_put: +err_skb_send_alloc: + genlmsg_cancel(skb, hdr); + nlmsg_free(skb); + return err; +} + +static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + + if (!devlink->dpipe_headers) + return -EOPNOTSUPP; + return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET, + 0, devlink->dpipe_headers); +} + +static int devlink_dpipe_table_counters_set(struct devlink *devlink, + const char *table_name, + bool enable) +{ + struct devlink_dpipe_table *table; + + table = devlink_dpipe_table_find(&devlink->dpipe_table_list, + table_name); + if (!table) + return -EINVAL; + + if (table->counter_control_extern) + return -EOPNOTSUPP; + + if (!(table->counters_enabled ^ enable)) + return 0; + + table->counters_enabled = enable; + if (table->table_ops->counters_set_update) + table->table_ops->counters_set_update(table->priv, enable); + return 0; +} + +static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb, + struct genl_info *info) +{ + struct devlink *devlink = info->user_ptr[0]; + const char *table_name; + bool counters_enable; + + if (!info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME] || + !info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]) + return -EINVAL; + + table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]); + counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]); + + return devlink_dpipe_table_counters_set(devlink, table_name, + counters_enable); } static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { @@ -1512,6 +2209,9 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 }, [DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 }, [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 }, + [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -1644,6 +2344,34 @@ static const struct genl_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, + { + .cmd = DEVLINK_CMD_DPIPE_TABLE_GET, + .doit = devlink_nl_cmd_dpipe_table_get, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = 
DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET, + .doit = devlink_nl_cmd_dpipe_entries_get, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET, + .doit = devlink_nl_cmd_dpipe_headers_get, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET, + .doit = devlink_nl_cmd_dpipe_table_counters_set, + .policy = devlink_nl_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { @@ -1680,6 +2408,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size) devlink_net_set(devlink, &init_net); INIT_LIST_HEAD(&devlink->port_list); INIT_LIST_HEAD(&devlink->sb_list); + INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); return devlink; } EXPORT_SYMBOL_GPL(devlink_alloc); @@ -1880,6 +2609,133 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index) } EXPORT_SYMBOL_GPL(devlink_sb_unregister); +/** + * devlink_dpipe_headers_register - register dpipe headers + * + * @devlink: devlink + * @dpipe_headers: dpipe header array + * + * Register the headers supported by hardware. + */ +int devlink_dpipe_headers_register(struct devlink *devlink, + struct devlink_dpipe_headers *dpipe_headers) +{ + mutex_lock(&devlink_mutex); + devlink->dpipe_headers = dpipe_headers; + mutex_unlock(&devlink_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register); + +/** + * devlink_dpipe_headers_unregister - unregister dpipe headers + * + * @devlink: devlink + * + * Unregister the headers supported by hardware. + */ +void devlink_dpipe_headers_unregister(struct devlink *devlink) +{ + mutex_lock(&devlink_mutex); + devlink->dpipe_headers = NULL; + mutex_unlock(&devlink_mutex); +} +EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister); + +/** + * devlink_dpipe_table_counter_enabled - check if counter allocation + * required + * @devlink: devlink + * @table_name: table name + * + * Used by the driver to check if counter allocation is required. + * After counter allocation is turned on, the table entries + * are updated to include counter statistics. + * + * From that point on, the driver must respect the counter + * state so that each entry added to the table is added + * with a counter.
+ */ +bool devlink_dpipe_table_counter_enabled(struct devlink *devlink, + const char *table_name) +{ + struct devlink_dpipe_table *table; + bool enabled; + + rcu_read_lock(); + table = devlink_dpipe_table_find(&devlink->dpipe_table_list, + table_name); + enabled = false; + if (table) + enabled = table->counters_enabled; + rcu_read_unlock(); + return enabled; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled); + +/** + * devlink_dpipe_table_register - register dpipe table + * + * @devlink: devlink + * @table_name: table name + * @table_ops: table ops + * @priv: priv + * @size: size + * @counter_control_extern: external control for counters + */ +int devlink_dpipe_table_register(struct devlink *devlink, + const char *table_name, + struct devlink_dpipe_table_ops *table_ops, + void *priv, u64 size, + bool counter_control_extern) +{ + struct devlink_dpipe_table *table; + + if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name)) + return -EEXIST; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + table->name = table_name; + table->table_ops = table_ops; + table->priv = priv; + table->size = size; + table->counter_control_extern = counter_control_extern; + + mutex_lock(&devlink_mutex); + list_add_tail_rcu(&table->list, &devlink->dpipe_table_list); + mutex_unlock(&devlink_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(devlink_dpipe_table_register); + +/** + * devlink_dpipe_table_unregister - unregister dpipe table + * + * @devlink: devlink + * @table_name: table name + */ +void devlink_dpipe_table_unregister(struct devlink *devlink, + const char *table_name) +{ + struct devlink_dpipe_table *table; + + mutex_lock(&devlink_mutex); + table = devlink_dpipe_table_find(&devlink->dpipe_table_list, + table_name); + if (!table) + goto unlock; + list_del_rcu(&table->list); + mutex_unlock(&devlink_mutex); + kfree_rcu(table, rcu); + return; +unlock: + mutex_unlock(&devlink_mutex); +} +EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister); + static int __init devlink_module_init(void) { return genl_register_family(&devlink_nl_family); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index fb55327dcfeabdaf3eeecc3a8d176ae215612649..70ccda233bd1f1aab18535e6d9d0419bb9a1a23b 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -412,9 +412,8 @@ static int __init init_net_drop_monitor(void) for_each_possible_cpu(cpu) { data = &per_cpu(dm_cpu_data, cpu); INIT_WORK(&data->dm_alert_work, send_dm_alert); - init_timer(&data->send_timer); - data->send_timer.data = (unsigned long)data; - data->send_timer.function = sched_send_work; + setup_timer(&data->send_timer, sched_send_work, + (unsigned long)data); spin_lock_init(&data->lock); reset_per_cpu_data(data); } diff --git a/net/core/ethtool.c b/net/core/ethtool.c index aecb2c7241b697e79628fdb79467f5087b2bbf9f..03111a2d6653b8f06427540eb5864b255af4ead3 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -90,6 +90,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation", + [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", @@ -103,12 +104,15 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_RXALL_BIT] = "rx-all", [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", 
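Putting the registration pieces together, a driver would expose its pipeline roughly as below; the my_* names are illustrative, my_table_entries_dump is the callback sketched earlier, and the header/field arrays are truncated. Once registered, the table becomes visible to userspace, e.g. devlink dpipe table show pci/0000:03:00.0 and a corresponding counters enable command with a recent iproute2 (syntax depends on the tool version):

static struct devlink_dpipe_header my_header_ethernet = {
        .name = "ethernet",
        .id = 0,
        .fields = NULL,         /* field array elided for brevity */
        .fields_count = 0,
        .global = true,
};

static struct devlink_dpipe_header *my_headers[] = {
        &my_header_ethernet,
};

static struct devlink_dpipe_headers my_dpipe_headers = {
        .headers = my_headers,
        .headers_count = ARRAY_SIZE(my_headers),
};

static int my_matches_dump(void *priv, struct sk_buff *skb) { return 0; }
static int my_actions_dump(void *priv, struct sk_buff *skb) { return 0; }
static void my_counters_update(void *priv, bool enable) { }

static struct devlink_dpipe_table_ops my_table_ops = {
        .matches_dump = my_matches_dump,
        .actions_dump = my_actions_dump,
        .entries_dump = my_table_entries_dump,
        .counters_set_update = my_counters_update,      /* optional */
};

static int my_dpipe_init(struct devlink *devlink)
{
        int err;

        err = devlink_dpipe_headers_register(devlink, &my_dpipe_headers);
        if (err)
                return err;
        /* size: entries the hardware table can hold; false: counters
         * are toggled via the new netlink command, not externally */
        return devlink_dpipe_table_register(devlink, "my_table",
                                            &my_table_ops, NULL, 1024,
                                            false);
}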
[NETIF_F_HW_TC_BIT] = "hw-tc-offload", + [NETIF_F_HW_ESP_BIT] = "esp-hw-offload", + [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload", }; static const char rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = { [ETH_RSS_HASH_TOP_BIT] = "toeplitz", [ETH_RSS_HASH_XOR_BIT] = "xor", + [ETH_RSS_HASH_CRC32_BIT] = "crc32", }; static const char diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index b6791d94841d56cf8b1027d3ba2d71dd21302caf..f21c4d3aeae0cf59a8d9239cf1e581e6d136c740 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -23,6 +23,20 @@ static const struct fib_kuid_range fib_kuid_range_unset = { KUIDT_INIT(~0), }; +bool fib_rule_matchall(const struct fib_rule *rule) +{ + if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id || + rule->flags) + return false; + if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1) + return false; + if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) || + !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end)) + return false; + return true; +} +EXPORT_SYMBOL_GPL(fib_rule_matchall); + int fib_default_rule_add(struct fib_rules_ops *ops, u32 pref, u32 table, u32 flags) { @@ -354,7 +368,8 @@ static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, return 0; } -int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh) +int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); @@ -372,7 +387,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh) goto errout; } - err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy); + err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack); if (err < 0) goto errout; @@ -425,6 +440,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh) if (tb[FRA_TUN_ID]) rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]); + err = -EINVAL; if (tb[FRA_L3MDEV]) { #ifdef CONFIG_NET_L3_MASTER_DEV rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]); @@ -446,7 +462,6 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh) else rule->suppress_ifgroup = -1; - err = -EINVAL; if (tb[FRA_GOTO]) { if (rule->action != FR_ACT_GOTO) goto errout_free; @@ -547,7 +562,8 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh) } EXPORT_SYMBOL_GPL(fib_nl_newrule); -int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh) +int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); @@ -566,7 +582,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh) goto errout; } - err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy); + err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack); if (err < 0) goto errout; @@ -576,8 +592,10 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh) if (tb[FRA_UID_RANGE]) { range = nla_get_kuid_range(tb); - if (!uid_range_set(&range)) + if (!uid_range_set(&range)) { + err = -EINVAL; goto errout; + } } else { range = fib_kuid_range_unset; } diff --git a/net/core/filter.c b/net/core/filter.c index ebaeaf2e46e8bd0171379604930d232f205afd07..a253a6197e6b37a7ae2fe451c646b01c861a3e22 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet 
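fib_rule_matchall() gives callers, typically drivers asked to offload FIB rules, a cheap way to recognize rules that do not actually filter anything; anything narrower can then be refused up front. An illustrative use, modelled on how an offloading driver might gate rule notifications:

static int my_fib_rule_offload_check(const struct fib_rule *rule)
{
        /* default rules match every packet and need no special
         * handling in hardware; anything narrower is unsupported */
        if (!fib_rule_matchall(rule))
                return -EOPNOTSUPP;
        return 0;
}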
through a socket filter @@ -91,7 +93,12 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter) { - unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); + struct sock *save_sk = skb->sk; + unsigned int pkt_len; + + skb->sk = sk; + pkt_len = bpf_prog_run_save_cb(filter->prog, skb); + skb->sk = save_sk; err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock(); @@ -348,7 +355,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, * @new_prog: buffer where converted program will be stored * @new_len: pointer to store length of converted program * - * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style. + * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' + * style extended BPF (eBPF). * Conversion workflow: * * 1) First pass for calculating the new program length: @@ -928,7 +936,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu) */ static void sk_filter_release(struct sk_filter *fp) { - if (atomic_dec_and_test(&fp->refcnt)) + if (refcount_dec_and_test(&fp->refcnt)) call_rcu(&fp->rcu, sk_filter_release_rcu); } @@ -943,20 +951,27 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) /* try to charge the socket memory if there is space available * return true on success */ -bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) +static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) { u32 filter_size = bpf_prog_size(fp->prog->len); /* same check as in sock_kmalloc() */ if (filter_size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { - atomic_inc(&fp->refcnt); atomic_add(filter_size, &sk->sk_omem_alloc); return true; } return false; } +bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) +{ + bool ret = __sk_filter_charge(sk, fp); + if (ret) + refcount_inc(&fp->refcnt); + return ret; +} + static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) { struct sock_filter *old_prog; @@ -1179,12 +1194,12 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) return -ENOMEM; fp->prog = prog; - atomic_set(&fp->refcnt, 0); - if (!sk_filter_charge(sk, fp)) { + if (!__sk_filter_charge(sk, fp)) { kfree(fp); return -ENOMEM; } + refcount_set(&fp->refcnt, 1); old_fp = rcu_dereference_protected(sk->sk_filter, lockdep_sock_is_held(sk)); @@ -2599,6 +2614,36 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { .arg5_type = ARG_CONST_SIZE, }; +BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) +{ + return skb->sk ? 
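The sk_filter conversion to refcount_t is why the charging is split out: refcount_t objects must be created already holding a reference (a 0 -> 1 transition via refcount_inc() is treated as a use-after-free and warns), so __sk_attach_prog() first charges the socket via __sk_filter_charge() and only then does refcount_set(&fp->refcnt, 1). The general idiom:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t ref;
};

static struct obj *obj_new(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                refcount_set(&o->ref, 1);       /* born referenced */
        return o;
}

static void obj_get(struct obj *o)
{
        refcount_inc(&o->ref);  /* saturates and warns instead of wrapping */
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->ref))
                kfree(o);
}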
sock_gen_cookie(skb->sk) : 0; +} + +static const struct bpf_func_proto bpf_get_socket_cookie_proto = { + .func = bpf_get_socket_cookie, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) +{ + struct sock *sk = sk_to_full_sk(skb->sk); + kuid_t kuid; + + if (!sk || !sk_fullsock(sk)) + return overflowuid; + kuid = sock_net_uid(sock_net(sk), sk); + return from_kuid_munged(sock_net(sk)->user_ns, kuid); +} + +static const struct bpf_func_proto bpf_get_socket_uid_proto = { + .func = bpf_get_socket_uid, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + static const struct bpf_func_proto * bpf_base_func_proto(enum bpf_func_id func_id) { @@ -2633,6 +2678,10 @@ sk_filter_func_proto(enum bpf_func_id func_id) switch (func_id) { case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; + case BPF_FUNC_get_socket_cookie: + return &bpf_get_socket_cookie_proto; + case BPF_FUNC_get_socket_uid: + return &bpf_get_socket_uid_proto; default: return bpf_base_func_proto(func_id); } @@ -2692,6 +2741,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) return &bpf_get_smp_processor_id_proto; case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; + case BPF_FUNC_get_socket_cookie: + return &bpf_get_socket_cookie_proto; + case BPF_FUNC_get_socket_uid: + return &bpf_get_socket_uid_proto; default: return bpf_base_func_proto(func_id); } @@ -2715,12 +2768,7 @@ xdp_func_proto(enum bpf_func_id func_id) static const struct bpf_func_proto * cg_skb_func_proto(enum bpf_func_id func_id) { - switch (func_id) { - case BPF_FUNC_skb_load_bytes: - return &bpf_skb_load_bytes_proto; - default: - return bpf_base_func_proto(func_id); - } + return sk_filter_func_proto(func_id); } static const struct bpf_func_proto * @@ -3154,6 +3202,19 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); else *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); +#endif + break; + + case offsetof(struct __sk_buff, napi_id): +#if defined(CONFIG_NET_RX_BUSY_POLL) + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, napi_id) != 4); + + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + offsetof(struct sk_buff, napi_id)); + *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); + *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); +#else + *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif break; } @@ -3252,111 +3313,55 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } -static const struct bpf_verifier_ops sk_filter_ops = { +const struct bpf_verifier_ops sk_filter_prog_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; -static const struct bpf_verifier_ops tc_cls_act_ops = { +const struct bpf_verifier_ops tc_cls_act_prog_ops = { .get_func_proto = tc_cls_act_func_proto, .is_valid_access = tc_cls_act_is_valid_access, .convert_ctx_access = tc_cls_act_convert_ctx_access, .gen_prologue = tc_cls_act_prologue, + .test_run = bpf_prog_test_run_skb, }; -static const struct bpf_verifier_ops xdp_ops = { +const struct bpf_verifier_ops xdp_prog_ops = { .get_func_proto = xdp_func_proto, .is_valid_access = xdp_is_valid_access, .convert_ctx_access = xdp_convert_ctx_access, + .test_run = bpf_prog_test_run_xdp, }; -static const struct bpf_verifier_ops cg_skb_ops = { +const struct bpf_verifier_ops cg_skb_prog_ops = { .get_func_proto = cg_skb_func_proto, .is_valid_access 
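The two new helpers are wired into socket filters, cgroup-skb and tc programs via the func_proto hooks above. A small socket filter using them; the function-pointer-from-helper-ID casts are the classic samples/bpf loading style, the policy in the return statement is purely illustrative, and any per-UID accounting map is elided:

#include <linux/bpf.h>

static unsigned long long (*bpf_get_socket_cookie)(void *ctx) =
        (void *) BPF_FUNC_get_socket_cookie;
static unsigned int (*bpf_get_socket_uid)(void *ctx) =
        (void *) BPF_FUNC_get_socket_uid;

__attribute__((section("socket"), used))
int filter_by_uid(struct __sk_buff *skb)
{
        unsigned long long cookie = bpf_get_socket_cookie(skb);
        unsigned int uid = bpf_get_socket_uid(skb);

        /* cookie/uid would normally key accounting maps; for a socket
         * filter, returning 0 drops the packet and -1 keeps it all */
        return (uid == 0 && cookie) ? -1 : 0;
}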
= sk_filter_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, + .test_run = bpf_prog_test_run_skb, }; -static const struct bpf_verifier_ops lwt_inout_ops = { +const struct bpf_verifier_ops lwt_inout_prog_ops = { .get_func_proto = lwt_inout_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, + .test_run = bpf_prog_test_run_skb, }; -static const struct bpf_verifier_ops lwt_xmit_ops = { +const struct bpf_verifier_ops lwt_xmit_prog_ops = { .get_func_proto = lwt_xmit_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, .gen_prologue = tc_cls_act_prologue, + .test_run = bpf_prog_test_run_skb, }; -static const struct bpf_verifier_ops cg_sock_ops = { +const struct bpf_verifier_ops cg_sock_prog_ops = { .get_func_proto = bpf_base_func_proto, .is_valid_access = sock_filter_is_valid_access, .convert_ctx_access = sock_filter_convert_ctx_access, }; -static struct bpf_prog_type_list sk_filter_type __ro_after_init = { - .ops = &sk_filter_ops, - .type = BPF_PROG_TYPE_SOCKET_FILTER, -}; - -static struct bpf_prog_type_list sched_cls_type __ro_after_init = { - .ops = &tc_cls_act_ops, - .type = BPF_PROG_TYPE_SCHED_CLS, -}; - -static struct bpf_prog_type_list sched_act_type __ro_after_init = { - .ops = &tc_cls_act_ops, - .type = BPF_PROG_TYPE_SCHED_ACT, -}; - -static struct bpf_prog_type_list xdp_type __ro_after_init = { - .ops = &xdp_ops, - .type = BPF_PROG_TYPE_XDP, -}; - -static struct bpf_prog_type_list cg_skb_type __ro_after_init = { - .ops = &cg_skb_ops, - .type = BPF_PROG_TYPE_CGROUP_SKB, -}; - -static struct bpf_prog_type_list lwt_in_type __ro_after_init = { - .ops = &lwt_inout_ops, - .type = BPF_PROG_TYPE_LWT_IN, -}; - -static struct bpf_prog_type_list lwt_out_type __ro_after_init = { - .ops = &lwt_inout_ops, - .type = BPF_PROG_TYPE_LWT_OUT, -}; - -static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = { - .ops = &lwt_xmit_ops, - .type = BPF_PROG_TYPE_LWT_XMIT, -}; - -static struct bpf_prog_type_list cg_sock_type __ro_after_init = { - .ops = &cg_sock_ops, - .type = BPF_PROG_TYPE_CGROUP_SOCK -}; - -static int __init register_sk_filter_ops(void) -{ - bpf_register_prog_type(&sk_filter_type); - bpf_register_prog_type(&sched_cls_type); - bpf_register_prog_type(&sched_act_type); - bpf_register_prog_type(&xdp_type); - bpf_register_prog_type(&cg_skb_type); - bpf_register_prog_type(&cg_sock_type); - bpf_register_prog_type(&lwt_in_type); - bpf_register_prog_type(&lwt_out_type); - bpf_register_prog_type(&lwt_xmit_type); - - return 0; -} -late_initcall(register_sk_filter_ops); - int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; diff --git a/net/core/flow.c b/net/core/flow.c index f765c11d8df567d704998185482c3d220280c148..f7f5d1932a2720767dd31f4033f196815ff08447 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -47,7 +47,7 @@ struct flow_flush_info { static struct kmem_cache *flow_cachep __read_mostly; -#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift) +#define flow_cache_hash_size(cache) (1U << (cache)->hash_shift) #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) static void flow_cache_new_hashrnd(unsigned long arg) @@ -99,7 +99,8 @@ static void flow_cache_gc_task(struct work_struct *work) } static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, - int deleted, struct list_head *gc_list, + unsigned int deleted, + struct list_head *gc_list, struct netns_xfrm *xfrm) { if (deleted) { @@ -114,17 +115,18 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, 
static void __flow_cache_shrink(struct flow_cache *fc, struct flow_cache_percpu *fcp, - int shrink_to) + unsigned int shrink_to) { struct flow_cache_entry *fle; struct hlist_node *tmp; LIST_HEAD(gc_list); - int i, deleted = 0; + unsigned int deleted = 0; struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm, flow_cache_global); + unsigned int i; for (i = 0; i < flow_cache_hash_size(fc); i++) { - int saved = 0; + unsigned int saved = 0; hlist_for_each_entry_safe(fle, tmp, &fcp->hash_table[i], u.hlist) { @@ -145,7 +147,7 @@ static void __flow_cache_shrink(struct flow_cache *fc, static void flow_cache_shrink(struct flow_cache *fc, struct flow_cache_percpu *fcp) { - int shrink_to = fc->low_watermark / flow_cache_hash_size(fc); + unsigned int shrink_to = fc->low_watermark / flow_cache_hash_size(fc); __flow_cache_shrink(fc, fcp, shrink_to); } @@ -161,7 +163,7 @@ static void flow_new_hash_rnd(struct flow_cache *fc, static u32 flow_hash_code(struct flow_cache *fc, struct flow_cache_percpu *fcp, const struct flowi *key, - size_t keysize) + unsigned int keysize) { const u32 *k = (const u32 *) key; const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); @@ -174,7 +176,7 @@ static u32 flow_hash_code(struct flow_cache *fc, * important assumptions that we can here, such as alignment. */ static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, - size_t keysize) + unsigned int keysize) { const flow_compare_t *k1, *k1_lim, *k2; @@ -199,7 +201,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, struct flow_cache_percpu *fcp; struct flow_cache_entry *fle, *tfle; struct flow_cache_object *flo; - size_t keysize; + unsigned int keysize; unsigned int hash; local_bh_disable(); @@ -295,9 +297,10 @@ static void flow_cache_flush_tasklet(unsigned long data) struct flow_cache_entry *fle; struct hlist_node *tmp; LIST_HEAD(gc_list); - int i, deleted = 0; + unsigned int deleted = 0; struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm, flow_cache_global); + unsigned int i; fcp = this_cpu_ptr(fc->percpu); for (i = 0; i < flow_cache_hash_size(fc); i++) { @@ -327,7 +330,7 @@ static void flow_cache_flush_tasklet(unsigned long data) static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu) { struct flow_cache_percpu *fcp; - int i; + unsigned int i; fcp = per_cpu_ptr(fc->percpu, cpu); for (i = 0; i < flow_cache_hash_size(fc); i++) @@ -402,12 +405,12 @@ void flow_cache_flush_deferred(struct net *net) static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) { struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); - size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc); + unsigned int sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc); if (!fcp->hash_table) { fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); if (!fcp->hash_table) { - pr_err("NET: failed to allocate flow cache sz %zu\n", sz); + pr_err("NET: failed to allocate flow cache sz %u\n", sz); return -ENOMEM; } fcp->hash_rnd_recalc = 1; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d98d4998213da6103665d62d5a85613631236f19..28d94bce4df8eb046dd6a03103c9249b29e74cd8 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -113,6 +113,235 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, } EXPORT_SYMBOL(__skb_flow_get_ports); +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD, + FLOW_DISSECT_RET_OUT_BAD, + FLOW_DISSECT_RET_OUT_PROTO_AGAIN, +}; + +static enum 
flow_dissect_ret +__skb_flow_dissect_mpls(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, void *data, int nhoff, int hlen) +{ + struct flow_dissector_key_keyid *key_keyid; + struct mpls_label *hdr, _hdr[2]; + u32 entry, label; + + if (!dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY) && + !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) + return FLOW_DISSECT_RET_OUT_GOOD; + + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, + hlen, &_hdr); + if (!hdr) + return FLOW_DISSECT_RET_OUT_BAD; + + entry = ntohl(hdr[0].entry); + label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT; + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) { + struct flow_dissector_key_mpls *key_mpls; + + key_mpls = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_MPLS, + target_container); + key_mpls->mpls_label = label; + key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK) + >> MPLS_LS_TTL_SHIFT; + key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK) + >> MPLS_LS_TC_SHIFT; + key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK) + >> MPLS_LS_S_SHIFT; + } + + if (label == MPLS_LABEL_ENTROPY) { + key_keyid = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY, + target_container); + key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK); + } + return FLOW_DISSECT_RET_OUT_GOOD; +} + +static enum flow_dissect_ret +__skb_flow_dissect_arp(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, void *data, int nhoff, int hlen) +{ + struct flow_dissector_key_arp *key_arp; + struct { + unsigned char ar_sha[ETH_ALEN]; + unsigned char ar_sip[4]; + unsigned char ar_tha[ETH_ALEN]; + unsigned char ar_tip[4]; + } *arp_eth, _arp_eth; + const struct arphdr *arp; + struct arphdr _arp; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP)) + return FLOW_DISSECT_RET_OUT_GOOD; + + arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, + hlen, &_arp); + if (!arp) + return FLOW_DISSECT_RET_OUT_BAD; + + if (arp->ar_hrd != htons(ARPHRD_ETHER) || + arp->ar_pro != htons(ETH_P_IP) || + arp->ar_hln != ETH_ALEN || + arp->ar_pln != 4 || + (arp->ar_op != htons(ARPOP_REPLY) && + arp->ar_op != htons(ARPOP_REQUEST))) + return FLOW_DISSECT_RET_OUT_BAD; + + arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp), + sizeof(_arp_eth), data, + hlen, &_arp_eth); + if (!arp_eth) + return FLOW_DISSECT_RET_OUT_BAD; + + key_arp = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_ARP, + target_container); + + memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip)); + memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip)); + + /* Only store the lower byte of the opcode; + * this covers ARPOP_REPLY and ARPOP_REQUEST. 
+ */ + key_arp->op = ntohs(arp->ar_op) & 0xff; + + ether_addr_copy(key_arp->sha, arp_eth->ar_sha); + ether_addr_copy(key_arp->tha, arp_eth->ar_tha); + + return FLOW_DISSECT_RET_OUT_GOOD; +} + +static enum flow_dissect_ret +__skb_flow_dissect_gre(const struct sk_buff *skb, + struct flow_dissector_key_control *key_control, + struct flow_dissector *flow_dissector, + void *target_container, void *data, + __be16 *p_proto, int *p_nhoff, int *p_hlen, + unsigned int flags) +{ + struct flow_dissector_key_keyid *key_keyid; + struct gre_base_hdr *hdr, _hdr; + int offset = 0; + u16 gre_ver; + + hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), + data, *p_hlen, &_hdr); + if (!hdr) + return FLOW_DISSECT_RET_OUT_BAD; + + /* Only look inside GRE without routing */ + if (hdr->flags & GRE_ROUTING) + return FLOW_DISSECT_RET_OUT_GOOD; + + /* Only look inside GRE for version 0 and 1 */ + gre_ver = ntohs(hdr->flags & GRE_VERSION); + if (gre_ver > 1) + return FLOW_DISSECT_RET_OUT_GOOD; + + *p_proto = hdr->protocol; + if (gre_ver) { + /* Version 1 must be PPTP, and check the flags */ + if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY))) + return FLOW_DISSECT_RET_OUT_GOOD; + } + + offset += sizeof(struct gre_base_hdr); + + if (hdr->flags & GRE_CSUM) + offset += sizeof(((struct gre_full_hdr *) 0)->csum) + + sizeof(((struct gre_full_hdr *) 0)->reserved1); + + if (hdr->flags & GRE_KEY) { + const __be32 *keyid; + __be32 _keyid; + + keyid = __skb_header_pointer(skb, *p_nhoff + offset, + sizeof(_keyid), + data, *p_hlen, &_keyid); + if (!keyid) + return FLOW_DISSECT_RET_OUT_BAD; + + if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_GRE_KEYID)) { + key_keyid = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_GRE_KEYID, + target_container); + if (gre_ver == 0) + key_keyid->keyid = *keyid; + else + key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK; + } + offset += sizeof(((struct gre_full_hdr *) 0)->key); + } + + if (hdr->flags & GRE_SEQ) + offset += sizeof(((struct pptp_gre_header *) 0)->seq); + + if (gre_ver == 0) { + if (*p_proto == htons(ETH_P_TEB)) { + const struct ethhdr *eth; + struct ethhdr _eth; + + eth = __skb_header_pointer(skb, *p_nhoff + offset, + sizeof(_eth), + data, *p_hlen, &_eth); + if (!eth) + return FLOW_DISSECT_RET_OUT_BAD; + *p_proto = eth->h_proto; + offset += sizeof(*eth); + + /* Cap headers that we access via pointers at the + * end of the Ethernet header as our maximum alignment + * at that point is only 2 bytes. 
+ */ + if (NET_IP_ALIGN) + *p_hlen = *p_nhoff + offset; + } + } else { /* version 1, must be PPTP */ + u8 _ppp_hdr[PPP_HDRLEN]; + u8 *ppp_hdr; + + if (hdr->flags & GRE_ACK) + offset += sizeof(((struct pptp_gre_header *) 0)->ack); + + ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset, + sizeof(_ppp_hdr), + data, *p_hlen, _ppp_hdr); + if (!ppp_hdr) + return FLOW_DISSECT_RET_OUT_BAD; + + switch (PPP_PROTOCOL(ppp_hdr)) { + case PPP_IP: + *p_proto = htons(ETH_P_IP); + break; + case PPP_IPV6: + *p_proto = htons(ETH_P_IPV6); + break; + default: + /* Could probably catch some more like MPLS */ + break; + } + + offset += PPP_HDRLEN; + } + + *p_nhoff += offset; + key_control->flags |= FLOW_DIS_ENCAPSULATION; + if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) + return FLOW_DISSECT_RET_OUT_GOOD; + + return FLOW_DISSECT_RET_OUT_PROTO_AGAIN; +} + /** * __skb_flow_dissect - extract the flow_keys struct and return it * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified @@ -138,12 +367,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; - struct flow_dissector_key_arp *key_arp; struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_icmp *key_icmp; struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_vlan *key_vlan; - struct flow_dissector_key_keyid *key_keyid; bool skip_vlan = false; u8 ip_proto = 0; bool ret; @@ -181,7 +408,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, memcpy(key_eth_addrs, ð->h_dest, sizeof(*key_eth_addrs)); } -again: +proto_again: switch (proto) { case htons(ETH_P_IP): { const struct iphdr *iph; @@ -284,7 +511,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, proto = vlan->h_vlan_encapsulated_proto; nhoff += sizeof(*vlan); if (skip_vlan) - goto again; + goto proto_again; } skip_vlan = true; @@ -307,7 +534,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, } } - goto again; + goto proto_again; } case htons(ETH_P_PPP_SES): { struct { @@ -349,31 +576,17 @@ bool __skb_flow_dissect(const struct sk_buff *skb, } case htons(ETH_P_MPLS_UC): - case htons(ETH_P_MPLS_MC): { - struct mpls_label *hdr, _hdr[2]; + case htons(ETH_P_MPLS_MC): mpls: - hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, - hlen, &_hdr); - if (!hdr) - goto out_bad; - - if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >> - MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) { - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) { - key_keyid = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_MPLS_ENTROPY, - target_container); - key_keyid->keyid = hdr[1].entry & - htonl(MPLS_LS_LABEL_MASK); - } - + switch (__skb_flow_dissect_mpls(skb, flow_dissector, + target_container, data, + nhoff, hlen)) { + case FLOW_DISSECT_RET_OUT_GOOD: goto out_good; + case FLOW_DISSECT_RET_OUT_BAD: + default: + goto out_bad; } - - goto out_good; - } - case htons(ETH_P_FCOE): if ((hlen - nhoff) < FCOE_HEADER_LEN) goto out_bad; @@ -382,177 +595,33 @@ bool __skb_flow_dissect(const struct sk_buff *skb, goto out_good; case htons(ETH_P_ARP): - case htons(ETH_P_RARP): { - struct { - unsigned char ar_sha[ETH_ALEN]; - unsigned char ar_sip[4]; - unsigned char ar_tha[ETH_ALEN]; - unsigned char ar_tip[4]; - } *arp_eth, _arp_eth; - const struct arphdr *arp; - struct arphdr _arp; - - arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, - hlen, &_arp); - if (!arp) - goto out_bad; - - if (arp->ar_hrd != 
htons(ARPHRD_ETHER) || - arp->ar_pro != htons(ETH_P_IP) || - arp->ar_hln != ETH_ALEN || - arp->ar_pln != 4 || - (arp->ar_op != htons(ARPOP_REPLY) && - arp->ar_op != htons(ARPOP_REQUEST))) - goto out_bad; - - arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp), - sizeof(_arp_eth), data, - hlen, - &_arp_eth); - if (!arp_eth) + case htons(ETH_P_RARP): + switch (__skb_flow_dissect_arp(skb, flow_dissector, + target_container, data, + nhoff, hlen)) { + case FLOW_DISSECT_RET_OUT_GOOD: + goto out_good; + case FLOW_DISSECT_RET_OUT_BAD: + default: goto out_bad; - - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_ARP)) { - - key_arp = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_ARP, - target_container); - - memcpy(&key_arp->sip, arp_eth->ar_sip, - sizeof(key_arp->sip)); - memcpy(&key_arp->tip, arp_eth->ar_tip, - sizeof(key_arp->tip)); - - /* Only store the lower byte of the opcode; - * this covers ARPOP_REPLY and ARPOP_REQUEST. - */ - key_arp->op = ntohs(arp->ar_op) & 0xff; - - ether_addr_copy(key_arp->sha, arp_eth->ar_sha); - ether_addr_copy(key_arp->tha, arp_eth->ar_tha); } - - goto out_good; - } - default: goto out_bad; } ip_proto_again: switch (ip_proto) { - case IPPROTO_GRE: { - struct gre_base_hdr *hdr, _hdr; - u16 gre_ver; - int offset = 0; - - hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); - if (!hdr) + case IPPROTO_GRE: + switch (__skb_flow_dissect_gre(skb, key_control, flow_dissector, + target_container, data, + &proto, &nhoff, &hlen, flags)) { + case FLOW_DISSECT_RET_OUT_GOOD: + goto out_good; + case FLOW_DISSECT_RET_OUT_BAD: goto out_bad; - - /* Only look inside GRE without routing */ - if (hdr->flags & GRE_ROUTING) - break; - - /* Only look inside GRE for version 0 and 1 */ - gre_ver = ntohs(hdr->flags & GRE_VERSION); - if (gre_ver > 1) - break; - - proto = hdr->protocol; - if (gre_ver) { - /* Version1 must be PPTP, and check the flags */ - if (!(proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY))) - break; - } - - offset += sizeof(struct gre_base_hdr); - - if (hdr->flags & GRE_CSUM) - offset += sizeof(((struct gre_full_hdr *)0)->csum) + - sizeof(((struct gre_full_hdr *)0)->reserved1); - - if (hdr->flags & GRE_KEY) { - const __be32 *keyid; - __be32 _keyid; - - keyid = __skb_header_pointer(skb, nhoff + offset, sizeof(_keyid), - data, hlen, &_keyid); - if (!keyid) - goto out_bad; - - if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_GRE_KEYID)) { - key_keyid = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_GRE_KEYID, - target_container); - if (gre_ver == 0) - key_keyid->keyid = *keyid; - else - key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK; - } - offset += sizeof(((struct gre_full_hdr *)0)->key); - } - - if (hdr->flags & GRE_SEQ) - offset += sizeof(((struct pptp_gre_header *)0)->seq); - - if (gre_ver == 0) { - if (proto == htons(ETH_P_TEB)) { - const struct ethhdr *eth; - struct ethhdr _eth; - - eth = __skb_header_pointer(skb, nhoff + offset, - sizeof(_eth), - data, hlen, &_eth); - if (!eth) - goto out_bad; - proto = eth->h_proto; - offset += sizeof(*eth); - - /* Cap headers that we access via pointers at the - * end of the Ethernet header as our maximum alignment - * at that point is only 2 bytes. 
- */ - if (NET_IP_ALIGN) - hlen = (nhoff + offset); - } - } else { /* version 1, must be PPTP */ - u8 _ppp_hdr[PPP_HDRLEN]; - u8 *ppp_hdr; - - if (hdr->flags & GRE_ACK) - offset += sizeof(((struct pptp_gre_header *)0)->ack); - - ppp_hdr = __skb_header_pointer(skb, nhoff + offset, - sizeof(_ppp_hdr), - data, hlen, _ppp_hdr); - if (!ppp_hdr) - goto out_bad; - - switch (PPP_PROTOCOL(ppp_hdr)) { - case PPP_IP: - proto = htons(ETH_P_IP); - break; - case PPP_IPV6: - proto = htons(ETH_P_IPV6); - break; - default: - /* Could probably catch some more like MPLS */ - break; - } - - offset += PPP_HDRLEN; + case FLOW_DISSECT_RET_OUT_PROTO_AGAIN: + goto proto_again; } - - nhoff += offset; - key_control->flags |= FLOW_DIS_ENCAPSULATION; - if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) - goto out_good; - - goto again; - } case NEXTHDR_HOP: case NEXTHDR_ROUTING: case NEXTHDR_DEST: { diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c index c98bbfbd26b8645bb2a273bdee61f285606e215d..814e58a3ce8b6a16a3fa60917b359f0fd3bf969d 100644 --- a/net/core/gro_cells.c +++ b/net/core/gro_cells.c @@ -13,7 +13,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) struct net_device *dev = skb->dev; struct gro_cell *cell; - if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) + if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) return netif_rx(skb); cell = this_cpu_ptr(gcells->cells); diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 0cfe7b0216c3522a8d05d404423de939ad2cc2d5..b3bc0a31af9f3bd2d21fd12b9b2885d9b1573a4d 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -209,7 +209,8 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog, int ret; u32 fd; - ret = nla_parse_nested(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy); + ret = nla_parse_nested(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy, + NULL); if (ret < 0) return ret; @@ -249,7 +250,7 @@ static int bpf_build_state(struct nlattr *nla, if (family != AF_INET && family != AF_INET6) return -EAFNOSUPPORT; - ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy); + ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, NULL); if (ret < 0) return ret; diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 6df9f8fabf0ca5d2ced3070406900b7ec28a7924..cfae3d5fe11f10035394aea48fb5332244058a03 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -162,7 +162,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining) struct rtnexthop *rtnh = (struct rtnexthop *)attr; struct nlattr *nla_entype; struct nlattr *attrs; - struct nlattr *nla; u16 encap_type; int attrlen; @@ -170,7 +169,6 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining) attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { attrs = rtnh_attrs(rtnh); - nla = nla_find(attrs, attrlen, RTA_ENCAP); nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (nla_entype) { @@ -205,7 +203,7 @@ int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate) { const struct lwtunnel_encap_ops *ops; struct nlattr *nest; - int ret = -EINVAL; + int ret; if (!lwtstate) return 0; @@ -214,8 +212,11 @@ int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate) lwtstate->type > LWTUNNEL_ENCAP_MAX) return 0; - ret = -EOPNOTSUPP; nest = nla_nest_start(skb, RTA_ENCAP); + if (!nest) + return -EMSGSIZE; + + ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); if (likely(ops && ops->fill_encap)) diff --git a/net/core/neighbour.c 
b/net/core/neighbour.c index 4526cbd7e28a1fcdecfc06a41985fd4d19634457..58b0bcc125b5559f299dc2f195deccb5c43d0844 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -52,8 +52,9 @@ do { \ #define PNEIGH_HASHMASK 0xF static void neigh_timer_handler(unsigned long arg); -static void __neigh_notify(struct neighbour *n, int type, int flags); -static void neigh_update_notify(struct neighbour *neigh); +static void __neigh_notify(struct neighbour *n, int type, int flags, + u32 pid); +static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); #ifdef CONFIG_PROC_FS @@ -99,7 +100,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh) if (neigh->parms->neigh_cleanup) neigh->parms->neigh_cleanup(neigh); - __neigh_notify(neigh, RTM_DELNEIGH, 0); + __neigh_notify(neigh, RTM_DELNEIGH, 0, 0); call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); neigh_release(neigh); } @@ -949,7 +950,7 @@ static void neigh_timer_handler(unsigned long arg) } if (notify) - neigh_update_notify(neigh); + neigh_update_notify(neigh, 0); neigh_release(neigh); } @@ -1073,7 +1074,7 @@ static void neigh_update_hhs(struct neighbour *neigh) */ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, - u32 flags) + u32 flags, u32 nlmsg_pid) { u8 old; int err; @@ -1230,7 +1231,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, write_unlock_bh(&neigh->lock); if (notify) - neigh_update_notify(neigh); + neigh_update_notify(neigh, nlmsg_pid); return err; } @@ -1261,7 +1262,7 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl, lladdr || !dev->addr_len); if (neigh) neigh_update(neigh, lladdr, NUD_STALE, - NEIGH_UPDATE_F_OVERRIDE); + NEIGH_UPDATE_F_OVERRIDE, 0); return neigh; } EXPORT_SYMBOL(neigh_event_ns); @@ -1589,7 +1590,8 @@ static struct neigh_table *neigh_find_table(int family) return tbl; } -static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) +static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@ -1639,14 +1641,16 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) err = neigh_update(neigh, NULL, NUD_FAILED, NEIGH_UPDATE_F_OVERRIDE | - NEIGH_UPDATE_F_ADMIN); + NEIGH_UPDATE_F_ADMIN, + NETLINK_CB(skb).portid); neigh_release(neigh); out: return err; } -static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) +static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; struct net *net = sock_net(skb->sk); @@ -1659,7 +1663,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) int err; ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); if (err < 0) goto out; @@ -1730,7 +1734,8 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) neigh_event_send(neigh, NULL); err = 0; } else - err = neigh_update(neigh, lladdr, ndm->ndm_state, flags); + err = neigh_update(neigh, lladdr, ndm->ndm_state, flags, + NETLINK_CB(skb).portid); neigh_release(neigh); out: @@ -1933,7 +1938,8 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { [NDTPA_LOCKTIME] = { .type = NLA_U64 }, }; -static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) +static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, + struct 
netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct neigh_table *tbl; @@ -1943,7 +1949,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) int err, tidx; err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, - nl_neightbl_policy); + nl_neightbl_policy, extack); if (err < 0) goto errout; @@ -1981,7 +1987,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) int i, ifindex = 0; err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS], - nl_ntbl_parm_policy); + nl_ntbl_parm_policy, extack); if (err < 0) goto errout_tbl_lock; @@ -2230,10 +2236,10 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, return -EMSGSIZE; } -static void neigh_update_notify(struct neighbour *neigh) +static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid) { call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); - __neigh_notify(neigh, RTM_NEWNEIGH, 0); + __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid); } static bool neigh_master_filtered(struct net_device *dev, int master_idx) @@ -2272,7 +2278,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, unsigned int flags = NLM_F_MULTI; int err; - err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL); + err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); if (!err) { if (tb[NDA_IFINDEX]) filter_idx = nla_get_u32(tb[NDA_IFINDEX]); @@ -2831,7 +2837,8 @@ static inline size_t neigh_nlmsg_size(void) + nla_total_size(4); /* NDA_PROBES */ } -static void __neigh_notify(struct neighbour *n, int type, int flags) +static void __neigh_notify(struct neighbour *n, int type, int flags, + u32 pid) { struct net *net = dev_net(n->dev); struct sk_buff *skb; @@ -2841,7 +2848,7 @@ static void __neigh_notify(struct neighbour *n, int type, int flags) if (skb == NULL) goto errout; - err = neigh_fill_info(skb, n, 0, 0, type, flags); + err = neigh_fill_info(skb, n, pid, 0, type, flags); if (err < 0) { /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -2857,7 +2864,7 @@ static void __neigh_notify(struct neighbour *n, int type, int flags) void neigh_app_ns(struct neighbour *n) { - __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST); + __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); } EXPORT_SYMBOL(neigh_app_ns); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 652468ff65b79d5c9d45ece0d3289de480bbc603..1934efd4a9d4986f3497abc40968fd08c91896b3 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -35,7 +35,8 @@ LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); struct net init_net = { - .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head), + .count = ATOMIC_INIT(1), + .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head), }; EXPORT_SYMBOL(init_net); @@ -571,7 +572,8 @@ static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = { [NETNSA_FD] = { .type = NLA_U32 }, }; -static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; @@ -579,7 +581,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh) int nsid, err; err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, - rtnl_net_policy); + rtnl_net_policy, extack); if (err < 0) return err; if (!tb[NETNSA_NSID]) @@ -644,7 +646,8 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int 
flags, return -EMSGSIZE; } -static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; @@ -653,7 +656,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) int err, id; err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, - rtnl_net_policy); + rtnl_net_policy, extack); if (err < 0) return err; if (tb[NETNSA_PID]) diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 0f9275ee55958156a6cbac3f0d2b1ff54c3c89a5..1c4810919a0a35900d45a659de0cd780b7e500d3 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index c4e84c55824085b343679cc295a81f7b35e3acaf..6e67315ec36891942a2fedeea794c188236e8052 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -896,15 +896,13 @@ static size_t rtnl_port_size(const struct net_device *dev, return port_self_size; } -static size_t rtnl_xdp_size(const struct net_device *dev) +static size_t rtnl_xdp_size(void) { size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ - nla_total_size(1); /* XDP_ATTACHED */ + nla_total_size(1) + /* XDP_ATTACHED */ + nla_total_size(4); /* XDP_FLAGS */ - if (!dev->netdev_ops->ndo_xdp) - return 0; - else - return xdp_size; + return xdp_size; } static noinline size_t if_nlmsg_size(const struct net_device *dev, @@ -943,7 +941,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ - + rtnl_xdp_size(dev) /* IFLA_XDP */ + + rtnl_xdp_size() /* IFLA_XDP */ + nla_total_size(1); /* IFLA_PROTO_DOWN */ } @@ -1251,23 +1249,35 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) { - struct netdev_xdp xdp_op = {}; struct nlattr *xdp; + u32 xdp_flags = 0; + u8 val = 0; int err; - if (!dev->netdev_ops->ndo_xdp) - return 0; xdp = nla_nest_start(skb, IFLA_XDP); if (!xdp) return -EMSGSIZE; - xdp_op.command = XDP_QUERY_PROG; - err = dev->netdev_ops->ndo_xdp(dev, &xdp_op); - if (err) - goto err_cancel; - err = nla_put_u8(skb, IFLA_XDP_ATTACHED, xdp_op.prog_attached); + if (rcu_access_pointer(dev->xdp_prog)) { + xdp_flags = XDP_FLAGS_SKB_MODE; + val = 1; + } else if (dev->netdev_ops->ndo_xdp) { + struct netdev_xdp xdp_op = {}; + + xdp_op.command = XDP_QUERY_PROG; + err = dev->netdev_ops->ndo_xdp(dev, &xdp_op); + if (err) + goto err_cancel; + val = xdp_op.prog_attached; + } + err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val); if (err) goto err_cancel; + if (xdp_flags) { + err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags); + if (err) + goto err_cancel; + } nla_nest_end(skb, xdp); return 0; @@ -1515,7 +1525,8 @@ static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla const struct rtnl_link_ops *ops = NULL; struct nlattr *linfo[IFLA_INFO_MAX + 1]; - if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, ifla_info_policy) < 0) + if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla, + ifla_info_policy, NULL) < 0) return NULL; if (linfo[IFLA_INFO_KIND]) { @@ -1592,8 +1603,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 
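With the rtnl_xdp_fill() rewrite above, IFLA_XDP reporting no longer depends on the driver implementing ndo_xdp: a generic (skb-mode) program installed in dev->xdp_prog is reported first, tagged with XDP_FLAGS_SKB_MODE, and only otherwise is the driver queried. Condensed, the precedence rule looks like this (illustrative helper, not a kernel symbol):

/* Sketch of the reporting precedence only; error handling elided. */
static u8 xdp_attached(struct net_device *dev, u32 *flags)
{
	*flags = 0;

	if (rcu_access_pointer(dev->xdp_prog)) {
		*flags = XDP_FLAGS_SKB_MODE;	/* generic XDP wins */
		return 1;
	}
	if (dev->netdev_ops->ndo_xdp) {		/* fall back to the driver */
		struct netdev_xdp xdp_op = { .command = XDP_QUERY_PROG };

		if (!dev->netdev_ops->ndo_xdp(dev, &xdp_op))
			return xdp_op.prog_attached;
	}
	return 0;
}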
hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ? sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); - if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { - + if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, + ifla_policy, NULL) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); @@ -1640,9 +1651,10 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len) +int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, + struct netlink_ext_ack *exterr) { - return nla_parse(tb, IFLA_MAX, head, len, ifla_policy); + return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr); } EXPORT_SYMBOL(rtnl_nla_parse_ifla); @@ -1907,6 +1919,7 @@ static int do_set_master(struct net_device *dev, int ifindex) #define DO_SETLINK_NOTIFY 0x03 static int do_setlink(const struct sk_buff *skb, struct net_device *dev, struct ifinfomsg *ifm, + struct netlink_ext_ack *extack, struct nlattr **tb, char *ifname, int status) { const struct net_device_ops *ops = dev->netdev_ops; @@ -2078,7 +2091,7 @@ static int do_setlink(const struct sk_buff *skb, goto errout; } err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr, - ifla_vf_policy); + ifla_vf_policy, NULL); if (err < 0) goto errout; err = do_setvfinfo(dev, vfinfo); @@ -2106,7 +2119,7 @@ static int do_setlink(const struct sk_buff *skb, goto errout; } err = nla_parse_nested(port, IFLA_PORT_MAX, attr, - ifla_port_policy); + ifla_port_policy, NULL); if (err < 0) goto errout; if (!port[IFLA_PORT_VF]) { @@ -2126,7 +2139,8 @@ static int do_setlink(const struct sk_buff *skb, struct nlattr *port[IFLA_PORT_MAX+1]; err = nla_parse_nested(port, IFLA_PORT_MAX, - tb[IFLA_PORT_SELF], ifla_port_policy); + tb[IFLA_PORT_SELF], ifla_port_policy, + NULL); if (err < 0) goto errout; @@ -2170,7 +2184,7 @@ static int do_setlink(const struct sk_buff *skb, u32 xdp_flags = 0; err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP], - ifla_xdp_policy); + ifla_xdp_policy, NULL); if (err < 0) goto errout; @@ -2188,7 +2202,7 @@ static int do_setlink(const struct sk_buff *skb, } if (xdp[IFLA_XDP_FD]) { - err = dev_change_xdp_fd(dev, + err = dev_change_xdp_fd(dev, extack, nla_get_s32(xdp[IFLA_XDP_FD]), xdp_flags); if (err) @@ -2210,7 +2224,8 @@ static int do_setlink(const struct sk_buff *skb, return err; } -static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@ -2219,7 +2234,8 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) struct nlattr *tb[IFLA_MAX+1]; char ifname[IFNAMSIZ]; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, + extack); if (err < 0) goto errout; @@ -2246,7 +2262,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) if (err < 0) goto errout; - err = do_setlink(skb, dev, ifm, tb, ifname, 0); + err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0); errout: return err; } @@ -2303,7 +2319,8 @@ int rtnl_delete_link(struct net_device *dev) } EXPORT_SYMBOL_GPL(rtnl_delete_link); -static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_device *dev; 
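Every rtnetlink doit handler now takes a struct netlink_ext_ack *, and nlmsg_parse()/nla_parse_nested() forward it, so a rejected attribute is reported back to the sender with the offending attribute and byte offset rather than a bare -EINVAL. Handlers can also attach a human-readable message; a minimal sketch as it would sit in rtnetlink.c (the handler and its check are illustrative):

static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MAX + 1];
	int err;

	/* On failure, extack already points at the bad attribute. */
	err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
			  ifla_policy, extack);
	if (err < 0)
		return err;

	if (!tb[IFLA_IFNAME]) {
		NL_SET_ERR_MSG(extack, "interface name is required");
		return -EINVAL;
	}
	return 0;
}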
@@ -2312,7 +2329,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) struct nlattr *tb[IFLA_MAX+1]; int err; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); if (err < 0) return err; @@ -2407,6 +2424,7 @@ EXPORT_SYMBOL(rtnl_create_link); static int rtnl_group_changelink(const struct sk_buff *skb, struct net *net, int group, struct ifinfomsg *ifm, + struct netlink_ext_ack *extack, struct nlattr **tb) { struct net_device *dev, *aux; @@ -2414,7 +2432,7 @@ static int rtnl_group_changelink(const struct sk_buff *skb, for_each_netdev_safe(net, dev, aux) { if (dev->group == group) { - err = do_setlink(skb, dev, ifm, tb, NULL, 0); + err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0); if (err < 0) return err; } @@ -2423,7 +2441,8 @@ static int rtnl_group_changelink(const struct sk_buff *skb, return 0; } -static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; @@ -2441,7 +2460,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) #ifdef CONFIG_MODULES replay: #endif - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); if (err < 0) return err; @@ -2472,7 +2491,8 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) if (tb[IFLA_LINKINFO]) { err = nla_parse_nested(linkinfo, IFLA_INFO_MAX, - tb[IFLA_LINKINFO], ifla_info_policy); + tb[IFLA_LINKINFO], ifla_info_policy, + NULL); if (err < 0) return err; } else @@ -2497,7 +2517,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { err = nla_parse_nested(attr, ops->maxtype, linkinfo[IFLA_INFO_DATA], - ops->policy); + ops->policy, NULL); if (err < 0) return err; data = attr; @@ -2515,7 +2535,8 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) err = nla_parse_nested(slave_attr, m_ops->slave_maxtype, linkinfo[IFLA_INFO_SLAVE_DATA], - m_ops->slave_policy); + m_ops->slave_policy, + NULL); if (err < 0) return err; slave_data = slave_attr; @@ -2557,14 +2578,15 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) status |= DO_SETLINK_NOTIFY; } - return do_setlink(skb, dev, ifm, tb, ifname, status); + return do_setlink(skb, dev, ifm, extack, tb, ifname, + status); } if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) return rtnl_group_changelink(skb, net, nla_get_u32(tb[IFLA_GROUP]), - ifm, tb); + ifm, extack, tb); return -ENODEV; } @@ -2673,7 +2695,8 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) } } -static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh) +static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@ -2684,7 +2707,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh) int err; u32 ext_filter_mask = 0; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); if (err < 0) return err; @@ -2734,7 +2757,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? 
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); - if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { + if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); } @@ -2955,7 +2978,8 @@ static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid) return 0; } -static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@ -2965,7 +2989,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) u16 vid; int err; - err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); if (err < 0) return err; @@ -3055,7 +3079,8 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm, } EXPORT_SYMBOL(ndo_dflt_fdb_del); -static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; @@ -3068,7 +3093,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; - err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack); if (err < 0) return err; @@ -3203,8 +3228,8 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) int err = 0; int fidx = 0; - if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, - ifla_policy) == 0) { + if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, + IFLA_MAX, ifla_policy, NULL) == 0) { if (tb[IFLA_MASTER]) br_idx = nla_get_u32(tb[IFLA_MASTER]); } @@ -3498,7 +3523,8 @@ static int rtnl_bridge_notify(struct net_device *dev) return err; } -static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@ -3572,7 +3598,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } -static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; @@ -3940,7 +3967,8 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, return size; } -static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_device *dev = NULL; @@ -4046,7 +4074,8 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) /* Process one rtnetlink message. 
*/ -static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); rtnl_doit_func doit; @@ -4101,7 +4130,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (doit == NULL) return -EOPNOTSUPP; - return doit(skb, nlh); + return doit(skb, nlh, extack); } static void rtnetlink_rcv(struct sk_buff *skb) @@ -4116,22 +4145,16 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { - case NETDEV_UP: - case NETDEV_DOWN: - case NETDEV_PRE_UP: - case NETDEV_POST_INIT: - case NETDEV_REGISTER: - case NETDEV_CHANGE: - case NETDEV_PRE_TYPE_CHANGE: - case NETDEV_GOING_DOWN: - case NETDEV_UNREGISTER: - case NETDEV_UNREGISTER_FINAL: - case NETDEV_RELEASE: - case NETDEV_JOIN: - case NETDEV_BONDING_INFO: + case NETDEV_REBOOT: + case NETDEV_CHANGENAME: + case NETDEV_FEAT_CHANGE: + case NETDEV_BONDING_FAILOVER: + case NETDEV_NOTIFY_PEERS: + case NETDEV_RESEND_IGMP: + case NETDEV_CHANGEINFODATA: + rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); break; default: - rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); break; } return NOTIFY_DONE; @@ -4185,6 +4208,7 @@ void __init rtnetlink_init(void) rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); + rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, NULL); rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index d28da7d363f170f35d88623e2b864f04a67c3de5..6bd2f8fb0476baabf507557fc0d06b6787511c70 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -64,8 +64,8 @@ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) &ts_secret); } -u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, - __be16 sport, __be16 dport, u32 *tsoff) +u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr, + __be16 sport, __be16 dport, u32 *tsoff) { const struct { struct in6_addr saddr; @@ -85,7 +85,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, *tsoff = secure_tcpv6_ts_off(saddr, daddr); return seq_scale(hash); } -EXPORT_SYMBOL(secure_tcpv6_sequence_number); +EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff); u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) @@ -116,14 +116,13 @@ static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) &ts_secret); } -/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), +/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, * it would be easy enough to have the former function use siphash_4u32, passing * the arguments as separate u32. 
*/ - -u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport, u32 *tsoff) +u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport, u32 *tsoff) { u64 hash; net_secret_init(); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b782b4593f8cbc005ebb8d8befc148f96cc325f5..346d3e85dfbc2eca1ded0442ecb78d31e1768523 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3101,7 +3101,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, skb_walk_frags(head_skb, iter) { if (frag_len != iter->len && iter->next) goto normal; - if (skb_headlen(iter)) + if (skb_headlen(iter) && !iter->head_frag) goto normal; len -= iter->len; diff --git a/net/core/sock.c b/net/core/sock.c index b416a537bd0ac24998381fa76d7df65e108917dc..b5baeb9cb0fb1bff2fb7c49c0a850bc493f7e58d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -247,12 +247,66 @@ static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = { static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { _sock_locks("k-clock-") }; +static const char *const af_family_rlock_key_strings[AF_MAX+1] = { + "rlock-AF_UNSPEC", "rlock-AF_UNIX" , "rlock-AF_INET" , + "rlock-AF_AX25" , "rlock-AF_IPX" , "rlock-AF_APPLETALK", + "rlock-AF_NETROM", "rlock-AF_BRIDGE" , "rlock-AF_ATMPVC" , + "rlock-AF_X25" , "rlock-AF_INET6" , "rlock-AF_ROSE" , + "rlock-AF_DECnet", "rlock-AF_NETBEUI" , "rlock-AF_SECURITY" , + "rlock-AF_KEY" , "rlock-AF_NETLINK" , "rlock-AF_PACKET" , + "rlock-AF_ASH" , "rlock-AF_ECONET" , "rlock-AF_ATMSVC" , + "rlock-AF_RDS" , "rlock-AF_SNA" , "rlock-AF_IRDA" , + "rlock-AF_PPPOX" , "rlock-AF_WANPIPE" , "rlock-AF_LLC" , + "rlock-27" , "rlock-28" , "rlock-AF_CAN" , + "rlock-AF_TIPC" , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV" , + "rlock-AF_RXRPC" , "rlock-AF_ISDN" , "rlock-AF_PHONET" , + "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG" , + "rlock-AF_NFC" , "rlock-AF_VSOCK" , "rlock-AF_KCM" , + "rlock-AF_QIPCRTR", "rlock-AF_SMC" , "rlock-AF_MAX" +}; +static const char *const af_family_wlock_key_strings[AF_MAX+1] = { + "wlock-AF_UNSPEC", "wlock-AF_UNIX" , "wlock-AF_INET" , + "wlock-AF_AX25" , "wlock-AF_IPX" , "wlock-AF_APPLETALK", + "wlock-AF_NETROM", "wlock-AF_BRIDGE" , "wlock-AF_ATMPVC" , + "wlock-AF_X25" , "wlock-AF_INET6" , "wlock-AF_ROSE" , + "wlock-AF_DECnet", "wlock-AF_NETBEUI" , "wlock-AF_SECURITY" , + "wlock-AF_KEY" , "wlock-AF_NETLINK" , "wlock-AF_PACKET" , + "wlock-AF_ASH" , "wlock-AF_ECONET" , "wlock-AF_ATMSVC" , + "wlock-AF_RDS" , "wlock-AF_SNA" , "wlock-AF_IRDA" , + "wlock-AF_PPPOX" , "wlock-AF_WANPIPE" , "wlock-AF_LLC" , + "wlock-27" , "wlock-28" , "wlock-AF_CAN" , + "wlock-AF_TIPC" , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV" , + "wlock-AF_RXRPC" , "wlock-AF_ISDN" , "wlock-AF_PHONET" , + "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG" , + "wlock-AF_NFC" , "wlock-AF_VSOCK" , "wlock-AF_KCM" , + "wlock-AF_QIPCRTR", "wlock-AF_SMC" , "wlock-AF_MAX" +}; +static const char *const af_family_elock_key_strings[AF_MAX+1] = { + "elock-AF_UNSPEC", "elock-AF_UNIX" , "elock-AF_INET" , + "elock-AF_AX25" , "elock-AF_IPX" , "elock-AF_APPLETALK", + "elock-AF_NETROM", "elock-AF_BRIDGE" , "elock-AF_ATMPVC" , + "elock-AF_X25" , "elock-AF_INET6" , "elock-AF_ROSE" , + "elock-AF_DECnet", "elock-AF_NETBEUI" , "elock-AF_SECURITY" , + "elock-AF_KEY" , "elock-AF_NETLINK" , "elock-AF_PACKET" , + "elock-AF_ASH" , "elock-AF_ECONET" , "elock-AF_ATMSVC" , + "elock-AF_RDS" , "elock-AF_SNA" , "elock-AF_IRDA" , + "elock-AF_PPPOX" , "elock-AF_WANPIPE" , "elock-AF_LLC" , + "elock-27" 
, "elock-28" , "elock-AF_CAN" , + "elock-AF_TIPC" , "elock-AF_BLUETOOTH", "elock-AF_IUCV" , + "elock-AF_RXRPC" , "elock-AF_ISDN" , "elock-AF_PHONET" , + "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG" , + "elock-AF_NFC" , "elock-AF_VSOCK" , "elock-AF_KCM" , + "elock-AF_QIPCRTR", "elock-AF_SMC" , "elock-AF_MAX" +}; /* - * sk_callback_lock locking rules are per-address-family, + * sk_callback_lock and sk queues locking rules are per-address-family, * so split the lock classes by using a per-AF key: */ static struct lock_class_key af_callback_keys[AF_MAX]; +static struct lock_class_key af_rlock_keys[AF_MAX]; +static struct lock_class_key af_wlock_keys[AF_MAX]; +static struct lock_class_key af_elock_keys[AF_MAX]; static struct lock_class_key af_kern_callback_keys[AF_MAX]; /* Take into consideration the size of the struct sk_buff overhead in the @@ -1029,6 +1083,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, union { int val; + u64 val64; struct linger ling; struct timeval tm; } v; @@ -1259,6 +1314,40 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_incoming_cpu; break; + case SO_MEMINFO: + { + u32 meminfo[SK_MEMINFO_VARS]; + + if (get_user(len, optlen)) + return -EFAULT; + + sk_get_meminfo(sk, meminfo); + + len = min_t(unsigned int, len, sizeof(meminfo)); + if (copy_to_user(optval, &meminfo, len)) + return -EFAULT; + + goto lenout; + } + +#ifdef CONFIG_NET_RX_BUSY_POLL + case SO_INCOMING_NAPI_ID: + v.val = READ_ONCE(sk->sk_napi_id); + + /* aggregate non-NAPI IDs down to 0 */ + if (v.val < MIN_NAPI_ID) + v.val = 0; + + break; +#endif + + case SO_COOKIE: + lv = sizeof(u64); + if (len < lv) + return -EINVAL; + v.val64 = sock_gen_cookie(sk); + break; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). 
@@ -1483,6 +1572,27 @@ void sk_free(struct sock *sk) } EXPORT_SYMBOL(sk_free); +static void sk_init_common(struct sock *sk) +{ + skb_queue_head_init(&sk->sk_receive_queue); + skb_queue_head_init(&sk->sk_write_queue); + skb_queue_head_init(&sk->sk_error_queue); + + rwlock_init(&sk->sk_callback_lock); + lockdep_set_class_and_name(&sk->sk_receive_queue.lock, + af_rlock_keys + sk->sk_family, + af_family_rlock_key_strings[sk->sk_family]); + lockdep_set_class_and_name(&sk->sk_write_queue.lock, + af_wlock_keys + sk->sk_family, + af_family_wlock_key_strings[sk->sk_family]); + lockdep_set_class_and_name(&sk->sk_error_queue.lock, + af_elock_keys + sk->sk_family, + af_family_elock_key_strings[sk->sk_family]); + lockdep_set_class_and_name(&sk->sk_callback_lock, + af_callback_keys + sk->sk_family, + af_family_clock_key_strings[sk->sk_family]); +} + /** * sk_clone_lock - clone a socket, and lock its clone * @sk: the socket to clone @@ -1516,13 +1626,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ atomic_set(&newsk->sk_wmem_alloc, 1); atomic_set(&newsk->sk_omem_alloc, 0); - skb_queue_head_init(&newsk->sk_receive_queue); - skb_queue_head_init(&newsk->sk_write_queue); - - rwlock_init(&newsk->sk_callback_lock); - lockdep_set_class_and_name(&newsk->sk_callback_lock, - af_callback_keys + newsk->sk_family, - af_family_clock_key_strings[newsk->sk_family]); + sk_init_common(newsk); newsk->sk_dst_cache = NULL; newsk->sk_dst_pending_confirm = 0; @@ -1533,7 +1637,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; sock_reset_flag(newsk, SOCK_DONE); - skb_queue_head_init(&newsk->sk_error_queue); filter = rcu_dereference_protected(newsk->sk_filter, 1); if (filter != NULL) @@ -2466,10 +2569,7 @@ EXPORT_SYMBOL(sk_stop_timer); void sock_init_data(struct socket *sock, struct sock *sk) { - skb_queue_head_init(&sk->sk_receive_queue); - skb_queue_head_init(&sk->sk_write_queue); - skb_queue_head_init(&sk->sk_error_queue); - + sk_init_common(sk); sk->sk_send_head = NULL; init_timer(&sk->sk_timer); @@ -2521,7 +2621,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; - sk->sk_stamp = ktime_set(-1L, 0); + sk->sk_stamp = SK_DEFAULT_STAMP; #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = 0; @@ -2802,6 +2902,21 @@ void sk_common_release(struct sock *sk) } EXPORT_SYMBOL(sk_common_release); +void sk_get_meminfo(const struct sock *sk, u32 *mem) +{ + memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS); + + mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); + mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; + mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); + mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; + mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; + mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; + mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); + mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; + mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); +} + #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { @@ -3142,3 +3257,14 @@ static int __init proto_init(void) subsys_initcall(proto_init); #endif /* PROC_FS */ + +#ifdef CONFIG_NET_RX_BUSY_POLL +bool sk_busy_loop_end(void *p, unsigned long start_time) +{ + struct sock *sk = p; + + return !skb_queue_empty(&sk->sk_receive_queue) || + sk_busy_loop_timeout(sk, start_time); +} +EXPORT_SYMBOL(sk_busy_loop_end); +#endif /* 
CONFIG_NET_RX_BUSY_POLL */ diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 6b10573cc9faa790fe261b452b85f3b774c3ec21..217f4e3b82f6edca841b66089ab0772bd8d391e7 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -19,7 +19,7 @@ static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); static struct workqueue_struct *broadcast_wq; -static u64 sock_gen_cookie(struct sock *sk) +u64 sock_gen_cookie(struct sock *sk) { while (1) { u64 res = atomic64_read(&sk->sk_cookie); @@ -59,15 +59,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) { u32 mem[SK_MEMINFO_VARS]; - mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); - mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; - mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); - mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; - mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; - mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; - mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); - mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; - mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); + sk_get_meminfo(sk, mem); return nla_put(skb, attrtype, sizeof(mem), &mem); } @@ -246,7 +238,8 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } -static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { int ret; diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 9a1a352fd1ebe598e4925bcda037dc0e4a2288bc..eed1ebf7f29d0fac552074b127e5636fecede65f 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -13,9 +13,9 @@ static DEFINE_SPINLOCK(reuseport_lock); -static struct sock_reuseport *__reuseport_alloc(u16 max_socks) +static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) { - size_t size = sizeof(struct sock_reuseport) + + unsigned int size = sizeof(struct sock_reuseport) + sizeof(struct sock *) * max_socks; struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 7f9cc400eca08c01c9014476aa4daf0852505b20..ea23254b2457cf15eeae495130dab437090f8e2e 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -452,6 +452,14 @@ static struct ctl_table net_core_table[] = { .extra1 = &one, .extra2 = &max_skb_frags, }, + { + .procname = "netdev_budget_usecs", + .data = &netdev_budget_usecs, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, { } }; diff --git a/net/core/utils.c b/net/core/utils.c index 32c467cf52d65605ef63d63c6c5007636a7f65e5..93066bd0305ab73dbee756125e2cb77b7f46aa44 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -53,7 +53,7 @@ EXPORT_SYMBOL(net_ratelimit); __be32 in_aton(const char *str) { - unsigned long l; + unsigned int l; unsigned int val; int i; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 3202d75329b504b6442441becc31157e0c2be7a5..93106120f98770699d28e8b82701b132144baa87 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -245,8 +245,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, - tb[DCB_ATTR_PFC_CFG], - dcbnl_pfc_up_nest); + tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; @@ -304,7 +303,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct 
nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], - dcbnl_cap_nest); + dcbnl_cap_nest, NULL); if (ret) return ret; @@ -348,7 +347,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], - dcbnl_numtcs_nest); + dcbnl_numtcs_nest, NULL); if (ret) return ret; @@ -393,7 +392,7 @@ static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], - dcbnl_numtcs_nest); + dcbnl_numtcs_nest, NULL); if (ret) return ret; @@ -452,7 +451,7 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, return -EINVAL; ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], - dcbnl_app_nest); + dcbnl_app_nest, NULL); if (ret) return ret; @@ -520,7 +519,7 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, return -EINVAL; ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], - dcbnl_app_nest); + dcbnl_app_nest, NULL); if (ret) return ret; @@ -577,8 +576,8 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, !netdev->dcbnl_ops->getpgbwgcfgrx) return -EOPNOTSUPP; - ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, - tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); + ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], + dcbnl_pg_nest, NULL); if (ret) return ret; @@ -597,8 +596,8 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, data = pg_tb[DCB_PG_ATTR_TC_ALL]; else data = pg_tb[i]; - ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, - data, dcbnl_tc_param_nest); + ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data, + dcbnl_tc_param_nest, NULL); if (ret) goto err_pg; @@ -735,8 +734,7 @@ static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, - tb[DCB_ATTR_PFC_CFG], - dcbnl_pfc_up_nest); + tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; @@ -791,8 +789,8 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, !netdev->dcbnl_ops->setpgbwgcfgrx) return -EOPNOTSUPP; - ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, - tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); + ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], + dcbnl_pg_nest, NULL); if (ret) return ret; @@ -801,7 +799,7 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, continue; ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, - pg_tb[i], dcbnl_tc_param_nest); + pg_tb[i], dcbnl_tc_param_nest, NULL); if (ret) return ret; @@ -889,8 +887,8 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, !netdev->dcbnl_ops->getbcncfg) return -EOPNOTSUPP; - ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, - tb[DCB_ATTR_BCN], dcbnl_bcn_nest); + ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], + dcbnl_bcn_nest, NULL); if (ret) return ret; @@ -948,9 +946,8 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, !netdev->dcbnl_ops->setbcnrp) return -EOPNOTSUPP; - ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, - tb[DCB_ATTR_BCN], - dcbnl_pfc_up_nest); + ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], + dcbnl_pfc_up_nest, NULL); if (ret) return ret; @@ -1424,8 +1421,8 @@ static int dcbnl_ieee_set(struct net_device *netdev, 
struct nlmsghdr *nlh, if (!tb[DCB_ATTR_IEEE]) return -EINVAL; - err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, - tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); + err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], + dcbnl_ieee_policy, NULL); if (err) return err; @@ -1508,8 +1505,8 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, if (!tb[DCB_ATTR_IEEE]) return -EINVAL; - err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, - tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); + err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], + dcbnl_ieee_policy, NULL); if (err) return err; @@ -1581,8 +1578,8 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; - ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], - dcbnl_featcfg_nest); + ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, + tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL); if (ret) return ret; @@ -1625,8 +1622,8 @@ static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, if (!tb[DCB_ATTR_FEATCFG]) return -EINVAL; - ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], - dcbnl_featcfg_nest); + ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, + tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL); if (ret) goto err; @@ -1699,7 +1696,8 @@ static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = { [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, }; -static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh) +static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_device *netdev; @@ -1715,7 +1713,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh) return -EPERM; ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, - dcbnl_rtnl_policy); + dcbnl_rtnl_policy, extack); if (ret < 0) return ret; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7de5b40a5d0d1245ad995877f779e0d87d1cf398..9afa2a5030b2570c89de8decc3b20aad3a224e5c 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -132,6 +132,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat #include #include #include +#include #include #include #include @@ -1469,18 +1470,18 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us case DSO_NODELAY: if (optlen != sizeof(int)) return -EINVAL; - if (scp->nonagle == 2) + if (scp->nonagle == TCP_NAGLE_CORK) return -EINVAL; - scp->nonagle = (u.val == 0) ? 0 : 1; + scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF; /* if (scp->nonagle == 1) { Push pending frames } */ break; case DSO_CORK: if (optlen != sizeof(int)) return -EINVAL; - if (scp->nonagle == 1) + if (scp->nonagle == TCP_NAGLE_OFF) return -EINVAL; - scp->nonagle = (u.val == 0) ? 0 : 2; + scp->nonagle = (u.val == 0) ? 
0 : TCP_NAGLE_CORK; /* if (scp->nonagle == 0) { Push pending frames } */ break; @@ -1608,14 +1609,14 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us case DSO_NODELAY: if (r_len > sizeof(int)) r_len = sizeof(int); - val = (scp->nonagle == 1); + val = (scp->nonagle == TCP_NAGLE_OFF); r_data = &val; break; case DSO_CORK: if (r_len > sizeof(int)) r_len = sizeof(int); - val = (scp->nonagle == 2); + val = (scp->nonagle == TCP_NAGLE_CORK); r_data = &val; break; diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 8fdd9f492b0ea2201db70cfe83ebe1f3384c7b04..9017a9a73ab5885002b9d63047e6bd5a7b91cc4b 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -565,7 +565,8 @@ static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = { [IFA_FLAGS] = { .type = NLA_U32 }, }; -static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) +static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; @@ -581,7 +582,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) if (!net_eq(net, &init_net)) goto errout; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy, + extack); if (err < 0) goto errout; @@ -609,7 +611,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } -static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) +static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; @@ -625,7 +628,8 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) if (!net_eq(net, &init_net)) return -EINVAL; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy, + extack); if (err < 0) return err; diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 7af0ba6157a108578182eda3ec75ec7bd0c27859..f9058ebeb635c2efff457bf73d7895129b443cde 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -501,7 +501,8 @@ static inline u32 rtm_get_table(struct nlattr *attrs[], u8 table) return table; } -static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct dn_fib_table *tb; @@ -515,7 +516,8 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) if (!net_eq(net, &init_net)) return -EINVAL; - err = nlmsg_parse(nlh, sizeof(*r), attrs, RTA_MAX, rtm_dn_policy); + err = nlmsg_parse(nlh, sizeof(*r), attrs, RTA_MAX, rtm_dn_policy, + extack); if (err < 0) return err; @@ -526,7 +528,8 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb)); } -static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct dn_fib_table *tb; @@ -540,7 +543,8 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) if (!net_eq(net, &init_net)) return -EINVAL; - err = nlmsg_parse(nlh, sizeof(*r), attrs, RTA_MAX, rtm_dn_policy); + err = nlmsg_parse(nlh, sizeof(*r), attrs, RTA_MAX, rtm_dn_policy, + 
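The rtnetlink doit handlers above now receive the caller's extack and thread it into nlmsg_parse(). A handler can also attach its own message; a hedged sketch (the handler name and the RTA_DST check are invented for illustration, while NL_SET_ERR_MSG is the stock helper from the same extended-ack series):

static int foo_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *attrs[RTA_MAX + 1];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct rtmsg), attrs, RTA_MAX,
			  rtm_dn_policy, extack);
	if (err < 0)
		return err;

	if (!attrs[RTA_DST]) {
		/* This string rides back in the NLMSG_ERROR ack. */
		NL_SET_ERR_MSG(extack, "destination address is required");
		return -EINVAL;
	}

	return 0;
}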
extack); if (err < 0) return err; diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index b1dc096d22f8c83e771b1df68d7815661ac51bae..4b9518a0d2489494ed88b6808f95803cf1b543ad 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1640,7 +1640,8 @@ const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = { /* * This is called by both endnodes and routers now. */ -static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) +static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct rtmsg *rtm = nlmsg_data(nlh); @@ -1654,7 +1655,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) if (!net_eq(net, &init_net)) return -EINVAL; - err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_dn_policy); + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_dn_policy, + extack); if (err < 0) return err; diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 85f2fdc360c27b21cb5b9c486e2d05ffb1d557d7..1ed81ac6dd1a28b79dff9f9f4e8c1d0f99306a33 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -96,7 +96,7 @@ static unsigned int dnrmg_hook(void *priv, } -#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) +#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err), NULL); return; } while (0) static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { @@ -134,7 +134,7 @@ static int __init dn_rtmsg_init(void) return -ENOMEM; } - rv = nf_register_hook(&dnrmg_ops); + rv = nf_register_net_hook(&init_net, &dnrmg_ops); if (rv) { netlink_kernel_release(dnrmg); } @@ -144,7 +144,7 @@ static int __init dn_rtmsg_init(void) static void __exit dn_rtmsg_fini(void) { - nf_unregister_hook(&dnrmg_ops); + nf_unregister_net_hook(&init_net, &dnrmg_ops); netlink_kernel_release(dnrmg); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 9649238eef404095a89d34a006dc0b504bc47038..81a0868edb1d619a6ade13b7817db90e7a2d69a2 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -6,7 +6,7 @@ config HAVE_NET_DSA config NET_DSA tristate "Distributed Switch Architecture" - depends on HAVE_NET_DSA + depends on HAVE_NET_DSA && MAY_USE_DEVLINK select NET_SWITCHDEV select PHYLIB ---help--- @@ -31,4 +31,10 @@ config NET_DSA_TAG_TRAILER config NET_DSA_TAG_QCA bool +config NET_DSA_TAG_MTK + bool + +config NET_DSA_TAG_LAN9303 + bool + endif diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 31d343796251da06c979b3428f275f5911dfb2ab..0b747d75e65a265dc5059586e250a2b97ae3f067 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,6 +1,6 @@ # the core obj-$(CONFIG_NET_DSA) += dsa_core.o -dsa_core-y += dsa.o slave.o dsa2.o switch.o +dsa_core-y += dsa.o slave.o dsa2.o switch.o legacy.o # tagging formats dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o @@ -8,3 +8,5 @@ dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o +dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o +dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index b6d4f6a23f06c9d794a5eedc4c9f79810d5b06e5..26130ae438da53f3f99a4e9fa712a572f40ca779 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -14,15 +14,17 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include #include +#include 
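The dn_rtmsg.c hunk above swaps the global nf_register_hook() for its per-namespace variant. Condensed, the resulting init/exit pairing looks like this (dnrmg_ops is the existing hook ops from that file; the function names are placeholders):

static int __init foo_rtmsg_init(void)
{
	/* Hooks now attach to an explicit struct net; DECnet only
	 * runs in init_net, so that is the namespace used here.
	 */
	return nf_register_net_hook(&init_net, &dnrmg_ops);
}

static void __exit foo_rtmsg_fini(void)
{
	nf_unregister_net_hook(&init_net, &dnrmg_ops);
}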
+#include #include "dsa_priv.h" static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb, @@ -52,63 +54,16 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { #endif #ifdef CONFIG_NET_DSA_TAG_QCA [DSA_TAG_PROTO_QCA] = &qca_netdev_ops, +#endif +#ifdef CONFIG_NET_DSA_TAG_MTK + [DSA_TAG_PROTO_MTK] = &mtk_netdev_ops, +#endif +#ifdef CONFIG_NET_DSA_TAG_LAN9303 + [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops, #endif [DSA_TAG_PROTO_NONE] = &none_ops, }; -/* switch driver registration ***********************************************/ -static DEFINE_MUTEX(dsa_switch_drivers_mutex); -static LIST_HEAD(dsa_switch_drivers); - -void register_switch_driver(struct dsa_switch_driver *drv) -{ - mutex_lock(&dsa_switch_drivers_mutex); - list_add_tail(&drv->list, &dsa_switch_drivers); - mutex_unlock(&dsa_switch_drivers_mutex); -} -EXPORT_SYMBOL_GPL(register_switch_driver); - -void unregister_switch_driver(struct dsa_switch_driver *drv) -{ - mutex_lock(&dsa_switch_drivers_mutex); - list_del_init(&drv->list); - mutex_unlock(&dsa_switch_drivers_mutex); -} -EXPORT_SYMBOL_GPL(unregister_switch_driver); - -static const struct dsa_switch_ops * -dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, - const char **_name, void **priv) -{ - const struct dsa_switch_ops *ret; - struct list_head *list; - const char *name; - - ret = NULL; - name = NULL; - - mutex_lock(&dsa_switch_drivers_mutex); - list_for_each(list, &dsa_switch_drivers) { - const struct dsa_switch_ops *ops; - struct dsa_switch_driver *drv; - - drv = list_entry(list, struct dsa_switch_driver, list); - ops = drv->ops; - - name = ops->probe(parent, host_dev, sw_addr, priv); - if (name != NULL) { - ret = ops; - break; - } - } - mutex_unlock(&dsa_switch_drivers_mutex); - - *_name = name; - - return ret; -} - -/* basic switch operations **************************************************/ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, struct dsa_port *dport, int port) { @@ -140,23 +95,6 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, return 0; } -static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev) -{ - struct dsa_port *dport; - int ret, port; - - for (port = 0; port < ds->num_ports; port++) { - if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) - continue; - - dport = &ds->ports[port]; - ret = dsa_cpu_dsa_setup(ds, dev, dport, port); - if (ret) - return ret; - } - return 0; -} - const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol) { const struct dsa_device_ops *ops; @@ -206,168 +144,6 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds) master->ethtool_ops = ds->dst->master_orig_ethtool_ops; } -static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) -{ - const struct dsa_switch_ops *ops = ds->ops; - struct dsa_switch_tree *dst = ds->dst; - struct dsa_chip_data *cd = ds->cd; - bool valid_name_found = false; - int index = ds->index; - int i, ret; - - /* - * Validate supplied switch configuration. 
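The dsa_device_ops table above is how a tagging format becomes reachable at runtime: a switch driver names its protocol, and the core resolves it through this array via dsa_resolve_tag_protocol(). A minimal sketch, with foo_* names invented for illustration:

static enum dsa_tag_protocol foo_get_tag_protocol(struct dsa_switch *ds)
{
	/* Resolves to mtk_netdev_ops, provided the kernel was
	 * built with CONFIG_NET_DSA_TAG_MTK.
	 */
	return DSA_TAG_PROTO_MTK;
}

static const struct dsa_switch_ops foo_switch_ops = {
	.get_tag_protocol	= foo_get_tag_protocol,
};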
- */ - for (i = 0; i < ds->num_ports; i++) { - char *name; - - name = cd->port_names[i]; - if (name == NULL) - continue; - - if (!strcmp(name, "cpu")) { - if (dst->cpu_switch) { - netdev_err(dst->master_netdev, - "multiple cpu ports?!\n"); - return -EINVAL; - } - dst->cpu_switch = ds; - dst->cpu_port = i; - ds->cpu_port_mask |= 1 << i; - } else if (!strcmp(name, "dsa")) { - ds->dsa_port_mask |= 1 << i; - } else { - ds->enabled_port_mask |= 1 << i; - } - valid_name_found = true; - } - - if (!valid_name_found && i == ds->num_ports) - return -EINVAL; - - /* Make the built-in MII bus mask match the number of ports, - * switch drivers can override this later - */ - ds->phys_mii_mask = ds->enabled_port_mask; - - /* - * If the CPU connects to this switch, set the switch tree - * tagging protocol to the preferred tagging format of this - * switch. - */ - if (dst->cpu_switch == ds) { - enum dsa_tag_protocol tag_protocol; - - tag_protocol = ops->get_tag_protocol(ds); - dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); - if (IS_ERR(dst->tag_ops)) - return PTR_ERR(dst->tag_ops); - - dst->rcv = dst->tag_ops->rcv; - } - - memcpy(ds->rtable, cd->rtable, sizeof(ds->rtable)); - - /* - * Do basic register setup. - */ - ret = ops->setup(ds); - if (ret < 0) - return ret; - - ret = dsa_switch_register_notifier(ds); - if (ret) - return ret; - - if (ops->set_addr) { - ret = ops->set_addr(ds, dst->master_netdev->dev_addr); - if (ret < 0) - return ret; - } - - if (!ds->slave_mii_bus && ops->phy_read) { - ds->slave_mii_bus = devm_mdiobus_alloc(parent); - if (!ds->slave_mii_bus) - return -ENOMEM; - dsa_slave_mii_bus_init(ds); - - ret = mdiobus_register(ds->slave_mii_bus); - if (ret < 0) - return ret; - } - - /* - * Create network devices for physical switch ports. - */ - for (i = 0; i < ds->num_ports; i++) { - ds->ports[i].dn = cd->port_dn[i]; - - if (!(ds->enabled_port_mask & (1 << i))) - continue; - - ret = dsa_slave_create(ds, parent, i, cd->port_names[i]); - if (ret < 0) - netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n", - index, i, cd->port_names[i], ret); - } - - /* Perform configuration of the CPU and DSA ports */ - ret = dsa_cpu_dsa_setups(ds, parent); - if (ret < 0) - netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n", - index); - - ret = dsa_cpu_port_ethtool_setup(ds); - if (ret) - return ret; - - return 0; -} - -static struct dsa_switch * -dsa_switch_setup(struct dsa_switch_tree *dst, int index, - struct device *parent, struct device *host_dev) -{ - struct dsa_chip_data *cd = dst->pd->chip + index; - const struct dsa_switch_ops *ops; - struct dsa_switch *ds; - int ret; - const char *name; - void *priv; - - /* - * Probe for switch model. - */ - ops = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv); - if (!ops) { - netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", - index); - return ERR_PTR(-EINVAL); - } - netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n", - index, name); - - - /* - * Allocate and initialise switch state. 
- */ - ds = dsa_switch_alloc(parent, DSA_MAX_PORTS); - if (!ds) - return ERR_PTR(-ENOMEM); - - ds->dst = dst; - ds->index = index; - ds->cd = cd; - ds->ops = ops; - ds->priv = priv; - - ret = dsa_switch_setup_one(ds, parent); - if (ret) - return ERR_PTR(ret); - - return ds; -} - void dsa_cpu_dsa_destroy(struct dsa_port *port) { struct device_node *port_dn = port->dn; @@ -376,86 +152,6 @@ void dsa_cpu_dsa_destroy(struct dsa_port *port) of_phy_deregister_fixed_link(port_dn); } -static void dsa_switch_destroy(struct dsa_switch *ds) -{ - int port; - - /* Destroy network devices for physical switch ports. */ - for (port = 0; port < ds->num_ports; port++) { - if (!(ds->enabled_port_mask & (1 << port))) - continue; - - if (!ds->ports[port].netdev) - continue; - - dsa_slave_destroy(ds->ports[port].netdev); - } - - /* Disable configuration of the CPU and DSA ports */ - for (port = 0; port < ds->num_ports; port++) { - if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) - continue; - dsa_cpu_dsa_destroy(&ds->ports[port]); - - /* Clearing a bit which is not set does no harm */ - ds->cpu_port_mask |= ~(1 << port); - ds->dsa_port_mask |= ~(1 << port); - } - - if (ds->slave_mii_bus && ds->ops->phy_read) - mdiobus_unregister(ds->slave_mii_bus); - - dsa_switch_unregister_notifier(ds); -} - -#ifdef CONFIG_PM_SLEEP -int dsa_switch_suspend(struct dsa_switch *ds) -{ - int i, ret = 0; - - /* Suspend slave network devices */ - for (i = 0; i < ds->num_ports; i++) { - if (!dsa_is_port_initialized(ds, i)) - continue; - - ret = dsa_slave_suspend(ds->ports[i].netdev); - if (ret) - return ret; - } - - if (ds->ops->suspend) - ret = ds->ops->suspend(ds); - - return ret; -} -EXPORT_SYMBOL_GPL(dsa_switch_suspend); - -int dsa_switch_resume(struct dsa_switch *ds) -{ - int i, ret = 0; - - if (ds->ops->resume) - ret = ds->ops->resume(ds); - - if (ret) - return ret; - - /* Resume slave network devices */ - for (i = 0; i < ds->num_ports; i++) { - if (!dsa_is_port_initialized(ds, i)) - continue; - - ret = dsa_slave_resume(ds->ports[i].netdev); - if (ret) - return ret; - } - - return 0; -} -EXPORT_SYMBOL_GPL(dsa_switch_resume); -#endif - -/* platform driver init and cleanup *****************************************/ static int dev_is_class(struct device *dev, void *class) { if (dev->class != NULL && !strcmp(dev->class->name, class)) @@ -474,24 +170,6 @@ static struct device *dev_find_class(struct device *parent, char *class) return device_find_child(parent, class, dev_is_class); } -struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev) -{ - struct device *d; - - d = dev_find_class(dev, "mdio_bus"); - if (d != NULL) { - struct mii_bus *bus; - - bus = to_mii_bus(d); - put_device(d); - - return bus; - } - - return NULL; -} -EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus); - struct net_device *dsa_dev_to_net_device(struct device *dev) { struct device *d; @@ -511,456 +189,43 @@ struct net_device *dsa_dev_to_net_device(struct device *dev) } EXPORT_SYMBOL_GPL(dsa_dev_to_net_device); -#ifdef CONFIG_OF -static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, - struct dsa_chip_data *cd, - int chip_index, int port_index, - struct device_node *link) -{ - const __be32 *reg; - int link_sw_addr; - struct device_node *parent_sw; - int len; - - parent_sw = of_get_parent(link); - if (!parent_sw) - return -EINVAL; - - reg = of_get_property(parent_sw, "reg", &len); - if (!reg || (len != sizeof(*reg) * 2)) - return -EINVAL; - - /* - * Get the destination switch number from the second field of its 'reg' - * property, i.e. 
for "reg = <0x19 1>" sw_addr is '1'. - */ - link_sw_addr = be32_to_cpup(reg + 1); - - if (link_sw_addr >= pd->nr_chips) - return -EINVAL; - - cd->rtable[link_sw_addr] = port_index; - - return 0; -} - -static int dsa_of_probe_links(struct dsa_platform_data *pd, - struct dsa_chip_data *cd, - int chip_index, int port_index, - struct device_node *port, - const char *port_name) -{ - struct device_node *link; - int link_index; - int ret; - - for (link_index = 0;; link_index++) { - link = of_parse_phandle(port, "link", link_index); - if (!link) - break; - - if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) { - ret = dsa_of_setup_routing_table(pd, cd, chip_index, - port_index, link); - if (ret) - return ret; - } - } - return 0; -} - -static void dsa_of_free_platform_data(struct dsa_platform_data *pd) -{ - int i; - int port_index; - - for (i = 0; i < pd->nr_chips; i++) { - port_index = 0; - while (port_index < DSA_MAX_PORTS) { - kfree(pd->chip[i].port_names[port_index]); - port_index++; - } - - /* Drop our reference to the MDIO bus device */ - if (pd->chip[i].host_dev) - put_device(pd->chip[i].host_dev); - } - kfree(pd->chip); -} - -static int dsa_of_probe(struct device *dev) -{ - struct device_node *np = dev->of_node; - struct device_node *child, *mdio, *ethernet, *port; - struct mii_bus *mdio_bus, *mdio_bus_switch; - struct net_device *ethernet_dev; - struct dsa_platform_data *pd; - struct dsa_chip_data *cd; - const char *port_name; - int chip_index, port_index; - const unsigned int *sw_addr, *port_reg; - u32 eeprom_len; - int ret; - - mdio = of_parse_phandle(np, "dsa,mii-bus", 0); - if (!mdio) - return -EINVAL; - - mdio_bus = of_mdio_find_bus(mdio); - if (!mdio_bus) - return -EPROBE_DEFER; - - ethernet = of_parse_phandle(np, "dsa,ethernet", 0); - if (!ethernet) { - ret = -EINVAL; - goto out_put_mdio; - } - - ethernet_dev = of_find_net_device_by_node(ethernet); - if (!ethernet_dev) { - ret = -EPROBE_DEFER; - goto out_put_mdio; - } - - pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - ret = -ENOMEM; - goto out_put_ethernet; - } - - dev->platform_data = pd; - pd->of_netdev = ethernet_dev; - pd->nr_chips = of_get_available_child_count(np); - if (pd->nr_chips > DSA_MAX_SWITCHES) - pd->nr_chips = DSA_MAX_SWITCHES; - - pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data), - GFP_KERNEL); - if (!pd->chip) { - ret = -ENOMEM; - goto out_free; - } - - chip_index = -1; - for_each_available_child_of_node(np, child) { - int i; - - chip_index++; - cd = &pd->chip[chip_index]; - - cd->of_node = child; - - /* Initialize the routing table */ - for (i = 0; i < DSA_MAX_SWITCHES; ++i) - cd->rtable[i] = DSA_RTABLE_NONE; - - /* When assigning the host device, increment its refcount */ - cd->host_dev = get_device(&mdio_bus->dev); - - sw_addr = of_get_property(child, "reg", NULL); - if (!sw_addr) - continue; - - cd->sw_addr = be32_to_cpup(sw_addr); - if (cd->sw_addr >= PHY_MAX_ADDR) - continue; - - if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) - cd->eeprom_len = eeprom_len; - - mdio = of_parse_phandle(child, "mii-bus", 0); - if (mdio) { - mdio_bus_switch = of_mdio_find_bus(mdio); - if (!mdio_bus_switch) { - ret = -EPROBE_DEFER; - goto out_free_chip; - } - - /* Drop the mdio_bus device ref, replacing the host - * device with the mdio_bus_switch device, keeping - * the refcount from of_mdio_find_bus() above. 
- */ - put_device(cd->host_dev); - cd->host_dev = &mdio_bus_switch->dev; - } - - for_each_available_child_of_node(child, port) { - port_reg = of_get_property(port, "reg", NULL); - if (!port_reg) - continue; - - port_index = be32_to_cpup(port_reg); - if (port_index >= DSA_MAX_PORTS) - break; - - port_name = of_get_property(port, "label", NULL); - if (!port_name) - continue; - - cd->port_dn[port_index] = port; - - cd->port_names[port_index] = kstrdup(port_name, - GFP_KERNEL); - if (!cd->port_names[port_index]) { - ret = -ENOMEM; - goto out_free_chip; - } - - ret = dsa_of_probe_links(pd, cd, chip_index, - port_index, port, port_name); - if (ret) - goto out_free_chip; - - } - } - - /* The individual chips hold their own refcount on the mdio bus, - * so drop ours */ - put_device(&mdio_bus->dev); - - return 0; - -out_free_chip: - dsa_of_free_platform_data(pd); -out_free: - kfree(pd); - dev->platform_data = NULL; -out_put_ethernet: - put_device(ðernet_dev->dev); -out_put_mdio: - put_device(&mdio_bus->dev); - return ret; -} - -static void dsa_of_remove(struct device *dev) -{ - struct dsa_platform_data *pd = dev->platform_data; - - if (!dev->of_node) - return; - - dsa_of_free_platform_data(pd); - put_device(&pd->of_netdev->dev); - kfree(pd); -} -#else -static inline int dsa_of_probe(struct device *dev) -{ - return 0; -} - -static inline void dsa_of_remove(struct device *dev) -{ -} -#endif - -static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, - struct device *parent, struct dsa_platform_data *pd) -{ - int i; - unsigned configured = 0; - - dst->pd = pd; - dst->master_netdev = dev; - dst->cpu_port = -1; - - for (i = 0; i < pd->nr_chips; i++) { - struct dsa_switch *ds; - - ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev); - if (IS_ERR(ds)) { - netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n", - i, PTR_ERR(ds)); - continue; - } - - dst->ds[i] = ds; - - ++configured; - } - - /* - * If no switch was found, exit cleanly - */ - if (!configured) - return -EPROBE_DEFER; - - /* - * If we use a tagging format that doesn't have an ethertype - * field, make sure that all packets from this point on get - * sent to the tag format's receive function. 
- */ - wmb(); - dev->dsa_ptr = (void *)dst; - - return 0; -} - -static int dsa_probe(struct platform_device *pdev) -{ - struct dsa_platform_data *pd = pdev->dev.platform_data; - struct net_device *dev; - struct dsa_switch_tree *dst; - int ret; - - if (pdev->dev.of_node) { - ret = dsa_of_probe(&pdev->dev); - if (ret) - return ret; - - pd = pdev->dev.platform_data; - } - - if (pd == NULL || (pd->netdev == NULL && pd->of_netdev == NULL)) - return -EINVAL; - - if (pd->of_netdev) { - dev = pd->of_netdev; - dev_hold(dev); - } else { - dev = dsa_dev_to_net_device(pd->netdev); - } - if (dev == NULL) { - ret = -EPROBE_DEFER; - goto out; - } - - if (dev->dsa_ptr != NULL) { - dev_put(dev); - ret = -EEXIST; - goto out; - } - - dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); - if (dst == NULL) { - dev_put(dev); - ret = -ENOMEM; - goto out; - } - - platform_set_drvdata(pdev, dst); - - ret = dsa_setup_dst(dst, dev, &pdev->dev, pd); - if (ret) { - dev_put(dev); - goto out; - } - - return 0; - -out: - dsa_of_remove(&pdev->dev); - - return ret; -} - -static void dsa_remove_dst(struct dsa_switch_tree *dst) -{ - int i; - - dst->master_netdev->dsa_ptr = NULL; - - /* If we used a tagging format that doesn't have an ethertype - * field, make sure that all packets from this point get sent - * without the tag and go through the regular receive path. - */ - wmb(); - - for (i = 0; i < dst->pd->nr_chips; i++) { - struct dsa_switch *ds = dst->ds[i]; - - if (ds) - dsa_switch_destroy(ds); - } - - dsa_cpu_port_ethtool_restore(dst->cpu_switch); - - dev_put(dst->master_netdev); -} - -static int dsa_remove(struct platform_device *pdev) -{ - struct dsa_switch_tree *dst = platform_get_drvdata(pdev); - - dsa_remove_dst(dst); - dsa_of_remove(&pdev->dev); - - return 0; -} - -static void dsa_shutdown(struct platform_device *pdev) -{ -} - static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct dsa_switch_tree *dst = dev->dsa_ptr; + struct sk_buff *nskb = NULL; if (unlikely(dst == NULL)) { kfree_skb(skb); return 0; } - return dst->rcv(skb, dev, pt, orig_dev); -} - -static struct packet_type dsa_pack_type __read_mostly = { - .type = cpu_to_be16(ETH_P_XDSA), - .func = dsa_switch_rcv, -}; - -#ifdef CONFIG_PM_SLEEP -static int dsa_suspend(struct device *d) -{ - struct platform_device *pdev = to_platform_device(d); - struct dsa_switch_tree *dst = platform_get_drvdata(pdev); - int i, ret = 0; - - for (i = 0; i < dst->pd->nr_chips; i++) { - struct dsa_switch *ds = dst->ds[i]; + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return 0; - if (ds != NULL) - ret = dsa_switch_suspend(ds); + nskb = dst->rcv(skb, dev, pt, orig_dev); + if (!nskb) { + kfree_skb(skb); + return 0; } - return ret; -} - -static int dsa_resume(struct device *d) -{ - struct platform_device *pdev = to_platform_device(d); - struct dsa_switch_tree *dst = platform_get_drvdata(pdev); - int i, ret = 0; + skb = nskb; + skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; + skb->protocol = eth_type_trans(skb, skb->dev); - for (i = 0; i < dst->pd->nr_chips; i++) { - struct dsa_switch *ds = dst->ds[i]; + skb->dev->stats.rx_packets++; + skb->dev->stats.rx_bytes += skb->len; - if (ds != NULL) - ret = dsa_switch_resume(ds); - } + netif_receive_skb(skb); - return ret; + return 0; } -#endif - -static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume); -static const struct of_device_id dsa_of_match_table[] = { - { .compatible = "marvell,dsa", }, - {} -}; -MODULE_DEVICE_TABLE(of, 
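With the receive fast path consolidated in dsa_switch_rcv() above, a tagger's rcv() shrinks to three duties: validate and strip the tag, resolve the source port, and return the skb (returning NULL makes the core free the original skb). A sketch of that contract, assuming a hypothetical four-byte header tag with the port in its low bits:

#define FOO_TAG_LEN	4	/* hypothetical tag format */

static struct sk_buff *foo_tag_rcv(struct sk_buff *skb, struct net_device *dev,
				   struct packet_type *pt,
				   struct net_device *orig_dev)
{
	struct dsa_switch_tree *dst = dev->dsa_ptr;
	struct dsa_switch *ds = dst->cpu_switch;
	int source_port;

	if (unlikely(!pskb_may_pull(skb, FOO_TAG_LEN)))
		return NULL;

	source_port = skb->data[0] & 0x7;	/* invented field layout */
	if (!ds->ports[source_port].netdev)
		return NULL;

	/* Strip the tag and close the gap between the MAC addresses
	 * and the ethertype, as the real taggers below do.
	 */
	skb_pull_rcsum(skb, FOO_TAG_LEN);
	memmove(skb->data - ETH_HLEN,
		skb->data - ETH_HLEN - FOO_TAG_LEN, 2 * ETH_ALEN);

	/* dsa_switch_rcv() now does skb_push(ETH_HLEN),
	 * eth_type_trans(), stats accounting and netif_receive_skb().
	 */
	skb->dev = ds->ports[source_port].netdev;
	return skb;
}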
dsa_of_match_table); - -static struct platform_driver dsa_driver = { - .probe = dsa_probe, - .remove = dsa_remove, - .shutdown = dsa_shutdown, - .driver = { - .name = "dsa", - .of_match_table = dsa_of_match_table, - .pm = &dsa_pm_ops, - }, +static struct packet_type dsa_pack_type __read_mostly = { + .type = cpu_to_be16(ETH_P_XDSA), + .func = dsa_switch_rcv, }; static int __init dsa_init_module(void) @@ -971,7 +236,7 @@ static int __init dsa_init_module(void) if (rc) return rc; - rc = platform_driver_register(&dsa_driver); + rc = dsa_legacy_register(); if (rc) return rc; @@ -985,7 +250,7 @@ static void __exit dsa_cleanup_module(void) { dsa_slave_unregister_notifier(); dev_remove_pack(&dsa_pack_type); - platform_driver_unregister(&dsa_driver); + dsa_legacy_unregister(); } module_exit(dsa_cleanup_module); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 737be6470c7f27ba032d01667e039f3c03c17ae8..033b3bfb63dc1887b15b3e08f00a00f70b706ec4 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -13,16 +13,20 @@ #include #include #include +#include #include #include -#include #include #include +#include #include "dsa_priv.h" static LIST_HEAD(dsa_switch_trees); static DEFINE_MUTEX(dsa2_mutex); +static const struct devlink_ops dsa_devlink_ops = { +}; + static struct dsa_switch_tree *dsa_get_dst(u32 tree) { struct dsa_switch_tree *dst; @@ -222,12 +226,18 @@ static int dsa_dsa_port_apply(struct dsa_port *port, u32 index, return err; } - return 0; + memset(&ds->ports[index].devlink_port, 0, + sizeof(ds->ports[index].devlink_port)); + + return devlink_port_register(ds->devlink, + &ds->ports[index].devlink_port, + index); } static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { + devlink_port_unregister(&ds->ports[index].devlink_port); dsa_cpu_dsa_destroy(port); } @@ -245,12 +255,17 @@ static int dsa_cpu_port_apply(struct dsa_port *port, u32 index, ds->cpu_port_mask |= BIT(index); - return 0; + memset(&ds->ports[index].devlink_port, 0, + sizeof(ds->ports[index].devlink_port)); + err = devlink_port_register(ds->devlink, &ds->ports[index].devlink_port, + index); + return err; } static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { + devlink_port_unregister(&ds->ports[index].devlink_port); dsa_cpu_dsa_destroy(port); ds->cpu_port_mask &= ~BIT(index); @@ -275,12 +290,23 @@ static int dsa_user_port_apply(struct dsa_port *port, u32 index, return err; } + memset(&ds->ports[index].devlink_port, 0, + sizeof(ds->ports[index].devlink_port)); + err = devlink_port_register(ds->devlink, &ds->ports[index].devlink_port, + index); + if (err) + return err; + + devlink_port_type_eth_set(&ds->ports[index].devlink_port, + ds->ports[index].netdev); + return 0; } static void dsa_user_port_unapply(struct dsa_port *port, u32 index, struct dsa_switch *ds) { + devlink_port_unregister(&ds->ports[index].devlink_port); if (ds->ports[index].netdev) { dsa_slave_destroy(ds->ports[index].netdev); ds->ports[index].netdev = NULL; @@ -301,6 +327,17 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds) */ ds->phys_mii_mask = ds->enabled_port_mask; + /* Add the switch to devlink before calling setup, so that setup can + * add dpipe tables + */ + ds->devlink = devlink_alloc(&dsa_devlink_ops, 0); + if (!ds->devlink) + return -ENOMEM; + + err = devlink_register(ds->devlink, ds->dev); + if (err) + return err; + err = ds->ops->setup(ds); if (err < 0) return err; @@ -381,6 +418,13 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct 
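The dsa2.c hunks above interleave devlink registration with port setup across several apply functions. Condensed into one place, the assumed ordering is: allocate, register the switch, then register each port, with teardown in reverse. Error unwinding is shortened and foo_ is an illustrative name:

static int foo_devlink_setup(struct dsa_switch *ds, int index)
{
	int err;

	ds->devlink = devlink_alloc(&dsa_devlink_ops, 0);
	if (!ds->devlink)
		return -ENOMEM;

	err = devlink_register(ds->devlink, ds->dev);
	if (err)
		goto out_free;

	err = devlink_port_register(ds->devlink,
				    &ds->ports[index].devlink_port, index);
	if (err)
		goto out_unregister;

	/* User ports additionally advertise their slave netdev. */
	devlink_port_type_eth_set(&ds->ports[index].devlink_port,
				  ds->ports[index].netdev);
	return 0;

out_unregister:
	devlink_unregister(ds->devlink);
out_free:
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}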
dsa_switch *ds) mdiobus_unregister(ds->slave_mii_bus); dsa_switch_unregister_notifier(ds); + + if (ds->devlink) { + devlink_unregister(ds->devlink); + devlink_free(ds->devlink); + ds->devlink = NULL; + } + } static int dsa_dst_apply(struct dsa_switch_tree *dst) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 0706a511244e92ff0174173eb388a41ff59141f4..f4a88e4852138864081c1871fc882803e53247db 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -17,8 +17,9 @@ struct dsa_device_ops { struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); - int (*rcv)(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev); + struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev); }; struct dsa_slave_priv { @@ -54,6 +55,10 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds); void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds); +/* legacy.c */ +int dsa_legacy_register(void); +void dsa_legacy_unregister(void); + /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); @@ -85,4 +90,10 @@ extern const struct dsa_device_ops brcm_netdev_ops; /* tag_qca.c */ extern const struct dsa_device_ops qca_netdev_ops; +/* tag_mtk.c */ +extern const struct dsa_device_ops mtk_netdev_ops; + +/* tag_lan9303.c */ +extern const struct dsa_device_ops lan9303_netdev_ops; + #endif diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c new file mode 100644 index 0000000000000000000000000000000000000000..ad345c8b0b0693cc214b212ccbb402859d910134 --- /dev/null +++ b/net/dsa/legacy.c @@ -0,0 +1,818 @@ +/* + * net/dsa/legacy.c - Hardware switch handling + * Copyright (c) 2008-2009 Marvell Semiconductor + * Copyright (c) 2013 Florian Fainelli + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dsa_priv.h" + +/* switch driver registration ***********************************************/ +static DEFINE_MUTEX(dsa_switch_drivers_mutex); +static LIST_HEAD(dsa_switch_drivers); + +void register_switch_driver(struct dsa_switch_driver *drv) +{ + mutex_lock(&dsa_switch_drivers_mutex); + list_add_tail(&drv->list, &dsa_switch_drivers); + mutex_unlock(&dsa_switch_drivers_mutex); +} +EXPORT_SYMBOL_GPL(register_switch_driver); + +void unregister_switch_driver(struct dsa_switch_driver *drv) +{ + mutex_lock(&dsa_switch_drivers_mutex); + list_del_init(&drv->list); + mutex_unlock(&dsa_switch_drivers_mutex); +} +EXPORT_SYMBOL_GPL(unregister_switch_driver); + +static const struct dsa_switch_ops * +dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, + const char **_name, void **priv) +{ + const struct dsa_switch_ops *ret; + struct list_head *list; + const char *name; + + ret = NULL; + name = NULL; + + mutex_lock(&dsa_switch_drivers_mutex); + list_for_each(list, &dsa_switch_drivers) { + const struct dsa_switch_ops *ops; + struct dsa_switch_driver *drv; + + drv = list_entry(list, struct dsa_switch_driver, list); + ops = drv->ops; + + name = ops->probe(parent, host_dev, sw_addr, priv); + if (name != NULL) { + ret = ops; + break; + } + } + mutex_unlock(&dsa_switch_drivers_mutex); + + *_name = name; + + return ret; +} + +/* basic switch operations **************************************************/ +static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev) +{ + struct dsa_port *dport; + int ret, port; + + for (port = 0; port < ds->num_ports; port++) { + if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) + continue; + + dport = &ds->ports[port]; + ret = dsa_cpu_dsa_setup(ds, dev, dport, port); + if (ret) + return ret; + } + return 0; +} + +static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) +{ + const struct dsa_switch_ops *ops = ds->ops; + struct dsa_switch_tree *dst = ds->dst; + struct dsa_chip_data *cd = ds->cd; + bool valid_name_found = false; + int index = ds->index; + int i, ret; + + /* + * Validate supplied switch configuration. + */ + for (i = 0; i < ds->num_ports; i++) { + char *name; + + name = cd->port_names[i]; + if (name == NULL) + continue; + + if (!strcmp(name, "cpu")) { + if (dst->cpu_switch) { + netdev_err(dst->master_netdev, + "multiple cpu ports?!\n"); + return -EINVAL; + } + dst->cpu_switch = ds; + dst->cpu_port = i; + ds->cpu_port_mask |= 1 << i; + } else if (!strcmp(name, "dsa")) { + ds->dsa_port_mask |= 1 << i; + } else { + ds->enabled_port_mask |= 1 << i; + } + valid_name_found = true; + } + + if (!valid_name_found && i == ds->num_ports) + return -EINVAL; + + /* Make the built-in MII bus mask match the number of ports, + * switch drivers can override this later + */ + ds->phys_mii_mask = ds->enabled_port_mask; + + /* + * If the CPU connects to this switch, set the switch tree + * tagging protocol to the preferred tagging format of this + * switch. + */ + if (dst->cpu_switch == ds) { + enum dsa_tag_protocol tag_protocol; + + tag_protocol = ops->get_tag_protocol(ds); + dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); + if (IS_ERR(dst->tag_ops)) + return PTR_ERR(dst->tag_ops); + + dst->rcv = dst->tag_ops->rcv; + } + + memcpy(ds->rtable, cd->rtable, sizeof(ds->rtable)); + + /* + * Do basic register setup. 
+ */ + ret = ops->setup(ds); + if (ret < 0) + return ret; + + ret = dsa_switch_register_notifier(ds); + if (ret) + return ret; + + if (ops->set_addr) { + ret = ops->set_addr(ds, dst->master_netdev->dev_addr); + if (ret < 0) + return ret; + } + + if (!ds->slave_mii_bus && ops->phy_read) { + ds->slave_mii_bus = devm_mdiobus_alloc(parent); + if (!ds->slave_mii_bus) + return -ENOMEM; + dsa_slave_mii_bus_init(ds); + + ret = mdiobus_register(ds->slave_mii_bus); + if (ret < 0) + return ret; + } + + /* + * Create network devices for physical switch ports. + */ + for (i = 0; i < ds->num_ports; i++) { + ds->ports[i].dn = cd->port_dn[i]; + + if (!(ds->enabled_port_mask & (1 << i))) + continue; + + ret = dsa_slave_create(ds, parent, i, cd->port_names[i]); + if (ret < 0) + netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n", + index, i, cd->port_names[i], ret); + } + + /* Perform configuration of the CPU and DSA ports */ + ret = dsa_cpu_dsa_setups(ds, parent); + if (ret < 0) + netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n", + index); + + ret = dsa_cpu_port_ethtool_setup(ds); + if (ret) + return ret; + + return 0; +} + +static struct dsa_switch * +dsa_switch_setup(struct dsa_switch_tree *dst, int index, + struct device *parent, struct device *host_dev) +{ + struct dsa_chip_data *cd = dst->pd->chip + index; + const struct dsa_switch_ops *ops; + struct dsa_switch *ds; + int ret; + const char *name; + void *priv; + + /* + * Probe for switch model. + */ + ops = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv); + if (!ops) { + netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", + index); + return ERR_PTR(-EINVAL); + } + netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n", + index, name); + + + /* + * Allocate and initialise switch state. + */ + ds = dsa_switch_alloc(parent, DSA_MAX_PORTS); + if (!ds) + return ERR_PTR(-ENOMEM); + + ds->dst = dst; + ds->index = index; + ds->cd = cd; + ds->ops = ops; + ds->priv = priv; + + ret = dsa_switch_setup_one(ds, parent); + if (ret) + return ERR_PTR(ret); + + return ds; +} + +static void dsa_switch_destroy(struct dsa_switch *ds) +{ + int port; + + /* Destroy network devices for physical switch ports. 
*/ + for (port = 0; port < ds->num_ports; port++) { + if (!(ds->enabled_port_mask & (1 << port))) + continue; + + if (!ds->ports[port].netdev) + continue; + + dsa_slave_destroy(ds->ports[port].netdev); + } + + /* Disable configuration of the CPU and DSA ports */ + for (port = 0; port < ds->num_ports; port++) { + if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) + continue; + dsa_cpu_dsa_destroy(&ds->ports[port]); + + /* Clearing a bit which is not set does no harm */ + ds->cpu_port_mask |= ~(1 << port); + ds->dsa_port_mask |= ~(1 << port); + } + + if (ds->slave_mii_bus && ds->ops->phy_read) + mdiobus_unregister(ds->slave_mii_bus); + + dsa_switch_unregister_notifier(ds); +} + +#ifdef CONFIG_PM_SLEEP +int dsa_switch_suspend(struct dsa_switch *ds) +{ + int i, ret = 0; + + /* Suspend slave network devices */ + for (i = 0; i < ds->num_ports; i++) { + if (!dsa_is_port_initialized(ds, i)) + continue; + + ret = dsa_slave_suspend(ds->ports[i].netdev); + if (ret) + return ret; + } + + if (ds->ops->suspend) + ret = ds->ops->suspend(ds); + + return ret; +} +EXPORT_SYMBOL_GPL(dsa_switch_suspend); + +int dsa_switch_resume(struct dsa_switch *ds) +{ + int i, ret = 0; + + if (ds->ops->resume) + ret = ds->ops->resume(ds); + + if (ret) + return ret; + + /* Resume slave network devices */ + for (i = 0; i < ds->num_ports; i++) { + if (!dsa_is_port_initialized(ds, i)) + continue; + + ret = dsa_slave_resume(ds->ports[i].netdev); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(dsa_switch_resume); +#endif + +/* platform driver init and cleanup *****************************************/ +static int dev_is_class(struct device *dev, void *class) +{ + if (dev->class != NULL && !strcmp(dev->class->name, class)) + return 1; + + return 0; +} + +static struct device *dev_find_class(struct device *parent, char *class) +{ + if (dev_is_class(parent, class)) { + get_device(parent); + return parent; + } + + return device_find_child(parent, class, dev_is_class); +} + +struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev) +{ + struct device *d; + + d = dev_find_class(dev, "mdio_bus"); + if (d != NULL) { + struct mii_bus *bus; + + bus = to_mii_bus(d); + put_device(d); + + return bus; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus); + +#ifdef CONFIG_OF +static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, + struct dsa_chip_data *cd, + int chip_index, int port_index, + struct device_node *link) +{ + const __be32 *reg; + int link_sw_addr; + struct device_node *parent_sw; + int len; + + parent_sw = of_get_parent(link); + if (!parent_sw) + return -EINVAL; + + reg = of_get_property(parent_sw, "reg", &len); + if (!reg || (len != sizeof(*reg) * 2)) + return -EINVAL; + + /* + * Get the destination switch number from the second field of its 'reg' + * property, i.e. for "reg = <0x19 1>" sw_addr is '1'. 
+ */ + link_sw_addr = be32_to_cpup(reg + 1); + + if (link_sw_addr >= pd->nr_chips) + return -EINVAL; + + cd->rtable[link_sw_addr] = port_index; + + return 0; +} + +static int dsa_of_probe_links(struct dsa_platform_data *pd, + struct dsa_chip_data *cd, + int chip_index, int port_index, + struct device_node *port, + const char *port_name) +{ + struct device_node *link; + int link_index; + int ret; + + for (link_index = 0;; link_index++) { + link = of_parse_phandle(port, "link", link_index); + if (!link) + break; + + if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) { + ret = dsa_of_setup_routing_table(pd, cd, chip_index, + port_index, link); + if (ret) + return ret; + } + } + return 0; +} + +static void dsa_of_free_platform_data(struct dsa_platform_data *pd) +{ + int i; + int port_index; + + for (i = 0; i < pd->nr_chips; i++) { + port_index = 0; + while (port_index < DSA_MAX_PORTS) { + kfree(pd->chip[i].port_names[port_index]); + port_index++; + } + + /* Drop our reference to the MDIO bus device */ + if (pd->chip[i].host_dev) + put_device(pd->chip[i].host_dev); + } + kfree(pd->chip); +} + +static int dsa_of_probe(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct device_node *child, *mdio, *ethernet, *port; + struct mii_bus *mdio_bus, *mdio_bus_switch; + struct net_device *ethernet_dev; + struct dsa_platform_data *pd; + struct dsa_chip_data *cd; + const char *port_name; + int chip_index, port_index; + const unsigned int *sw_addr, *port_reg; + u32 eeprom_len; + int ret; + + mdio = of_parse_phandle(np, "dsa,mii-bus", 0); + if (!mdio) + return -EINVAL; + + mdio_bus = of_mdio_find_bus(mdio); + if (!mdio_bus) + return -EPROBE_DEFER; + + ethernet = of_parse_phandle(np, "dsa,ethernet", 0); + if (!ethernet) { + ret = -EINVAL; + goto out_put_mdio; + } + + ethernet_dev = of_find_net_device_by_node(ethernet); + if (!ethernet_dev) { + ret = -EPROBE_DEFER; + goto out_put_mdio; + } + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) { + ret = -ENOMEM; + goto out_put_ethernet; + } + + dev->platform_data = pd; + pd->of_netdev = ethernet_dev; + pd->nr_chips = of_get_available_child_count(np); + if (pd->nr_chips > DSA_MAX_SWITCHES) + pd->nr_chips = DSA_MAX_SWITCHES; + + pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data), + GFP_KERNEL); + if (!pd->chip) { + ret = -ENOMEM; + goto out_free; + } + + chip_index = -1; + for_each_available_child_of_node(np, child) { + int i; + + chip_index++; + cd = &pd->chip[chip_index]; + + cd->of_node = child; + + /* Initialize the routing table */ + for (i = 0; i < DSA_MAX_SWITCHES; ++i) + cd->rtable[i] = DSA_RTABLE_NONE; + + /* When assigning the host device, increment its refcount */ + cd->host_dev = get_device(&mdio_bus->dev); + + sw_addr = of_get_property(child, "reg", NULL); + if (!sw_addr) + continue; + + cd->sw_addr = be32_to_cpup(sw_addr); + if (cd->sw_addr >= PHY_MAX_ADDR) + continue; + + if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) + cd->eeprom_len = eeprom_len; + + mdio = of_parse_phandle(child, "mii-bus", 0); + if (mdio) { + mdio_bus_switch = of_mdio_find_bus(mdio); + if (!mdio_bus_switch) { + ret = -EPROBE_DEFER; + goto out_free_chip; + } + + /* Drop the mdio_bus device ref, replacing the host + * device with the mdio_bus_switch device, keeping + * the refcount from of_mdio_find_bus() above. 
+ */ + put_device(cd->host_dev); + cd->host_dev = &mdio_bus_switch->dev; + } + + for_each_available_child_of_node(child, port) { + port_reg = of_get_property(port, "reg", NULL); + if (!port_reg) + continue; + + port_index = be32_to_cpup(port_reg); + if (port_index >= DSA_MAX_PORTS) + break; + + port_name = of_get_property(port, "label", NULL); + if (!port_name) + continue; + + cd->port_dn[port_index] = port; + + cd->port_names[port_index] = kstrdup(port_name, + GFP_KERNEL); + if (!cd->port_names[port_index]) { + ret = -ENOMEM; + goto out_free_chip; + } + + ret = dsa_of_probe_links(pd, cd, chip_index, + port_index, port, port_name); + if (ret) + goto out_free_chip; + + } + } + + /* The individual chips hold their own refcount on the mdio bus, + * so drop ours */ + put_device(&mdio_bus->dev); + + return 0; + +out_free_chip: + dsa_of_free_platform_data(pd); +out_free: + kfree(pd); + dev->platform_data = NULL; +out_put_ethernet: + put_device(ðernet_dev->dev); +out_put_mdio: + put_device(&mdio_bus->dev); + return ret; +} + +static void dsa_of_remove(struct device *dev) +{ + struct dsa_platform_data *pd = dev->platform_data; + + if (!dev->of_node) + return; + + dsa_of_free_platform_data(pd); + put_device(&pd->of_netdev->dev); + kfree(pd); +} +#else +static inline int dsa_of_probe(struct device *dev) +{ + return 0; +} + +static inline void dsa_of_remove(struct device *dev) +{ +} +#endif + +static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, + struct device *parent, struct dsa_platform_data *pd) +{ + int i; + unsigned configured = 0; + + dst->pd = pd; + dst->master_netdev = dev; + dst->cpu_port = -1; + + for (i = 0; i < pd->nr_chips; i++) { + struct dsa_switch *ds; + + ds = dsa_switch_setup(dst, i, parent, pd->chip[i].host_dev); + if (IS_ERR(ds)) { + netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n", + i, PTR_ERR(ds)); + continue; + } + + dst->ds[i] = ds; + + ++configured; + } + + /* + * If no switch was found, exit cleanly + */ + if (!configured) + return -EPROBE_DEFER; + + /* + * If we use a tagging format that doesn't have an ethertype + * field, make sure that all packets from this point on get + * sent to the tag format's receive function. 
+ */ + wmb(); + dev->dsa_ptr = (void *)dst; + + return 0; +} + +static int dsa_probe(struct platform_device *pdev) +{ + struct dsa_platform_data *pd = pdev->dev.platform_data; + struct net_device *dev; + struct dsa_switch_tree *dst; + int ret; + + if (pdev->dev.of_node) { + ret = dsa_of_probe(&pdev->dev); + if (ret) + return ret; + + pd = pdev->dev.platform_data; + } + + if (pd == NULL || (pd->netdev == NULL && pd->of_netdev == NULL)) + return -EINVAL; + + if (pd->of_netdev) { + dev = pd->of_netdev; + dev_hold(dev); + } else { + dev = dsa_dev_to_net_device(pd->netdev); + } + if (dev == NULL) { + ret = -EPROBE_DEFER; + goto out; + } + + if (dev->dsa_ptr != NULL) { + dev_put(dev); + ret = -EEXIST; + goto out; + } + + dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); + if (dst == NULL) { + dev_put(dev); + ret = -ENOMEM; + goto out; + } + + platform_set_drvdata(pdev, dst); + + ret = dsa_setup_dst(dst, dev, &pdev->dev, pd); + if (ret) { + dev_put(dev); + goto out; + } + + return 0; + +out: + dsa_of_remove(&pdev->dev); + + return ret; +} + +static void dsa_remove_dst(struct dsa_switch_tree *dst) +{ + int i; + + dst->master_netdev->dsa_ptr = NULL; + + /* If we used a tagging format that doesn't have an ethertype + * field, make sure that all packets from this point get sent + * without the tag and go through the regular receive path. + */ + wmb(); + + for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds) + dsa_switch_destroy(ds); + } + + dsa_cpu_port_ethtool_restore(dst->cpu_switch); + + dev_put(dst->master_netdev); +} + +static int dsa_remove(struct platform_device *pdev) +{ + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + + dsa_remove_dst(dst); + dsa_of_remove(&pdev->dev); + + return 0; +} + +static void dsa_shutdown(struct platform_device *pdev) +{ +} + +#ifdef CONFIG_PM_SLEEP +static int dsa_suspend(struct device *d) +{ + struct platform_device *pdev = to_platform_device(d); + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + int i, ret = 0; + + for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds != NULL) + ret = dsa_switch_suspend(ds); + } + + return ret; +} + +static int dsa_resume(struct device *d) +{ + struct platform_device *pdev = to_platform_device(d); + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + int i, ret = 0; + + for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds != NULL) + ret = dsa_switch_resume(ds); + } + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume); + +static const struct of_device_id dsa_of_match_table[] = { + { .compatible = "marvell,dsa", }, + {} +}; +MODULE_DEVICE_TABLE(of, dsa_of_match_table); + +static struct platform_driver dsa_driver = { + .probe = dsa_probe, + .remove = dsa_remove, + .shutdown = dsa_shutdown, + .driver = { + .name = "dsa", + .of_match_table = dsa_of_match_table, + .pm = &dsa_pm_ops, + }, +}; + +int dsa_legacy_register(void) +{ + return platform_driver_register(&dsa_driver); +} + +void dsa_legacy_unregister(void) +{ + platform_driver_unregister(&dsa_driver); +} diff --git a/net/dsa/slave.c b/net/dsa/slave.c index c34872e1febc4b75d1b69b18a8a1189405ca30fa..7693182df81e61d14540cd43be3ae7e134eef576 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -419,8 +420,8 @@ static int dsa_slave_vlan_filtering(struct net_device *dev, return 0; } -static int 
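Everything in the new net/dsa/legacy.c is moved code, so the old entry points survive unchanged: a platform-data driver still registers itself on the driver list that now lives here. A minimal sketch with invented foo_ names (the ops must at least implement ->probe()):

static struct dsa_switch_driver foo_switch_driver = {
	.ops	= &foo_switch_ops,
};

static int __init foo_init(void)
{
	register_switch_driver(&foo_switch_driver);
	return 0;
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	unregister_switch_driver(&foo_switch_driver);
}
module_exit(foo_exit);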
dsa_fastest_ageing_time(struct dsa_switch *ds, - unsigned int ageing_time) +static unsigned int dsa_fastest_ageing_time(struct dsa_switch *ds, + unsigned int ageing_time) { int i; @@ -443,9 +444,13 @@ static int dsa_slave_ageing_time(struct net_device *dev, unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time); unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies); - /* bridge skips -EOPNOTSUPP, so skip the prepare phase */ - if (switchdev_trans_ph_prepare(trans)) + if (switchdev_trans_ph_prepare(trans)) { + if (ds->ageing_time_min && ageing_time < ds->ageing_time_min) + return -ERANGE; + if (ds->ageing_time_max && ageing_time > ds->ageing_time_max) + return -ERANGE; return 0; + } /* Keep the fastest ageing time in case of multiple bridges */ p->dp->ageing_time = ageing_time; diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 6456dacf9ae9e0b88585defc026179423a80e4e4..ca6e26e514f089cfa8991ef6a7bd790bd8db14fe 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -1,7 +1,8 @@ /* * Handling of a single switch chip, part of a switch fabric * - * Copyright (c) 2017 Vivien Didelot + * Copyright (c) 2017 Savoir-faire Linux Inc. + * Vivien Didelot * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -19,9 +20,9 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds, if (ds->index == info->sw_index && ds->ops->port_bridge_join) return ds->ops->port_bridge_join(ds, info->port, info->br); - if (ds->index != info->sw_index) - dev_dbg(ds->dev, "crosschip DSA port %d.%d bridged to %s\n", - info->sw_index, info->port, netdev_name(info->br)); + if (ds->index != info->sw_index && ds->ops->crosschip_bridge_join) + return ds->ops->crosschip_bridge_join(ds, info->sw_index, + info->port, info->br); return 0; } @@ -32,9 +33,9 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, if (ds->index == info->sw_index && ds->ops->port_bridge_leave) ds->ops->port_bridge_leave(ds, info->port, info->br); - if (ds->index != info->sw_index) - dev_dbg(ds->dev, "crosschip DSA port %d.%d unbridged from %s\n", - info->sw_index, info->port, netdev_name(info->br)); + if (ds->index != info->sw_index && ds->ops->crosschip_bridge_leave) + ds->ops->crosschip_bridge_leave(ds, info->sw_index, info->port, + info->br); return 0; } diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 5d925b6b2bb14f78f84a06b84b4fa19bd6846e82..2a9b52c5af86b5308d7d71a3340a0e48768bdb60 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "dsa_priv.h" /* This tag length is 4 bytes, older ones were 6 bytes, we do not @@ -91,23 +92,17 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev return NULL; } -static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; int source_port; u8 *brcm_tag; - if (unlikely(dst == NULL)) - goto out_drop; - ds = dst->cpu_switch; - skb = skb_unshare(skb, GFP_ATOMIC); - if (skb == NULL) - goto out; - if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN))) goto out_drop; @@ -139,22 +134,12 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, skb->data - ETH_HLEN - BRCM_TAG_LEN, 2 * ETH_ALEN); - skb_push(skb, ETH_HLEN); - 
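The slave.c hunk above turns the switchdev prepare phase into a range check against driver-advertised bounds instead of skipping it. A driver opts in simply by filling two fields; a sketch with assumed example values:

static int foo_setup(struct dsa_switch *ds)
{
	/* Values are in milliseconds, matching the attr conversion
	 * in dsa_slave_ageing_time(). Requests outside [min, max]
	 * are now refused with -ERANGE before any hardware access.
	 */
	ds->ageing_time_min = 1000;		/* assumed: 1 second */
	ds->ageing_time_max = 3600 * 1000;	/* assumed: 1 hour */
	return 0;
}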
skb->pkt_type = PACKET_HOST; skb->dev = ds->ports[source_port].netdev; - skb->protocol = eth_type_trans(skb, skb->dev); - - skb->dev->stats.rx_packets++; - skb->dev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); - - return 0; + return skb; out_drop: - kfree_skb(skb); -out: - return 0; + return NULL; } const struct dsa_device_ops brcm_netdev_ops = { diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index 72579ceea381b7e2bce99a28208810b707434f09..1c6633f0de01909f950a03349b1b4c08c2151839 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "dsa_priv.h" #define DSA_HLEN 4 @@ -67,8 +68,9 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev) return NULL; } -static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; @@ -76,13 +78,6 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, int source_device; int source_port; - if (unlikely(dst == NULL)) - goto out_drop; - - skb = skb_unshare(skb, GFP_ATOMIC); - if (skb == NULL) - goto out; - if (unlikely(!pskb_may_pull(skb, DSA_HLEN))) goto out_drop; @@ -164,21 +159,11 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, } skb->dev = ds->ports[source_port].netdev; - skb_push(skb, ETH_HLEN); - skb->pkt_type = PACKET_HOST; - skb->protocol = eth_type_trans(skb, skb->dev); - - skb->dev->stats.rx_packets++; - skb->dev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); - - return 0; + return skb; out_drop: - kfree_skb(skb); -out: - return 0; + return NULL; } const struct dsa_device_ops dsa_netdev_ops = { diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 648c051817a1b4a4e64cda67bab8c81288027a4e..d9c668aa5e54682914a0e61b3cb6373a71a0ee8b 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "dsa_priv.h" #define DSA_HLEN 4 @@ -80,8 +81,9 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) return NULL; } -static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; @@ -89,13 +91,6 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, int source_device; int source_port; - if (unlikely(dst == NULL)) - goto out_drop; - - skb = skb_unshare(skb, GFP_ATOMIC); - if (skb == NULL) - goto out; - if (unlikely(!pskb_may_pull(skb, EDSA_HLEN))) goto out_drop; @@ -183,21 +178,11 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, } skb->dev = ds->ports[source_port].netdev; - skb_push(skb, ETH_HLEN); - skb->pkt_type = PACKET_HOST; - skb->protocol = eth_type_trans(skb, skb->dev); - - skb->dev->stats.rx_packets++; - skb->dev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); - - return 0; + return skb; out_drop: - kfree_skb(skb); -out: - return 0; + return NULL; } const struct dsa_device_ops edsa_netdev_ops = { diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c new file mode 100644 index 0000000000000000000000000000000000000000..70130ed5c21a9fa5e218e51ab4448c35c9005eba --- /dev/null +++ 
b/net/dsa/tag_lan9303.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2017 Pengutronix, Juergen Borleis + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include "dsa_priv.h" + +/* To define the outgoing port and to discover the incoming port a regular + * VLAN tag is used by the LAN9303. But its VID meaning is 'special': + * + * Dest MAC Src MAC TAG Type + * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 | 1 2 |... + * |<------->| + * TAG: + * |<------------->| + * | 1 2 | 3 4 | + * TPID VID + * 0x8100 + * + * VID bit 3 indicates a request for an ALR lookup. + * + * If VID bit 3 is zero, then bits 0 and 1 specify the destination port + * (0, 1, 2) or broadcast (3) or the source port (1, 2). + * + * VID bit 4 is used to specify if the STP port state should be overridden. + * Required when no forwarding between the external ports should happen. + */ + +#define LAN9303_TAG_LEN 4 +#define LAN9303_MAX_PORTS 3 + +static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + u16 *lan9303_tag; + + /* insert a special VLAN tag between the MAC addresses + * and the current ethertype field. + */ + if (skb_cow_head(skb, LAN9303_TAG_LEN) < 0) { + dev_dbg(&dev->dev, + "Cannot make room for the special tag. Dropping packet\n"); + goto out_free; + } + + /* provide 'LAN9303_TAG_LEN' bytes additional space */ + skb_push(skb, LAN9303_TAG_LEN); + + /* make room between MACs and Ether-Type */ + memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN); + + lan9303_tag = (u16 *)(skb->data + 2 * ETH_ALEN); + lan9303_tag[0] = htons(ETH_P_8021Q); + lan9303_tag[1] = htons(p->dp->index | BIT(4)); + + return skb; +out_free: + kfree_skb(skb); + return NULL; +} + +static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + u16 *lan9303_tag; + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds; + unsigned int source_port; + + ds = dst->ds[0]; + + if (unlikely(!ds)) { + dev_warn_ratelimited(&dev->dev, "Dropping packet due to missing DSA switch device\n"); + return NULL; + } + + if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) { + dev_warn_ratelimited(&dev->dev, + "Dropping packet, cannot pull\n"); + return NULL; + } + + /* '->data' points into the middle of our special VLAN tag information: + * + * ~ MAC src | 0x81 | 0x00 | 0xyy | 0xzz | ether type + * ^ + * ->data + */ + lan9303_tag = (u16 *)(skb->data - 2); + + if (lan9303_tag[0] != htons(ETH_P_8021Q)) { + dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n"); + return NULL; + } + + source_port = ntohs(lan9303_tag[1]) & 0x3; + + if (source_port >= LAN9303_MAX_PORTS) { + dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n"); + return NULL; + } + + if (!ds->ports[source_port].netdev) { + dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid netdev or device\n"); + return NULL; + } + + /* remove the special VLAN tag between the MAC addresses + * and the current ethertype field. 
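+ * skb_pull_rcsum() pulls LAN9303_TAG_LEN bytes (adjusting the
+ * checksum as it goes) and the memmove() then closes the 4 byte
+ * gap by moving both MAC addresses, the mirror image of the
+ * shift done in lan9303_xmit().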
+ */ + skb_pull_rcsum(skb, 2 + 2); + memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN), + 2 * ETH_ALEN); + + /* forward the packet to the dedicated interface */ + skb->dev = ds->ports[source_port].netdev; + + return skb; +} + +const struct dsa_device_ops lan9303_netdev_ops = { + .xmit = lan9303_xmit, + .rcv = lan9303_rcv, +}; diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c new file mode 100644 index 0000000000000000000000000000000000000000..837cdddb53f093c6283451f7626612312008bd0d --- /dev/null +++ b/net/dsa/tag_mtk.c @@ -0,0 +1,100 @@ +/* + * Mediatek DSA Tag support + * Copyright (C) 2017 Landen Chao + * Sean Wang + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include "dsa_priv.h" + +#define MTK_HDR_LEN 4 +#define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) +#define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) + +static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + u8 *mtk_tag; + + if (skb_cow_head(skb, MTK_HDR_LEN) < 0) + goto out_free; + + skb_push(skb, MTK_HDR_LEN); + + memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN); + + /* Build the tag after the MAC Source Address */ + mtk_tag = skb->data + 2 * ETH_ALEN; + mtk_tag[0] = 0; + mtk_tag[1] = (1 << p->dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; + mtk_tag[2] = 0; + mtk_tag[3] = 0; + + return skb; + +out_free: + kfree_skb(skb); + return NULL; +} + +static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds; + int port; + __be16 *phdr; + u16 hdr; + + if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) + goto out_drop; + + /* The MTK header is added by the switch between src addr + * and ethertype. At this point, skb->data points to 2 bytes + * after src addr, so the header is 2 bytes right before it. + */ + phdr = (__be16 *)(skb->data - 2); + hdr = ntohs(*phdr); + + /* Remove MTK tag and recalculate checksum. */ + skb_pull_rcsum(skb, MTK_HDR_LEN); + + memmove(skb->data - ETH_HLEN, + skb->data - ETH_HLEN - MTK_HDR_LEN, + 2 * ETH_ALEN); + + /* This protocol doesn't support cascading multiple + * switches, so it's safe to assume the switch is first + * in the tree. 
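+ * A cascaded setup would need a source-device field in the tag,
+ * but this 4 byte header only carries the source port in bits 2:0
+ * (MTK_HDR_RECV_SOURCE_PORT_MASK).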
+ */ + ds = dst->ds[0]; + if (!ds) + goto out_drop; + + /* Get source port information */ + port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK); + if (!ds->ports[port].netdev) + goto out_drop; + + skb->dev = ds->ports[port].netdev; + + return skb; + +out_drop: + return NULL; +} + +const struct dsa_device_ops mtk_netdev_ops = { + .xmit = mtk_tag_xmit, + .rcv = mtk_tag_rcv, +}; diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 30240f343aea8450b13936159b4b9a76126a8977..3ba3f59f7a3433b3731a5b3dc501eb22660d7cce 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -12,6 +12,7 @@ */ #include +#include #include "dsa_priv.h" #define QCA_HDR_LEN 2 @@ -65,8 +66,9 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) return NULL; } -static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; @@ -74,13 +76,6 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, int port; __be16 *phdr, hdr; - if (unlikely(!dst)) - goto out_drop; - - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) - goto out; - if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) goto out_drop; @@ -114,22 +109,12 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, goto out_drop; /* Update skb & forward the frame accordingly */ - skb_push(skb, ETH_HLEN); - skb->pkt_type = PACKET_HOST; skb->dev = ds->ports[port].netdev; - skb->protocol = eth_type_trans(skb, skb->dev); - - skb->dev->stats.rx_packets++; - skb->dev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); - - return 0; + return skb; out_drop: - kfree_skb(skb); -out: - return 0; + return NULL; } const struct dsa_device_ops qca_netdev_ops = { diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 26f977176978085af9c034319c754a1ac7501d4c..aafc2fc74c3067dd05c67c409e40e8ceef33cf0c 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "dsa_priv.h" static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev) @@ -57,22 +58,17 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev) return nskb; } -static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) +static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; u8 *trailer; int source_port; - if (unlikely(dst == NULL)) - goto out_drop; ds = dst->cpu_switch; - skb = skb_unshare(skb, GFP_ATOMIC); - if (skb == NULL) - goto out; - if (skb_linearize(skb)) goto out_drop; @@ -88,21 +84,11 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, pskb_trim_rcsum(skb, skb->len - 4); skb->dev = ds->ports[source_port].netdev; - skb_push(skb, ETH_HLEN); - skb->pkt_type = PACKET_HOST; - skb->protocol = eth_type_trans(skb, skb->dev); - - skb->dev->stats.rx_packets++; - skb->dev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); - - return 0; + return skb; out_drop: - kfree_skb(skb); -out: - return 0; + return NULL; } const struct dsa_device_ops trailer_netdev_ops = { diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 
1ab30e7d3f99e19c5e54fd9747cfce7a5c1f559b..81dac16933fc05a19224a7c9ef450d34dea1be91 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -350,7 +350,7 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) return 0; invalid: - netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL); + netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; nla_put_failure: @@ -432,7 +432,7 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) return 0; invalid: - netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL); + netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; nla_put_failure: diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index fc60cd061f3966a2381803a8e4d85206770253dc..99f6c254ea777c8786421d069f8f2414d0eb34e5 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -249,8 +249,7 @@ nl802154_prepare_wpan_dev_dump(struct sk_buff *skb, if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, genl_family_attrbuf(&nl802154_fam), - nl802154_fam.maxattr, - nl802154_policy); + nl802154_fam.maxattr, nl802154_policy, NULL); if (err) goto out_unlock; @@ -562,8 +561,8 @@ static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb, struct nl802154_dump_wpan_phy_state *state) { struct nlattr **tb = genl_family_attrbuf(&nl802154_fam); - int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, - tb, nl802154_fam.maxattr, nl802154_policy); + int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, tb, + nl802154_fam.maxattr, nl802154_policy, NULL); /* TODO check if we can handle error here, * we have no backward compatibility @@ -1308,7 +1307,7 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla, struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1]; if (!nla || nla_parse_nested(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, - nl802154_dev_addr_policy)) + nl802154_dev_addr_policy, NULL)) return -EINVAL; if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || @@ -1348,7 +1347,7 @@ ieee802154_llsec_parse_key_id(struct nlattr *nla, struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1]; if (!nla || nla_parse_nested(attrs, NL802154_KEY_ID_ATTR_MAX, nla, - nl802154_key_id_policy)) + nl802154_key_id_policy, NULL)) return -EINVAL; if (!attrs[NL802154_KEY_ID_ATTR_MODE]) @@ -1565,7 +1564,7 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info) if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], - nl802154_key_policy)) + nl802154_key_policy, info->extack)) return -EINVAL; if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] || @@ -1615,7 +1614,7 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info) if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], - nl802154_key_policy)) + nl802154_key_policy, info->extack)) return -EINVAL; if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0) @@ -1729,8 +1728,8 @@ ieee802154_llsec_parse_device(struct nlattr *nla, { struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; - if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, nla, - nl802154_dev_policy)) + if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, + nla, nl802154_dev_policy, NULL)) return -EINVAL; memset(dev, 0, sizeof(*dev)); @@ -1783,7 +1782,7 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info) if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], - nl802154_dev_policy)) + 
nl802154_dev_policy, info->extack)) return -EINVAL; if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]) @@ -1911,7 +1910,7 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], - nl802154_devkey_policy) < 0) + nl802154_devkey_policy, info->extack) < 0) return -EINVAL; if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] || @@ -1943,7 +1942,7 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], - nl802154_devkey_policy)) + nl802154_devkey_policy, info->extack)) return -EINVAL; if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]) @@ -2063,8 +2062,8 @@ llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl) { struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1]; - if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, nla, - nl802154_seclevel_policy)) + if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, + nla, nl802154_seclevel_policy, NULL)) return -EINVAL; memset(sl, 0, sizeof(*sl)); diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index c6d4238ff94a8a329bf44bf59b30fb09fb07f707..f83de23a30e7e77303d011dee35eb92342adab5c 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \ tcp_rate.o tcp_recovery.o \ tcp_offload.o datagram.o raw.o udp.o udplite.o \ udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ - fib_frontend.o fib_semantics.o fib_trie.o \ + fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \ inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 13a9a3297eae3ac48a77214e9365657202d44f08..f3dad16613437c0c7ac3e9c7518a0929cddb3ca7 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1602,8 +1602,9 @@ static const struct net_protocol igmp_protocol = { }; #endif -static const struct net_protocol tcp_protocol = { +static struct net_protocol tcp_protocol = { .early_demux = tcp_v4_early_demux, + .early_demux_handler = tcp_v4_early_demux, .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, .no_policy = 1, @@ -1611,8 +1612,9 @@ static const struct net_protocol tcp_protocol = { .icmp_strict_tag_validation = 1, }; -static const struct net_protocol udp_protocol = { +static struct net_protocol udp_protocol = { .early_demux = udp_v4_early_demux, + .early_demux_handler = udp_v4_early_demux, .handler = udp_rcv, .err_handler = udp_err, .no_policy = 1, @@ -1723,6 +1725,8 @@ static __net_init int inet_init_net(struct net *net) net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; net->ipv4.sysctl_ip_dynaddr = 0; net->ipv4.sysctl_ip_early_demux = 1; + net->ipv4.sysctl_udp_early_demux = 1; + net->ipv4.sysctl_tcp_early_demux = 1; #ifdef CONFIG_SYSCTL net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; #endif diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 51b27ae09fbd725bcd8030982e5850215ac4ce5c..0937b34c27cacb2dec73a67a76ff11fe26722500 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -872,7 +872,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) skb->pkt_type != PACKET_HOST) state = NUD_STALE; neigh_update(n, sha, state, - override ? NEIGH_UPDATE_F_OVERRIDE : 0); + override ? 
NEIGH_UPDATE_F_OVERRIDE : 0, 0); neigh_release(n); } @@ -1033,7 +1033,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, err = neigh_update(neigh, (r->arp_flags & ATF_COM) ? r->arp_ha.sa_data : NULL, state, NEIGH_UPDATE_F_OVERRIDE | - NEIGH_UPDATE_F_ADMIN); + NEIGH_UPDATE_F_ADMIN, 0); neigh_release(neigh); } return err; @@ -1084,7 +1084,7 @@ static int arp_invalidate(struct net_device *dev, __be32 ip) if (neigh->nud_state & ~NUD_NOARP) err = neigh_update(neigh, NULL, NUD_FAILED, NEIGH_UPDATE_F_OVERRIDE| - NEIGH_UPDATE_F_ADMIN); + NEIGH_UPDATE_F_ADMIN, 0); neigh_release(neigh); } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index cebedd545e5e2863afcfe116309725e2cd57206c..df14815a3b8ce74aeb613458ffaf5f6eee4a263d 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -571,7 +571,8 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) return ret; } -static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) +static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; @@ -582,7 +583,8 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, + extack); if (err < 0) goto errout; @@ -752,7 +754,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, struct in_device *in_dev; int err; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, + NULL); if (err < 0) goto errout; @@ -843,7 +846,8 @@ static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa) return NULL; } -static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) +static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct in_ifaddr *ifa; @@ -1192,6 +1196,18 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len) return done; } +static __be32 in_dev_select_addr(const struct in_device *in_dev, + int scope) +{ + for_primary_ifa(in_dev) { + if (ifa->ifa_scope != RT_SCOPE_LINK && + ifa->ifa_scope <= scope) + return ifa->ifa_local; + } endfor_ifa(in_dev); + + return 0; +} + __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) { __be32 addr = 0; @@ -1228,13 +1244,9 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) if (master_idx && (dev = dev_get_by_index_rcu(net, master_idx)) && (in_dev = __in_dev_get_rcu(dev))) { - for_primary_ifa(in_dev) { - if (ifa->ifa_scope != RT_SCOPE_LINK && - ifa->ifa_scope <= scope) { - addr = ifa->ifa_local; - goto out_unlock; - } - } endfor_ifa(in_dev); + addr = in_dev_select_addr(in_dev, scope); + if (addr) + goto out_unlock; } /* Not loopback addresses on loopback should be preferred @@ -1249,13 +1261,9 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) if (!in_dev) continue; - for_primary_ifa(in_dev) { - if (ifa->ifa_scope != RT_SCOPE_LINK && - ifa->ifa_scope <= scope) { - addr = ifa->ifa_local; - goto out_unlock; - } - } endfor_ifa(in_dev); + addr = in_dev_select_addr(in_dev, scope); + if (addr) + goto out_unlock; } out_unlock: rcu_read_unlock(); @@ -1713,7 +1721,7 @@ static int inet_validate_link_af(const struct net_device *dev, if (dev && 
!__in_dev_get_rtnl(dev)) return -EAFNOSUPPORT; - err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy); + err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy, NULL); if (err < 0) return err; @@ -1741,7 +1749,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla) if (!in_dev) return -EAFNOSUPPORT; - if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0) + if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0) BUG(); if (tb[IFLA_INET_CONF]) { @@ -1798,6 +1806,9 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0) goto nla_put_failure; + if (!devconf) + goto out; + if ((all || type == NETCONFA_FORWARDING) && nla_put_s32(skb, NETCONFA_FORWARDING, IPV4_DEVCONF(*devconf, FORWARDING)) < 0) @@ -1819,6 +1830,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0) goto nla_put_failure; +out: nlmsg_end(skb, nlh); return 0; @@ -1827,8 +1839,8 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, return -EMSGSIZE; } -void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, - struct ipv4_devconf *devconf) +void inet_netconf_notify_devconf(struct net *net, int event, int type, + int ifindex, struct ipv4_devconf *devconf) { struct sk_buff *skb; int err = -ENOBUFS; @@ -1838,7 +1850,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, goto errout; err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, - RTM_NEWNETCONF, 0, type); + event, 0, type); if (err < 0) { /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); @@ -1861,7 +1873,8 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = { }; static int inet_netconf_get_devconf(struct sk_buff *in_skb, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX+1]; @@ -1874,7 +1887,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb, int err; err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, - devconf_ipv4_policy); + devconf_ipv4_policy, extack); if (err < 0) goto errout; @@ -2017,10 +2030,12 @@ static void inet_forward_change(struct net *net) IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; IPV4_DEVCONF_DFLT(net, FORWARDING) = on; - inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all); - inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv4.devconf_dflt); @@ -2033,7 +2048,8 @@ static void inet_forward_change(struct net *net) in_dev = __in_dev_get_rtnl(dev); if (in_dev) { IN_DEV_CONF_SET(in_dev, FORWARDING, on); - inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, dev->ifindex, &in_dev->cnf); } } @@ -2078,19 +2094,22 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write, if (i == IPV4_DEVCONF_RP_FILTER - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, cnf); - inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_RP_FILTER, ifindex, cnf); } if (i == IPV4_DEVCONF_PROXY_ARP - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, 
cnf); - inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_PROXY_NEIGH, ifindex, cnf); } if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, cnf); - inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, ifindex, cnf); } } @@ -2125,7 +2144,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write, container_of(cnf, struct in_device, cnf); if (*valp) dev_disable_lro(idev->dev); - inet_netconf_notify_devconf(net, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, idev->dev->ifindex, cnf); @@ -2133,7 +2152,8 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write, rtnl_unlock(); rt_cache_flush(net); } else - inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv4.devconf_dflt); } @@ -2255,7 +2275,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, p->sysctl = t; - inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p); + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, + ifindex, p); return 0; free: @@ -2264,16 +2285,18 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, return -ENOBUFS; } -static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf) +static void __devinet_sysctl_unregister(struct net *net, + struct ipv4_devconf *cnf, int ifindex) { struct devinet_sysctl_table *t = cnf->sysctl; - if (!t) - return; + if (t) { + cnf->sysctl = NULL; + unregister_net_sysctl_table(t->sysctl_header); + kfree(t); + } - cnf->sysctl = NULL; - unregister_net_sysctl_table(t->sysctl_header); - kfree(t); + inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL); } static int devinet_sysctl_register(struct in_device *idev) @@ -2295,7 +2318,9 @@ static int devinet_sysctl_register(struct in_device *idev) static void devinet_sysctl_unregister(struct in_device *idev) { - __devinet_sysctl_unregister(&idev->cnf); + struct net *net = dev_net(idev->dev); + + __devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex); neigh_sysctl_unregister(idev->arp_parms); } @@ -2370,9 +2395,9 @@ static __net_init int devinet_init_net(struct net *net) #ifdef CONFIG_SYSCTL err_reg_ctl: - __devinet_sysctl_unregister(dflt); + __devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT); err_reg_dflt: - __devinet_sysctl_unregister(all); + __devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL); err_reg_all: if (tbl != ctl_forward_entry) kfree(tbl); @@ -2394,8 +2419,10 @@ static __net_exit void devinet_exit_net(struct net *net) tbl = net->ipv4.forw_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.forw_hdr); - __devinet_sysctl_unregister(net->ipv4.devconf_dflt); - __devinet_sysctl_unregister(net->ipv4.devconf_all); + __devinet_sysctl_unregister(net, net->ipv4.devconf_dflt, + NETCONFA_IFINDEX_DEFAULT); + __devinet_sysctl_unregister(net, net->ipv4.devconf_all, + NETCONFA_IFINDEX_ALL); kfree(tbl); #endif kfree(net->ipv4.devconf_dflt); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index b1e24446e2972444835dd85c02434f3b089ba552..65cc02bd82bc87991a29135a58c059c7916a5a35 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -152,21 +152,28 @@ static void esp_output_restore_header(struct sk_buff *skb) } static struct ip_esp_hdr 
*esp_output_set_extra(struct sk_buff *skb, + struct xfrm_state *x, struct ip_esp_hdr *esph, struct esp_output_extra *extra) { - struct xfrm_state *x = skb_dst(skb)->xfrm; - /* For ESN we move the header forward by 4 bytes to * accommodate the high bits. We will move it back after * encryption. */ if ((x->props.flags & XFRM_STATE_ESN)) { + __u32 seqhi; + struct xfrm_offload *xo = xfrm_offload(skb); + + if (xo) + seqhi = xo->seq.hi; + else + seqhi = XFRM_SKB_CB(skb)->seq.output.hi; + extra->esphoff = (unsigned char *)esph - skb_transport_header(skb); esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); extra->seqhi = esph->spi; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); + esph->seq_no = htonl(seqhi); } esph->spi = x->id.spi; @@ -198,98 +205,56 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) tail[plen - 1] = proto; } -static int esp_output(struct xfrm_state *x, struct sk_buff *skb) +static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { - struct esp_output_extra *extra; - int err = -ENOMEM; - struct ip_esp_hdr *esph; - struct crypto_aead *aead; - struct aead_request *req; - struct scatterlist *sg, *dsg; - struct sk_buff *trailer; - struct page *page; - void *tmp; - u8 *iv; - u8 *tail; - u8 *vaddr; - int blksize; - int clen; - int alen; - int plen; - int ivlen; - int tfclen; - int nfrags; - int assoclen; - int extralen; - int tailen; - __be64 seqno; - __u8 proto = *skb_mac_header(skb); - - /* skb is pure payload to encrypt */ - - aead = x->data; - alen = crypto_aead_authsize(aead); - ivlen = crypto_aead_ivsize(aead); - - tfclen = 0; - if (x->tfcpad) { - struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); - u32 padto; - - padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached)); - if (skb->len < padto) - tfclen = padto - skb->len; + int encap_type; + struct udphdr *uh; + __be32 *udpdata32; + __be16 sport, dport; + struct xfrm_encap_tmpl *encap = x->encap; + struct ip_esp_hdr *esph = esp->esph; + + spin_lock_bh(&x->lock); + sport = encap->encap_sport; + dport = encap->encap_dport; + encap_type = encap->encap_type; + spin_unlock_bh(&x->lock); + + uh = (struct udphdr *)esph; + uh->source = sport; + uh->dest = dport; + uh->len = htons(skb->len + esp->tailen + - skb_transport_offset(skb)); + uh->check = 0; + + switch (encap_type) { + default: + case UDP_ENCAP_ESPINUDP: + esph = (struct ip_esp_hdr *)(uh + 1); + break; + case UDP_ENCAP_ESPINUDP_NON_IKE: + udpdata32 = (__be32 *)(uh + 1); + udpdata32[0] = udpdata32[1] = 0; + esph = (struct ip_esp_hdr *)(udpdata32 + 2); + break; } - blksize = ALIGN(crypto_aead_blocksize(aead), 4); - clen = ALIGN(skb->len + 2 + tfclen, blksize); - plen = clen - skb->len - tfclen; - tailen = tfclen + plen + alen; - assoclen = sizeof(*esph); - extralen = 0; - if (x->props.flags & XFRM_STATE_ESN) { - extralen += sizeof(*extra); - assoclen += sizeof(__be32); - } + *skb_mac_header(skb) = IPPROTO_UDP; + esp->esph = esph; +} - *skb_mac_header(skb) = IPPROTO_ESP; - esph = ip_esp_hdr(skb); +int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) +{ + u8 *tail; + u8 *vaddr; + int nfrags; + struct page *page; + struct sk_buff *trailer; + int tailen = esp->tailen; /* this is non-NULL only with UDP Encapsulation */ - if (x->encap) { - struct xfrm_encap_tmpl *encap = x->encap; - struct udphdr *uh; - __be32 *udpdata32; - __be16 sport, dport; - int encap_type; - - spin_lock_bh(&x->lock); - sport = encap->encap_sport; - dport = encap->encap_dport; - 
encap_type = encap->encap_type; - spin_unlock_bh(&x->lock); - - uh = (struct udphdr *)esph; - uh->source = sport; - uh->dest = dport; - uh->len = htons(skb->len + tailen - - skb_transport_offset(skb)); - uh->check = 0; - - switch (encap_type) { - default: - case UDP_ENCAP_ESPINUDP: - esph = (struct ip_esp_hdr *)(uh + 1); - break; - case UDP_ENCAP_ESPINUDP_NON_IKE: - udpdata32 = (__be32 *)(uh + 1); - udpdata32[0] = udpdata32[1] = 0; - esph = (struct ip_esp_hdr *)(udpdata32 + 2); - break; - } - - *skb_mac_header(skb) = IPPROTO_UDP; - } + if (x->encap) + esp_output_udp_encap(x, skb, esp); if (!skb_cloned(skb)) { if (tailen <= skb_availroom(skb)) { @@ -304,6 +269,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) struct sock *sk = skb->sk; struct page_frag *pfrag = &x->xfrag; + esp->inplace = false; + allocsize = ALIGN(tailen, L1_CACHE_BYTES); spin_lock_bh(&x->lock); @@ -320,10 +287,12 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) tail = vaddr + pfrag->offset; - esp_output_fill_trailer(tail, tfclen, plen, proto); + esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); kunmap_atomic(vaddr); + spin_unlock_bh(&x->lock); + nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, @@ -339,107 +308,113 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) if (sk) atomic_add(tailen, &sk->sk_wmem_alloc); - skb_push(skb, -skb_network_offset(skb)); - - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); - esph->spi = x->id.spi; - - tmp = esp_alloc_tmp(aead, nfrags + 2, extralen); - if (!tmp) { - spin_unlock_bh(&x->lock); - err = -ENOMEM; - goto error; - } - - extra = esp_tmp_extra(tmp); - iv = esp_tmp_iv(aead, tmp, extralen); - req = esp_tmp_req(aead, iv); - sg = esp_req_sg(aead, req); - dsg = &sg[nfrags]; - - esph = esp_output_set_extra(skb, esph, extra); - - sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, - (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); - - allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); - - if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { - spin_unlock_bh(&x->lock); - err = -ENOMEM; - goto error; - } - - skb_shinfo(skb)->nr_frags = 1; - - page = pfrag->page; - get_page(page); - /* replace page frags in skb with new page */ - __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); - pfrag->offset = pfrag->offset + allocsize; - - sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); - skb_to_sgvec(skb, dsg, - (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); - - spin_unlock_bh(&x->lock); - - goto skip_cow2; + goto out; } } cow: - err = skb_cow_data(skb, tailen, &trailer); - if (err < 0) - goto error; - nfrags = err; + nfrags = skb_cow_data(skb, tailen, &trailer); + if (nfrags < 0) + goto out; tail = skb_tail_pointer(trailer); - esph = ip_esp_hdr(skb); + esp->esph = ip_esp_hdr(skb); skip_cow: - esp_output_fill_trailer(tail, tfclen, plen, proto); + esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); + pskb_put(skb, trailer, tailen); - pskb_put(skb, trailer, clen - skb->len + alen); - skb_push(skb, -skb_network_offset(skb)); - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); - esph->spi = x->id.spi; +out: + return nfrags; +} +EXPORT_SYMBOL_GPL(esp_output_head); - tmp = esp_alloc_tmp(aead, nfrags, extralen); - if (!tmp) { - err = -ENOMEM; - goto error; +int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) +{ + u8 *iv; + int alen; + void *tmp; + int ivlen; + 
int assoclen; + int extralen; + struct page *page; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct aead_request *req; + struct scatterlist *sg, *dsg; + struct esp_output_extra *extra; + int err = -ENOMEM; + + assoclen = sizeof(struct ip_esp_hdr); + extralen = 0; + + if (x->props.flags & XFRM_STATE_ESN) { + extralen += sizeof(*extra); + assoclen += sizeof(__be32); } + aead = x->data; + alen = crypto_aead_authsize(aead); + ivlen = crypto_aead_ivsize(aead); + + tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen); + if (!tmp) + goto error; + extra = esp_tmp_extra(tmp); iv = esp_tmp_iv(aead, tmp, extralen); req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); - dsg = sg; - esph = esp_output_set_extra(skb, esph, extra); + if (esp->inplace) + dsg = sg; + else + dsg = &sg[esp->nfrags]; - sg_init_table(sg, nfrags); + esph = esp_output_set_extra(skb, x, esp->esph, extra); + esp->esph = esph; + + sg_init_table(sg, esp->nfrags); skb_to_sgvec(skb, sg, (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); + assoclen + ivlen + esp->clen + alen); + + if (!esp->inplace) { + int allocsize; + struct page_frag *pfrag = &x->xfrag; + + allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); + + spin_lock_bh(&x->lock); + if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { + spin_unlock_bh(&x->lock); + goto error; + } + + skb_shinfo(skb)->nr_frags = 1; + + page = pfrag->page; + get_page(page); + /* replace page frags in skb with new page */ + __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); + pfrag->offset = pfrag->offset + allocsize; + spin_unlock_bh(&x->lock); + + sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); + skb_to_sgvec(skb, dsg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + esp->clen + alen); + } -skip_cow2: if ((x->props.flags & XFRM_STATE_ESN)) aead_request_set_callback(req, 0, esp_output_done_esn, skb); else aead_request_set_callback(req, 0, esp_output_done, skb); - aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv); + aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv); aead_request_set_ad(req, assoclen); - seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + - ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); - memset(iv, 0, ivlen); - memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8), + memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8), min(ivlen, 8)); ESP_SKB_CB(skb)->tmp = tmp; @@ -465,11 +440,63 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) error: return err; } +EXPORT_SYMBOL_GPL(esp_output_tail); -static int esp_input_done2(struct sk_buff *skb, int err) +static int esp_output(struct xfrm_state *x, struct sk_buff *skb) +{ + int alen; + int blksize; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct esp_info esp; + + esp.inplace = true; + + esp.proto = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_ESP; + + /* skb is pure payload to encrypt */ + + aead = x->data; + alen = crypto_aead_authsize(aead); + + esp.tfclen = 0; + if (x->tfcpad) { + struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); + u32 padto; + + padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached)); + if (skb->len < padto) + esp.tfclen = padto - skb->len; + } + blksize = ALIGN(crypto_aead_blocksize(aead), 4); + esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize); + esp.plen = esp.clen - skb->len - esp.tfclen; + esp.tailen = esp.tfclen + esp.plen + alen; + + esp.esph = ip_esp_hdr(skb); + + esp.nfrags = esp_output_head(x, skb, &esp); + if (esp.nfrags < 
0) + return esp.nfrags; + + esph = esp.esph; + esph->spi = x->id.spi; + + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); + + skb_push(skb, -skb_network_offset(skb)); + + return esp_output_tail(x, skb, &esp); +} + +int esp_input_done2(struct sk_buff *skb, int err) { const struct iphdr *iph; struct xfrm_state *x = xfrm_input_state(skb); + struct xfrm_offload *xo = xfrm_offload(skb); struct crypto_aead *aead = x->data; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); @@ -478,7 +505,8 @@ static int esp_input_done2(struct sk_buff *skb, int err) u8 nexthdr[2]; int padlen; - kfree(ESP_SKB_CB(skb)->tmp); + if (!xo || !(xo->flags & CRYPTO_DONE)) + kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; @@ -549,6 +577,7 @@ static int esp_input_done2(struct sk_buff *skb, int err) out: return err; } +EXPORT_SYMBOL_GPL(esp_input_done2); static void esp_input_done(struct crypto_async_request *base, int err) { @@ -751,13 +780,17 @@ static int esp_init_aead(struct xfrm_state *x) char aead_name[CRYPTO_MAX_ALG_NAME]; struct crypto_aead *aead; int err; + u32 mask = 0; err = -ENAMETOOLONG; if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; - aead = crypto_alloc_aead(aead_name, 0, 0); + if (x->xso.offload_handle) + mask |= CRYPTO_ALG_ASYNC; + + aead = crypto_alloc_aead(aead_name, 0, mask); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; @@ -787,6 +820,7 @@ static int esp_init_authenc(struct xfrm_state *x) char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; + u32 mask = 0; err = -EINVAL; if (!x->ealg) @@ -812,7 +846,10 @@ static int esp_init_authenc(struct xfrm_state *x) goto error; } - aead = crypto_alloc_aead(authenc_name, 0, 0); + if (x->xso.offload_handle) + mask |= CRYPTO_ALG_ASYNC; + + aead = crypto_alloc_aead(authenc_name, 0, mask); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; @@ -931,7 +968,7 @@ static const struct xfrm_type esp_type = .destructor = esp_destroy, .get_mtu = esp4_get_mtu, .input = esp_input, - .output = esp_output + .output = esp_output, }; static struct xfrm4_protocol esp4_protocol = { diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 1de4426324064ff991399e7ef0f0b98c68386a5c..e0666016a7642c017b4693d32723245adc484982 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -43,27 +43,31 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head, if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) goto out; - err = secpath_set(skb); - if (err) - goto out; + xo = xfrm_offload(skb); + if (!xo || !(xo->flags & CRYPTO_DONE)) { + err = secpath_set(skb); + if (err) + goto out; - if (skb->sp->len == XFRM_MAX_DEPTH) - goto out; + if (skb->sp->len == XFRM_MAX_DEPTH) + goto out; - x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, - (xfrm_address_t *)&ip_hdr(skb)->daddr, - spi, IPPROTO_ESP, AF_INET); - if (!x) - goto out; + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, + (xfrm_address_t *)&ip_hdr(skb)->daddr, + spi, IPPROTO_ESP, AF_INET); + if (!x) + goto out; - skb->sp->xvec[skb->sp->len++] = x; - skb->sp->olen++; + skb->sp->xvec[skb->sp->len++] = x; + skb->sp->olen++; - xo = xfrm_offload(skb); - if (!xo) { - xfrm_state_put(x); - goto out; + xo = xfrm_offload(skb); + if (!xo) { + xfrm_state_put(x); + goto out; + } } + xo->flags |= XFRM_GRO; 
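+
+ /* With XFRM_GRO set, the state attached to the secpath above is
+ * reused when this skb is fed back into xfrm_input(), rather than
+ * a second SPI lookup being done.
+ */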
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; @@ -84,19 +88,214 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head, return NULL; } +static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb) +{ + struct ip_esp_hdr *esph; + struct iphdr *iph = ip_hdr(skb); + struct xfrm_offload *xo = xfrm_offload(skb); + int proto = iph->protocol; + + skb_push(skb, -skb_network_offset(skb)); + esph = ip_esp_hdr(skb); + *skb_mac_header(skb) = IPPROTO_ESP; + + esph->spi = x->id.spi; + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + + xo->proto = proto; +} + +static struct sk_buff *esp4_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + __u32 seq; + int err = 0; + struct sk_buff *skb2; + struct xfrm_state *x; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct sk_buff *segs = ERR_PTR(-EINVAL); + netdev_features_t esp_features = features; + struct xfrm_offload *xo = xfrm_offload(skb); + + if (!xo) + goto out; + + seq = xo->seq.low; + + x = skb->sp->xvec[skb->sp->len - 1]; + aead = x->data; + esph = ip_esp_hdr(skb); + + if (esph->spi != x->id.spi) + goto out; + + if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) + goto out; + + __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)); + + skb->encap_hdr_csum = 1; + + if (!(features & NETIF_F_HW_ESP)) + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + + segs = x->outer_mode->gso_segment(x, skb, esp_features); + if (IS_ERR_OR_NULL(segs)) + goto out; + + __skb_pull(skb, skb->data - skb_mac_header(skb)); + + skb2 = segs; + do { + struct sk_buff *nskb = skb2->next; + + xo = xfrm_offload(skb2); + xo->flags |= XFRM_GSO_SEGMENT; + xo->seq.low = seq; + xo->seq.hi = xfrm_replay_seqhi(x, seq); + + if (!(features & NETIF_F_HW_ESP)) + xo->flags |= CRYPTO_FALLBACK; + + x->outer_mode->xmit(x, skb2); + + err = x->type_offload->xmit(x, skb2, esp_features); + if (err) { + kfree_skb_list(segs); + return ERR_PTR(err); + } + + if (!skb_is_gso(skb2)) + seq++; + else + seq += skb_shinfo(skb2)->gso_segs; + + skb_push(skb2, skb2->mac_len); + skb2 = nskb; + } while (skb2); + +out: + return segs; +} + +static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb) +{ + struct crypto_aead *aead = x->data; + + if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead))) + return -EINVAL; + + skb->ip_summed = CHECKSUM_NONE; + + return esp_input_done2(skb, 0); +} + +static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) +{ + int err; + int alen; + int blksize; + struct xfrm_offload *xo; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct esp_info esp; + bool hw_offload = true; + + esp.inplace = true; + + xo = xfrm_offload(skb); + + if (!xo) + return -EINVAL; + + if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || + (x->xso.dev != skb->dev)) { + xo->flags |= CRYPTO_FALLBACK; + hw_offload = false; + } + + esp.proto = xo->proto; + + /* skb is pure payload to encrypt */ + + aead = x->data; + alen = crypto_aead_authsize(aead); + + esp.tfclen = 0; + /* XXX: Add support for tfc padding here. 
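+ * esp_output() derives tfclen from x->tfcpad and the path MTU via
+ * esp4_get_mtu(); the same calculation would presumably apply here
+ * once its interaction with GSO segmentation is sorted out.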
*/ + + blksize = ALIGN(crypto_aead_blocksize(aead), 4); + esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize); + esp.plen = esp.clen - skb->len - esp.tfclen; + esp.tailen = esp.tfclen + esp.plen + alen; + + esp.esph = ip_esp_hdr(skb); + + if (!hw_offload || !skb_is_gso(skb)) { + esp.nfrags = esp_output_head(x, skb, &esp); + if (esp.nfrags < 0) + return esp.nfrags; + } + + esph = esp.esph; + esph->spi = x->id.spi; + + skb_push(skb, -skb_network_offset(skb)); + + if (xo->flags & XFRM_GSO_SEGMENT) { + esph->seq_no = htonl(xo->seq.low); + } else { + ip_hdr(skb)->tot_len = htons(skb->len); + ip_send_check(ip_hdr(skb)); + } + + if (hw_offload) + return 0; + + esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); + + err = esp_output_tail(x, skb, &esp); + if (err < 0) + return err; + + secpath_reset(skb); + + return 0; +} + static const struct net_offload esp4_offload = { .callbacks = { .gro_receive = esp4_gro_receive, + .gso_segment = esp4_gso_segment, }, }; +static const struct xfrm_type_offload esp_type_offload = { + .description = "ESP4 OFFLOAD", + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, + .input_tail = esp_input_tail, + .xmit = esp_xmit, + .encap = esp4_gso_encap, +}; + static int __init esp4_offload_init(void) { + if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) { + pr_info("%s: can't add xfrm type offload\n", __func__); + return -EAGAIN; + } + return inet_add_offload(&esp4_offload, IPPROTO_ESP); } static void __exit esp4_offload_exit(void) { + if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0) + pr_info("%s: can't remove xfrm type offload\n", __func__); + inet_del_offload(&esp4_offload, IPPROTO_ESP); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 8f2133ffc2ff1b94871408a5f934cb938d3462b5..39bd1edee67649af86eb582c0ea478713360a506 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -632,7 +632,8 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, int err, remaining; struct rtmsg *rtm; - err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy); + err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy, + NULL); if (err < 0) goto errout; @@ -709,7 +710,8 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, return err; } -static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct fib_config cfg; @@ -731,7 +733,8 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } -static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct fib_config cfg; @@ -1127,7 +1130,8 @@ static void fib_disable_ip(struct net_device *dev, unsigned long event, { if (fib_sync_down_dev(dev, event, force)) fib_flush(dev_net(dev)); - rt_cache_flush(dev_net(dev)); + else + rt_cache_flush(dev_net(dev)); arp_ifdown(dev); } diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c new file mode 100644 index 0000000000000000000000000000000000000000..e0714d975947160565dfb1bd9a86bc7a592c73b1 --- /dev/null +++ b/net/ipv4/fib_notifier.c @@ -0,0 +1,86 @@ +#include +#include +#include +#include +#include +#include +#include + +static ATOMIC_NOTIFIER_HEAD(fib_chain); + +int call_fib_notifier(struct notifier_block *nb, 
struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + info->net = net; + return nb->notifier_call(nb, event_type, info); +} + +int call_fib_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + net->ipv4.fib_seq++; + info->net = net; + return atomic_notifier_call_chain(&fib_chain, event_type, info); +} + +static unsigned int fib_seq_sum(void) +{ + unsigned int fib_seq = 0; + struct net *net; + + rtnl_lock(); + for_each_net(net) + fib_seq += net->ipv4.fib_seq; + rtnl_unlock(); + + return fib_seq; +} + +static bool fib_dump_is_consistent(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb), + unsigned int fib_seq) +{ + atomic_notifier_chain_register(&fib_chain, nb); + if (fib_seq == fib_seq_sum()) + return true; + atomic_notifier_chain_unregister(&fib_chain, nb); + if (cb) + cb(nb); + return false; +} + +#define FIB_DUMP_MAX_RETRIES 5 +int register_fib_notifier(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb)) +{ + int retries = 0; + + do { + unsigned int fib_seq = fib_seq_sum(); + struct net *net; + + /* Mutex semantics guarantee that every change done to + * FIB tries before we read the change sequence counter + * is now visible to us. + */ + rcu_read_lock(); + for_each_net_rcu(net) { + fib_rules_notify(net, nb); + fib_notify(net, nb); + } + rcu_read_unlock(); + + if (fib_dump_is_consistent(nb, cb, fib_seq)) + return 0; + } while (++retries < FIB_DUMP_MAX_RETRIES); + + return -EBUSY; +} +EXPORT_SYMBOL(register_fib_notifier); + +int unregister_fib_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&fib_chain, nb); +} +EXPORT_SYMBOL(unregister_fib_notifier); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 2e50062f642d61bdc0c0893de4e4ff84b5be6c8d..778ecf977eb2bd7b7b3808691aaf818fc4bd680d 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -47,6 +47,27 @@ struct fib4_rule { #endif }; +static bool fib4_rule_matchall(const struct fib_rule *rule) +{ + struct fib4_rule *r = container_of(rule, struct fib4_rule, common); + + if (r->dst_len || r->src_len || r->tos) + return false; + return fib_rule_matchall(rule); +} + +bool fib4_rule_default(const struct fib_rule *rule) +{ + if (!fib4_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL || + rule->l3mdev) + return false; + if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN && + rule->table != RT_TABLE_DEFAULT) + return false; + return true; +} +EXPORT_SYMBOL_GPL(fib4_rule_default); + int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags) { @@ -164,12 +185,36 @@ static struct fib_table *fib_empty_table(struct net *net) return NULL; } +static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_rule *rule) +{ + struct fib_rule_notifier_info info = { + .rule = rule, + }; + + return call_fib_notifier(nb, net, event_type, &info.info); +} + static int call_fib_rule_notifiers(struct net *net, - enum fib_event_type event_type) + enum fib_event_type event_type, + struct fib_rule *rule) +{ + struct fib_rule_notifier_info info = { + .rule = rule, + }; + + return call_fib_notifiers(net, event_type, &info.info); +} + +/* Called with rcu_read_lock() */ +void fib_rules_notify(struct net *net, struct notifier_block *nb) { - struct fib_notifier_info info; + struct fib_rules_ops *ops = net->ipv4.rules_ops; + struct fib_rule *rule; - return call_fib_notifiers(net, event_type, 
&info); + list_for_each_entry_rcu(rule, &ops->rules_list, list) + call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule); } static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = { @@ -228,7 +273,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, rule4->tos = frh->tos; net->ipv4.fib_has_custom_rules = true; - call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD); + call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule); err = 0; errout: @@ -250,7 +295,7 @@ static int fib4_rule_delete(struct fib_rule *rule) net->ipv4.fib_num_tclassid_users--; #endif net->ipv4.fib_has_custom_rules = true; - call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL); + call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule); errout: return err; } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 317026a39cfa2b49bf06182d89a11af0fa2688af..da449ddb8cc172bd9091c00057a69a095f98b56d 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -57,7 +57,6 @@ static unsigned int fib_info_cnt; static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE]; #ifdef CONFIG_IP_ROUTE_MULTIPATH -u32 fib_multipath_secret __read_mostly; #define for_nexthops(fi) { \ int nhsel; const struct fib_nh *nh; \ @@ -576,9 +575,6 @@ static void fib_rebalance(struct fib_info *fi) atomic_set(&nexthop_nh->nh_upper_bound, upper_bound); } endfor_nexthops(fi); - - net_get_random_once(&fib_multipath_secret, - sizeof(fib_multipath_secret)); } static inline void fib_add_weight(struct fib_info *fi, @@ -1641,7 +1637,7 @@ void fib_select_multipath(struct fib_result *res, int hash) #endif void fib_select_path(struct net *net, struct fib_result *res, - struct flowi4 *fl4, int mp_hash) + struct flowi4 *fl4, const struct sk_buff *skb) { bool oif_check; @@ -1650,10 +1646,9 @@ void fib_select_path(struct net *net, struct fib_result *res, #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi->fib_nhs > 1 && oif_check) { - if (mp_hash < 0) - mp_hash = get_hash_from_flowi4(fl4) >> 1; + int h = fib_multipath_hash(res->fi, fl4, skb); - fib_select_multipath(res, mp_hash); + fib_select_multipath(res, h); } else #endif diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 2f0d8233950faeac91f287644bc9476f19f74578..1201409ba1dcb18ee028003b065410b87bf4a602 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -84,43 +84,6 @@ #include #include "fib_lookup.h" -static unsigned int fib_seq_sum(void) -{ - unsigned int fib_seq = 0; - struct net *net; - - rtnl_lock(); - for_each_net(net) - fib_seq += net->ipv4.fib_seq; - rtnl_unlock(); - - return fib_seq; -} - -static ATOMIC_NOTIFIER_HEAD(fib_chain); - -static int call_fib_notifier(struct notifier_block *nb, struct net *net, - enum fib_event_type event_type, - struct fib_notifier_info *info) -{ - info->net = net; - return nb->notifier_call(nb, event_type, info); -} - -static void fib_rules_notify(struct net *net, struct notifier_block *nb, - enum fib_event_type event_type) -{ -#ifdef CONFIG_IP_MULTIPLE_TABLES - struct fib_notifier_info info; - - if (net->ipv4.fib_has_custom_rules) - call_fib_notifier(nb, net, event_type, &info); -#endif -} - -static void fib_notify(struct net *net, struct notifier_block *nb, - enum fib_event_type event_type); - static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_info *fi, @@ -137,62 +100,6 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, return call_fib_notifier(nb, net, event_type, &info.info); } -static 
bool fib_dump_is_consistent(struct notifier_block *nb, - void (*cb)(struct notifier_block *nb), - unsigned int fib_seq) -{ - atomic_notifier_chain_register(&fib_chain, nb); - if (fib_seq == fib_seq_sum()) - return true; - atomic_notifier_chain_unregister(&fib_chain, nb); - if (cb) - cb(nb); - return false; -} - -#define FIB_DUMP_MAX_RETRIES 5 -int register_fib_notifier(struct notifier_block *nb, - void (*cb)(struct notifier_block *nb)) -{ - int retries = 0; - - do { - unsigned int fib_seq = fib_seq_sum(); - struct net *net; - - /* Mutex semantics guarantee that every change done to - * FIB tries before we read the change sequence counter - * is now visible to us. - */ - rcu_read_lock(); - for_each_net_rcu(net) { - fib_rules_notify(net, nb, FIB_EVENT_RULE_ADD); - fib_notify(net, nb, FIB_EVENT_ENTRY_ADD); - } - rcu_read_unlock(); - - if (fib_dump_is_consistent(nb, cb, fib_seq)) - return 0; - } while (++retries < FIB_DUMP_MAX_RETRIES); - - return -EBUSY; -} -EXPORT_SYMBOL(register_fib_notifier); - -int unregister_fib_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&fib_chain, nb); -} -EXPORT_SYMBOL(unregister_fib_notifier); - -int call_fib_notifiers(struct net *net, enum fib_event_type event_type, - struct fib_notifier_info *info) -{ - net->ipv4.fib_seq++; - info->net = net; - return atomic_notifier_call_chain(&fib_chain, event_type, info); -} - static int call_fib_entry_notifiers(struct net *net, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_info *fi, @@ -1995,8 +1902,7 @@ int fib_table_flush(struct net *net, struct fib_table *tb) } static void fib_leaf_notify(struct net *net, struct key_vector *l, - struct fib_table *tb, struct notifier_block *nb, - enum fib_event_type event_type) + struct fib_table *tb, struct notifier_block *nb) { struct fib_alias *fa; @@ -2012,22 +1918,21 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l, if (tb->tb_id != fa->tb_id) continue; - call_fib_entry_notifier(nb, net, event_type, l->key, + call_fib_entry_notifier(nb, net, FIB_EVENT_ENTRY_ADD, l->key, KEYLENGTH - fa->fa_slen, fi, fa->fa_tos, fa->fa_type, fa->tb_id); } } static void fib_table_notify(struct net *net, struct fib_table *tb, - struct notifier_block *nb, - enum fib_event_type event_type) + struct notifier_block *nb) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *l, *tp = t->kv; t_key key = 0; while ((l = leaf_walk_rcu(&tp, key)) != NULL) { - fib_leaf_notify(net, l, tb, nb, event_type); + fib_leaf_notify(net, l, tb, nb); key = l->key + 1; /* stop in case of wrap around */ @@ -2036,8 +1941,7 @@ static void fib_table_notify(struct net *net, struct fib_table *tb, } } -static void fib_notify(struct net *net, struct notifier_block *nb, - enum fib_event_type event_type) +void fib_notify(struct net *net, struct notifier_block *nb) { unsigned int h; @@ -2046,7 +1950,7 @@ static void fib_notify(struct net *net, struct notifier_block *nb, struct fib_table *tb; hlist_for_each_entry_rcu(tb, head, tb_hlist) - fib_table_notify(net, tb, nb, event_type); + fib_table_notify(net, tb, nb); } } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index fc310db2708bf6c9e96befe413e89ac931818f74..43318b5f56474bc15253e74e156962dd2c8df01f 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -464,22 +464,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) local_bh_enable(); } -#ifdef CONFIG_IP_ROUTE_MULTIPATH - -/* Source and destination is swapped. 
See ip_multipath_icmp_hash */ -static int icmp_multipath_hash_skb(const struct sk_buff *skb) -{ - const struct iphdr *iph = ip_hdr(skb); - - return fib_multipath_hash(iph->daddr, iph->saddr); -} - -#else - -#define icmp_multipath_hash_skb(skb) (-1) - -#endif - static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, @@ -505,8 +489,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); - rt = __ip_route_output_key_hash(net, fl4, - icmp_multipath_hash_skb(skb_in)); + rt = __ip_route_output_key_hash(net, fl4, skb_in); if (IS_ERR(rt)) return rt; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index c9c1cb635d9afd0c5ccdec4402aa2aac29cc889a..e90c80a548ad8f61b8542fdd8a2a55a2562a0e07 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -829,7 +829,8 @@ static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[]) static int ipgre_netlink_parms(struct net_device *dev, struct nlattr *data[], struct nlattr *tb[], - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm *parms, + __u32 *fwmark) { struct ip_tunnel *t = netdev_priv(dev); @@ -886,6 +887,9 @@ static int ipgre_netlink_parms(struct net_device *dev, t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]); } + if (data[IFLA_GRE_FWMARK]) + *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); + return 0; } @@ -957,6 +961,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + __u32 fwmark = 0; int err; if (ipgre_netlink_encap_parms(data, &ipencap)) { @@ -967,31 +972,32 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, return err; } - err = ipgre_netlink_parms(dev, data, tb, &p); + err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) return err; - return ip_tunnel_newlink(dev, tb, &p); + return ip_tunnel_newlink(dev, tb, &p, fwmark); } static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { + struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + __u32 fwmark = t->fwmark; int err; if (ipgre_netlink_encap_parms(data, &ipencap)) { - struct ip_tunnel *t = netdev_priv(dev); err = ip_tunnel_encap_setup(t, &ipencap); if (err < 0) return err; } - err = ipgre_netlink_parms(dev, data, tb, &p); + err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) return err; - return ip_tunnel_changelink(dev, tb, &p); + return ip_tunnel_changelink(dev, tb, &p, fwmark); } static size_t ipgre_get_size(const struct net_device *dev) @@ -1029,6 +1035,8 @@ static size_t ipgre_get_size(const struct net_device *dev) nla_total_size(0) + /* IFLA_GRE_IGNORE_DF */ nla_total_size(1) + + /* IFLA_GRE_FWMARK */ + nla_total_size(4) + 0; } @@ -1049,7 +1057,8 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) || nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) || nla_put_u8(skb, IFLA_GRE_PMTUDISC, - !!(p->iph.frag_off & htons(IP_DF)))) + !!(p->iph.frag_off & htons(IP_DF))) || + nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, @@ -1093,6 +1102,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, [IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 }, + [IFLA_GRE_FWMARK] = 
{ .type = NLA_U32 }, }; static struct rtnl_link_ops ipgre_link_ops __read_mostly = { diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index d6feabb0351607f282e1f78f159c0ccb88bcec96..fa2dc8f692c631f1ff7fe814c3ee27f0de2a41d8 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -313,6 +313,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct net_device *dev = skb->dev; + void (*edemux)(struct sk_buff *skb); /* if ingress device is enslaved to an L3 master device pass the * skb to its handler for processing @@ -329,8 +330,8 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) int protocol = iph->protocol; ipprot = rcu_dereference(inet_protos[protocol]); - if (ipprot && ipprot->early_demux) { - ipprot->early_demux(skb); + if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { + edemux(skb); /* must reload iph, skb->head might have changed */ iph = ip_hdr(skb); } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1d46d05efb0ff067c35750ac43be8e7babd60446..ec4fe3d4b5c9c17d53d7464b42b82a564ae48e54 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -330,7 +330,6 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, sent to multicast group to reach destination designated router. */ struct ip_ra_chain __rcu *ip_ra_chain; -static DEFINE_SPINLOCK(ip_ra_lock); static void ip_ra_destroy_rcu(struct rcu_head *head) @@ -352,21 +351,17 @@ int ip_ra_control(struct sock *sk, unsigned char on, new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; - spin_lock_bh(&ip_ra_lock); for (rap = &ip_ra_chain; - (ra = rcu_dereference_protected(*rap, - lockdep_is_held(&ip_ra_lock))) != NULL; + (ra = rtnl_dereference(*rap)) != NULL; rap = &ra->next) { if (ra->sk == sk) { if (on) { - spin_unlock_bh(&ip_ra_lock); kfree(new_ra); return -EADDRINUSE; } /* dont let ip_call_ra_chain() use sk again */ ra->sk = NULL; RCU_INIT_POINTER(*rap, ra->next); - spin_unlock_bh(&ip_ra_lock); if (ra->destructor) ra->destructor(sk); @@ -380,17 +375,14 @@ int ip_ra_control(struct sock *sk, unsigned char on, return 0; } } - if (!new_ra) { - spin_unlock_bh(&ip_ra_lock); + if (!new_ra) return -ENOBUFS; - } new_ra->sk = sk; new_ra->destructor = destructor; RCU_INIT_POINTER(new_ra->next, ra); rcu_assign_pointer(*rap, new_ra); sock_hold(sk); - spin_unlock_bh(&ip_ra_lock); return 0; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 823abaef006bd353cf0466f14dd3abaa26f80c07..b878ecbc0608fb433ae858b70e6e0101aa20fc4e 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -293,7 +293,8 @@ static struct net_device *__ip_tunnel_create(struct net *net, static inline void init_tunnel_flow(struct flowi4 *fl4, int proto, __be32 daddr, __be32 saddr, - __be32 key, __u8 tos, int oif) + __be32 key, __u8 tos, int oif, + __u32 mark) { memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_oif = oif; @@ -302,6 +303,7 @@ static inline void init_tunnel_flow(struct flowi4 *fl4, fl4->flowi4_tos = tos; fl4->flowi4_proto = proto; fl4->fl4_gre_key = key; + fl4->flowi4_mark = mark; } static int ip_tunnel_bind_dev(struct net_device *dev) @@ -322,7 +324,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev) init_tunnel_flow(&fl4, iph->protocol, iph->daddr, iph->saddr, tunnel->parms.o_key, - RT_TOS(iph->tos), tunnel->parms.link); + RT_TOS(iph->tos), tunnel->parms.link, + tunnel->fwmark); rt = ip_route_output_key(tunnel->net, &fl4); if (!IS_ERR(rt)) { @@ -578,7 
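The ip_rcv_finish() hunk just above snapshots the early_demux pointer with READ_ONCE() because later hunks in this series (the sysctl_net_ipv4.c changes further down) let a writer flip that pointer at runtime; testing and calling through two independent loads could observe two different values. A self-contained rendering of the snapshot idiom, with a userspace stand-in for READ_ONCE():

#include <stdio.h>

/* Userspace stand-in for the kernel's READ_ONCE(): a single
 * volatile load, so the compiler cannot re-read the pointer.
 */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static void demux_tcp(int pkt) { printf("demuxed %d\n", pkt); }

/* Another thread (or a sysctl handler) may flip this to NULL. */
static void (*early_demux)(int) = demux_tcp;

int main(void)
{
	void (*edemux)(int) = READ_ONCE(early_demux);

	if (edemux)		/* test and call use the same snapshot */
		edemux(42);
	return 0;
}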
+581,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto) tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); } init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0, - RT_TOS(tos), tunnel->parms.link); + RT_TOS(tos), tunnel->parms.link, tunnel->fwmark); if (tunnel->encap.type != TUNNEL_ENCAP_NONE) goto tx_error; rt = ip_route_output_key(tunnel->net, &fl4); @@ -707,7 +710,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, - tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link); + tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link, + tunnel->fwmark); if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) goto tx_error; @@ -795,7 +799,8 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, struct ip_tunnel *t, struct net_device *dev, struct ip_tunnel_parm *p, - bool set_mtu) + bool set_mtu, + __u32 fwmark) { ip_tunnel_del(itn, t); t->parms.iph.saddr = p->iph.saddr; @@ -812,10 +817,11 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, t->parms.iph.tos = p->iph.tos; t->parms.iph.frag_off = p->iph.frag_off; - if (t->parms.link != p->link) { + if (t->parms.link != p->link || t->fwmark != fwmark) { int mtu; t->parms.link = p->link; + t->fwmark = fwmark; mtu = ip_tunnel_bind_dev(dev); if (set_mtu) dev->mtu = mtu; @@ -893,7 +899,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) if (t) { err = 0; - ip_tunnel_update(itn, t, dev, p, true); + ip_tunnel_update(itn, t, dev, p, true, 0); } else { err = -ENOENT; } @@ -1066,7 +1072,7 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) EXPORT_SYMBOL_GPL(ip_tunnel_delete_net); int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p) + struct ip_tunnel_parm *p, __u32 fwmark) { struct ip_tunnel *nt; struct net *net = dev_net(dev); @@ -1087,6 +1093,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], nt->net = net; nt->parms = *p; + nt->fwmark = fwmark; err = register_netdevice(dev); if (err) goto out; @@ -1105,7 +1112,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], EXPORT_SYMBOL_GPL(ip_tunnel_newlink); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p) + struct ip_tunnel_parm *p, __u32 fwmark) { struct ip_tunnel *t; struct ip_tunnel *tunnel = netdev_priv(dev); @@ -1137,7 +1144,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], } } - ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]); + ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU], fwmark); return 0; } EXPORT_SYMBOL_GPL(ip_tunnel_changelink); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index a31f47ccaad90deadb686b2a9091694c8c1ecb9d..baf196eaf1d81809a9264fb94daa58eae51f1bff 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -235,7 +235,7 @@ static int ip_tun_build_state(struct nlattr *attr, struct nlattr *tb[LWTUNNEL_IP_MAX + 1]; int err; - err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy); + err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy, NULL); if (err < 0) return err; @@ -332,7 +332,8 @@ static int ip6_tun_build_state(struct nlattr *attr, struct nlattr *tb[LWTUNNEL_IP6_MAX + 1]; int err; - err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy); + err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy, + NULL); if (err < 0) return err; diff --git 
a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 8b14f1404c8f7315495877a34ad6757aee4232a8..40977413fd4843f304a6782286e46403bbab9d57 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -471,7 +471,8 @@ static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) } static void vti_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm *parms, + __u32 *fwmark) { memset(parms, 0, sizeof(*parms)); @@ -497,24 +498,29 @@ static void vti_netlink_parms(struct nlattr *data[], if (data[IFLA_VTI_REMOTE]) parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]); + if (data[IFLA_VTI_FWMARK]) + *fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]); } static int vti_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm parms; + __u32 fwmark = 0; - vti_netlink_parms(data, &parms); - return ip_tunnel_newlink(dev, tb, &parms); + vti_netlink_parms(data, &parms, &fwmark); + return ip_tunnel_newlink(dev, tb, &parms, fwmark); } static int vti_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { + struct ip_tunnel *t = netdev_priv(dev); + __u32 fwmark = t->fwmark; struct ip_tunnel_parm p; - vti_netlink_parms(data, &p); - return ip_tunnel_changelink(dev, tb, &p); + vti_netlink_parms(data, &p, &fwmark); + return ip_tunnel_changelink(dev, tb, &p, fwmark); } static size_t vti_get_size(const struct net_device *dev) @@ -530,6 +536,8 @@ static size_t vti_get_size(const struct net_device *dev) nla_total_size(4) + /* IFLA_VTI_REMOTE */ nla_total_size(4) + + /* IFLA_VTI_FWMARK */ + nla_total_size(4) + 0; } @@ -543,6 +551,7 @@ static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key); nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr); nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr); + nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark); return 0; } @@ -553,6 +562,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = { [IFLA_VTI_OKEY] = { .type = NLA_U32 }, [IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + [IFLA_VTI_FWMARK] = { .type = NLA_U32 }, }; static struct rtnl_link_ops vti_link_ops __read_mostly = { diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index dfb2ab2dd3c84d93b8d77df41d1160d26f162fc3..c3b12b1c71621b942dd4f9ad1d60ab5bb3c66e20 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 00d4229b6954262e556f7a8fd9c072638d49a8d6..1e441c6f216025339caf5d82a04ea3570566641a 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -390,7 +390,8 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) } static void ipip_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms, bool *collect_md) + struct ip_tunnel_parm *parms, bool *collect_md, + __u32 *fwmark) { memset(parms, 0, sizeof(*parms)); @@ -428,6 +429,9 @@ static void ipip_netlink_parms(struct nlattr *data[], if (data[IFLA_IPTUN_COLLECT_METADATA]) *collect_md = true; + + if (data[IFLA_IPTUN_FWMARK]) + *fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); } /* This function returns true when ENCAP attributes are present in the nl msg */ @@ -470,6 +474,7 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev, struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm p; struct 
ip_tunnel_encap ipencap; + __u32 fwmark = 0; if (ipip_netlink_encap_parms(data, &ipencap)) { int err = ip_tunnel_encap_setup(t, &ipencap); @@ -478,26 +483,27 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev, return err; } - ipip_netlink_parms(data, &p, &t->collect_md); - return ip_tunnel_newlink(dev, tb, &p); + ipip_netlink_parms(data, &p, &t->collect_md, &fwmark); + return ip_tunnel_newlink(dev, tb, &p, fwmark); } static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { + struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; bool collect_md; + __u32 fwmark = t->fwmark; if (ipip_netlink_encap_parms(data, &ipencap)) { - struct ip_tunnel *t = netdev_priv(dev); int err = ip_tunnel_encap_setup(t, &ipencap); if (err < 0) return err; } - ipip_netlink_parms(data, &p, &collect_md); + ipip_netlink_parms(data, &p, &collect_md, &fwmark); if (collect_md) return -EINVAL; @@ -505,7 +511,7 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr)) return -EINVAL; - return ip_tunnel_changelink(dev, tb, &p); + return ip_tunnel_changelink(dev, tb, &p, fwmark); } static size_t ipip_get_size(const struct net_device *dev) @@ -535,6 +541,8 @@ static size_t ipip_get_size(const struct net_device *dev) nla_total_size(2) + /* IFLA_IPTUN_COLLECT_METADATA */ nla_total_size(0) + + /* IFLA_IPTUN_FWMARK */ + nla_total_size(4) + 0; } @@ -550,7 +558,8 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) || nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) || nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, - !!(parm->iph.frag_off & htons(IP_DF)))) + !!(parm->iph.frag_off & htons(IP_DF))) || + nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, @@ -585,6 +594,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG }, + [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 }, }; static struct rtnl_link_ops ipip_link_ops __read_mostly = { diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b036e85e093b3e97cee1b0dbffc8d1dfeb6a2b72..3a02d52ed50ec54ce3f9f3eb07dc9f4133535a80 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -631,7 +631,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify, in_dev = __in_dev_get_rtnl(dev); if (in_dev) { IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; - inet_netconf_notify_devconf(dev_net(dev), + inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in_dev->cnf); ip_rt_multicast_event(in_dev); @@ -820,8 +820,8 @@ static int vif_add(struct net *net, struct mr_table *mrt, return -EADDRNOTAVAIL; } IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; - inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex, - &in_dev->cnf); + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, + dev->ifindex, &in_dev->cnf); ip_rt_multicast_event(in_dev); /* Fill in the VIF structures */ @@ -1282,7 +1282,8 @@ static void mrtsock_destruct(struct sock *sk) ipmr_for_each_table(mrt, net) { if (sk == rtnl_dereference(mrt->mroute_sk)) { IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; - inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + 
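All of the fwmark hunks (ip_gre.c, ip_tunnel.c, ip_vti.c, ipip.c) follow one pattern: parse IFLA_*_FWMARK into a __u32, thread it through newlink/changelink, and let init_tunnel_flow() copy it into flowi4_mark so the tunnel's own route lookup can be steered by mark-based policy routing. A condensed, purely illustrative model of the flow-key side (types invented, not the kernel's struct flowi4):

#include <stdio.h>
#include <string.h>

/* Invented miniature of the flow key: just the fields this
 * pattern touches.
 */
struct flow4 {
	unsigned int oif;
	unsigned int daddr, saddr;
	unsigned char tos, proto;
	unsigned int mark;	/* NEW: firewall mark for the rule lookup */
};

static void init_tunnel_flow(struct flow4 *fl4, unsigned char proto,
			     unsigned int daddr, unsigned int saddr,
			     unsigned char tos, unsigned int oif,
			     unsigned int mark)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->tos = tos;
	fl4->proto = proto;
	fl4->mark = mark;	/* mark-based ip rules key off this */
}

int main(void)
{
	struct flow4 fl4;

	init_tunnel_flow(&fl4, 47 /* GRE */, 0x0a000001, 0x0a000002,
			 0, 3, 0x10);
	printf("route lookup with mark 0x%x\n", fl4.mark);
	return 0;
}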
NETCONFA_MC_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all); RCU_INIT_POINTER(mrt->mroute_sk, NULL); @@ -1343,7 +1344,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, if (ret == 0) { rcu_assign_pointer(mrt->mroute_sk, sk); IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; - inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, + inet_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_MC_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all); } @@ -2421,7 +2423,8 @@ static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc) /* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, struct mfcctl *mfcc, int *mrtsock, - struct mr_table **mrtret) + struct mr_table **mrtret, + struct netlink_ext_ack *extack) { struct net_device *dev = NULL; u32 tblid = RT_TABLE_DEFAULT; @@ -2430,7 +2433,8 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, struct rtmsg *rtm; int ret, rem; - ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy); + ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy, + extack); if (ret < 0) goto out; rtm = nlmsg_data(nlh); @@ -2489,7 +2493,8 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, } /* takes care of both newroute and delroute */ -static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh) +static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); int ret, mrtsock, parent; @@ -2498,7 +2503,7 @@ static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh) mrtsock = 0; tbl = NULL; - ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl); + ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack); if (ret < 0) return ret; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 6241a81fd7f5a3df8fb3cf251bfdd407dda6a1f6..0bc3c3d73e61e00a04d73f32800256a62c8943a5 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -309,8 +309,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo, */ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; - struct arpt_entry *e - = (struct arpt_entry *)(entry0 + pos); + struct arpt_entry *e = entry0 + pos; if (!(valid_hooks & (1 << hook))) continue; @@ -354,14 +353,12 @@ static int mark_source_chains(const struct xt_table_info *newinfo, if (pos == oldpos) goto next; - e = (struct arpt_entry *) - (entry0 + pos); + e = entry0 + pos; } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; - e = (struct arpt_entry *) - (entry0 + pos + size); + e = entry0 + pos + size; if (pos + size >= newinfo->size) return 0; e->counters.pcnt = pos; @@ -376,16 +373,14 @@ static int mark_source_chains(const struct xt_table_info *newinfo, if (!xt_find_jump_offset(offsets, newpos, newinfo->number)) return 0; - e = (struct arpt_entry *) - (entry0 + newpos); + e = entry0 + newpos; } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; if (newpos >= newinfo->size) return 0; } - e = (struct arpt_entry *) - (entry0 + newpos); + e = entry0 + newpos; e->counters.pcnt = pos; pos = newpos; } @@ -562,8 +557,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } - if (ret != 0) - goto out_free; ret = -EINVAL; if (i != repl->num_entries) @@ -683,7 +676,7 @@ static int copy_entries_to_user(unsigned int total_size, for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ const struct xt_entry_target *t; - e = (struct arpt_entry *)(loc_cpu_entry + off); + e = loc_cpu_entry + off; if (copy_to_user(userptr + off, e, sizeof(*e))) { ret = -EFAULT; goto free_counters; @@ -1130,7 +1123,7 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, int h; origsize = *size; - de = (struct arpt_entry *)*dstptr; + de = *dstptr; memcpy(de, e, sizeof(struct arpt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); @@ -1324,7 +1317,7 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, int ret; origsize = *size; - ce = (struct compat_arpt_entry __user *)*dstptr; + ce = *dstptr; if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 384b85713e062881d3d2554b0828f08ff28f443d..2a55a40211cbfb94a9ade41c33678bba4be2a7e4 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -382,7 +382,7 @@ mark_source_chains(const struct xt_table_info *newinfo, to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; - struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos); + struct ipt_entry *e = entry0 + pos; if (!(valid_hooks & (1 << hook))) continue; @@ -424,14 +424,12 @@ mark_source_chains(const struct xt_table_info *newinfo, if (pos == oldpos) goto next; - e = (struct ipt_entry *) - (entry0 + pos); + e = entry0 + pos; } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; - e = (struct ipt_entry *) - (entry0 + pos + size); + e = entry0 + pos + size; if (pos + size >= newinfo->size) return 0; e->counters.pcnt = pos; @@ -446,16 +444,14 @@ mark_source_chains(const struct xt_table_info *newinfo, if (!xt_find_jump_offset(offsets, newpos, newinfo->number)) return 0; - e = (struct ipt_entry *) - (entry0 + newpos); + e = entry0 + newpos; } else { /* ... 
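The arp_tables.c and ip_tables.c hunks here and below are mechanical cleanups: entry0 is a void *, and in GNU C -- which the kernel requires -- void * arithmetic works in byte units and converts implicitly to any object pointer, so casts like (struct ipt_entry *)(entry0 + pos) were pure noise. A compilable illustration (gcc/clang, GNU extension assumed):

#include <stdio.h>

struct entry {
	unsigned int next_offset;	/* byte offset to the next entry */
	int verdict;
};

int main(void)
{
	struct entry table[4] = { { sizeof(struct entry), 1 } };
	void *entry0 = table;
	struct entry *e;

	/* GNU C: arithmetic on void * advances in bytes, and void *
	 * assigns to any object pointer without a cast.
	 */
	e = entry0 + table[0].next_offset;
	e->verdict = 2;
	printf("second entry verdict %d\n", table[1].verdict);
	return 0;
}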
this is a fallthru */ newpos = pos + e->next_offset; if (newpos >= newinfo->size) return 0; } - e = (struct ipt_entry *) - (entry0 + newpos); + e = entry0 + newpos; e->counters.pcnt = pos; pos = newpos; } @@ -834,7 +830,7 @@ copy_entries_to_user(unsigned int total_size, const struct xt_entry_match *m; const struct xt_entry_target *t; - e = (struct ipt_entry *)(loc_cpu_entry + off); + e = loc_cpu_entry + off; if (copy_to_user(userptr + off, e, sizeof(*e))) { ret = -EFAULT; goto free_counters; @@ -1229,7 +1225,7 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, int ret = 0; origsize = *size; - ce = (struct compat_ipt_entry __user *)*dstptr; + ce = *dstptr; if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) @@ -1366,7 +1362,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, struct xt_entry_match *ematch; origsize = *size; - de = (struct ipt_entry *)*dstptr; + de = *dstptr; memcpy(de, e, sizeof(struct ipt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 9b8841316e7b94e375cc52d0dfd7f9fe89205195..038f293c23766de6cd77f361269743498c2dd18e 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -40,8 +41,8 @@ MODULE_DESCRIPTION("Xtables: CLUSTERIP target"); struct clusterip_config { struct list_head list; /* list of all configs */ - atomic_t refcount; /* reference count */ - atomic_t entries; /* number of entries/rules + refcount_t refcount; /* reference count */ + refcount_t entries; /* number of entries/rules * referencing us */ __be32 clusterip; /* the IP address */ @@ -77,7 +78,7 @@ struct clusterip_net { static inline void clusterip_config_get(struct clusterip_config *c) { - atomic_inc(&c->refcount); + refcount_inc(&c->refcount); } @@ -89,7 +90,7 @@ static void clusterip_config_rcu_free(struct rcu_head *head) static inline void clusterip_config_put(struct clusterip_config *c) { - if (atomic_dec_and_test(&c->refcount)) + if (refcount_dec_and_test(&c->refcount)) call_rcu_bh(&c->rcu, clusterip_config_rcu_free); } @@ -103,7 +104,7 @@ clusterip_config_entry_put(struct clusterip_config *c) struct clusterip_net *cn = net_generic(net, clusterip_net_id); local_bh_disable(); - if (atomic_dec_and_lock(&c->entries, &cn->lock)) { + if (refcount_dec_and_lock(&c->entries, &cn->lock)) { list_del_rcu(&c->list); spin_unlock(&cn->lock); local_bh_enable(); @@ -149,10 +150,10 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry) c = NULL; else #endif - if (unlikely(!atomic_inc_not_zero(&c->refcount))) + if (unlikely(!refcount_inc_not_zero(&c->refcount))) c = NULL; else if (entry) - atomic_inc(&c->entries); + refcount_inc(&c->entries); } rcu_read_unlock_bh(); @@ -188,8 +189,8 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip, clusterip_config_init_nodelist(c, i); c->hash_mode = i->hash_mode; c->hash_initval = i->hash_initval; - atomic_set(&c->refcount, 1); - atomic_set(&c->entries, 1); + refcount_set(&c->refcount, 1); + refcount_set(&c->entries, 1); spin_lock_bh(&cn->lock); if (__clusterip_config_find(net, ip)) { diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 3240a2614e82bd82c674aac84ab140c91670f163..af2b69b6895f5ae8e326b27e05efa01cc1405e1b 100644 --- 
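The CLUSTERIP hunk above converts its reference counters from atomic_t to refcount_t. The point of the dedicated type is that increments saturate rather than wrap, and refcount_inc_not_zero() refuses to revive an object whose count has already reached zero. A simplified, non-atomic userspace model of that contract (the real refcount_t is atomic and saturating):

#include <stdbool.h>
#include <stdio.h>

static int refcount = 1;	/* stand-in for refcount_t */

static bool refcount_inc_not_zero(int *r)
{
	if (*r == 0)
		return false;	/* object already dying: don't revive it */
	(*r)++;
	return true;
}

static bool refcount_dec_and_test(int *r)
{
	return --(*r) == 0;	/* true: caller must free the object */
}

int main(void)
{
	if (refcount_inc_not_zero(&refcount))
		printf("got a reference, count=%d\n", refcount);
	while (!refcount_dec_and_test(&refcount))
		;
	puts("last reference dropped: free the object");
	return 0;
}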
a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -293,12 +293,16 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_ECN); synproxy_send_client_synack(net, skb, th, &opts); - return NF_DROP; - + consume_skb(skb); + return NF_STOLEN; } else if (th->ack && !(th->fin || th->rst || th->syn)) { /* ACK from client */ - synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq)); - return NF_DROP; + if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) { + consume_skb(skb); + return NF_STOLEN; + } else { + return NF_DROP; + } } return XT_CONTINUE; @@ -367,10 +371,13 @@ static unsigned int ipv4_synproxy_hook(void *priv, * number match the one of first SYN. */ if (synproxy_recv_client_ack(net, skb, th, &opts, - ntohl(th->seq) + 1)) + ntohl(th->seq) + 1)) { this_cpu_inc(snet->stats->cookie_retrans); - - return NF_DROP; + consume_skb(skb); + return NF_STOLEN; + } else { + return NF_DROP; + } } synproxy->isn = ntohl(th->ack_seq); @@ -409,19 +416,56 @@ static unsigned int ipv4_synproxy_hook(void *priv, return NF_ACCEPT; } +static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = { + { + .hook = ipv4_synproxy_hook, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, + { + .hook = ipv4_synproxy_hook, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, +}; + static int synproxy_tg4_check(const struct xt_tgchk_param *par) { + struct synproxy_net *snet = synproxy_pernet(par->net); const struct ipt_entry *e = par->entryinfo; + int err; if (e->ip.proto != IPPROTO_TCP || e->ip.invflags & XT_INV_PROTO) return -EINVAL; - return nf_ct_netns_get(par->net, par->family); + err = nf_ct_netns_get(par->net, par->family); + if (err) + return err; + + if (snet->hook_ref4 == 0) { + err = nf_register_net_hooks(par->net, ipv4_synproxy_ops, + ARRAY_SIZE(ipv4_synproxy_ops)); + if (err) { + nf_ct_netns_put(par->net, par->family); + return err; + } + } + + snet->hook_ref4++; + return err; } static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) { + struct synproxy_net *snet = synproxy_pernet(par->net); + + snet->hook_ref4--; + if (snet->hook_ref4 == 0) + nf_unregister_net_hooks(par->net, ipv4_synproxy_ops, + ARRAY_SIZE(ipv4_synproxy_ops)); nf_ct_netns_put(par->net, par->family); } @@ -436,46 +480,14 @@ static struct xt_target synproxy_tg4_reg __read_mostly = { .me = THIS_MODULE, }; -static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = { - { - .hook = ipv4_synproxy_hook, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, - }, - { - .hook = ipv4_synproxy_hook, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, - }, -}; - static int __init synproxy_tg4_init(void) { - int err; - - err = nf_register_hooks(ipv4_synproxy_ops, - ARRAY_SIZE(ipv4_synproxy_ops)); - if (err < 0) - goto err1; - - err = xt_register_target(&synproxy_tg4_reg); - if (err < 0) - goto err2; - - return 0; - -err2: - nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops)); -err1: - return err; + return xt_register_target(&synproxy_tg4_reg); } static void __exit synproxy_tg4_exit(void) { xt_unregister_target(&synproxy_tg4_reg); - nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops)); } module_init(synproxy_tg4_init); diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c index 
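Two things happen in the SYNPROXY hunk above: packets the target answers itself are now consume_skb()'d and reported as NF_STOLEN instead of NF_DROP, so drop statistics stop charging legitimate handshake packets; and the conntrack-confirm hooks are registered per netns on first rule use instead of unconditionally at module load. A toy model of the verdict split (constants renumbered for illustration; the kernel's values differ):

#include <stdbool.h>
#include <stdio.h>

enum verdict { NF_DROP, NF_STOLEN, XT_CONTINUE };

/* Toy model of the synproxy decision: the skb is "consumed" when
 * we act on it ourselves; only an invalid cookie ACK is dropped.
 */
static enum verdict classify(bool syn, bool ack, bool cookie_valid)
{
	if (syn && !ack)
		return NF_STOLEN;	/* we sent the SYNACK, eat the SYN */
	if (ack && !syn)
		return cookie_valid ? NF_STOLEN : NF_DROP;
	return XT_CONTINUE;
}

int main(void)
{
	printf("SYN      -> %d\n", classify(true,  false, false));
	printf("good ACK -> %d\n", classify(false, true,  true));
	printf("bad ACK  -> %d\n", classify(false, true,  false));
	return 0;
}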
f0dbff05fc28174de694ecad1d1adcde63c314ec..39895b9ddeb9601307f073298ab690ae1f9972d4 100644 --- a/net/ipv4/netfilter/nf_dup_ipv4.c +++ b/net/ipv4/netfilter/nf_dup_ipv4.c @@ -69,8 +69,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, #if IS_ENABLED(CONFIG_NF_CONNTRACK) /* Avoid counting cloned packets towards the original connection. */ nf_reset(skb); - nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); - nf_conntrack_get(skb_nfct(skb)); + nf_ct_set(skb, NULL, IP_CT_UNTRACKED); #endif /* * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 6f5e8d01b876933a68e5f6cf8b2a48f8c4e17262..feedd759ca8043c3eff37e22d0f1c8301b229da2 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -264,13 +264,7 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, if (!ct) return NF_ACCEPT; - /* Don't try to NAT if this packet is not conntracked */ - if (nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; + nat = nfct_nat(ct); switch (ctinfo) { case IP_CT_RELATED: diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c index ea91058b5f6f4245a84179f81a8641756f60efff..dc1dea15c1b4fb0d7ad3ad02779cde147e52c2cf 100644 --- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c @@ -37,7 +37,6 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING); ct = nf_ct_get(skb, &ctinfo); - nat = nfct_nat(ct); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY)); @@ -56,7 +55,9 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, return NF_DROP; } - nat->masq_index = out->ifindex; + nat = nf_ct_nat_ext_add(ct); + if (nat) + nat->masq_index = out->ifindex; /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index b3ca21b2ba9b638550b552c85e297234ccf39caa..8a69363b48846c628994e54c92a354ca46f71ebc 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -49,9 +49,14 @@ static void pptp_nat_expected(struct nf_conn *ct, const struct nf_ct_pptp_master *ct_pptp_info; const struct nf_nat_pptp *nat_pptp_info; struct nf_nat_range range; + struct nf_conn_nat *nat; + nat = nf_ct_nat_ext_add(ct); + if (WARN_ON_ONCE(!nat)) + return; + + nat_pptp_info = &nat->help.nat_pptp_info; ct_pptp_info = nfct_help_data(master); - nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; /* And here goes the grand finale of corrosion... 
*/ if (exp->dir == IP_CT_DIR_ORIGINAL) { @@ -120,13 +125,17 @@ pptp_outbound_pkt(struct sk_buff *skb, { struct nf_ct_pptp_master *ct_pptp_info; + struct nf_conn_nat *nat = nfct_nat(ct); struct nf_nat_pptp *nat_pptp_info; u_int16_t msg; __be16 new_callid; unsigned int cid_off; + if (WARN_ON_ONCE(!nat)) + return NF_DROP; + + nat_pptp_info = &nat->help.nat_pptp_info; ct_pptp_info = nfct_help_data(ct); - nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; new_callid = ct_pptp_info->pns_call_id; @@ -177,11 +186,11 @@ pptp_outbound_pkt(struct sk_buff *skb, ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); /* mangle packet */ - if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, - cid_off + sizeof(struct pptp_pkt_hdr) + - sizeof(struct PptpControlHeader), - sizeof(new_callid), (char *)&new_callid, - sizeof(new_callid)) == 0) + if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, + cid_off + sizeof(struct pptp_pkt_hdr) + + sizeof(struct PptpControlHeader), + sizeof(new_callid), (char *)&new_callid, + sizeof(new_callid))) return NF_DROP; return NF_ACCEPT; } @@ -191,11 +200,15 @@ pptp_exp_gre(struct nf_conntrack_expect *expect_orig, struct nf_conntrack_expect *expect_reply) { const struct nf_conn *ct = expect_orig->master; + struct nf_conn_nat *nat = nfct_nat(ct); struct nf_ct_pptp_master *ct_pptp_info; struct nf_nat_pptp *nat_pptp_info; + if (WARN_ON_ONCE(!nat)) + return; + + nat_pptp_info = &nat->help.nat_pptp_info; ct_pptp_info = nfct_help_data(ct); - nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; /* save original PAC call ID in nat_info */ nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id; @@ -223,11 +236,15 @@ pptp_inbound_pkt(struct sk_buff *skb, union pptp_ctrl_union *pptpReq) { const struct nf_nat_pptp *nat_pptp_info; + struct nf_conn_nat *nat = nfct_nat(ct); u_int16_t msg; __be16 new_pcid; unsigned int pcid_off; - nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; + if (WARN_ON_ONCE(!nat)) + return NF_DROP; + + nat_pptp_info = &nat->help.nat_pptp_info; new_pcid = nat_pptp_info->pns_call_id; switch (msg = ntohs(ctlh->messageType)) { @@ -271,11 +288,11 @@ pptp_inbound_pkt(struct sk_buff *skb, pr_debug("altering peer call id from 0x%04x to 0x%04x\n", ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); - if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, - pcid_off + sizeof(struct pptp_pkt_hdr) + - sizeof(struct PptpControlHeader), - sizeof(new_pcid), (char *)&new_pcid, - sizeof(new_pcid)) == 0) + if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, + pcid_off + sizeof(struct pptp_pkt_hdr) + + sizeof(struct PptpControlHeader), + sizeof(new_pcid), (char *)&new_pcid, + sizeof(new_pcid))) return NF_DROP; return NF_ACCEPT; } diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 53e49f5011d3ce482c5180edc47da7928db0f224..d5b1e0b3f6876995b6987aa47515ece9c7beaca3 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -827,8 +827,8 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, return 1; } -static unsigned char snmp_request_decode(struct asn1_ctx *ctx, - struct snmp_request *request) +static unsigned char noinline_for_stack +snmp_request_decode(struct asn1_ctx *ctx, struct snmp_request *request) { unsigned int cls, con, tag; unsigned char *end; @@ -920,10 +920,10 @@ static inline void mangle_address(unsigned char *begin, } } -static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, - struct snmp_v1_trap *trap, - const struct oct1_map *map, - __sum16 *check) +static 
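The nf_nat_pptp hunks all apply the same defensive pattern: the NAT extension is no longer guaranteed to exist by the time the helper runs, so each path fetches it, warns once, and fails the packet rather than dereferencing a NULL nfct_nat(). A minimal stand-alone model of that pattern, with invented helpers in place of nfct_nat()/WARN_ON_ONCE():

#include <stdio.h>

struct nat_ext { int call_id; };

/* Stand-in: in the kernel this is nfct_nat()/nf_ct_nat_ext_add(). */
static struct nat_ext *get_nat_ext(int present)
{
	static struct nat_ext ext = { 42 };
	return present ? &ext : NULL;
}

/* Defensive pattern from the PPTP hunks: tolerate a missing
 * extension, complain, and drop instead of crashing.
 */
static int mangle_packet(int ext_present)
{
	struct nat_ext *nat = get_nat_ext(ext_present);

	if (!nat) {			/* kernel: WARN_ON_ONCE(!nat) */
		fprintf(stderr, "missing NAT extension, dropping\n");
		return 0;		/* NF_DROP */
	}
	printf("mangling with call id %d\n", nat->call_id);
	return 1;			/* NF_ACCEPT */
}

int main(void)
{
	mangle_packet(1);
	mangle_packet(0);
	return 0;
}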
unsigned char noinline_for_stack +snmp_trap_decode(struct asn1_ctx *ctx, struct snmp_v1_trap *trap, + const struct oct1_map *map, + __sum16 *check) { unsigned int cls, con, tag, len; unsigned char *end; @@ -998,18 +998,6 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, * *****************************************************************************/ -static void hex_dump(const unsigned char *buf, size_t len) -{ - size_t i; - - for (i = 0; i < len; i++) { - if (i && !(i % 16)) - printk("\n"); - printk("%02x ", *(buf + i)); - } - printk("\n"); -} - /* * Parse and mangle SNMP message according to mapping. * (And this is the fucking 'basic' method). @@ -1026,7 +1014,8 @@ static int snmp_parse_mangle(unsigned char *msg, struct snmp_object *obj; if (debug > 1) - hex_dump(msg, len); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1, + msg, len, 0); asn1_open(&ctx, msg, len); diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 146d86105183e1a456a0f17ed6bb5371aa1e8f76..7cd8d0d918f82e275e0ecd31c1cea8ec8fcf345d 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -104,7 +104,6 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put); void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; - const struct iphdr *oiph; struct iphdr *niph; const struct tcphdr *oth; struct tcphdr _oth; @@ -116,8 +115,6 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) return; - oiph = ip_hdr(oldskb); - nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + LL_MAX_HEADER, GFP_ATOMIC); if (!nskb) diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c index a83d558e1aaed62b6bce05d9cce36405b0c210ef..e9293bdebba04f5b329cd4fd4a6b5b676ade5988 100644 --- a/net/ipv4/netfilter/nf_socket_ipv4.c +++ b/net/ipv4/netfilter/nf_socket_ipv4.c @@ -139,7 +139,7 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb, * SNAT-ted connection. 
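The snmp hunk below also swaps an open-coded dumper for the kernel's print_hex_dump() helper, whose arguments are (level, prefix_str, prefix_type, rowsize, groupsize, buf, len, ascii); together with noinline_for_stack on the big decoders this trims both stack frames and duplicated debug code. For reference, a userspace routine producing a similar 16-bytes-per-row layout to what the removed code printed:

#include <stdio.h>

/* Userspace equivalent of the removed hex_dump(): 16 bytes per
 * row, roughly as print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
 * 16, 1, msg, len, 0) would render them.
 */
static void hex_dump(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	if (len % 16)
		putchar('\n');
}

int main(void)
{
	unsigned char msg[] = { 0x30, 0x26, 0x02, 0x01, 0x00, 0x04 };

	hex_dump(msg, sizeof(msg));
	return 0;
}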
*/ ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct) && + if (ct && ((iph->protocol != IPPROTO_ICMP && ctinfo == IP_CT_ESTABLISHED_REPLY) || (iph->protocol == IPPROTO_ICMP && diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index 2981291910dd2cac2d508fcde89083afc22affd4..de3681df2ce71c7341d96dd5d2540ab110ac80eb 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -90,7 +90,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { - nft_fib_store_result(dest, priv->result, pkt, + nft_fib_store_result(dest, priv, pkt, nft_in(pkt)->ifindex); return; } @@ -99,7 +99,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, if (ipv4_is_zeronet(iph->saddr)) { if (ipv4_is_lbcast(iph->daddr) || ipv4_is_local_multicast(iph->daddr)) { - nft_fib_store_result(dest, priv->result, pkt, + nft_fib_store_result(dest, priv, pkt, get_ifindex(pkt->skb->dev)); return; } @@ -212,7 +212,7 @@ nft_fib4_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_fib4_type __read_mostly = { .name = "fib", - .select_ops = &nft_fib4_select_ops, + .select_ops = nft_fib4_select_ops, .policy = nft_fib_policy, .maxattr = NFTA_FIB_MAX, .family = NFPROTO_IPV4, diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 69cf49e8356d0184f774840c9dc96560f2ae2f2b..fa44e752a9a3f8eb9957314149ae15e6df10465a 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -199,7 +199,6 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED), SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED), SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED), - SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED), SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED), SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED), SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS), @@ -282,6 +281,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL), SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), + SNMP_MIB_ITEM("TCPFastOpenBlackhole", LINUX_MIB_TCPFASTOPENBLACKHOLE), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING), diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 4b7c0ec65251ef40577a2d5e360fcbaed391a566..32a691b7ce2c7e79eab6491b52457a11e666f7d3 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c @@ -28,7 +28,7 @@ #include #include -const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; +struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; EXPORT_SYMBOL(inet_offloads); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d9724889ff09077aa88c98ec3e170dcfdb91d29b..655d9eebe43e16a59102edcd3ea4bc177c6b341d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1250,15 +1250,11 @@ static void set_class_tag(struct rtable *rt, u32 tag) static unsigned int ipv4_default_advmss(const struct dst_entry *dst) { - unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS); + unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr); 
+ unsigned int advmss = max_t(unsigned int, dst->dev->mtu - header_size, + ip_rt_min_advmss); - if (advmss == 0) { - advmss = max_t(unsigned int, dst->dev->mtu - 40, - ip_rt_min_advmss); - if (advmss > 65535 - 40) - advmss = 65535 - 40; - } - return advmss; + return min(advmss, IPV4_MAX_PMTU - header_size); } static unsigned int ipv4_mtu(const struct dst_entry *dst) @@ -1734,45 +1730,97 @@ static int __mkroute_input(struct sk_buff *skb, } #ifdef CONFIG_IP_ROUTE_MULTIPATH - /* To make ICMP packets follow the right flow, the multipath hash is - * calculated from the inner IP addresses in reverse order. + * calculated from the inner IP addresses. */ -static int ip_multipath_icmp_hash(struct sk_buff *skb) +static void ip_multipath_l3_keys(const struct sk_buff *skb, + struct flow_keys *hash_keys) { const struct iphdr *outer_iph = ip_hdr(skb); - struct icmphdr _icmph; + const struct iphdr *inner_iph; const struct icmphdr *icmph; struct iphdr _inner_iph; - const struct iphdr *inner_iph; + struct icmphdr _icmph; + + hash_keys->addrs.v4addrs.src = outer_iph->saddr; + hash_keys->addrs.v4addrs.dst = outer_iph->daddr; + if (likely(outer_iph->protocol != IPPROTO_ICMP)) + return; if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0)) - goto standard_hash; + return; icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph), &_icmph); if (!icmph) - goto standard_hash; + return; if (icmph->type != ICMP_DEST_UNREACH && icmph->type != ICMP_REDIRECT && icmph->type != ICMP_TIME_EXCEEDED && - icmph->type != ICMP_PARAMETERPROB) { - goto standard_hash; - } + icmph->type != ICMP_PARAMETERPROB) + return; inner_iph = skb_header_pointer(skb, outer_iph->ihl * 4 + sizeof(_icmph), sizeof(_inner_iph), &_inner_iph); if (!inner_iph) - goto standard_hash; + return; + hash_keys->addrs.v4addrs.src = inner_iph->saddr; + hash_keys->addrs.v4addrs.dst = inner_iph->daddr; +} - return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr); +/* if skb is set it will be used and fl4 can be NULL */ +int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4, + const struct sk_buff *skb) +{ + struct net *net = fi->fib_net; + struct flow_keys hash_keys; + u32 mhash; -standard_hash: - return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr); -} + switch (net->ipv4.sysctl_fib_multipath_hash_policy) { + case 0: + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + if (skb) { + ip_multipath_l3_keys(skb, &hash_keys); + } else { + hash_keys.addrs.v4addrs.src = fl4->saddr; + hash_keys.addrs.v4addrs.dst = fl4->daddr; + } + break; + case 1: + /* skb is currently provided only when forwarding */ + if (skb) { + unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; + struct flow_keys keys; + + /* short-circuit if we already have L4 hash present */ + if (skb->l4_hash) + return skb_get_hash_raw(skb) >> 1; + memset(&hash_keys, 0, sizeof(hash_keys)); + skb_flow_dissect_flow_keys(skb, &keys, flag); + hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; + hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; + hash_keys.ports.src = keys.ports.src; + hash_keys.ports.dst = keys.ports.dst; + hash_keys.basic.ip_proto = keys.basic.ip_proto; + } else { + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + hash_keys.addrs.v4addrs.src = fl4->saddr; + hash_keys.addrs.v4addrs.dst = fl4->daddr; + hash_keys.ports.src = fl4->fl4_sport; + hash_keys.ports.dst = fl4->fl4_dport; + hash_keys.basic.ip_proto = fl4->flowi4_proto; + 
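fib_multipath_hash() above is the heart of the new net.ipv4.fib_multipath_hash_policy sysctl: policy 0 hashes only the addresses (with ICMP errors folded back to the inner header so they follow the flow they describe), policy 1 adds the L4 ports and protocol. A toy rendering of the two key sets, hashed with a stand-in mixer (FNV-1a here, not the kernel's flow_hash_from_keys()):

#include <stdint.h>
#include <stdio.h>

struct keys { uint32_t saddr, daddr, sport, dport, proto; };

/* Stand-in mixer; any decent hash works for illustration. */
static uint32_t mix(const struct keys *k)
{
	uint32_t h = 2166136261u;
	const unsigned char *p = (const unsigned char *)k;
	size_t i;

	for (i = 0; i < sizeof(*k); i++)
		h = (h ^ p[i]) * 16777619u;	/* FNV-1a */
	return h;
}

static uint32_t multipath_hash(int policy, const struct keys *flow)
{
	struct keys k = { .saddr = flow->saddr, .daddr = flow->daddr };

	if (policy == 1) {		/* L4: fold in ports + protocol */
		k.sport = flow->sport;
		k.dport = flow->dport;
		k.proto = flow->proto;
	}
	return mix(&k) >> 1;		/* the kernel also drops the top bit */
}

int main(void)
{
	struct keys f = { 0x0a000001, 0x0a000002, 40000, 443, 6 };

	printf("L3 hash %u, L4 hash %u\n",
	       multipath_hash(0, &f), multipath_hash(1, &f));
	return 0;
}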
} + break; + } + mhash = flow_hash_from_keys(&hash_keys); + return mhash >> 1; +} +EXPORT_SYMBOL_GPL(fib_multipath_hash); #endif /* CONFIG_IP_ROUTE_MULTIPATH */ static int ip_mkroute_input(struct sk_buff *skb, @@ -1782,12 +1830,8 @@ static int ip_mkroute_input(struct sk_buff *skb, { #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && res->fi->fib_nhs > 1) { - int h; + int h = fib_multipath_hash(res->fi, NULL, skb); - if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP)) - h = ip_multipath_icmp_hash(skb); - else - h = fib_multipath_hash(saddr, daddr); fib_select_multipath(res, h); } #endif @@ -2203,7 +2247,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, */ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, - int mp_hash) + const struct sk_buff *skb) { struct net_device *dev_out = NULL; __u8 tos = RT_FL_TOS(fl4); @@ -2366,7 +2410,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, goto make_route; } - fib_select_path(net, &res, fl4, mp_hash); + fib_select_path(net, &res, fl4, skb); dev_out = FIB_RES_DEV(res); fl4->flowi4_oif = dev_out->ifindex; @@ -2586,7 +2630,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, return -EMSGSIZE; } -static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) +static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct rtmsg *rtm; @@ -2602,7 +2647,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) u32 table_id = RT_TABLE_MAIN; kuid_t uid; - err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy, + extack); if (err < 0) goto errout; @@ -2620,10 +2666,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) skb_reset_mac_header(skb); skb_reset_network_header(skb); - /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ - ip_hdr(skb)->protocol = IPPROTO_UDP; - skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); - src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; @@ -2633,6 +2675,15 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) else uid = (iif ? INVALID_UID : current_uid()); + /* Bugfix: need to give ip_route_input enough of an IP header to + * not gag. + */ + ip_hdr(skb)->protocol = IPPROTO_UDP; + ip_hdr(skb)->saddr = src; + ip_hdr(skb)->daddr = dst; + + skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); + memset(&fl4, 0, sizeof(fl4)); fl4.daddr = dst; fl4.saddr = src; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d6880a6149ee80c6c75f4fe75b46a9d18d204d5d..86957e9cd6c6748ac00aa0307154bb131c43f1da 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -24,6 +24,7 @@ #include #include #include +#include static int zero; static int one = 1; @@ -294,6 +295,74 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write, return ret; } +static void proc_configure_early_demux(int enabled, int protocol) +{ + struct net_protocol *ipprot; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_protocol *ip6prot; +#endif + + rcu_read_lock(); + + ipprot = rcu_dereference(inet_protos[protocol]); + if (ipprot) + ipprot->early_demux = enabled ? 
ipprot->early_demux_handler : + NULL; + +#if IS_ENABLED(CONFIG_IPV6) + ip6prot = rcu_dereference(inet6_protos[protocol]); + if (ip6prot) + ip6prot->early_demux = enabled ? ip6prot->early_demux_handler : + NULL; +#endif + rcu_read_unlock(); +} + +static int proc_tcp_early_demux(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = 0; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write && !ret) { + int enabled = init_net.ipv4.sysctl_tcp_early_demux; + + proc_configure_early_demux(enabled, IPPROTO_TCP); + } + + return ret; +} + +static int proc_udp_early_demux(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = 0; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write && !ret) { + int enabled = init_net.ipv4.sysctl_udp_early_demux; + + proc_configure_early_demux(enabled, IPPROTO_UDP); + } + + return ret; +} + +static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, + int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (write && ret == 0) + tcp_fastopen_active_timeout_reset(); + return ret; +} + static struct ctl_table ipv4_table[] = { { .procname = "tcp_timestamps", @@ -343,6 +412,14 @@ static struct ctl_table ipv4_table[] = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10), .proc_handler = proc_tcp_fastopen_key, }, + { + .procname = "tcp_fastopen_blackhole_timeout_sec", + .data = &sysctl_tcp_fastopen_blackhole_timeout, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_tfo_blackhole_detect_timeout, + .extra1 = &zero, + }, { .procname = "tcp_abort_on_overflow", .data = &sysctl_tcp_abort_on_overflow, @@ -749,6 +826,20 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "udp_early_demux", + .data = &init_net.ipv4.sysctl_udp_early_demux, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_udp_early_demux + }, + { + .procname = "tcp_early_demux", + .data = &init_net.ipv4.sysctl_tcp_early_demux, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_tcp_early_demux + }, { .procname = "ip_default_ttl", .data = &init_net.ipv4.sysctl_ip_default_ttl, @@ -980,13 +1071,6 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "tcp_tw_recycle", - .data = &init_net.ipv4.tcp_death_row.sysctl_tw_recycle, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_max_syn_backlog", .data = &init_net.ipv4.sysctl_max_syn_backlog, @@ -1004,6 +1088,15 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = &zero, .extra2 = &one, }, + { + .procname = "fib_multipath_hash_policy", + .data = &init_net.ipv4.sysctl_fib_multipath_hash_policy, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, #endif { .procname = "ip_unprivileged_port_start", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 40ba4249a58677671b68bf495f32f15b7c5f62d7..1e4c76d2b8278ba71d6cc2cf7ebfe483e241f76e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -533,7 +533,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (tp->urg_data & TCP_URG_VALID) mask |= POLLPRI; - } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { + } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* 
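The proc handlers above are how the new tcp_early_demux/udp_early_demux knobs take effect: writing the sysctl rewrites the protocol's live early_demux pointer between NULL and its saved early_demux_handler, which is exactly why the ip_input.c hunk earlier reads it with READ_ONCE(). A sketch of the toggle, with invented userspace stand-ins:

#include <stdio.h>

static void tcp_v4_early_demux_stub(void) { puts("early demux ran"); }

/* Toy model of proc_configure_early_demux(): the sysctl swaps the
 * live pointer between NULL and the saved handler; readers must
 * snapshot it (READ_ONCE in the kernel) before calling.
 */
struct proto {
	void (*early_demux)(void);		/* what the fast path calls */
	void (*early_demux_handler)(void);	/* the saved implementation */
};

static void configure_early_demux(struct proto *p, int enabled)
{
	p->early_demux = enabled ? p->early_demux_handler : NULL;
}

int main(void)
{
	struct proto tcp = { tcp_v4_early_demux_stub, tcp_v4_early_demux_stub };

	configure_early_demux(&tcp, 0);
	printf("enabled: %d\n", tcp.early_demux != NULL);
	configure_early_demux(&tcp, 1);
	if (tcp.early_demux)
		tcp.early_demux();
	return 0;
}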
Active TCP fastopen socket with defer_connect * Return POLLOUT so application can call write() * in order for kernel to generate SYN+data @@ -2296,6 +2296,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); tcp_write_queue_purge(sk); + tcp_fastopen_active_disable_ofo_check(sk); skb_rbtree_purge(&tp->out_of_order_queue); inet->inet_dport = 0; @@ -2394,7 +2395,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp, u16 snd_wscale = opt.opt_val & 0xFFFF; u16 rcv_wscale = opt.opt_val >> 16; - if (snd_wscale > 14 || rcv_wscale > 14) + if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) return -EFBIG; tp->rx_opt.snd_wscale = snd_wscale; @@ -2471,7 +2472,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, /* Values greater than interface MTU won't take effect. However * at the point when this call is done we typically don't yet * know which interface is going to be used */ - if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { + if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { err = -EINVAL; break; } @@ -2852,7 +2853,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_snd_ssthresh = tp->snd_ssthresh; info->tcpi_advmss = tp->advmss; - info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; + info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; info->tcpi_rcv_space = tp->rcvq_space.space; info->tcpi_total_retrans = tp->total_retrans; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index c99230efcd52d2233ba22fbc117101c8e8cee113..0683ba447d775b6101a929a6aca3eb255cff8932 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -72,7 +72,7 @@ MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness"); module_param(hystart, int, 0644); MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm"); module_param(hystart_detect, int, 0644); -MODULE_PARM_DESC(hystart_detect, "hyrbrid slow start detection mechanisms" +MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms" " 1: packet-train 2: delay 3: both packet-train and delay"); module_param(hystart_low_window, int, 0644); MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start"); diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 8ea4e9787f82ba65cd07b4c2b663df76fe4eb143..4af82b914dd4bbdc47e37cf1cf70f206bd186db5 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -341,6 +341,13 @@ bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, cookie->len = -1; return false; } + + /* Firewall blackhole issue check */ + if (tcp_fastopen_active_should_disable(sk)) { + cookie->len = -1; + return false; + } + if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) { cookie->len = -1; return true; @@ -380,3 +387,98 @@ bool tcp_fastopen_defer_connect(struct sock *sk, int *err) return false; } EXPORT_SYMBOL(tcp_fastopen_defer_connect); + +/* + * The following code block is to deal with middlebox issues with TFO: + * Middlebox firewall issues can potentially cause a server's data to be + * blackholed after a successful 3WHS using TFO. + * The proposed solution is to disable active TFO globally under the + * following circumstances: + * 1. client side TFO socket receives out of order FIN + * 2. client side TFO socket receives out of order RST + * We disable active side TFO globally for 1hr at first. Then if it + * happens again, we disable it for 2h, then 4h, 8h, ...
+ And we reset the timeout back to 1hr when we see a successful active + TFO connection with data exchanges. + */ + +/* Default to 1hr */ +unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60; +static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0); +static unsigned long tfo_active_disable_stamp __read_mostly; + +/* Disable active TFO and record current jiffies and + * tfo_active_disable_times + */ +void tcp_fastopen_active_disable(struct sock *sk) +{ + atomic_inc(&tfo_active_disable_times); + tfo_active_disable_stamp = jiffies; + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE); +} + +/* Reset tfo_active_disable_times to 0 */ +void tcp_fastopen_active_timeout_reset(void) +{ + atomic_set(&tfo_active_disable_times, 0); +} + +/* Calculate timeout for tfo active disable + * Return true if we are still in the active TFO disable period + * Return false if timeout already expired and we should use active TFO + */ +bool tcp_fastopen_active_should_disable(struct sock *sk) +{ + int tfo_da_times = atomic_read(&tfo_active_disable_times); + int multiplier; + unsigned long timeout; + + if (!tfo_da_times) + return false; + + /* Limit timeout to max: 2^6 * initial timeout */ + multiplier = 1 << min(tfo_da_times - 1, 6); + timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ; + if (time_before(jiffies, tfo_active_disable_stamp + timeout)) + return true; + + /* Mark check bit so we can check for successful active TFO + * condition and reset tfo_active_disable_times + */ + tcp_sk(sk)->syn_fastopen_ch = 1; + return false; +} + +/* Disable active TFO if FIN is the only packet in the ofo queue + * and no data is received. + * Also check if we can reset tfo_active_disable_times if data is + * received successfully on a marked active TFO socket opened on + * a non-loopback interface + */ +void tcp_fastopen_active_disable_ofo_check(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct rb_node *p; + struct sk_buff *skb; + struct dst_entry *dst; + + if (!tp->syn_fastopen) + return; + + if (!tp->data_segs_in) { + p = rb_first(&tp->out_of_order_queue); + if (p && !rb_next(p)) { + skb = rb_entry(p, struct sk_buff, rbnode); + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { + tcp_fastopen_active_disable(sk); + return; + } + } + } else if (tp->syn_fastopen_ch && + atomic_read(&tfo_active_disable_times)) { + dst = sk_dst_get(sk); + if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))) + tcp_fastopen_active_timeout_reset(); + dst_release(dst); + } +} diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 659d1baefb2bba36d96e412eb7ca5a02996fb6dd..9739962bfb3fd2d39cb13f643def223f4f17fcb6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -442,7 +442,8 @@ void tcp_init_buffer_space(struct sock *sk) tcp_sndbuf_expand(sk); tp->rcvq_space.space = tp->rcv_wnd; - tp->rcvq_space.time = tcp_time_stamp; + skb_mstamp_get(&tp->tcp_mstamp); + tp->rcvq_space.time = tp->tcp_mstamp; tp->rcvq_space.seq = tp->copied_seq; maxwin = tcp_full_space(sk); @@ -518,7 +519,7 @@ EXPORT_SYMBOL(tcp_initialize_rcv_mss); */ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { - u32 new_sample = tp->rcv_rtt_est.rtt; + u32 new_sample = tp->rcv_rtt_est.rtt_us; long m = sample; if (m == 0) @@ -548,21 +549,23 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) new_sample = m << 3; } - if (tp->rcv_rtt_est.rtt != new_sample) - tp->rcv_rtt_est.rtt = new_sample; + tp->rcv_rtt_est.rtt_us = new_sample; } static
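tcp_fastopen_active_should_disable() implements the backoff the comment block describes: with the default base of one hour, the n-th consecutive blackhole event keeps active TFO disabled for min(2^(n-1), 64) hours -- 1h, 2h, 4h, ..., capped at 64h. The timeout arithmetic, extracted into a standalone program:

#include <stdio.h>

#define BASE_TIMEOUT_SEC (60 * 60)	/* sysctl default: one hour */

/* Same arithmetic as tcp_fastopen_active_should_disable():
 * exponential backoff on the disable count, capped at 2^6.
 */
static unsigned long disable_timeout(int tfo_da_times)
{
	int multiplier = 1 << (tfo_da_times - 1 < 6 ? tfo_da_times - 1 : 6);

	return (unsigned long)multiplier * BASE_TIMEOUT_SEC;
}

int main(void)
{
	int n;

	for (n = 1; n <= 8; n++)
		printf("event %d -> TFO disabled for %luh\n",
		       n, disable_timeout(n) / 3600);
	return 0;
}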
inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { - if (tp->rcv_rtt_est.time == 0) + u32 delta_us; + + if (tp->rcv_rtt_est.time.v64 == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; - tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); + delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time); + tcp_rcv_rtt_update(tp, delta_us, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; - tp->rcv_rtt_est.time = tcp_time_stamp; + tp->rcv_rtt_est.time = tp->tcp_mstamp; } static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, @@ -572,7 +575,10 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) - tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); + tcp_rcv_rtt_update(tp, + jiffies_to_usecs(tcp_time_stamp - + tp->rx_opt.rcv_tsecr), + 0); } /* @@ -585,8 +591,8 @@ void tcp_rcv_space_adjust(struct sock *sk) int time; int copied; - time = tcp_time_stamp - tp->rcvq_space.time; - if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) + time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time); + if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) return; /* Number of bytes copied to user in last RTT */ @@ -642,7 +648,7 @@ void tcp_rcv_space_adjust(struct sock *sk) new_measure: tp->rcvq_space.seq = tp->copied_seq; - tp->rcvq_space.time = tcp_time_stamp; + tp->rcvq_space.time = tp->tcp_mstamp; } /* There is something which you must keep in mind when you analyze the @@ -1131,7 +1137,6 @@ struct tcp_sacktag_state { */ struct skb_mstamp first_sackt; struct skb_mstamp last_sackt; - struct skb_mstamp ack_time; /* Timestamp when the S/ACK was received */ struct rate_sample *rate; int flag; }; @@ -1214,8 +1219,7 @@ static u8 tcp_sacktag_one(struct sock *sk, return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { - tcp_rack_advance(tp, sacked, end_seq, - xmit_time, &state->ack_time); + tcp_rack_advance(tp, sacked, end_seq, xmit_time); if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, @@ -2760,8 +2764,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked) return false; } -static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag, - const struct skb_mstamp *ack_time) +static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag) { struct tcp_sock *tp = tcp_sk(sk); @@ -2769,7 +2772,7 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag, if (sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) { u32 prior_retrans = tp->retrans_out; - tcp_rack_mark_lost(sk, ack_time); + tcp_rack_mark_lost(sk); if (prior_retrans > tp->retrans_out) *ack_flag |= FLAG_LOST_RETRANS; } @@ -2788,8 +2791,7 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag, * tcp_xmit_retransmit_queue(). 
*/ static void tcp_fastretrans_alert(struct sock *sk, const int acked, - bool is_dupack, int *ack_flag, int *rexmit, - const struct skb_mstamp *ack_time) + bool is_dupack, int *ack_flag, int *rexmit) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2857,11 +2859,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, tcp_try_keep_open(sk); return; } - tcp_rack_identify_loss(sk, ack_flag, ack_time); + tcp_rack_identify_loss(sk, ack_flag); break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack, rexmit); - tcp_rack_identify_loss(sk, ack_flag, ack_time); + tcp_rack_identify_loss(sk, ack_flag); if (!(icsk->icsk_ca_state == TCP_CA_Open || (*ack_flag & FLAG_LOST_RETRANS))) return; @@ -2877,7 +2879,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked, if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); - tcp_rack_identify_loss(sk, ack_flag, ack_time); + tcp_rack_identify_loss(sk, ack_flag); if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag); return; @@ -3059,8 +3061,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, { const struct inet_connection_sock *icsk = inet_csk(sk); struct skb_mstamp first_ackt, last_ackt; - struct skb_mstamp *now = &sack->ack_time; struct tcp_sock *tp = tcp_sk(sk); + struct skb_mstamp *now = &tp->tcp_mstamp; u32 prior_sacked = tp->sacked_out; u32 reord = tp->packets_out; bool fully_acked = true; @@ -3120,8 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tp->delivered += acked_pcount; if (!tcp_skb_spurious_retrans(tp, skb)) tcp_rack_advance(tp, sacked, scb->end_seq, - &skb->skb_mstamp, - &sack->ack_time); + &skb->skb_mstamp); } if (sacked & TCPCB_LOST) tp->lost_out -= acked_pcount; @@ -3576,8 +3577,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (after(ack, tp->snd_nxt)) goto invalid_ack; - skb_mstamp_get(&sack_state.ack_time); - if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) tcp_rearm_rto(sk); @@ -3647,8 +3646,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (tcp_ack_is_dubious(sk, flag)) { is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); - tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit, - &sack_state.ack_time); + tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); } if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); @@ -3660,8 +3658,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_schedule_loss_probe(sk); delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ lost = tp->lost - lost; /* freshly marked lost */ - tcp_rate_gen(sk, delivered, lost, &sack_state.ack_time, - sack_state.rate); + tcp_rate_gen(sk, delivered, lost, sack_state.rate); tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); tcp_xmit_recovery(sk, rexmit); return 1; @@ -3669,8 +3666,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) - tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit, - &sack_state.ack_time); + tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. 
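The hunks above all follow one pattern: instead of threading an ack_time argument through tcp_fastretrans_alert(), tcp_rack_advance() and the SACK/rate plumbing, the stack samples the clock once per received ACK into tp->tcp_mstamp and lets every consumer read that cached value. A minimal userspace sketch of the idea with stand-in types (mstamp_get() here wraps clock_gettime(); it is not the kernel's skb_mstamp API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct mstamp { uint64_t us; };            /* stand-in for struct skb_mstamp */

static void mstamp_get(struct mstamp *m)   /* stamp once per ACK, like skb_mstamp_get() */
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        m->us = (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static uint64_t mstamp_us_delta(const struct mstamp *a, const struct mstamp *b)
{
        return a->us - b->us;              /* caller guarantees a >= b */
}

struct demo_sock { struct mstamp tcp_mstamp; };

/* helpers read the cached stamp instead of taking an ack_time argument */
static uint64_t rtt_since(struct demo_sock *sk, const struct mstamp *xmit_time)
{
        return mstamp_us_delta(&sk->tcp_mstamp, xmit_time);
}

int main(void)
{
        struct demo_sock sk;
        struct mstamp xmit;

        mstamp_get(&xmit);                 /* pretend a segment was sent */
        mstamp_get(&sk.tcp_mstamp);        /* one stamp at ACK-processing entry */
        printf("rtt sample: %llu us\n",
               (unsigned long long)rtt_since(&sk, &xmit));
        return 0;
}

One clock read per ACK also guarantees that RACK, the rate sampler and the receive-RTT estimator all see the same notion of "now" for a given packet.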
@@ -3691,11 +3687,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { - skb_mstamp_get(&sack_state.ack_time); flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); - tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit, - &sack_state.ack_time); + tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); tcp_xmit_recovery(sk, rexmit); } @@ -3768,11 +3762,12 @@ void tcp_parse_options(const struct sk_buff *skb, !estab && sysctl_tcp_window_scaling) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; - if (snd_wscale > 14) { - net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", + if (snd_wscale > TCP_MAX_WSCALE) { + net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n", __func__, - snd_wscale); - snd_wscale = 14; + snd_wscale, + TCP_MAX_WSCALE); + snd_wscale = TCP_MAX_WSCALE; } opt_rx->snd_wscale = snd_wscale; } @@ -4007,10 +4002,10 @@ void tcp_reset(struct sock *sk) /* This barrier is coupled with smp_rmb() in tcp_poll() */ smp_wmb(); + tcp_done(sk); + if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); - - tcp_done(sk); } /* @@ -5299,8 +5294,16 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, if (rst_seq_match) tcp_reset(sk); - else + else { + /* Disable TFO if RST is out-of-order + * and no data has been received + * for current active TFO socket + */ + if (tp->syn_fastopen && !tp->data_segs_in && + sk->sk_state == TCP_ESTABLISHED) + tcp_fastopen_active_disable(sk); tcp_send_challenge_ack(sk, skb); + } goto discard; } @@ -5353,6 +5356,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); + skb_mstamp_get(&tp->tcp_mstamp); if (unlikely(!sk->sk_rx_dst)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* @@ -5579,10 +5583,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) else tp->pred_flags = 0; - if (!sock_flag(sk, SOCK_DEAD)) { - sk->sk_state_change(sk); - sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); - } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, @@ -5651,6 +5651,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; + bool fastopen_fail; tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) @@ -5754,10 +5755,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_finish_connect(sk, skb); - if ((tp->syn_fastopen || tp->syn_data) && - tcp_rcv_fastopen_synack(sk, skb, &foc)) - return -1; + fastopen_fail = (tp->syn_fastopen || tp->syn_data) && + tcp_rcv_fastopen_synack(sk, skb, &foc); + if (!sock_flag(sk, SOCK_DEAD)) { + sk->sk_state_change(sk); + sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); + } + if (fastopen_fail) + return -1; if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { @@ -5911,6 +5917,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) case TCP_SYN_SENT: tp->rx_opt.saw_tstamp = 0; + skb_mstamp_get(&tp->tcp_mstamp); queued = tcp_rcv_synsent_state_process(sk, skb, th); if (queued >= 0) return queued; @@ -5922,6 +5929,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) return 0; } + skb_mstamp_get(&tp->tcp_mstamp); tp->rx_opt.saw_tstamp = 0; req = 
tp->fastopen_rsk; if (req) { @@ -6041,9 +6049,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) break; } - if (tp->linger2 < 0 || - (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && - after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { + if (tp->linger2 < 0) { + tcp_done(sk); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); + return 1; + } + if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && + after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { + /* Receive out of order FIN after close() */ + if (tp->syn_fastopen && th->fin) + tcp_fastopen_active_disable(sk); tcp_done(sk); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; @@ -6333,36 +6348,14 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_free; if (isn && tmp_opt.tstamp_ok) - af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); + af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off); if (!want_cookie && !isn) { - /* VJ's idea. We save last timestamp seen - * from the destination in peer table, when entering - * state TIME-WAIT, and check against it before - * accepting new connection request. - * - * If "isn" is not zero, this request hit alive - * timewait bucket, so that all the necessary checks - * are made in the function processing timewait state. - */ - if (net->ipv4.tcp_death_row.sysctl_tw_recycle) { - bool strict; - - dst = af_ops->route_req(sk, &fl, req, &strict); - - if (dst && strict && - !tcp_peer_is_proven(req, dst, true, - tmp_opt.saw_tstamp)) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); - goto drop_and_release; - } - } /* Kill the following clause, if you dislike this way. */ - else if (!net->ipv4.sysctl_tcp_syncookies && - (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < - (net->ipv4.sysctl_max_syn_backlog >> 2)) && - !tcp_peer_is_proven(req, dst, false, - tmp_opt.saw_tstamp)) { + if (!net->ipv4.sysctl_tcp_syncookies && + (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < + (net->ipv4.sysctl_max_syn_backlog >> 2)) && + !tcp_peer_is_proven(req, dst)) { /* Without syncookies last quarter of * backlog is filled with destinations, * proven to be alive. 
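To make the blackhole back-off used by tcp_fastopen_active_should_disable() concrete: each consecutive disable event doubles the quiet period, capped at 2^6 times the base sysctl. A self-contained sketch of just that arithmetic (the 1-hour constant mirrors the patch's default; the function name is illustrative, not kernel API):

#include <stdio.h>

#define TFO_BLACKHOLE_BASE_SEC (60 * 60)    /* mirrors the 1hr sysctl default */

static unsigned long tfo_disable_period_sec(int times)
{
        int shift = times - 1;

        if (shift > 6)                      /* cap at 2^6 * base, as in the patch */
                shift = 6;
        return (1UL << shift) * TFO_BLACKHOLE_BASE_SEC;
}

int main(void)
{
        for (int times = 1; times <= 8; times++)
                printf("disable #%d -> %lu hours\n",
                       times, tfo_disable_period_sec(times) / 3600);
        return 0;
}

With the default base of one hour, successive events keep active TFO off for 1, 2, 4, ... up to a steady 64 hours, until a marked non-loopback connection exchanges data and resets the counter.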
@@ -6375,10 +6368,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_release; } - isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off); + isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off); } if (!dst) { - dst = af_ops->route_req(sk, &fl, req, NULL); + dst = af_ops->route_req(sk, &fl, req); if (!dst) goto drop_and_free; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 575e19dcc01763ef3fa938dea3ea51995b573163..cbbafe546c0f5c5f43531eaf24f5b460264785c6 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -94,12 +94,12 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, struct inet_hashinfo tcp_hashinfo; EXPORT_SYMBOL(tcp_hashinfo); -static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff) +static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff) { - return secure_tcp_sequence_number(ip_hdr(skb)->daddr, - ip_hdr(skb)->saddr, - tcp_hdr(skb)->dest, - tcp_hdr(skb)->source, tsoff); + return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, + tcp_hdr(skb)->dest, + tcp_hdr(skb)->source, tsoff); } int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) @@ -198,10 +198,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) tp->write_seq = 0; } - if (tcp_death_row->sysctl_tw_recycle && - !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) - tcp_fetch_timewait_stamp(sk, &rt->dst); - inet->inet_dport = usin->sin_port; sk_daddr_set(sk, daddr); @@ -236,11 +232,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) rt = NULL; if (likely(!tp->repair)) { - seq = secure_tcp_sequence_number(inet->inet_saddr, - inet->inet_daddr, - inet->inet_sport, - usin->sin_port, - &tp->tsoffset); + seq = secure_tcp_seq_and_tsoff(inet->inet_saddr, + inet->inet_daddr, + inet->inet_sport, + usin->sin_port, + &tp->tsoffset); if (!tp->write_seq) tp->write_seq = seq; } @@ -1217,19 +1213,9 @@ static void tcp_v4_init_req(struct request_sock *req, static struct dst_entry *tcp_v4_route_req(const struct sock *sk, struct flowi *fl, - const struct request_sock *req, - bool *strict) + const struct request_sock *req) { - struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); - - if (strict) { - if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr) - *strict = true; - else - *strict = false; - } - - return dst; + return inet_csk_route_req(sk, &fl->u.ip4, req); } struct request_sock_ops tcp_request_sock_ops __read_mostly = { @@ -1253,7 +1239,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { .cookie_init_seq = cookie_v4_init_sequence, #endif .route_req = tcp_v4_route_req, - .init_seq = tcp_v4_init_sequence, + .init_seq_tsoff = tcp_v4_init_seq_and_tsoff, .send_synack = tcp_v4_send_synack, }; @@ -1423,8 +1409,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (!nsk) goto discard; if (nsk != sk) { - sock_rps_save_rxhash(nsk, skb); - sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; goto reset; @@ -1871,6 +1855,9 @@ void tcp_v4_destroy_sock(struct sock *sk) /* Cleanup up the write buffer. */ tcp_write_queue_purge(sk); + /* Check if we want to disable active TFO */ + tcp_fastopen_active_disable_ofo_check(sk); + /* Cleans up our, hopefully empty, out_of_order_queue. 
*/ skb_rbtree_purge(&tp->out_of_order_queue); @@ -2466,7 +2453,6 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_tw_reuse = 0; cnt = tcp_hashinfo.ehash_mask + 1; - net->ipv4.tcp_death_row.sysctl_tw_recycle = 0; net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2; net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo; diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index 046fd3910873306d74207615d6997e1c847ea361..d6fb6c067af4641f232b94e7c590c212648e8173 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -264,13 +264,15 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample) { struct tcp_sock *tp = tcp_sk(sk); struct lp *lp = inet_csk_ca(sk); + u32 delta; if (sample->rtt_us > 0) tcp_lp_rtt_sample(sk, sample->rtt_us); /* calc inference */ - if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) - lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr); + delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr; + if ((s32)delta > 0) + lp->inference = 3 * delta; /* test if within inference */ if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference)) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 0f46e5fe31ad1b6809ada1f70bce7b63df4f8c9c..9d0d4f39e42be15d8ad7389bc7562449b92ea5fa 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -45,8 +45,6 @@ struct tcp_metrics_block { struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; - u32 tcpm_ts; - u32 tcpm_ts_stamp; u32 tcpm_lock; u32 tcpm_vals[TCP_METRIC_MAX_KERNEL + 1]; struct tcp_fastopen_metrics tcpm_fastopen; @@ -123,8 +121,6 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH); tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND); tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); - tm->tcpm_ts = 0; - tm->tcpm_ts_stamp = 0; if (fastopen_clear) { tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; @@ -273,48 +269,6 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req, return tm; } -static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw) -{ - struct tcp_metrics_block *tm; - struct inetpeer_addr saddr, daddr; - unsigned int hash; - struct net *net; - - if (tw->tw_family == AF_INET) { - inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr); - inetpeer_set_addr_v4(&daddr, tw->tw_daddr); - hash = ipv4_addr_hash(tw->tw_daddr); - } -#if IS_ENABLED(CONFIG_IPV6) - else if (tw->tw_family == AF_INET6) { - if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) { - inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr); - inetpeer_set_addr_v4(&daddr, tw->tw_daddr); - hash = ipv4_addr_hash(tw->tw_daddr); - } else { - inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr); - inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr); - hash = ipv6_addr_hash(&tw->tw_v6_daddr); - } - } -#endif - else - return NULL; - - net = twsk_net(tw); - hash ^= net_hash_mix(net); - hash = hash_32(hash, tcp_metrics_hash_log); - - for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; - tm = rcu_dereference(tm->tcpm_next)) { - if (addr_same(&tm->tcpm_saddr, &saddr) && - addr_same(&tm->tcpm_daddr, &daddr) && - net_eq(tm_net(tm), net)) - break; - } - return tm; -} - static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, struct dst_entry *dst, bool create) @@ -573,8 +527,7 @@ void tcp_init_metrics(struct sock *sk) tp->snd_cwnd_stamp = tcp_time_stamp; } -bool tcp_peer_is_proven(struct request_sock 
*req, struct dst_entry *dst, - bool paws_check, bool timestamps) +bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst) { struct tcp_metrics_block *tm; bool ret; @@ -584,94 +537,10 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, rcu_read_lock(); tm = __tcp_get_metrics_req(req, dst); - if (paws_check) { - if (tm && - (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL && - ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW || - !timestamps)) - ret = false; - else - ret = true; - } else { - if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp) - ret = true; - else - ret = false; - } - rcu_read_unlock(); - - return ret; -} - -void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst) -{ - struct tcp_metrics_block *tm; - - rcu_read_lock(); - tm = tcp_get_metrics(sk, dst, true); - if (tm) { - struct tcp_sock *tp = tcp_sk(sk); - - if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) { - tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp; - tp->rx_opt.ts_recent = tm->tcpm_ts; - } - } - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp); - -/* VJ's idea. Save last timestamp seen from this destination and hold - * it at least for normal timewait interval to use for duplicate - * segment detection in subsequent connections, before they enter - * synchronized state. - */ -bool tcp_remember_stamp(struct sock *sk) -{ - struct dst_entry *dst = __sk_dst_get(sk); - bool ret = false; - - if (dst) { - struct tcp_metrics_block *tm; - - rcu_read_lock(); - tm = tcp_get_metrics(sk, dst, true); - if (tm) { - struct tcp_sock *tp = tcp_sk(sk); - - if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 || - ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL && - tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) { - tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp; - tm->tcpm_ts = tp->rx_opt.ts_recent; - } - ret = true; - } - rcu_read_unlock(); - } - return ret; -} - -bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw) -{ - struct tcp_metrics_block *tm; - bool ret = false; - - rcu_read_lock(); - tm = __tcp_get_metrics_tw(tw); - if (tm) { - const struct tcp_timewait_sock *tcptw; - struct sock *sk = (struct sock *) tw; - - tcptw = tcp_twsk(sk); - if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 || - ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL && - tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) { - tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp; - tm->tcpm_ts = tcptw->tw_ts_recent; - } + if (tm && tcp_metric_get(tm, TCP_METRIC_RTT)) ret = true; - } + else + ret = false; rcu_read_unlock(); return ret; @@ -791,14 +660,6 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, jiffies - tm->tcpm_stamp, TCP_METRICS_ATTR_PAD) < 0) goto nla_put_failure; - if (tm->tcpm_ts_stamp) { - if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP, - (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0) - goto nla_put_failure; - if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL, - tm->tcpm_ts) < 0) - goto nla_put_failure; - } { int n = 0; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 65c0f3d13eca47c6394c09925decf54287d01b48..8f6373b0cd7729e7afde1b733879058197e9c5ca 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -26,6 +26,7 @@ #include <net/tcp.h> #include <net/inet_common.h> #include <net/xfrm.h> +#include <net/busy_poll.h> int sysctl_tcp_abort_on_overflow __read_mostly; @@ -94,7 +95,6 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_options_received tmp_opt; struct tcp_timewait_sock
*tcptw = tcp_twsk((struct sock *)tw); bool paws_reject = false; - struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock*)tw)->ipv4.tcp_death_row; tmp_opt.saw_tstamp = 0; if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { @@ -149,12 +149,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, tcptw->tw_ts_recent = tmp_opt.rcv_tsval; } - if (tcp_death_row->sysctl_tw_recycle && - tcptw->tw_ts_recent_stamp && - tcp_tw_remember_stamp(tw)) - inet_twsk_reschedule(tw, tw->tw_timeout); - else - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); return TCP_TW_ACK; } @@ -259,12 +254,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); struct inet_timewait_sock *tw; - bool recycle_ok = false; struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; - if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) - recycle_ok = tcp_remember_stamp(sk); - tw = inet_twsk_alloc(sk, tcp_death_row, state); if (tw) { @@ -317,13 +308,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) if (timeo < rto) timeo = rto; - if (recycle_ok) { - tw->tw_timeout = rto; - } else { - tw->tw_timeout = TCP_TIMEWAIT_LEN; - if (state == TCP_TIME_WAIT) - timeo = TCP_TIMEWAIT_LEN; - } + tw->tw_timeout = TCP_TIMEWAIT_LEN; + if (state == TCP_TIME_WAIT) + timeo = TCP_TIMEWAIT_LEN; inet_twsk_schedule(tw, timeo); /* Linkage updates. */ @@ -813,6 +800,9 @@ int tcp_child_process(struct sock *parent, struct sock *child, int ret = 0; int state = child->sk_state; + /* record NAPI ID of child */ + sk_mark_napi_id(child, skb); + tcp_segs_in(tcp_sk(child), skb); if (!sock_owned_by_user(child)) { ret = tcp_rcv_state_process(child, skb); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a85d863c44196e60fd22e25471cf773e72d2c133..60111a0fc2017e817fc37087b3862b82e002da0f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -212,12 +212,12 @@ void tcp_select_initial_window(int __space, __u32 mss, /* If no clamp set the clamp to the max possible scaled window */ if (*window_clamp == 0) - (*window_clamp) = (65535 << 14); + (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE); space = min(*window_clamp, space); /* Quantize space offering to a multiple of mss if possible. */ if (space > mss) - space = (space / mss) * mss; + space = rounddown(space, mss); /* NOTE: offering an initial window larger than 32767 * will break some buggy TCP stacks. 
If the admin tells us @@ -234,13 +234,11 @@ void tcp_select_initial_window(int __space, __u32 mss, (*rcv_wscale) = 0; if (wscale_ok) { - /* Set window scaling on max possible window - * See RFC1323 for an explanation of the limit to 14 - */ + /* Set window scaling on max possible window */ space = max_t(u32, space, sysctl_tcp_rmem[2]); space = max_t(u32, space, sysctl_rmem_max); space = min_t(u32, space, *window_clamp); - while (space > 65535 && (*rcv_wscale) < 14) { + while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) { space >>= 1; (*rcv_wscale)++; } @@ -253,7 +251,7 @@ void tcp_select_initial_window(int __space, __u32 mss, } /* Set the clamp no higher than max representable value */ - (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); + (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); } EXPORT_SYMBOL(tcp_select_initial_window); @@ -2566,7 +2564,6 @@ u32 __tcp_select_window(struct sock *sk) /* Don't do rounding if we are using window scaling, since the * scaled window will not line up with the MSS boundary anyway. */ - window = tp->rcv_wnd; if (tp->rx_opt.rcv_wscale) { window = free_space; @@ -2574,10 +2571,9 @@ u32 __tcp_select_window(struct sock *sk) * Important case: prevent zero window announcement if * 1<<rcv_wscale > mss. */ - if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) - window = (((window >> tp->rx_opt.rcv_wscale) + 1) - << tp->rx_opt.rcv_wscale); + window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); } else { + window = tp->rcv_wnd; /* Get the largest window that is a nice multiple of mss. * Window clamp already applied above. * If our current window offering is within 1 mss of the @@ -2587,7 +2583,7 @@ u32 __tcp_select_window(struct sock *sk) * is too small. */ if (window <= free_space - mss || window > free_space) - window = (free_space / mss) * mss; + window = rounddown(free_space, mss); else if (mss == full_space && free_space > window + (full_space >> 1)) window = free_space; diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c index 9be1581a5a08c36f4544fbdabedd9741fb266a1e..c6a9fa8946462100947ab62d86464ff8f99565c2 100644 --- a/net/ipv4/tcp_rate.c +++ b/net/ipv4/tcp_rate.c @@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, /* Update the connection delivery information and generate a rate sample. */ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, - struct skb_mstamp *now, struct rate_sample *rs) + struct rate_sample *rs) { struct tcp_sock *tp = tcp_sk(sk); u32 snd_us, ack_us; @@ -120,7 +120,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, * to carry current time, flags, stats like "tcp_sacktag_state". */ if (delivered) - tp->delivered_mstamp = *now; + tp->delivered_mstamp = tp->tcp_mstamp; rs->acked_sacked = delivered; /* freshly ACKed or SACKed */ rs->losses = lost; /* freshly marked lost */ @@ -138,7 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, * longer phase. */ snd_us = rs->interval_us; /* send phase */ - ack_us = skb_mstamp_us_delta(now, &rs->prior_mstamp); /* ack phase */ + ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp, + &rs->prior_mstamp); /* ack phase */ rs->interval_us = max(snd_us, ack_us); /* Normally we expect interval_us >= min-rtt.
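tcp_rate_gen() still forms the sampling interval as the longer of the send phase and the ACK phase, only now the ACK-phase end point comes from the cached tp->tcp_mstamp. A simplified model of that computation (plain integers instead of struct skb_mstamp):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the fields tcp_rate_gen() consults */
struct sample {
        uint64_t first_tx_us;   /* first send in the interval */
        uint64_t last_tx_us;    /* last send in the interval  */
        uint64_t prior_ack_us;  /* delivered_mstamp of the prior sample */
        uint64_t now_us;        /* tp->tcp_mstamp at ACK processing */
};

static uint64_t interval_us(const struct sample *s)
{
        uint64_t snd_us = s->last_tx_us - s->first_tx_us;  /* send phase */
        uint64_t ack_us = s->now_us - s->prior_ack_us;     /* ack phase  */

        return snd_us > ack_us ? snd_us : ack_us;          /* max(), as in the patch */
}

int main(void)
{
        struct sample s = { 1000, 1400, 1100, 1900 };

        /* send phase 400us, ack phase 800us -> interval 800us */
        printf("interval: %llu us\n", (unsigned long long)interval_us(&s));
        return 0;
}

Taking the max keeps a sample honest when one phase is compressed, e.g. when a burst is sent back-to-back but acknowledged over a full RTT.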
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index d8acbd9f477a2ac6b0f8eee1bf59f3ab43abff07..362b8c75bfab44cf87c2a01398a146a271bc1119 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -45,8 +45,7 @@ static bool tcp_rack_sent_after(const struct skb_mstamp *t1, * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will * make us enter the CA_Recovery state. */ -static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, - u32 *reo_timeout) +static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -79,7 +78,7 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, * A packet is lost if its elapsed time is beyond * the recent RTT plus the reordering window. */ - u32 elapsed = skb_mstamp_us_delta(now, + u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp, &skb->skb_mstamp); s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed; @@ -105,7 +104,7 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now, } } -void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) +void tcp_rack_mark_lost(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); u32 timeout; @@ -115,7 +114,7 @@ void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) /* Reset the advanced flag to avoid unnecessary queue scanning */ tp->rack.advanced = 0; - tcp_rack_detect_loss(sk, now, &timeout); + tcp_rack_detect_loss(sk, &timeout); if (timeout) { timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN); inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, @@ -128,8 +127,7 @@ void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now) * draft-cheng-tcpm-rack-00.txt */ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, - const struct skb_mstamp *xmit_time, - const struct skb_mstamp *ack_time) + const struct skb_mstamp *xmit_time) { u32 rtt_us; @@ -138,7 +136,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, end_seq, tp->rack.end_seq)) return; - rtt_us = skb_mstamp_us_delta(ack_time, xmit_time); + rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time); if (sacked & TCPCB_RETRANS) { /* If the sacked packet was retransmitted, it's ambiguous * whether the retransmission or the original (or the prior @@ -165,12 +163,11 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, void tcp_rack_reo_timeout(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - struct skb_mstamp now; u32 timeout, prior_inflight; - skb_mstamp_get(&now); prior_inflight = tcp_packets_in_flight(tp); - tcp_rack_detect_loss(sk, &now, &timeout); + skb_mstamp_get(&tp->tcp_mstamp); + tcp_rack_detect_loss(sk, &timeout); if (prior_inflight != tcp_packets_in_flight(tp)) { if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) { tcp_enter_recovery(sk, false); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b2ab411c6d3728fa7dbdebde045532a7317f5166..14672543cf0bd27bc59976d5cec38d2d3bbcdd2c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -201,11 +201,10 @@ static int tcp_write_timeout(struct sock *sk) if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) { /* Some middle-boxes may black-hole Fast Open _after_ * the handshake. Therefore we conservatively disable - * Fast Open on this path on recurring timeouts with - * few or zero bytes acked after Fast Open. + * Fast Open on this path on recurring timeouts after + * successful Fast Open. 
*/ - if (tp->syn_data_acked && - tp->bytes_acked <= tp->rx_opt.mss_clamp) { + if (tp->syn_data_acked) { tcp_fastopen_cache_set(sk, 0, NULL, true, 0); if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1) NET_INC_STATS(sock_net(sk), diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index fed66dc0e0f5f242cf0af25434fa9cfa89998958..9775453b8d174c848dc09df83d1fa185422cd8cc 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -265,8 +265,8 @@ static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr, if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { info->vegas.tcpv_enabled = 1; info->vegas.tcpv_rttcnt = 0; - info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt), - info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), + info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt); + info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min); *attr = INET_DIAG_VEGASINFO; return sizeof(struct tcpvegas_info); diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index 4acc0508c5ebc65dc392de50a207901b2ea8d305..3d36644890bb6d3b0a755c811c60e920ad5cd8b8 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -12,6 +12,7 @@ #include <net/dst.h> #include <net/ip.h> #include <net/xfrm.h> +#include <net/protocol.h> /* Add encapsulation header. * @@ -23,6 +24,8 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; + skb_set_inner_transport_header(skb, skb_transport_offset(skb)); + skb_set_network_header(skb, -x->props.header_len); skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol); @@ -56,9 +59,40 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) return 0; } +static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x, + struct sk_buff *skb, + netdev_features_t features) +{ + const struct net_offload *ops; + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct xfrm_offload *xo = xfrm_offload(skb); + + skb->transport_header += x->props.header_len; + ops = rcu_dereference(inet_offloads[xo->proto]); + if (likely(ops && ops->callbacks.gso_segment)) + segs = ops->callbacks.gso_segment(skb, features); + + return segs; +} + +static void xfrm4_transport_xmit(struct xfrm_state *x, struct sk_buff *skb) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + + skb_reset_mac_len(skb); + pskb_pull(skb, skb->mac_len + sizeof(struct iphdr) + x->props.header_len); + + if (xo->flags & XFRM_GSO_SEGMENT) { + skb_reset_transport_header(skb); + skb->transport_header -= x->props.header_len; + } +} + static struct xfrm_mode xfrm4_transport_mode = { .input = xfrm4_transport_input, .output = xfrm4_transport_output, + .gso_segment = xfrm4_transport_gso_segment, + .xmit = xfrm4_transport_xmit, .owner = THIS_MODULE, .encap = XFRM_MODE_TRANSPORT, }; diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 35feda67646494c92263cf30109432fb395fa1df..e6265e2c274e48a54cfc08afb9fee6c9fc2ad194 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -33,6 +33,9 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *top_iph; int flags; + skb_set_inner_network_header(skb, skb_network_offset(skb)); + skb_set_inner_transport_header(skb, skb_transport_offset(skb)); + skb_set_network_header(skb, -x->props.header_len); skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol); @@ -96,11 +99,36 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) return err; }
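The two hooks being added to struct xfrm_mode divide the GSO work: .gso_segment chooses the inner segmentation path (the inner L4 offload for transport mode above, a MAC-level resegmentation for tunnel mode below), while .xmit adjusts header pointers and pulls the outer headers just before the ESP transform. The selection itself is ordinary function-pointer dispatch; a self-contained miniature of that wiring (stub handlers and toy types, not kernel code):

#include <stdio.h>

struct pkt { int len; };

/* miniature of struct xfrm_mode's new hooks */
struct mode_ops {
        const char *name;
        void (*gso_segment)(struct pkt *p);
        void (*xmit)(struct pkt *p);
};

static void transport_gso(struct pkt *p)  { printf("transport: inner L4 segmenting, len=%d\n", p->len); }
static void transport_xmit(struct pkt *p) { printf("transport: pull IP hdr + ESP hdr\n"); }
static void tunnel_gso(struct pkt *p)     { printf("tunnel: MAC-level resegmentation, len=%d\n", p->len); }
static void tunnel_xmit(struct pkt *p)    { printf("tunnel: rewind network/transport hdrs\n"); }

static const struct mode_ops modes[] = {
        { "transport", transport_gso, transport_xmit },
        { "tunnel",    tunnel_gso,    tunnel_xmit    },
};

int main(void)
{
        struct pkt p = { .len = 9000 };

        for (unsigned int i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
                printf("-- %s --\n", modes[i].name);
                modes[i].gso_segment(&p);  /* called while segmenting a GSO skb */
                modes[i].xmit(&p);         /* called just before the transform */
        }
        return 0;
}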
+static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x, + struct sk_buff *skb, + netdev_features_t features) +{ + __skb_push(skb, skb->mac_len); + return skb_mac_gso_segment(skb, features); + +} + +static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + + if (xo->flags & XFRM_GSO_SEGMENT) { + skb->network_header = skb->network_header - x->props.header_len; + skb->transport_header = skb->network_header + + sizeof(struct iphdr); + } + + skb_reset_mac_len(skb); + pskb_pull(skb, skb->mac_len + x->props.header_len); +} + static struct xfrm_mode xfrm4_tunnel_mode = { .input2 = xfrm4_mode_tunnel_input, .input = xfrm_prepare_input, .output2 = xfrm4_mode_tunnel_output, .output = xfrm4_prepare_output, + .gso_segment = xfrm4_mode_tunnel_gso_segment, + .xmit = xfrm4_mode_tunnel_xmit, .owner = THIS_MODULE, .encap = XFRM_MODE_TUNNEL, .flags = XFRM_MODE_FLAG_TUNNEL, diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 7ee6518afa86ff785cacda0f115297ff6e5d0fa5..94b8702603bc54f36542977ed0d9c3b93d43b6b7 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -29,7 +29,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) goto out; mtu = dst_mtu(skb_dst(skb)); - if (skb->len > mtu) { + if ((!skb_is_gso(skb) && skb->len > mtu) || + (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) { skb->protocol = htons(ETH_P_IP); if (skb->sk) diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index e2afe677a9d944a2c6c27a2e7b2d06227712cf89..48c452959d2c2fe687472c3732fe40246a8c863a 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -307,6 +307,7 @@ config IPV6_SEG6_LWTUNNEL bool "IPv6: Segment Routing Header encapsulation support" depends on IPV6 select LWTUNNEL + select DST_CACHE ---help--- Support for encapsulation of packets within an outer IPv6 header and a Segment Routing Header using the lightweight diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0ea96c4d334da2821a8d9c0e5e7d0d513dcb4228..b09ac38d8dc47c890012357f1633091c74218942 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -224,6 +224,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .accept_ra_rtr_pref = 1, .rtr_probe_interval = 60 * HZ, #ifdef CONFIG_IPV6_ROUTE_INFO + .accept_ra_rt_info_min_plen = 0, .accept_ra_rt_info_max_plen = 0, #endif #endif @@ -245,6 +246,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { #endif .enhanced_dad = 1, .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, + .disable_policy = 0, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -276,6 +278,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .accept_ra_rtr_pref = 1, .rtr_probe_interval = 60 * HZ, #ifdef CONFIG_IPV6_ROUTE_INFO + .accept_ra_rt_info_min_plen = 0, .accept_ra_rt_info_max_plen = 0, #endif #endif @@ -297,6 +300,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { #endif .enhanced_dad = 1, .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64, + .disable_policy = 0, }; /* Check if a valid qdisc is available */ @@ -545,6 +549,9 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0) goto nla_put_failure; + if (!devconf) + goto out; + if ((all || type == NETCONFA_FORWARDING) && nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0) goto nla_put_failure; @@ -563,6 +570,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, 
devconf->ignore_routes_with_linkdown) < 0) goto nla_put_failure; +out: nlmsg_end(skb, nlh); return 0; @@ -571,8 +579,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, return -EMSGSIZE; } -void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, - struct ipv6_devconf *devconf) +void inet6_netconf_notify_devconf(struct net *net, int event, int type, + int ifindex, struct ipv6_devconf *devconf) { struct sk_buff *skb; int err = -ENOBUFS; @@ -582,7 +590,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, goto errout; err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, - RTM_NEWNETCONF, 0, type); + event, 0, type); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); @@ -603,7 +611,8 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { }; static int inet6_netconf_get_devconf(struct sk_buff *in_skb, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX+1]; @@ -616,7 +625,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb, int err; err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, - devconf_ipv6_policy); + devconf_ipv6_policy, extack); if (err < 0) goto errout; @@ -765,7 +774,8 @@ static void dev_forward_change(struct inet6_dev *idev) else addrconf_leave_anycast(ifa); } - inet6_netconf_notify_devconf(dev_net(dev), NETCONFA_FORWARDING, + inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, + NETCONFA_FORWARDING, dev->ifindex, &idev->cnf); } @@ -800,7 +810,8 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) if (p == &net->ipv6.devconf_dflt->forwarding) { if ((!newf) ^ (!old)) - inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); rtnl_unlock(); @@ -812,13 +823,15 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) net->ipv6.devconf_dflt->forwarding = newf; if ((!newf) ^ (!old_dflt)) - inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); addrconf_forward_change(net, newf); if ((!newf) ^ (!old)) - inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); } else if ((!newf) ^ (!old)) @@ -843,6 +856,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf) idev->cnf.ignore_routes_with_linkdown = newf; if (changed) inet6_netconf_notify_devconf(dev_net(dev), + RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, dev->ifindex, &idev->cnf); @@ -865,6 +879,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf) if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) { if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, + RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); @@ -877,6 +892,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf) addrconf_linkdown_change(net, newf); if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, + RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); @@ -944,6 +960,7 @@ ipv6_add_addr(struct 
inet6_dev *idev, const struct in6_addr *addr, const struct in6_addr *peer_addr, int pfxlen, int scope, u32 flags, u32 valid_lft, u32 prefered_lft) { + struct net *net = dev_net(idev->dev); struct inet6_ifaddr *ifa = NULL; struct rt6_info *rt; unsigned int hash; @@ -990,6 +1007,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, goto out; } + if (net->ipv6.devconf_all->disable_policy || + idev->cnf.disable_policy) + rt->dst.flags |= DST_NOPOLICY; + neigh_parms_data_state_setall(idev->nd_parms); ifa->addr = *addr; @@ -2053,12 +2074,23 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) __ipv6_dev_ac_dec(ifp->idev, &addr); } -static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev) +static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev) { - if (dev->addr_len != EUI64_ADDR_LEN) + switch (dev->addr_len) { + case ETH_ALEN: + memcpy(eui, dev->dev_addr, 3); + eui[3] = 0xFF; + eui[4] = 0xFE; + memcpy(eui + 5, dev->dev_addr + 3, 3); + break; + case EUI64_ADDR_LEN: + memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN); + eui[0] ^= 2; + break; + default: return -1; - memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN); - eui[0] ^= 2; + } + return 0; } @@ -2150,7 +2182,7 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) case ARPHRD_TUNNEL: return addrconf_ifid_gre(eui, dev); case ARPHRD_6LOWPAN: - return addrconf_ifid_eui64(eui, dev); + return addrconf_ifid_6lowpan(eui, dev); case ARPHRD_IEEE1394: return addrconf_ifid_ieee1394(eui, dev); case ARPHRD_TUNNEL6: @@ -4392,7 +4424,8 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = { }; static int -inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) +inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; @@ -4401,7 +4434,8 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) u32 ifa_flags; int err; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, + extack); if (err < 0) return err; @@ -4501,7 +4535,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, } static int -inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) +inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; @@ -4513,7 +4548,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) u32 ifa_flags; int err; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, + extack); if (err < 0) return err; @@ -4863,7 +4899,8 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) return inet6_dump_addr(skb, cb, type); } -static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh) +static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct ifaddrmsg *ifm; @@ -4874,7 +4911,8 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh) struct sk_buff *skb; int err; - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, + extack); if (err < 0) goto errout; @@ -4985,6 +5023,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_RTR_PROBE_INTERVAL] = 
jiffies_to_msecs(cnf->rtr_probe_interval); #ifdef CONFIG_IPV6_ROUTE_INFO + array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen; array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; #endif #endif @@ -5016,6 +5055,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, #endif array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad; array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode; + array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy; } static inline size_t inet6_ifla6_size(void) @@ -5242,7 +5282,8 @@ static int inet6_validate_link_af(const struct net_device *dev, if (dev && !__in6_dev_get(dev)) return -EAFNOSUPPORT; - return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy); + return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy, + NULL); } static int check_addr_gen_mode(int mode) @@ -5274,7 +5315,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla) if (!idev) return -EAFNOSUPPORT; - if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL) < 0) + if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) BUG(); if (tb[IFLA_INET6_TOKEN]) { @@ -5677,17 +5718,20 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write, return restart_syscall(); if (valp == &net->ipv6.devconf_dflt->proxy_ndp) - inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_PROXY_NEIGH, NETCONFA_IFINDEX_DEFAULT, net->ipv6.devconf_dflt); else if (valp == &net->ipv6.devconf_all->proxy_ndp) - inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_PROXY_NEIGH, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); else { struct inet6_dev *idev = ctl->extra1; - inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_PROXY_NEIGH, idev->dev->ifindex, &idev->cnf); } @@ -5840,6 +5884,105 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, return ret; } +static +void addrconf_set_nopolicy(struct rt6_info *rt, int action) +{ + if (rt) { + if (action) + rt->dst.flags |= DST_NOPOLICY; + else + rt->dst.flags &= ~DST_NOPOLICY; + } +} + +static +void addrconf_disable_policy_idev(struct inet6_dev *idev, int val) +{ + struct inet6_ifaddr *ifa; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + spin_lock(&ifa->lock); + if (ifa->rt) { + struct rt6_info *rt = ifa->rt; + struct fib6_table *table = rt->rt6i_table; + int cpu; + + read_lock(&table->tb6_lock); + addrconf_set_nopolicy(ifa->rt, val); + if (rt->rt6i_pcpu) { + for_each_possible_cpu(cpu) { + struct rt6_info **rtp; + + rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu); + addrconf_set_nopolicy(*rtp, val); + } + } + read_unlock(&table->tb6_lock); + } + spin_unlock(&ifa->lock); + } + read_unlock_bh(&idev->lock); +} + +static +int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val) +{ + struct inet6_dev *idev; + struct net *net; + + if (!rtnl_trylock()) + return restart_syscall(); + + *valp = val; + + net = (struct net *)ctl->extra2; + if (valp == &net->ipv6.devconf_dflt->disable_policy) { + rtnl_unlock(); + return 0; + } + + if (valp == &net->ipv6.devconf_all->disable_policy) { + struct net_device *dev; + + for_each_netdev(net, dev) { + idev = __in6_dev_get(dev); + if (idev) + addrconf_disable_policy_idev(idev, val); + } + } else { + idev = (struct inet6_dev *)ctl->extra1; + addrconf_disable_policy_idev(idev, val); + } + + 
rtnl_unlock(); + return 0; +} + +static +int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int *valp = ctl->data; + int val = *valp; + loff_t pos = *ppos; + struct ctl_table lctl; + int ret; + + lctl = *ctl; + lctl.data = &val; + ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); + + if (write && (*valp != val)) + ret = addrconf_disable_policy(ctl, valp, val); + + if (ret) + *ppos = pos; + + return ret; +} + static int minus_one = -1; static const int one = 1; static const int two_five_five = 255; @@ -6027,6 +6170,13 @@ static const struct ctl_table addrconf_sysctl[] = { .proc_handler = proc_dointvec_jiffies, }, #ifdef CONFIG_IPV6_ROUTE_INFO + { + .procname = "accept_ra_rt_info_min_plen", + .data = &ipv6_devconf.accept_ra_rt_info_min_plen, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "accept_ra_rt_info_max_plen", .data = &ipv6_devconf.accept_ra_rt_info_max_plen, @@ -6197,6 +6347,13 @@ static const struct ctl_table addrconf_sysctl[] = { .mode = 0644, .proc_handler = addrconf_sysctl_addr_gen_mode, }, + { + .procname = "disable_policy", + .data = &ipv6_devconf.disable_policy, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_disable_policy, + }, { /* sentinel */ } @@ -6237,7 +6394,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, ifindex = NETCONFA_IFINDEX_DEFAULT; else ifindex = idev->dev->ifindex; - inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p); + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, + ifindex, p); return 0; free: @@ -6246,7 +6404,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, return -ENOBUFS; } -static void __addrconf_sysctl_unregister(struct ipv6_devconf *p) +static void __addrconf_sysctl_unregister(struct net *net, + struct ipv6_devconf *p, int ifindex) { struct ctl_table *table; @@ -6257,6 +6416,8 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p) unregister_net_sysctl_table(p->sysctl_header); p->sysctl_header = NULL; kfree(table); + + inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL); } static int addrconf_sysctl_register(struct inet6_dev *idev) @@ -6280,7 +6441,8 @@ static int addrconf_sysctl_register(struct inet6_dev *idev) static void addrconf_sysctl_unregister(struct inet6_dev *idev) { - __addrconf_sysctl_unregister(&idev->cnf); + __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf, + idev->dev->ifindex); neigh_sysctl_unregister(idev->nd_parms); } @@ -6323,7 +6485,7 @@ static int __net_init addrconf_init_net(struct net *net) #ifdef CONFIG_SYSCTL err_reg_dflt: - __addrconf_sysctl_unregister(all); + __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL); err_reg_all: kfree(dflt); #endif @@ -6336,8 +6498,10 @@ static int __net_init addrconf_init_net(struct net *net) static void __net_exit addrconf_exit_net(struct net *net) { #ifdef CONFIG_SYSCTL - __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); - __addrconf_sysctl_unregister(net->ipv6.devconf_all); + __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt, + NETCONFA_IFINDEX_DEFAULT); + __addrconf_sysctl_unregister(net, net->ipv6.devconf_all, + NETCONFA_IFINDEX_ALL); #endif kfree(net->ipv6.devconf_dflt); kfree(net->ipv6.devconf_all); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index a8f6986dcbe5ea5a36a3cc6fcf51becebb50c33b..07cd7d248bb6822b787e4c74c8ef3d27d38e9d99 100644 --- a/net/ipv6/addrlabel.c +++ 
b/net/ipv6/addrlabel.c @@ -404,7 +404,8 @@ static const struct nla_policy ifal_policy[IFAL_MAX+1] = { [IFAL_LABEL] = { .len = sizeof(u32), }, }; -static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh) +static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifaddrlblmsg *ifal; @@ -413,7 +414,8 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh) u32 label; int err = 0; - err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); + err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy, + extack); if (err < 0) return err; @@ -521,7 +523,8 @@ static inline int ip6addrlbl_msgsize(void) + nla_total_size(4); /* IFAL_LABEL */ } -static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh) +static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct ifaddrlblmsg *ifal; @@ -532,7 +535,8 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh) struct ip6addrlbl_entry *p; struct sk_buff *skb; - err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); + err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy, + extack); if (err < 0) return err; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e82e59f22dfc0e8eabe6b8dd3e12f5c25533142b..a88b5b5b79558948dcbad84cbdda99c1d574102e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1003,6 +1003,10 @@ static int __init inet6_init(void) if (err) goto seg6_fail; + err = igmp6_late_init(); + if (err) + goto igmp6_late_err; + #ifdef CONFIG_SYSCTL err = ipv6_sysctl_register(); if (err) @@ -1017,8 +1021,10 @@ static int __init inet6_init(void) #ifdef CONFIG_SYSCTL sysctl_fail: - seg6_exit(); + igmp6_late_cleanup(); #endif +igmp6_late_err: + seg6_exit(); seg6_fail: calipso_exit(); calipso_fail: diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index ff54faa756317047af5af661d070ba9a5eb65429..1fe99ba8066c8de2a5717f0b611a152c94837734 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -170,19 +170,23 @@ static void esp_output_restore_header(struct sk_buff *skb) } static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb, + struct xfrm_state *x, struct ip_esp_hdr *esph, __be32 *seqhi) { - struct xfrm_state *x = skb_dst(skb)->xfrm; - /* For ESN we move the header forward by 4 bytes to * accommodate the high bits. We will move it back after * encryption.
*/ if ((x->props.flags & XFRM_STATE_ESN)) { + struct xfrm_offload *xo = xfrm_offload(skb); + esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); *seqhi = esph->spi; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); + if (xo) + esph->seq_no = htonl(xo->seq.hi); + else + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); } esph->spi = x->id.spi; @@ -214,61 +218,16 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) tail[plen - 1] = proto; } -static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) +int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { - int err; - struct ip_esp_hdr *esph; - struct crypto_aead *aead; - struct aead_request *req; - struct scatterlist *sg, *dsg; - struct sk_buff *trailer; - struct page *page; - void *tmp; - int blksize; - int clen; - int alen; - int plen; - int ivlen; - int tfclen; - int nfrags; - int assoclen; - int seqhilen; - int tailen; - u8 *iv; u8 *tail; u8 *vaddr; - __be32 *seqhi; - __be64 seqno; - __u8 proto = *skb_mac_header(skb); - - /* skb is pure payload to encrypt */ - aead = x->data; - alen = crypto_aead_authsize(aead); - ivlen = crypto_aead_ivsize(aead); - - tfclen = 0; - if (x->tfcpad) { - struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); - u32 padto; - - padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached)); - if (skb->len < padto) - tfclen = padto - skb->len; - } - blksize = ALIGN(crypto_aead_blocksize(aead), 4); - clen = ALIGN(skb->len + 2 + tfclen, blksize); - plen = clen - skb->len - tfclen; - tailen = tfclen + plen + alen; - - assoclen = sizeof(*esph); - seqhilen = 0; - - if (x->props.flags & XFRM_STATE_ESN) { - seqhilen += sizeof(__be32); - assoclen += seqhilen; - } + int nfrags; + struct page *page; + struct ip_esp_hdr *esph; + struct sk_buff *trailer; + int tailen = esp->tailen; - *skb_mac_header(skb) = IPPROTO_ESP; esph = ip_esp_hdr(skb); if (!skb_cloned(skb)) { @@ -284,6 +243,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) struct sock *sk = skb->sk; struct page_frag *pfrag = &x->xfrag; + esp->inplace = false; + allocsize = ALIGN(tailen, L1_CACHE_BYTES); spin_lock_bh(&x->lock); @@ -300,10 +261,12 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) tail = vaddr + pfrag->offset; - esp_output_fill_trailer(tail, tfclen, plen, proto); + esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); kunmap_atomic(vaddr); + spin_unlock_bh(&x->lock); + nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, @@ -319,108 +282,111 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) if (sk) atomic_add(tailen, &sk->sk_wmem_alloc); - skb_push(skb, -skb_network_offset(skb)); - - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); - esph->spi = x->id.spi; - - tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen); - if (!tmp) { - spin_unlock_bh(&x->lock); - err = -ENOMEM; - goto error; - } - seqhi = esp_tmp_seqhi(tmp); - iv = esp_tmp_iv(aead, tmp, seqhilen); - req = esp_tmp_req(aead, iv); - sg = esp_req_sg(aead, req); - dsg = &sg[nfrags]; - - esph = esp_output_set_esn(skb, esph, seqhi); - - sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, - (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); - - allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); - - if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { - spin_unlock_bh(&x->lock); - err = -ENOMEM; - goto error; - } - - skb_shinfo(skb)->nr_frags = 1; - - page = 
pfrag->page; - get_page(page); - /* replace page frags in skb with new page */ - __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); - pfrag->offset = pfrag->offset + allocsize; - - sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); - skb_to_sgvec(skb, dsg, - (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); - - spin_unlock_bh(&x->lock); - - goto skip_cow2; + goto out; } } cow: - err = skb_cow_data(skb, tailen, &trailer); - if (err < 0) - goto error; - nfrags = err; - + nfrags = skb_cow_data(skb, tailen, &trailer); + if (nfrags < 0) + goto out; tail = skb_tail_pointer(trailer); - esph = ip_esp_hdr(skb); skip_cow: - esp_output_fill_trailer(tail, tfclen, plen, proto); + esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); + pskb_put(skb, trailer, tailen); - pskb_put(skb, trailer, clen - skb->len + alen); - skb_push(skb, -skb_network_offset(skb)); +out: + return nfrags; +} +EXPORT_SYMBOL_GPL(esp6_output_head); - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); - esph->spi = x->id.spi; +int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) +{ + u8 *iv; + int alen; + void *tmp; + int ivlen; + int assoclen; + int seqhilen; + __be32 *seqhi; + struct page *page; + struct ip_esp_hdr *esph; + struct aead_request *req; + struct crypto_aead *aead; + struct scatterlist *sg, *dsg; + int err = -ENOMEM; - tmp = esp_alloc_tmp(aead, nfrags, seqhilen); - if (!tmp) { - err = -ENOMEM; - goto error; + assoclen = sizeof(struct ip_esp_hdr); + seqhilen = 0; + + if (x->props.flags & XFRM_STATE_ESN) { + seqhilen += sizeof(__be32); + assoclen += sizeof(__be32); } + aead = x->data; + alen = crypto_aead_authsize(aead); + ivlen = crypto_aead_ivsize(aead); + + tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen); + if (!tmp) + goto error; + seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); - dsg = sg; - esph = esp_output_set_esn(skb, esph, seqhi); + if (esp->inplace) + dsg = sg; + else + dsg = &sg[esp->nfrags]; - sg_init_table(sg, nfrags); + esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi); + + sg_init_table(sg, esp->nfrags); skb_to_sgvec(skb, sg, (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); + assoclen + ivlen + esp->clen + alen); + + if (!esp->inplace) { + int allocsize; + struct page_frag *pfrag = &x->xfrag; + + allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); + + spin_lock_bh(&x->lock); + if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { + spin_unlock_bh(&x->lock); + goto error; + } + + skb_shinfo(skb)->nr_frags = 1; + + page = pfrag->page; + get_page(page); + /* replace page frags in skb with new page */ + __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); + pfrag->offset = pfrag->offset + allocsize; + spin_unlock_bh(&x->lock); + + sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); + skb_to_sgvec(skb, dsg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + esp->clen + alen); + } -skip_cow2: if ((x->props.flags & XFRM_STATE_ESN)) aead_request_set_callback(req, 0, esp_output_done_esn, skb); else aead_request_set_callback(req, 0, esp_output_done, skb); - aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv); + aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv); aead_request_set_ad(req, assoclen); - seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + - ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); - memset(iv, 0, ivlen); - memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - 
min(ivlen, 8), + memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8), min(ivlen, 8)); ESP_SKB_CB(skb)->tmp = tmp; @@ -446,10 +412,60 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) error: return err; } +EXPORT_SYMBOL_GPL(esp6_output_tail); -static int esp_input_done2(struct sk_buff *skb, int err) +static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) +{ + int alen; + int blksize; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct esp_info esp; + + esp.inplace = true; + + esp.proto = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_ESP; + + /* skb is pure payload to encrypt */ + + aead = x->data; + alen = crypto_aead_authsize(aead); + + esp.tfclen = 0; + if (x->tfcpad) { + struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); + u32 padto; + + padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached)); + if (skb->len < padto) + esp.tfclen = padto - skb->len; + } + blksize = ALIGN(crypto_aead_blocksize(aead), 4); + esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize); + esp.plen = esp.clen - skb->len - esp.tfclen; + esp.tailen = esp.tfclen + esp.plen + alen; + + esp.nfrags = esp6_output_head(x, skb, &esp); + if (esp.nfrags < 0) + return esp.nfrags; + + esph = ip_esp_hdr(skb); + esph->spi = x->id.spi; + + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); + + skb_push(skb, -skb_network_offset(skb)); + + return esp6_output_tail(x, skb, &esp); +} + +int esp6_input_done2(struct sk_buff *skb, int err) { struct xfrm_state *x = xfrm_input_state(skb); + struct xfrm_offload *xo = xfrm_offload(skb); struct crypto_aead *aead = x->data; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); @@ -458,7 +474,8 @@ static int esp_input_done2(struct sk_buff *skb, int err) int padlen; u8 nexthdr[2]; - kfree(ESP_SKB_CB(skb)->tmp); + if (!xo || (xo && !(xo->flags & CRYPTO_DONE))) + kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; @@ -492,12 +509,13 @@ static int esp_input_done2(struct sk_buff *skb, int err) out: return err; } +EXPORT_SYMBOL_GPL(esp6_input_done2); static void esp_input_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; - xfrm_input_resume(skb, esp_input_done2(skb, err)); + xfrm_input_resume(skb, esp6_input_done2(skb, err)); } static void esp_input_restore_header(struct sk_buff *skb) @@ -619,7 +637,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) if ((x->props.flags & XFRM_STATE_ESN)) esp_input_restore_header(skb); - ret = esp_input_done2(skb, ret); + ret = esp6_input_done2(skb, ret); out: return ret; @@ -682,13 +700,17 @@ static int esp_init_aead(struct xfrm_state *x) char aead_name[CRYPTO_MAX_ALG_NAME]; struct crypto_aead *aead; int err; + u32 mask = 0; err = -ENAMETOOLONG; if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; - aead = crypto_alloc_aead(aead_name, 0, 0); + if (x->xso.offload_handle) + mask |= CRYPTO_ALG_ASYNC; + + aead = crypto_alloc_aead(aead_name, 0, mask); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; @@ -718,6 +740,7 @@ static int esp_init_authenc(struct xfrm_state *x) char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; + u32 mask = 0; err = -EINVAL; if (!x->ealg) @@ -743,7 +766,10 @@ static int esp_init_authenc(struct xfrm_state *x) goto error; } - aead = crypto_alloc_aead(authenc_name, 
0, 0); + if (x->xso.offload_handle) + mask |= CRYPTO_ALG_ASYNC; + + aead = crypto_alloc_aead(authenc_name, 0, mask); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index d914eb93204adbe02dceca9254da2598b6c8d99b..d950d43ba255442cf3079546a46b95693029f10c 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -45,27 +45,31 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head, if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) goto out; - err = secpath_set(skb); - if (err) - goto out; + xo = xfrm_offload(skb); + if (!xo || !(xo->flags & CRYPTO_DONE)) { + err = secpath_set(skb); + if (err) + goto out; - if (skb->sp->len == XFRM_MAX_DEPTH) - goto out; + if (skb->sp->len == XFRM_MAX_DEPTH) + goto out; - x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, - (xfrm_address_t *)&ipv6_hdr(skb)->daddr, - spi, IPPROTO_ESP, AF_INET6); - if (!x) - goto out; + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, + (xfrm_address_t *)&ipv6_hdr(skb)->daddr, + spi, IPPROTO_ESP, AF_INET6); + if (!x) + goto out; - skb->sp->xvec[skb->sp->len++] = x; - skb->sp->olen++; + skb->sp->xvec[skb->sp->len++] = x; + skb->sp->olen++; - xo = xfrm_offload(skb); - if (!xo) { - xfrm_state_put(x); - goto out; + xo = xfrm_offload(skb); + if (!xo) { + xfrm_state_put(x); + goto out; + } } + xo->flags |= XFRM_GRO; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; @@ -86,19 +90,216 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head, return NULL; } +static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb) +{ + struct ip_esp_hdr *esph; + struct ipv6hdr *iph = ipv6_hdr(skb); + struct xfrm_offload *xo = xfrm_offload(skb); + int proto = iph->nexthdr; + + skb_push(skb, -skb_network_offset(skb)); + esph = ip_esp_hdr(skb); + *skb_mac_header(skb) = IPPROTO_ESP; + + esph->spi = x->id.spi; + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); + + xo->proto = proto; +} + +static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + __u32 seq; + int err = 0; + struct sk_buff *skb2; + struct xfrm_state *x; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct sk_buff *segs = ERR_PTR(-EINVAL); + netdev_features_t esp_features = features; + struct xfrm_offload *xo = xfrm_offload(skb); + + if (!xo) + goto out; + + seq = xo->seq.low; + + x = skb->sp->xvec[skb->sp->len - 1]; + aead = x->data; + esph = ip_esp_hdr(skb); + + if (esph->spi != x->id.spi) + goto out; + + if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) + goto out; + + __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)); + + skb->encap_hdr_csum = 1; + + if (!(features & NETIF_F_HW_ESP)) + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + + segs = x->outer_mode->gso_segment(x, skb, esp_features); + if (IS_ERR_OR_NULL(segs)) + goto out; + + __skb_pull(skb, skb->data - skb_mac_header(skb)); + + skb2 = segs; + do { + struct sk_buff *nskb = skb2->next; + + xo = xfrm_offload(skb2); + xo->flags |= XFRM_GSO_SEGMENT; + xo->seq.low = seq; + xo->seq.hi = xfrm_replay_seqhi(x, seq); + + if(!(features & NETIF_F_HW_ESP)) + xo->flags |= CRYPTO_FALLBACK; + + x->outer_mode->xmit(x, skb2); + + err = x->type_offload->xmit(x, skb2, esp_features); + if (err) { + kfree_skb_list(segs); + return ERR_PTR(err); + } + + if (!skb_is_gso(skb2)) + seq++; + else + seq += skb_shinfo(skb2)->gso_segs; + + skb_push(skb2, skb2->mac_len); + skb2 = nskb; + } while (skb2); + +out: + return segs; +} + +static int 
esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb) +{ + struct crypto_aead *aead = x->data; + + if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead))) + return -EINVAL; + + skb->ip_summed = CHECKSUM_NONE; + + return esp6_input_done2(skb, 0); +} + +static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) +{ + int err; + int alen; + int blksize; + struct xfrm_offload *xo; + struct ip_esp_hdr *esph; + struct crypto_aead *aead; + struct esp_info esp; + bool hw_offload = true; + + esp.inplace = true; + + xo = xfrm_offload(skb); + + if (!xo) + return -EINVAL; + + if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || + (x->xso.dev != skb->dev)) { + xo->flags |= CRYPTO_FALLBACK; + hw_offload = false; + } + + esp.proto = xo->proto; + + /* skb is pure payload to encrypt */ + + aead = x->data; + alen = crypto_aead_authsize(aead); + + esp.tfclen = 0; + /* XXX: Add support for tfc padding here. */ + + blksize = ALIGN(crypto_aead_blocksize(aead), 4); + esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize); + esp.plen = esp.clen - skb->len - esp.tfclen; + esp.tailen = esp.tfclen + esp.plen + alen; + + if (!hw_offload || (hw_offload && !skb_is_gso(skb))) { + esp.nfrags = esp6_output_head(x, skb, &esp); + if (esp.nfrags < 0) + return esp.nfrags; + } + + esph = ip_esp_hdr(skb); + esph->spi = x->id.spi; + + skb_push(skb, -skb_network_offset(skb)); + + if (xo->flags & XFRM_GSO_SEGMENT) { + esph->seq_no = htonl(xo->seq.low); + } else { + int len; + + len = skb->len - sizeof(struct ipv6hdr); + if (len > IPV6_MAXPLEN) + len = 0; + + ipv6_hdr(skb)->payload_len = htons(len); + } + + if (hw_offload) + return 0; + + esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); + + err = esp6_output_tail(x, skb, &esp); + if (err < 0) + return err; + + secpath_reset(skb); + + return 0; +} + static const struct net_offload esp6_offload = { .callbacks = { .gro_receive = esp6_gro_receive, + .gso_segment = esp6_gso_segment, }, }; +static const struct xfrm_type_offload esp6_type_offload = { + .description = "ESP6 OFFLOAD", + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, + .input_tail = esp6_input_tail, + .xmit = esp6_xmit, + .encap = esp6_gso_encap, +}; + static int __init esp6_offload_init(void) { + if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) { + pr_info("%s: can't add xfrm type offload\n", __func__); + return -EAGAIN; + } + return inet6_add_offload(&esp6_offload, IPPROTO_ESP); } static void __exit esp6_offload_exit(void) { + if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0) + pr_info("%s: can't remove xfrm type offload\n", __func__); + inet6_del_offload(&esp6_offload, IPPROTO_ESP); } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index d32e2110aff286cf6c911048e96fc3abf6e10779..b636f1da9aece33532ccab5482aa72696f12db94 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -946,13 +946,13 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, if (opt->hopopt) ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); } -EXPORT_SYMBOL(ipv6_push_nfrag_opts); void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) { if (opt->dst1opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt); } +EXPORT_SYMBOL(ipv6_push_frag_opts); struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 
ce1aae4a7fc8fa9daf9f1502d0ac77d2be2aee31..b3df03e3faa07d96f170e5ff0b535ebb16216c05 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -146,8 +146,7 @@ static int ila_build_state(struct nlattr *nla, return -EINVAL; } - ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, - ila_nl_policy); + ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy, NULL); if (ret < 0) return ret; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 6fcb7cb49bb20dcfe518177177e3e0ac1c7d1091..8d128ba79b66de52d4d60628fe8df6cc9bccbc3b 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -544,6 +544,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) & IPV6_TCLASS_MASK; if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; + else + fl6.flowi6_mark = t->parms.fwmark; fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); @@ -603,6 +605,8 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) fl6.flowlabel |= ip6_flowlabel(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; + else + fl6.flowi6_mark = t->parms.fwmark; fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); @@ -780,6 +784,7 @@ static int ip6gre_tnl_change(struct ip6_tnl *t, t->parms.o_key = p->o_key; t->parms.i_flags = p->i_flags; t->parms.o_flags = p->o_flags; + t->parms.fwmark = p->fwmark; dst_cache_reset(&t->dst_cache); ip6gre_tnl_link_config(t, set_mtu); return 0; @@ -1249,6 +1254,9 @@ static void ip6gre_netlink_parms(struct nlattr *data[], if (data[IFLA_GRE_FLAGS]) parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]); + + if (data[IFLA_GRE_FWMARK]) + parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); } static int ip6gre_tap_init(struct net_device *dev) @@ -1470,6 +1478,8 @@ static size_t ip6gre_get_size(const struct net_device *dev) nla_total_size(2) + /* IFLA_GRE_ENCAP_DPORT */ nla_total_size(2) + + /* IFLA_GRE_FWMARK */ + nla_total_size(4) + 0; } @@ -1490,7 +1500,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || - nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags)) + nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || + nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, @@ -1525,6 +1536,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, + [IFLA_GRE_FWMARK] = { .type = NLA_U32 }, }; static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index c45b12b4431cbfcaef1f8452bae871bb176be478..9ee208a348f559b27dcc56ee80bf9ad51630cb45 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -49,6 +49,8 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { + void (*edemux)(struct sk_buff *skb); + /* if ingress device is enslaved to an L3 master device pass the * skb to its handler for processing */ @@ -60,8 +62,8 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) const struct inet6_protocol *ipprot; ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); - if (ipprot && ipprot->early_demux) - ipprot->early_demux(skb); + if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) + edemux(skb); } if 
(!skb_valid_dst(skb)) ip6_route_input(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a9692ec0cd6d0ba9fecba143d16191e8df0d9572..6eb2ae507500b30959134aa3ed35c53c7b79aad9 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -954,7 +954,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) opt->dst_opt[5] = IPV6_TLV_PADN; opt->dst_opt[6] = 1; - opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt; + opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt; opt->ops.opt_nflen = 8; } @@ -1178,7 +1178,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, if (encap_limit >= 0) { init_tel_txopt(&opt, encap_limit); - ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL, NULL); + ipv6_push_frag_opts(skb, &opt.ops, &proto); } /* Calculate max headroom for all the headers and adjust @@ -1258,6 +1258,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) & IPV6_TCLASS_MASK; if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; + else + fl6.flowi6_mark = t->parms.fwmark; } fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); @@ -1340,6 +1342,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowlabel |= ip6_flowlabel(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; + else + fl6.flowi6_mark = t->parms.fwmark; } fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); @@ -1469,6 +1473,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) t->parms.flowinfo = p->flowinfo; t->parms.link = p->link; t->parms.proto = p->proto; + t->parms.fwmark = p->fwmark; dst_cache_reset(&t->dst_cache); ip6_tnl_link_config(t); return 0; @@ -1920,6 +1925,9 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[], if (data[IFLA_IPTUN_COLLECT_METADATA]) parms->collect_md = true; + + if (data[IFLA_IPTUN_FWMARK]) + parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); } static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[], @@ -2056,6 +2064,8 @@ static size_t ip6_tnl_get_size(const struct net_device *dev) nla_total_size(2) + /* IFLA_IPTUN_COLLECT_METADATA */ nla_total_size(0) + + /* IFLA_IPTUN_FWMARK */ + nla_total_size(4) + 0; } @@ -2071,7 +2081,8 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) || - nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto)) + nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) || + nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) || @@ -2083,6 +2094,7 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev) if (parm->collect_md) if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA)) goto nla_put_failure; + return 0; nla_put_failure: @@ -2111,6 +2123,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG }, + [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 }, }; static struct rtnl_link_ops ip6_link_ops __read_mostly = { diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 3d8a3b63b4fdbec7d488194e21e0c9013f0ff6da..d67ef56454b25a088768e88fcd1f878a8c498f12 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -657,6 +657,7 @@ 
vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) t->parms.i_key = p->i_key; t->parms.o_key = p->o_key; t->parms.proto = p->proto; + t->parms.fwmark = p->fwmark; dst_cache_reset(&t->dst_cache); vti6_link_config(t); return 0; @@ -933,6 +934,9 @@ static void vti6_netlink_parms(struct nlattr *data[], if (data[IFLA_VTI_OKEY]) parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]); + + if (data[IFLA_VTI_FWMARK]) + parms->fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]); } static int vti6_newlink(struct net *src_net, struct net_device *dev, @@ -998,6 +1002,8 @@ static size_t vti6_get_size(const struct net_device *dev) nla_total_size(4) + /* IFLA_VTI_OKEY */ nla_total_size(4) + + /* IFLA_VTI_FWMARK */ + nla_total_size(4) + 0; } @@ -1010,7 +1016,8 @@ static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_in6_addr(skb, IFLA_VTI_LOCAL, &parm->laddr) || nla_put_in6_addr(skb, IFLA_VTI_REMOTE, &parm->raddr) || nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) || - nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key)) + nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key) || + nla_put_u32(skb, IFLA_VTI_FWMARK, parm->fwmark)) goto nla_put_failure; return 0; @@ -1024,6 +1031,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = { [IFLA_VTI_REMOTE] = { .len = sizeof(struct in6_addr) }, [IFLA_VTI_IKEY] = { .type = NLA_U32 }, [IFLA_VTI_OKEY] = { .type = NLA_U32 }, + [IFLA_VTI_FWMARK] = { .type = NLA_U32 }, }; static struct rtnl_link_ops vti6_link_ops __read_mostly = { diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index bf34d0950752ba1466fa806fd4f8ce0b802b0087..374997d26488ea38db7ed42ff7e6a55ede249021 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -816,7 +816,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, int notify, in6_dev = __in6_dev_get(dev); if (in6_dev) { in6_dev->cnf.mc_forwarding--; - inet6_netconf_notify_devconf(dev_net(dev), + inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); } @@ -975,7 +975,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt, in6_dev = __in6_dev_get(dev); if (in6_dev) { in6_dev->cnf.mc_forwarding++; - inet6_netconf_notify_devconf(dev_net(dev), + inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); } @@ -1598,7 +1598,8 @@ static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk) write_unlock_bh(&mrt_lock); if (!err) - inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_MC_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); rtnl_unlock(); @@ -1619,7 +1620,7 @@ int ip6mr_sk_done(struct sock *sk) mrt->mroute6_sk = NULL; net->ipv6.devconf_all->mc_forwarding--; write_unlock_bh(&mrt_lock); - inet6_netconf_notify_devconf(net, + inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv6.devconf_all); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 1bdc703cb9668bd77690c3d8f1ec0062d7b88c43..07403fa164e18aac704e3b77d1e2a094ad53c04c 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2463,7 +2463,6 @@ static void mld_ifc_event(struct inet6_dev *idev) mld_ifc_start_timer(idev, 1); } - static void igmp6_timer_handler(unsigned long data) { struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data; @@ -2599,6 +2598,44 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) write_unlock_bh(&idev->lock); } +static void ipv6_mc_rejoin_groups(struct inet6_dev *idev) +{ + struct 
ifmcaddr6 *pmc; + + ASSERT_RTNL(); + + if (mld_in_v1_mode(idev)) { + read_lock_bh(&idev->lock); + for (pmc = idev->mc_list; pmc; pmc = pmc->next) + igmp6_join_group(pmc); + read_unlock_bh(&idev->lock); + } else + mld_send_report(idev, NULL); +} + +static int ipv6_mc_netdev_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct inet6_dev *idev = __in6_dev_get(dev); + + switch (event) { + case NETDEV_RESEND_IGMP: + if (idev) + ipv6_mc_rejoin_groups(idev); + break; + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block igmp6_netdev_notifier = { + .notifier_call = ipv6_mc_netdev_event, +}; + #ifdef CONFIG_PROC_FS struct igmp6_mc_iter_state { struct seq_net_private p; @@ -2970,7 +3007,17 @@ int __init igmp6_init(void) return register_pernet_subsys(&igmp6_net_ops); } +int __init igmp6_late_init(void) +{ + return register_netdevice_notifier(&igmp6_netdev_notifier); +} + void igmp6_cleanup(void) { unregister_pernet_subsys(&igmp6_net_ops); } + +void igmp6_late_cleanup(void) +{ + unregister_netdevice_notifier(&igmp6_netdev_notifier); +} diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index cb1766724a4ca12ec9ccbc452c776261477c99f1..d310dc41209a39647b0ee7e8da2d38e90ac4333e 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -732,7 +732,7 @@ void ndisc_update(const struct net_device *dev, struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u8 icmp6_type, struct ndisc_options *ndopts) { - neigh_update(neigh, lladdr, new, flags); + neigh_update(neigh, lladdr, new, flags, 0); /* report ndisc ops about neighbour update */ ndisc_ops_update(dev, neigh, flags, icmp6_type, ndopts); } @@ -1418,6 +1418,8 @@ static void ndisc_router_discovery(struct sk_buff *skb) if (ri->prefix_len == 0 && !in6_dev->cnf.accept_ra_defrtr) continue; + if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen) + continue; if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) continue; rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3, @@ -1746,6 +1748,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, case NETDEV_CHANGEADDR: neigh_changeaddr(&nd_tbl, dev); fib6_run_gc(0, net, false); + /* fallthrough */ + case NETDEV_UP: idev = in6_dev_get(dev); if (!idev) break; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1e15c54fd5e27dbafaf4d4b0b3de9008a724559f..1f90644056ac784f1a455751796d5fa0d9441915 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -51,15 +51,6 @@ void *ip6t_alloc_initial_table(const struct xt_table *info) } EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table); -/* - We keep a set of rules for each CPU, so we can avoid write-locking - them in the softirq when updating the counters and therefore - only need to read-lock in the softirq; doing a write_lock_bh() in user - context stops packets coming through and allows user context to read - the counters or update the rules. - - Hence the start of any table is given by get_table() below. */ - /* Returns whether matches rule or not. 
*/ /* Performance critical - called for every packet */ static inline bool @@ -411,7 +402,7 @@ mark_source_chains(const struct xt_table_info *newinfo, to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; - struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); + struct ip6t_entry *e = entry0 + pos; if (!(valid_hooks & (1 << hook))) continue; @@ -453,14 +444,12 @@ mark_source_chains(const struct xt_table_info *newinfo, if (pos == oldpos) goto next; - e = (struct ip6t_entry *) - (entry0 + pos); + e = entry0 + pos; } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; - e = (struct ip6t_entry *) - (entry0 + pos + size); + e = entry0 + pos + size; if (pos + size >= newinfo->size) return 0; e->counters.pcnt = pos; @@ -475,16 +464,14 @@ mark_source_chains(const struct xt_table_info *newinfo, if (!xt_find_jump_offset(offsets, newpos, newinfo->number)) return 0; - e = (struct ip6t_entry *) - (entry0 + newpos); + e = entry0 + newpos; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; if (newpos >= newinfo->size) return 0; } - e = (struct ip6t_entry *) - (entry0 + newpos); + e = entry0 + newpos; e->counters.pcnt = pos; pos = newpos; } @@ -863,7 +850,7 @@ copy_entries_to_user(unsigned int total_size, const struct xt_entry_match *m; const struct xt_entry_target *t; - e = (struct ip6t_entry *)(loc_cpu_entry + off); + e = loc_cpu_entry + off; if (copy_to_user(userptr + off, e, sizeof(*e))) { ret = -EFAULT; goto free_counters; @@ -1258,7 +1245,7 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, int ret = 0; origsize = *size; - ce = (struct compat_ip6t_entry __user *)*dstptr; + ce = *dstptr; if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) @@ -1394,7 +1381,7 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, struct xt_entry_match *ematch; origsize = *size; - de = (struct ip6t_entry *)*dstptr; + de = *dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 4ef1ddd4bbbd813ff8e6ed46275ecdf48d5ff9a8..d3c4daa708b9014378f5cdec4b0cc811f9999d92 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -307,12 +307,17 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_ECN); synproxy_send_client_synack(net, skb, th, &opts); - return NF_DROP; + consume_skb(skb); + return NF_STOLEN; } else if (th->ack && !(th->fin || th->rst || th->syn)) { /* ACK from client */ - synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq)); - return NF_DROP; + if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) { + consume_skb(skb); + return NF_STOLEN; + } else { + return NF_DROP; + } } return XT_CONTINUE; @@ -388,10 +393,13 @@ static unsigned int ipv6_synproxy_hook(void *priv, * number match the one of first SYN. 
*/ if (synproxy_recv_client_ack(net, skb, th, &opts, - ntohl(th->seq) + 1)) + ntohl(th->seq) + 1)) { this_cpu_inc(snet->stats->cookie_retrans); - - return NF_DROP; + consume_skb(skb); + return NF_STOLEN; + } else { + return NF_DROP; + } } synproxy->isn = ntohl(th->ack_seq); @@ -430,20 +438,57 @@ static unsigned int ipv6_synproxy_hook(void *priv, return NF_ACCEPT; } +static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = { + { + .hook = ipv6_synproxy_hook, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, + { + .hook = ipv6_synproxy_hook, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, + }, +}; + static int synproxy_tg6_check(const struct xt_tgchk_param *par) { + struct synproxy_net *snet = synproxy_pernet(par->net); const struct ip6t_entry *e = par->entryinfo; + int err; if (!(e->ipv6.flags & IP6T_F_PROTO) || e->ipv6.proto != IPPROTO_TCP || e->ipv6.invflags & XT_INV_PROTO) return -EINVAL; - return nf_ct_netns_get(par->net, par->family); + err = nf_ct_netns_get(par->net, par->family); + if (err) + return err; + + if (snet->hook_ref6 == 0) { + err = nf_register_net_hooks(par->net, ipv6_synproxy_ops, + ARRAY_SIZE(ipv6_synproxy_ops)); + if (err) { + nf_ct_netns_put(par->net, par->family); + return err; + } + } + + snet->hook_ref6++; + return err; } static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) { + struct synproxy_net *snet = synproxy_pernet(par->net); + + snet->hook_ref6--; + if (snet->hook_ref6 == 0) + nf_unregister_net_hooks(par->net, ipv6_synproxy_ops, + ARRAY_SIZE(ipv6_synproxy_ops)); nf_ct_netns_put(par->net, par->family); } @@ -458,46 +503,14 @@ static struct xt_target synproxy_tg6_reg __read_mostly = { .me = THIS_MODULE, }; -static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = { - { - .hook = ipv6_synproxy_hook, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, - }, - { - .hook = ipv6_synproxy_hook, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_POST_ROUTING, - .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1, - }, -}; - static int __init synproxy_tg6_init(void) { - int err; - - err = nf_register_hooks(ipv6_synproxy_ops, - ARRAY_SIZE(ipv6_synproxy_ops)); - if (err < 0) - goto err1; - - err = xt_register_target(&synproxy_tg6_reg); - if (err < 0) - goto err2; - - return 0; - -err2: - nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops)); -err1: - return err; + return xt_register_target(&synproxy_tg6_reg); } static void __exit synproxy_tg6_exit(void) { xt_unregister_target(&synproxy_tg6_reg); - nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops)); } module_init(synproxy_tg6_init); diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index d2c2ccbfbe728f8ff2d7bc42c5e7feb2ae4ad97b..d5f028e33f658bd62deb5b9b0737c33366a22a83 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -221,8 +221,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, type = icmp6h->icmp6_type - 130; if (type >= 0 && type < sizeof(noct_valid_new) && noct_valid_new[type]) { - nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); - nf_conntrack_get(skb_nfct(skb)); + nf_ct_set(skb, NULL, IP_CT_UNTRACKED); return NF_ACCEPT; } diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 
888ecd106e5f37ba6c9e93bf1d1f161ddbaa280c..4a7ddeddbaabf1866a65cfda5d27e8e9c1e75b57 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c @@ -58,8 +58,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, #if IS_ENABLED(CONFIG_NF_CONNTRACK) nf_reset(skb); - nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); - nf_conntrack_get(skb_nfct(skb)); + nf_ct_set(skb, NULL, IP_CT_UNTRACKED); #endif if (hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_LOCAL_IN) { diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index e0be97e636a48f54c1488ca70ae97a9a13e8be61..bf3ad3e7b6479aeca80b24faefe381d1922ea290 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -273,13 +273,7 @@ nf_nat_ipv6_fn(void *priv, struct sk_buff *skb, if (!ct) return NF_ACCEPT; - /* Don't try to NAT if this packet is not conntracked */ - if (nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; + nat = nfct_nat(ct); switch (ctinfo) { case IP_CT_RELATED: diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c index 051b6a6bfff6ca7c0dd42ceb43a417260a11bd23..2297c9f073bac63c90ed0578b474fd53b556a6e2 100644 --- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c @@ -30,6 +30,7 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, const struct net_device *out) { enum ip_conntrack_info ctinfo; + struct nf_conn_nat *nat; struct in6_addr src; struct nf_conn *ct; struct nf_nat_range newrange; @@ -42,7 +43,9 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, &ipv6_hdr(skb)->daddr, 0, &src) < 0) return NF_DROP; - nfct_nat(ct)->masq_index = out->ifindex; + nat = nf_ct_nat_ext_add(ct); + if (nat) + nat->masq_index = out->ifindex; newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.in6 = src; diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 765facf03d45c47b9913b1adcdaf59b6fe09383c..43f91d9b086c7d5c66860ae2eef4ace040acbcff 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -159,7 +159,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { - nft_fib_store_result(dest, priv->result, pkt, + nft_fib_store_result(dest, priv, pkt, nft_in(pkt)->ifindex); return; } @@ -246,7 +246,7 @@ nft_fib6_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_fib6_type __read_mostly = { .name = "fib", - .select_ops = &nft_fib6_select_ops, + .select_ops = nft_fib6_select_ops, .policy = nft_fib_policy, .maxattr = NFTA_FIB_MAX, .family = NFPROTO_IPV6, diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index e3770abe688a3a9059456fe9195adbfcdfb73157..b5d54d4f995c0f4bade2e3f1c4def9616252ca55 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c @@ -26,7 +26,7 @@ #include #if IS_ENABLED(CONFIG_IPV6) -const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; +struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; EXPORT_SYMBOL(inet6_protos); int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 
fb174b590fd3b443a7503207d822dd02e7171290..a1bf426c959ba13d07798d9c66394cf8e6e5f2a4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2910,7 +2910,8 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, unsigned int pref; int err; - err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, + NULL); if (err < 0) goto errout; @@ -3263,7 +3264,8 @@ static int ip6_route_multipath_del(struct fib6_config *cfg) return last_err; } -static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct fib6_config cfg; int err; @@ -3280,7 +3282,8 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) } } -static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct fib6_config cfg; int err; @@ -3568,7 +3571,8 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) NLM_F_MULTI); } -static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) +static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[RTA_MAX+1]; @@ -3578,7 +3582,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) struct flowi6 fl6; int err, iif = 0, oif = 0; - err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, + extack); if (err < 0) goto errout; diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 85582257d3af88146d435ef6c2e98f0bbef94a41..6a495490d43e1018206e628b917840d7ae8f9085 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -26,17 +26,13 @@ #include #include #include -#ifdef CONFIG_DST_CACHE #include -#endif #ifdef CONFIG_IPV6_SEG6_HMAC #include #endif struct seg6_lwt { -#ifdef CONFIG_DST_CACHE struct dst_cache cache; -#endif struct seg6_iptunnel_encap tuninfo[0]; }; @@ -105,7 +101,7 @@ static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) hdrlen = (osrh->hdrlen + 1) << 3; tot_len = hdrlen + sizeof(*hdr); - err = pskb_expand_head(skb, tot_len, 0, GFP_ATOMIC); + err = skb_cow_head(skb, tot_len); if (unlikely(err)) return err; @@ -156,7 +152,7 @@ static int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) hdrlen = (osrh->hdrlen + 1) << 3; - err = pskb_expand_head(skb, hdrlen, 0, GFP_ATOMIC); + err = skb_cow_head(skb, hdrlen); if (unlikely(err)) return err; @@ -237,6 +233,9 @@ static int seg6_do_srh(struct sk_buff *skb) static int seg6_input(struct sk_buff *skb) { + struct dst_entry *orig_dst = skb_dst(skb); + struct dst_entry *dst = NULL; + struct seg6_lwt *slwt; int err; err = seg6_do_srh(skb); @@ -245,8 +244,30 @@ static int seg6_input(struct sk_buff *skb) return err; } + slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); + + preempt_disable(); + dst = dst_cache_get(&slwt->cache); + preempt_enable(); + skb_dst_drop(skb); - ip6_route_input(skb); + + if (!dst) { + ip6_route_input(skb); + dst = skb_dst(skb); + if (!dst->error) { + preempt_disable(); + dst_cache_set_ip6(&slwt->cache, dst, + &ipv6_hdr(skb)->saddr); + preempt_enable(); + } + } else { + skb_dst_set(skb, dst); + } + + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) 
+ return err; return dst_input(skb); } @@ -264,11 +285,9 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); -#ifdef CONFIG_DST_CACHE preempt_disable(); dst = dst_cache_get(&slwt->cache); preempt_enable(); -#endif if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -287,16 +306,18 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } -#ifdef CONFIG_DST_CACHE preempt_disable(); dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); preempt_enable(); -#endif } skb_dst_drop(skb); skb_dst_set(skb, dst); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) + goto drop; + return dst_output(net, sk, skb); drop: kfree_skb(skb); @@ -315,7 +336,7 @@ static int seg6_build_state(struct nlattr *nla, int err; err = nla_parse_nested(tb, SEG6_IPTUNNEL_MAX, nla, - seg6_iptunnel_policy); + seg6_iptunnel_policy, NULL); if (err < 0) return err; @@ -355,13 +376,11 @@ static int seg6_build_state(struct nlattr *nla, slwt = seg6_lwt_lwtunnel(newts); -#ifdef CONFIG_DST_CACHE err = dst_cache_init(&slwt->cache, GFP_KERNEL); if (err) { kfree(newts); return err; } -#endif memcpy(&slwt->tuninfo, tuninfo, tuninfo_len); @@ -375,12 +394,10 @@ static int seg6_build_state(struct nlattr *nla, return 0; } -#ifdef CONFIG_DST_CACHE static void seg6_destroy_state(struct lwtunnel_state *lwt) { dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache); } -#endif static int seg6_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwtstate) @@ -414,9 +431,7 @@ static int seg6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) static const struct lwtunnel_encap_ops seg6_iptun_ops = { .build_state = seg6_build_state, -#ifdef CONFIG_DST_CACHE .destroy_state = seg6_destroy_state, -#endif .output = seg6_output, .input = seg6_input, .fill_encap = seg6_fill_encap_info, diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 99853c6e33a8c3def99ecb56e288cce4a38a997b..61e5902f068732b10f734c7937c7539d418820d7 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -881,11 +881,12 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } - rt = ip_route_output_ports(tunnel->net, &fl4, NULL, - dst, tiph->saddr, - 0, 0, - IPPROTO_IPV6, RT_TOS(tos), - tunnel->parms.link); + flowi4_init_output(&fl4, tunnel->parms.link, tunnel->fwmark, + RT_TOS(tos), RT_SCOPE_UNIVERSE, IPPROTO_IPV6, + 0, dst, tiph->saddr, 0, 0, + sock_net_uid(tunnel->net, NULL)); + rt = ip_route_output_flow(tunnel->net, &fl4, NULL); + if (IS_ERR(rt)) { dev->stats.tx_carrier_errors++; goto tx_error_icmp; @@ -1071,7 +1072,8 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) } } -static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) +static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p, + __u32 fwmark) { struct net *net = t->net; struct sit_net *sitn = net_generic(net, sit_net_id); @@ -1085,8 +1087,9 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) ipip6_tunnel_link(sitn, t); t->parms.iph.ttl = p->iph.ttl; t->parms.iph.tos = p->iph.tos; - if (t->parms.link != p->link) { + if (t->parms.link != p->link || t->fwmark != fwmark) { t->parms.link = p->link; + t->fwmark = fwmark; ipip6_tunnel_bind_dev(t->dev); } dst_cache_reset(&t->dst_cache); @@ -1220,7 +1223,7 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) t = netdev_priv(dev); } - ipip6_tunnel_update(t, &p); + ipip6_tunnel_update(t, &p, t->fwmark); } if (t) { 
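For reference, a minimal open-coded sketch (not part of the patch): the flowi4_init_output() conversion in ipip6_tunnel_xmit() above matters because the old ip_route_output_ports() helper takes the flow mark from its socket argument, which is NULL here, so the mark was always 0. Expanded by hand, ignoring the flow flags and port fields (zero for this tunnel), the new lookup is roughly:

	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = tunnel->parms.link;	/* bound egress device, if any */
	fl4.flowi4_mark = tunnel->fwmark;	/* new: per-tunnel firewall mark */
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_proto = IPPROTO_IPV6;
	fl4.flowi4_uid = sock_net_uid(tunnel->net, NULL);
	fl4.daddr = dst;
	fl4.saddr = tiph->saddr;
	rt = ip_route_output_flow(tunnel->net, &fl4, NULL);

With the mark carried in the flow key, fwmark-based policy routing rules can steer the encapsulated packets into a dedicated routing table.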
@@ -1418,7 +1421,8 @@ static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[]) } static void ipip6_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm *parms, + __u32 *fwmark) { memset(parms, 0, sizeof(*parms)); @@ -1457,6 +1461,8 @@ static void ipip6_netlink_parms(struct nlattr *data[], if (data[IFLA_IPTUN_PROTO]) parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]); + if (data[IFLA_IPTUN_FWMARK]) + *fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); } /* This function returns true when ENCAP attributes are present in the nl msg */ @@ -1549,7 +1555,7 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, return err; } - ipip6_netlink_parms(data, &nt->parms); + ipip6_netlink_parms(data, &nt->parms, &nt->fwmark); if (ipip6_tunnel_locate(net, &nt->parms, 0)) return -EEXIST; @@ -1577,6 +1583,7 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif + __u32 fwmark = t->fwmark; int err; if (dev == sitn->fb_tunnel_dev) @@ -1588,7 +1595,7 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], return err; } - ipip6_netlink_parms(data, &p); + ipip6_netlink_parms(data, &p, &fwmark); if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) || (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr)) @@ -1602,7 +1609,7 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], } else t = netdev_priv(dev); - ipip6_tunnel_update(t, &p); + ipip6_tunnel_update(t, &p, fwmark); #ifdef CONFIG_IPV6_SIT_6RD if (ipip6_netlink_6rd_parms(data, &ip6rd)) @@ -1649,6 +1656,8 @@ static size_t ipip6_get_size(const struct net_device *dev) nla_total_size(2) + /* IFLA_IPTUN_ENCAP_DPORT */ nla_total_size(2) + + /* IFLA_IPTUN_FWMARK */ + nla_total_size(4) + 0; } @@ -1665,7 +1674,8 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, !!(parm->iph.frag_off & htons(IP_DF))) || nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) || - nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags)) + nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags) || + nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark)) goto nla_put_failure; #ifdef CONFIG_IPV6_SIT_6RD @@ -1715,6 +1725,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 }, [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, + [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 }, }; static void ipip6_dellink(struct net_device *dev, struct list_head *head) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 49fa2e8c3fa9212eef1198a1077a6726f0f1b6fc..8e42e8f54b705ed8780890c7434feeff1055599a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -101,12 +101,12 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) } } -static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff) +static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff) { - return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, - ipv6_hdr(skb)->saddr.s6_addr32, - tcp_hdr(skb)->dest, - tcp_hdr(skb)->source, tsoff); + return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32, + ipv6_hdr(skb)->saddr.s6_addr32, + tcp_hdr(skb)->dest, + tcp_hdr(skb)->source, tsoff); } static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, @@ -265,11 +265,6 @@ static int tcp_v6_connect(struct sock *sk, struct 
sockaddr *uaddr, sk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(sk, dst, NULL, NULL); - if (tcp_death_row->sysctl_tw_recycle && - !tp->rx_opt.ts_recent_stamp && - ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr)) - tcp_fetch_timewait_stamp(sk, dst); - icsk->icsk_ext_hdr_len = 0; if (opt) icsk->icsk_ext_hdr_len = opt->opt_flen + @@ -287,11 +282,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sk_set_txhash(sk); if (likely(!tp->repair)) { - seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, - sk->sk_v6_daddr.s6_addr32, - inet->inet_sport, - inet->inet_dport, - &tp->tsoffset); + seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32, + sk->sk_v6_daddr.s6_addr32, + inet->inet_sport, + inet->inet_dport, + &tp->tsoffset); if (!tp->write_seq) tp->write_seq = seq; } @@ -727,11 +722,8 @@ static void tcp_v6_init_req(struct request_sock *req, static struct dst_entry *tcp_v6_route_req(const struct sock *sk, struct flowi *fl, - const struct request_sock *req, - bool *strict) + const struct request_sock *req) { - if (strict) - *strict = true; return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP); } @@ -757,7 +749,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { .cookie_init_seq = cookie_v6_init_sequence, #endif .route_req = tcp_v6_route_req, - .init_seq = tcp_v6_init_sequence, + .init_seq_tsoff = tcp_v6_init_seq_and_tsoff, .send_synack = tcp_v6_send_synack, }; @@ -1301,8 +1293,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) goto discard; if (nsk != sk) { - sock_rps_save_rxhash(nsk, skb); - sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) @@ -1933,8 +1923,9 @@ struct proto tcpv6_prot = { .diag_destroy = tcp_abort, }; -static const struct inet6_protocol tcpv6_protocol = { +static struct inet6_protocol tcpv6_protocol = { .early_demux = tcp_v6_early_demux, + .early_demux_handler = tcp_v6_early_demux, .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e28082f0a307eb68ac13987580d8d9f65358212f..04862abfe4ec27d978adadd27735a9d19e3c4365 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -864,6 +865,69 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, return 0; } + +static struct sock *__udp6_lib_demux_lookup(struct net *net, + __be16 loc_port, const struct in6_addr *loc_addr, + __be16 rmt_port, const struct in6_addr *rmt_addr, + int dif) +{ + unsigned short hnum = ntohs(loc_port); + unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum); + unsigned int slot2 = hash2 & udp_table.mask; + struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; + const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); + struct sock *sk; + + udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { + if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) + return sk; + /* Only check first socket in chain */ + break; + } + return NULL; +} + +static void udp_v6_early_demux(struct sk_buff *skb) +{ + struct net *net = dev_net(skb->dev); + const struct udphdr *uh; + struct sock *sk; + struct dst_entry *dst; + int dif = skb->dev->ifindex; + + if (!pskb_may_pull(skb, skb_transport_offset(skb) + + sizeof(struct udphdr))) + return; + + uh = udp_hdr(skb); + + if (skb->pkt_type == PACKET_HOST) + sk = __udp6_lib_demux_lookup(net, uh->dest, + &ipv6_hdr(skb)->daddr, + uh->source, &ipv6_hdr(skb)->saddr, + dif); + else + return; 
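+ /* Annotation, not part of the original patch: the lookup above runs under RCU, so the matched socket may already be on its way to destruction. atomic_inc_not_zero_hint() takes a reference only if sk_refcnt is still non-zero; the hint (2) is just the value the counter is expected to hold for an established, hashed socket. */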
+ + if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)) + return; + + skb->sk = sk; + skb->destructor = sock_efree; + dst = READ_ONCE(sk->sk_rx_dst); + + if (dst) + dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); + if (dst) { + if (dst->flags & DST_NOCACHE) { + if (likely(atomic_inc_not_zero(&dst->__refcnt))) + skb_dst_set(skb, dst); + } else { + skb_dst_set_noref(skb, dst); + } + } +} + static __inline__ int udpv6_rcv(struct sk_buff *skb) { return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP); @@ -1378,7 +1442,9 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, } #endif -static const struct inet6_protocol udpv6_protocol = { +static struct inet6_protocol udpv6_protocol = { + .early_demux = udp_v6_early_demux, + .early_demux_handler = udp_v6_early_demux, .handler = udpv6_rcv, .err_handler = udpv6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 4439ee44c8b05461b8a66190c7800379ca5f105c..7a92c0f3191250118ce3572ca91ee9116460bce8 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -13,6 +13,7 @@ #include #include #include +#include /* Add encapsulation header. * @@ -26,6 +27,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) int hdr_len; iph = ipv6_hdr(skb); + skb_set_inner_transport_header(skb, skb_transport_offset(skb)); hdr_len = x->type->hdr_offset(x, skb, &prevhdr); skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); @@ -61,9 +63,41 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) return 0; } +static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x, + struct sk_buff *skb, + netdev_features_t features) +{ + const struct net_offload *ops; + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct xfrm_offload *xo = xfrm_offload(skb); + + skb->transport_header += x->props.header_len; + ops = rcu_dereference(inet6_offloads[xo->proto]); + if (likely(ops && ops->callbacks.gso_segment)) + segs = ops->callbacks.gso_segment(skb, features); + + return segs; +} + +static void xfrm6_transport_xmit(struct xfrm_state *x, struct sk_buff *skb) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + + skb_reset_mac_len(skb); + pskb_pull(skb, skb->mac_len + sizeof(struct ipv6hdr) + x->props.header_len); + + if (xo->flags & XFRM_GSO_SEGMENT) { + skb_reset_transport_header(skb); + skb->transport_header -= x->props.header_len; + } +} + + static struct xfrm_mode xfrm6_transport_mode = { .input = xfrm6_transport_input, .output = xfrm6_transport_output, + .gso_segment = xfrm6_transport_gso_segment, + .xmit = xfrm6_transport_xmit, .owner = THIS_MODULE, .encap = XFRM_MODE_TRANSPORT, }; diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 372855eeaf42551208adeeaff16b1cd06f08d3f2..02556e356f87e94830e76bb6f4f519e4cf1eb4d7 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -36,6 +36,9 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) struct ipv6hdr *top_iph; int dsfield; + skb_set_inner_network_header(skb, skb_network_offset(skb)); + skb_set_inner_transport_header(skb, skb_transport_offset(skb)); + skb_set_network_header(skb, -x->props.header_len); skb->mac_header = skb->network_header + offsetof(struct ipv6hdr, nexthdr); @@ -96,11 +99,35 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) return err; } +static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct 
xfrm_state *x, + struct sk_buff *skb, + netdev_features_t features) +{ + __skb_push(skb, skb->mac_len); + return skb_mac_gso_segment(skb, features); + +} + +static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + + if (xo->flags & XFRM_GSO_SEGMENT) { + skb->network_header = skb->network_header - x->props.header_len; + skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); + } + + skb_reset_mac_len(skb); + pskb_pull(skb, skb->mac_len + x->props.header_len); +} + static struct xfrm_mode xfrm6_tunnel_mode = { .input2 = xfrm6_mode_tunnel_input, .input = xfrm_prepare_input, .output2 = xfrm6_mode_tunnel_output, .output = xfrm6_prepare_output, + .gso_segment = xfrm6_mode_tunnel_gso_segment, + .xmit = xfrm6_mode_tunnel_xmit, .owner = THIS_MODULE, .encap = XFRM_MODE_TUNNEL, .flags = XFRM_MODE_FLAG_TUNNEL, diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 4d09ce6fa90e666bfdeda09cbdc8c7b0cb8b5824..8ae87d4ec5ff607d431513bb2ef42d5c2c93450a 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -73,11 +73,16 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) int mtu, ret = 0; struct dst_entry *dst = skb_dst(skb); + if (skb->ignore_df) + goto out; + mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - if (!skb->ignore_df && skb->len > mtu) { + if ((!skb_is_gso(skb) && skb->len > mtu) || + (skb_is_gso(skb) && + skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) { skb->dev = dst->dev; skb->protocol = htons(ETH_P_IPV6); @@ -89,7 +94,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ret = -EMSGSIZE; } - +out: return ret; } diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 8a9219ff2e77e0205de652974a44c5e4ba33b2c0..fa31ef29e3fa0bf3973e12e43c57b48eb7d1be45 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1168,11 +1168,10 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg) sipx->sipx_network = ipxif->if_netnum; memcpy(sipx->sipx_node, ipxif->if_node, sizeof(sipx->sipx_node)); - rc = -EFAULT; + rc = 0; if (copy_to_user(arg, &ifr, sizeof(ifr))) - break; + rc = -EFAULT; ipxitf_put(ipxif); - rc = 0; break; } case SIOCAIPXITFCRT: diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 31762f76cdb5f2a3ec322135068402be532218ed..deca20fb2ce2e5e50063e58ec2c0d9d6239d3bd4 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1707,11 +1707,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct kcm_clone info; struct socket *newsock = NULL; - if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - return -EFAULT; - err = kcm_clone(sock, &info, &newsock); - if (!err) { if (copy_to_user((void __user *)arg, &info, sizeof(info))) { diff --git a/net/key/af_key.c b/net/key/af_key.c index be8cecc6500214de68cc8872b48b38c840d3304f..c1950bb14735bc914b53c0476342f2679ad50b87 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3827,7 +3827,6 @@ static inline void pfkey_exit_proc(struct net *net) static struct xfrm_mgr pfkeyv2_mgr = { - .id = "pfkeyv2", .notify = pfkey_send_notify, .acquire = pfkey_send_acquire, .compile_policy = pfkey_compile_policy, diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index e37d9554da7b47df0571c41478e6e758b8a8e6f9..fa0342574b8986ad98f458febe8e3e6314171eae 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -120,7 +120,7 @@ static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) return sk->sk_user_data; } 
-static inline struct l2tp_net *l2tp_pernet(struct net *net)
+static inline struct l2tp_net *l2tp_pernet(const struct net *net)
 {
 	BUG_ON(!net);
 
@@ -217,27 +217,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk)
 	sock_put(sk);
 }
 
-/* Lookup a session by id in the global session list
- */
-static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
-{
-	struct l2tp_net *pn = l2tp_pernet(net);
-	struct hlist_head *session_list =
-		l2tp_session_id_hash_2(pn, session_id);
-	struct l2tp_session *session;
-
-	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(session, session_list, global_hlist) {
-		if (session->session_id == session_id) {
-			rcu_read_unlock_bh();
-			return session;
-		}
-	}
-	rcu_read_unlock_bh();
-
-	return NULL;
-}
-
 /* Session hash list.
  * The session_id SHOULD be random according to RFC2661, but several
  * L2TP implementations (Cisco and Microsoft) use incrementing
@@ -250,38 +229,10 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
 	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }
 
-/* Lookup a session by id
- */
-struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
-{
-	struct hlist_head *session_list;
-	struct l2tp_session *session;
-
-	/* In L2TPv3, session_ids are unique over all tunnels and we
-	 * sometimes need to look them up before we know the
-	 * tunnel.
-	 */
-	if (tunnel == NULL)
-		return l2tp_session_find_2(net, session_id);
-
-	session_list = l2tp_session_id_hash(tunnel, session_id);
-	read_lock_bh(&tunnel->hlist_lock);
-	hlist_for_each_entry(session, session_list, hlist) {
-		if (session->session_id == session_id) {
-			read_unlock_bh(&tunnel->hlist_lock);
-			return session;
-		}
-	}
-	read_unlock_bh(&tunnel->hlist_lock);
-
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(l2tp_session_find);
-
-/* Like l2tp_session_find() but takes a reference on the returned session.
+/* Lookup a session. A new reference is held on the returned session.
  * Optionally calls session->ref() too if do_ref is true.
  */
-struct l2tp_session *l2tp_session_get(struct net *net,
+struct l2tp_session *l2tp_session_get(const struct net *net,
 				      struct l2tp_tunnel *tunnel,
 				      u32 session_id, bool do_ref)
 {
@@ -356,7 +307,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
 */
-struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+						const char *ifname,
 						bool do_ref)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
@@ -427,7 +379,7 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
 
 /* Lookup a tunnel by id
 */
-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
+struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
 {
 	struct l2tp_tunnel *tunnel;
 	struct l2tp_net *pn = l2tp_pernet(net);
@@ -445,7 +397,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
 
-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
+struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
 	struct l2tp_tunnel *tunnel;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8ce7818c7a9d0578b79e70eff1916d3248e7604a..eec5ad2ebb93c32bca7a57b377f80df8e10b2591 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -230,18 +230,16 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
 	return tunnel;
 }
 
-struct l2tp_session *l2tp_session_get(struct net *net,
+struct l2tp_session *l2tp_session_get(const struct net *net,
 				      struct l2tp_tunnel *tunnel,
 				      u32 session_id, bool do_ref);
-struct l2tp_session *l2tp_session_find(struct net *net,
-				       struct l2tp_tunnel *tunnel,
-				       u32 session_id);
 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
 					  bool do_ref);
-struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+						const char *ifname,
 						bool do_ref);
-struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
 
 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
 		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 6fd41d7afe1ef27592970de333c75928264dc275..8b21af7321b928b4dcc5d7af3a6667380e9a949a 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -30,6 +30,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 
 #include "l2tp_core.h"
 
@@ -127,8 +130,13 @@ static const struct net_device_ops l2tp_eth_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 };
 
+static struct device_type l2tpeth_type = {
+	.name = "l2tpeth",
+};
+
 static void l2tp_eth_dev_setup(struct net_device *dev)
 {
+	SET_NETDEV_DEVTYPE(dev, &l2tpeth_type);
 	ether_setup(dev);
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
 	dev->features		|= NETIF_F_LLTX;
@@ -204,8 +212,58 @@ static void l2tp_eth_show(struct seq_file *m, void *arg)
 }
 #endif
 
+static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
+				struct l2tp_session *session,
+				struct net_device *dev)
+{
+	unsigned int overhead = 0;
+	struct dst_entry *dst;
+	u32 l3_overhead = 0;
+
+	/* if the encap is UDP, account for UDP header size */
+	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+		overhead += sizeof(struct udphdr);
+		dev->needed_headroom += sizeof(struct udphdr);
+	}
+	if (session->mtu != 0) {
+		dev->mtu = session->mtu;
+		dev->needed_headroom += session->hdr_len;
+		return;
+	}
+	lock_sock(tunnel->sock);
+	l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
+	release_sock(tunnel->sock);
+	if (l3_overhead == 0) {
+		/* L3 Overhead couldn't be identified, this could be
+		 * because tunnel->sock was NULL or the socket's
+		 * address family was not IPv4 or IPv6,
+		 * dev mtu stays at 1500.
+		 */
+		return;
+	}
+	/* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr
+	 * UDP overhead, if any, was already factored in above.
+	 */
+	overhead += session->hdr_len + ETH_HLEN + l3_overhead;
+
+	/* If PMTU discovery was enabled, use discovered MTU on L2TP device */
+	dst = sk_dst_get(tunnel->sock);
+	if (dst) {
+		/* dst_mtu will use PMTU if found, else fallback to intf MTU */
+		u32 pmtu = dst_mtu(dst);
+
+		if (pmtu != 0)
+			dev->mtu = pmtu;
+		dst_release(dst);
+	}
+	session->mtu = dev->mtu - overhead;
+	dev->mtu = session->mtu;
+	dev->needed_headroom += session->hdr_len;
+}
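The overhead computed by l2tp_eth_adjust_mtu() above is easy to sanity-check by hand. A self-contained model of the arithmetic - the 1500-byte underlay MTU, 8-byte session header and IPv4 underlay are assumed example values, not taken from this patch:

#include <stdio.h>

int main(void)
{
	unsigned int dev_mtu = 1500;	/* underlay device MTU (assumed) */
	unsigned int udp_hdr = 8;	/* sizeof(struct udphdr), UDP encap */
	unsigned int hdr_len = 8;	/* session->hdr_len (assumed) */
	unsigned int eth_hlen = 14;	/* ETH_HLEN, overlay L2 header */
	unsigned int l3_overhead = 20;	/* kernel_sock_ip_overhead(): IPv4 */

	unsigned int overhead = udp_hdr + hdr_len + eth_hlen + l3_overhead;

	/* mirrors: session->mtu = dev->mtu - overhead */
	printf("l2tpeth MTU: %u\n", dev_mtu - overhead);	/* prints 1450 */
	return 0;
}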
+
 static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id,
 			   u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
+	unsigned char name_assign_type;
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 	struct l2tp_tunnel *tunnel;
@@ -222,15 +280,12 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 	}
 
 	if (cfg->ifname) {
-		dev = dev_get_by_name(net, cfg->ifname);
-		if (dev) {
-			dev_put(dev);
-			rc = -EEXIST;
-			goto out;
-		}
 		strlcpy(name, cfg->ifname, IFNAMSIZ);
-	} else
+		name_assign_type = NET_NAME_USER;
+	} else {
 		strcpy(name, L2TP_ETH_DEV_NAME);
+		name_assign_type = NET_NAME_ENUM;
+	}
 
 	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
 				      peer_session_id, cfg);
@@ -239,7 +294,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 		goto out;
 	}
 
-	dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
+	dev = alloc_netdev(sizeof(*priv), name, name_assign_type,
 			   l2tp_eth_dev_setup);
 	if (!dev) {
 		rc = -ENOMEM;
@@ -247,12 +302,9 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 	}
 
 	dev_net_set(dev, net);
-	if (session->mtu == 0)
-		session->mtu = dev->mtu - session->hdr_len;
-	dev->mtu = session->mtu;
-	dev->needed_headroom += session->hdr_len;
 	dev->min_mtu = 0;
 	dev->max_mtu = ETH_MAX_MTU;
+	l2tp_eth_adjust_mtu(tunnel, session, dev);
 
 	priv = netdev_priv(dev);
 	priv->dev = dev;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 7e3e669baac42df9d0be2864b927ba4f390258e9..12cfcd0ca807396d18e061e9bcf4c29e760b2563 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -521,11 +521,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 		goto out;
 	}
 	session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
-	session = l2tp_session_find(net, tunnel, session_id);
-	if (session) {
-		ret = -EEXIST;
-		goto out;
-	}
 
 	if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
 		ret = -EINVAL;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 4456559cb056d1e32a621351120327cf27541bf7..1b7a4daf283c5191c19eaa4f3571d316c866906a 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -357,14 +357,14 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 	spin_lock_init(&tid_agg_rx->reorder_lock);
 
 	/* rx timer */
-	tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
-	tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer_deferrable(&tid_agg_rx->session_timer);
+	setup_deferrable_timer(&tid_agg_rx->session_timer,
+			       sta_rx_agg_session_timer_expired,
+			       (unsigned long)&sta->timer_to_tid[tid]);
 
 	/* rx reorder timer */
-	tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
-	tid_agg_rx->reorder_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&tid_agg_rx->reorder_timer);
+	setup_timer(&tid_agg_rx->reorder_timer,
+		    sta_rx_agg_reorder_timer_expired,
+		    (unsigned long)&sta->timer_to_tid[tid]);
 
 	/* prepare reordering buffer */
 	tid_agg_rx->reorder_buf =
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 45319cc01121a9eb17d49185a07a9675e7b2995d..60e2a62f7bef2fbb014f3a40403cf498a75d429c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -670,14 +670,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	tid_tx->timeout = timeout;
 
 	/* response timer */
-	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
-	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&tid_tx->addba_resp_timer);
+	setup_timer(&tid_tx->addba_resp_timer,
+		    sta_addba_resp_timer_expired,
+		    (unsigned long)&sta->timer_to_tid[tid]);
 
 	/* tx timer */
-	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
-	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer_deferrable(&tid_tx->session_timer);
+	setup_deferrable_timer(&tid_tx->session_timer,
+			       sta_tx_agg_session_timer_expired,
+			       (unsigned long)&sta->timer_to_tid[tid]);
 
 	/* assign a dialog token */
 	sta->ampdu_mlme.dialog_token_allocator++;
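The two conversions above are mechanical: setup_timer() (and its deferrable variant) folds the three-step open-coded initialization into one call. A minimal userspace mock of the pattern - the structure and helper names are stand-ins, since the kernel timer types are not available outside the tree:

#include <stdio.h>

/* stand-in for the kernel's struct timer_list */
struct mock_timer {
	void (*function)(unsigned long);
	unsigned long data;
};

/* equivalent to: t->function = fn; t->data = data; init_timer(t); */
static void mock_setup_timer(struct mock_timer *t,
			     void (*fn)(unsigned long), unsigned long data)
{
	t->function = fn;
	t->data = data;
}

static void session_expired(unsigned long data)
{
	printf("timer fired, data=%lu\n", data);
}

int main(void)
{
	struct mock_timer timer;

	mock_setup_timer(&timer, session_expired, 42UL);
	timer.function(timer.data);	/* simulate expiry */
	return 0;
}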
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ac879bb17870d4602fd151504f68d26c21eeac9b..6c2e6060cd549e3c4c39e78cc20ca9302472c4ac 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3,7 +3,7 @@
 *
 * Copyright 2006-2010	Johannes Berg
 * Copyright 2013-2015  Intel Mobile Communications GmbH
- * Copyright (C) 2015-2016 Intel Deutschland GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
 *
 * This file is GPLv2 as found in COPYING.
 */
@@ -22,11 +22,98 @@
 #include "mesh.h"
 #include "wme.h"
 
+static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata,
+					 struct vif_params *params)
+{
+	bool mu_mimo_groups = false;
+	bool mu_mimo_follow = false;
+
+	if (params->vht_mumimo_groups) {
+		u64 membership;
+
+		BUILD_BUG_ON(sizeof(membership) != WLAN_MEMBERSHIP_LEN);
+
+		memcpy(sdata->vif.bss_conf.mu_group.membership,
+		       params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN);
+		memcpy(sdata->vif.bss_conf.mu_group.position,
+		       params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
+		       WLAN_USER_POSITION_LEN);
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+		/* don't care about endianness - just check for 0 */
+		memcpy(&membership, params->vht_mumimo_groups,
+		       WLAN_MEMBERSHIP_LEN);
+		mu_mimo_groups = membership != 0;
+	}
+
+	if (params->vht_mumimo_follow_addr) {
+		mu_mimo_follow =
+			is_valid_ether_addr(params->vht_mumimo_follow_addr);
+		ether_addr_copy(sdata->u.mntr.mu_follow_addr,
+				params->vht_mumimo_follow_addr);
+	}
+
+	sdata->vif.mu_mimo_owner = mu_mimo_groups || mu_mimo_follow;
+}
+
+static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
+				     struct vif_params *params)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_sub_if_data *monitor_sdata;
+
+	/* check flags first */
+	if (params->flags && ieee80211_sdata_running(sdata)) {
+		u32 mask = MONITOR_FLAG_COOK_FRAMES | MONITOR_FLAG_ACTIVE;
+
+		/*
+		 * Prohibit MONITOR_FLAG_COOK_FRAMES and
+		 * MONITOR_FLAG_ACTIVE to be changed while the
+		 * interface is up.
+		 * Else we would need to add a lot of cruft
+		 * to update everything:
+		 *	cooked_mntrs, monitor and all fif_* counters
+		 *	reconfigure hardware
+		 */
+		if ((params->flags & mask) != (sdata->u.mntr.flags & mask))
+			return -EBUSY;
+	}
+
+	/* also validate MU-MIMO change */
+	monitor_sdata = rtnl_dereference(local->monitor_sdata);
+
+	if (!monitor_sdata &&
+	    (params->vht_mumimo_groups || params->vht_mumimo_follow_addr))
+		return -EOPNOTSUPP;
+
+	/* apply all changes now - no failures allowed */
+
+	if (monitor_sdata)
+		ieee80211_set_mu_mimo_follow(monitor_sdata, params);
+
+	if (params->flags) {
+		if (ieee80211_sdata_running(sdata)) {
+			ieee80211_adjust_monitor_flags(sdata, -1);
+			sdata->u.mntr.flags = params->flags;
+			ieee80211_adjust_monitor_flags(sdata, 1);
+
+			ieee80211_configure_filter(local);
+		} else {
+			/*
+			 * Because the interface is down, ieee80211_do_stop
+			 * and ieee80211_do_open take care of "everything"
+			 * mentioned in the comment above.
+			 */
+			sdata->u.mntr.flags = params->flags;
+		}
+	}
+
+	return 0;
+}
+
 static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
 						const char *name,
 						unsigned char name_assign_type,
 						enum nl80211_iftype type,
-						u32 *flags,
 						struct vif_params *params)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -38,9 +125,14 @@ static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
 	if (err)
 		return ERR_PTR(err);
 
-	if (type == NL80211_IFTYPE_MONITOR && flags) {
-		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
-		sdata->u.mntr.flags = *flags;
+	sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+	if (type == NL80211_IFTYPE_MONITOR) {
+		err = ieee80211_set_mon_options(sdata, params);
+		if (err) {
+			ieee80211_if_remove(sdata);
+			return NULL;
+		}
 	}
 
 	return wdev;
@@ -55,7 +147,7 @@ static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
 
 static int ieee80211_change_iface(struct wiphy *wiphy,
 				  struct net_device *dev,
-				  enum nl80211_iftype type, u32 *flags,
+				  enum nl80211_iftype type,
 				  struct vif_params *params)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -75,58 +167,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
 	}
 
 	if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
-		struct ieee80211_local *local = sdata->local;
-		struct ieee80211_sub_if_data *monitor_sdata;
-		u32 mu_mntr_cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
-
-		monitor_sdata = rtnl_dereference(local->monitor_sdata);
-		if (monitor_sdata &&
-		    wiphy_ext_feature_isset(wiphy, mu_mntr_cap_flag)) {
-			memcpy(monitor_sdata->vif.bss_conf.mu_group.membership,
-			       params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN);
-			memcpy(monitor_sdata->vif.bss_conf.mu_group.position,
-			       params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
-			       WLAN_USER_POSITION_LEN);
-			monitor_sdata->vif.mu_mimo_owner = true;
-			ieee80211_bss_info_change_notify(monitor_sdata,
-							 BSS_CHANGED_MU_GROUPS);
-
-			ether_addr_copy(monitor_sdata->u.mntr.mu_follow_addr,
-					params->macaddr);
-		}
-
-		if (!flags)
-			return 0;
-
-		if (ieee80211_sdata_running(sdata)) {
-			u32 mask = MONITOR_FLAG_COOK_FRAMES |
-				   MONITOR_FLAG_ACTIVE;
-
-			/*
-			 * Prohibit MONITOR_FLAG_COOK_FRAMES and
-			 * MONITOR_FLAG_ACTIVE to be changed while the
-			 * interface is up.
-			 * Else we would need to add a lot of cruft
-			 * to update everything:
-			 *	cooked_mntrs, monitor and all fif_* counters
-			 *	reconfigure hardware
-			 */
-			if ((*flags & mask) != (sdata->u.mntr.flags & mask))
-				return -EBUSY;
-
-			ieee80211_adjust_monitor_flags(sdata, -1);
-			sdata->u.mntr.flags = *flags;
-			ieee80211_adjust_monitor_flags(sdata, 1);
-
-			ieee80211_configure_filter(local);
-		} else {
-			/*
-			 * Because the interface is down, ieee80211_do_stop
-			 * and ieee80211_do_open take care of "everything"
-			 * mentioned in the comment above.
-			 */
-			sdata->u.mntr.flags = *flags;
-		}
+		ret = ieee80211_set_mon_options(sdata, params);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -617,10 +660,11 @@ void sta_set_rate_info_tx(struct sta_info *sta,
 		int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
 		u16 brate;
 
-		sband = sta->local->hw.wiphy->bands[
-				ieee80211_get_sdata_band(sta->sdata)];
-		brate = sband->bitrates[rate->idx].bitrate;
-		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+		sband = ieee80211_get_sband(sta->sdata);
+		if (sband) {
+			brate = sband->bitrates[rate->idx].bitrate;
+			rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+		}
 	}
 	if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 		rinfo->bw = RATE_INFO_BW_40;
@@ -696,11 +740,8 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 		return 0;
 
 	mutex_lock(&local->mtx);
-	mutex_lock(&local->iflist_mtx);
 	if (local->use_chanctx) {
-		sdata = rcu_dereference_protected(
-				local->monitor_sdata,
-				lockdep_is_held(&local->iflist_mtx));
+		sdata = rtnl_dereference(local->monitor_sdata);
 		if (sdata) {
 			ieee80211_vif_release_channel(sdata);
 			ret = ieee80211_vif_use_channel(sdata, chandef,
@@ -713,7 +754,6 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 	if (ret == 0)
 		local->monitor_chandef = *chandef;
-	mutex_unlock(&local->iflist_mtx);
 	mutex_unlock(&local->mtx);
 
 	return ret;
@@ -1214,10 +1254,11 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 	int ret = 0;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	u32 mask, set;
 
-	sband = local->hw.wiphy->bands[band];
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return -EINVAL;
 
 	mask = params->sta_flags_mask;
 	set = params->sta_flags_set;
@@ -1350,7 +1391,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 		ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
 					 sband, params->supported_rates,
 					 params->supported_rates_len,
-					 &sta->sta.supp_rates[band]);
+					 &sta->sta.supp_rates[sband->band]);
 	}
 
 	if (params->ht_capa)
@@ -1366,8 +1407,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 		/* returned value is only needed for rc update, but the
 		 * rc isn't initialized here yet, so ignore it
 		 */
-		__ieee80211_vht_handle_opmode(sdata, sta,
-					      params->opmode_notif, band);
+		__ieee80211_vht_handle_opmode(sdata, sta, params->opmode_notif,
+					      sband->band);
 	}
 
 	if (params->support_p2p_ps >= 0)
@@ -2005,13 +2046,15 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
 				struct bss_parameters *params)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	enum nl80211_band band;
+	struct ieee80211_supported_band *sband;
 	u32 changed = 0;
 
 	if (!sdata_dereference(sdata->u.ap.beacon, sdata))
 		return -ENOENT;
 
-	band = ieee80211_get_sdata_band(sdata);
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return -EINVAL;
 
 	if (params->use_cts_prot >= 0) {
 		sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
@@ -2024,7 +2067,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
 	}
 
 	if (!sdata->vif.bss_conf.use_short_slot &&
-	    band == NL80211_BAND_5GHZ) {
+	    sband->band == NL80211_BAND_5GHZ) {
 		sdata->vif.bss_conf.use_short_slot = true;
 		changed |= BSS_CHANGED_ERP_SLOT;
 	}
@@ -2037,11 +2080,12 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
 
 	if (params->basic_rates) {
 		ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
-					 wiphy->bands[band],
+					 wiphy->bands[sband->band],
 					 params->basic_rates,
 					 params->basic_rates_len,
 					 &sdata->vif.bss_conf.basic_rates);
 		changed |= BSS_CHANGED_BASIC_RATES;
+		ieee80211_check_rate_mask(sdata);
 	}
 
 	if (params->ap_isolate >= 0) {
@@ -2198,7 +2242,8 @@ ieee80211_sched_scan_start(struct wiphy *wiphy,
 }
 
 static int
-ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+			  u64 reqid)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 
@@ -2630,6 +2675,33 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
 	bss_conf->cqm_rssi_thold = rssi_thold;
 	bss_conf->cqm_rssi_hyst = rssi_hyst;
+	bss_conf->cqm_rssi_low = 0;
+	bss_conf->cqm_rssi_high = 0;
+	sdata->u.mgd.last_cqm_event_signal = 0;
+
+	/* tell the driver upon association, unless already associated */
+	if (sdata->u.mgd.associated &&
+	    sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
+
+	return 0;
+}
+
+static int ieee80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
+					       struct net_device *dev,
+					       s32 rssi_low, s32 rssi_high)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_vif *vif = &sdata->vif;
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+	if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
+		return -EOPNOTSUPP;
+
+	bss_conf->cqm_rssi_low = rssi_low;
+	bss_conf->cqm_rssi_high = rssi_high;
+	bss_conf->cqm_rssi_thold = 0;
+	bss_conf->cqm_rssi_hyst = 0;
 	sdata->u.mgd.last_cqm_event_signal = 0;
 
 	/* tell the driver upon association, unless already associated */
@@ -2658,6 +2730,21 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
 		return ret;
 	}
 
+	/*
+	 * If active, validate the setting and reject it if it doesn't leave
+	 * at least one basic rate usable, since we really have to be able
+	 * to send something, and if we're an AP we have to be able to do
+	 * so at a basic rate so that all clients can receive it.
+	 */
+	if (rcu_access_pointer(sdata->vif.chanctx_conf) &&
+	    sdata->vif.bss_conf.chandef.chan) {
+		u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+		enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
+
+		if (!(mask->control[band].legacy & basic_rates))
+			return -EINVAL;
+	}
+
 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		struct ieee80211_supported_band *sband = wiphy->bands[i];
 		int j;
@@ -3639,6 +3726,7 @@ const struct cfg80211_ops mac80211_config_ops = {
 	.mgmt_tx = ieee80211_mgmt_tx,
 	.mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait,
 	.set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
+	.set_cqm_rssi_range_config = ieee80211_set_cqm_rssi_range_config,
 	.mgmt_frame_register = ieee80211_mgmt_frame_register,
 	.set_antenna = ieee80211_set_antenna,
 	.get_antenna = ieee80211_get_antenna,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 98999d3d5262743cf32ef92480772f034e70baf1..6db09fa18269f3a12032345afa5af0fa336a8c50 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -425,7 +425,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	case NL80211_CHAN_WIDTH_5:
 	case NL80211_CHAN_WIDTH_10:
 		cfg80211_chandef_create(&chandef, cbss->channel,
-					NL80211_CHAN_WIDTH_20_NOHT);
+					NL80211_CHAN_NO_HT);
 		chandef.width = sdata->u.ibss.chandef.width;
 		break;
 	case NL80211_CHAN_WIDTH_80:
@@ -437,7 +437,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	default:
 		/* fall back to 20 MHz for unsupported modes */
 		cfg80211_chandef_create(&chandef, cbss->channel,
-					NL80211_CHAN_WIDTH_20_NOHT);
+					NL80211_CHAN_NO_HT);
 		break;
 	}
 
@@ -992,7 +992,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
 	enum nl80211_band band = rx_status->band;
 	enum nl80211_bss_scan_width scan_width;
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+	struct ieee80211_supported_band *sband;
 	bool rates_updated = false;
 	u32 supp_rates = 0;
 
@@ -1002,6 +1002,10 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
 	if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid))
 		return;
 
+	sband = local->hw.wiphy->bands[band];
+	if (WARN_ON(!sband))
+		return;
+
 	rcu_read_lock();
 	sta = sta_info_get(sdata, mgmt->sa);
 
@@ -1014,9 +1018,9 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
 		prev_rates = sta->sta.supp_rates[band];
 		/* make sure mandatory rates are always added */
 		scan_width = NL80211_BSS_CHAN_WIDTH_20;
-		if (rx_status->flag & RX_FLAG_5MHZ)
+		if (rx_status->bw == RATE_INFO_BW_5)
 			scan_width = NL80211_BSS_CHAN_WIDTH_5;
-		if (rx_status->flag & RX_FLAG_10MHZ)
+		else if (rx_status->bw == RATE_INFO_BW_10)
 			scan_width = NL80211_BSS_CHAN_WIDTH_10;
 
 		sta->sta.supp_rates[band] = supp_rates |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0e718437d080e7258efe75bcba26ce65990671ce..f8f6c148f5545feeb78c16ca6f33ebf5e02d0ab5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -839,6 +839,8 @@ struct txq_info {
 struct ieee80211_if_mntr {
 	u32 flags;
 	u8 mu_follow_addr[ETH_ALEN] __aligned(2);
+
+	struct list_head list;
 };
 
 /**
@@ -999,21 +1001,6 @@ sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
 	lockdep_assert_held(&sdata->wdev.mtx);
 }
 
-static inline enum nl80211_band
-ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
-{
-	enum nl80211_band band = NL80211_BAND_2GHZ;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	rcu_read_lock();
-	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-	if (!WARN_ON(!chanctx_conf))
-		band = chanctx_conf->def.chan->band;
-	rcu_read_unlock();
-
-	return band;
-}
-
 static inline int
 ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
 {
@@ -1259,6 +1246,7 @@ struct ieee80211_local {
 	/* see iface.c */
 	struct list_head interfaces;
+	struct list_head mon_list; /* only that are IFF_UP && !cooked */
 	struct mutex iflist_mtx;
 
 	/*
@@ -1418,6 +1406,27 @@ IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev)
 	return container_of(wdev, struct ieee80211_sub_if_data, wdev);
 }
 
+static inline struct ieee80211_supported_band *
+ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	enum nl80211_band band;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
+	band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
+	return local->hw.wiphy->bands[band];
+}
+
 /* this struct represents 802.11n's RA/TID combination */
 struct ieee80211_ra_tid {
 	u8 ra[ETH_ALEN];
@@ -1474,6 +1483,7 @@ struct ieee802_11_elems {
 	const u8 *opmode_notif;
 	const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
 	const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
+	const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
 
 	/* length of them, respectively */
 	u8 ext_capab_len;
@@ -1527,9 +1537,9 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
 		      status->flag & RX_FLAG_MACTIME_END);
 	if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
 		return true;
-	/* can't handle HT/VHT preamble yet */
+	/* can't handle non-legacy preamble yet */
 	if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
-	    !(status->flag & (RX_FLAG_HT | RX_FLAG_VHT)))
+	    status->encoding != RX_ENC_LEGACY)
 		return true;
 	return false;
 }
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5bb0c501281954dfe656c5e886c9032b958061be..3bd5b81f5d81ec7d73686043c2683630e24ecde4 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -676,7 +676,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
 	set_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-	if (sdata->vif.type == NL80211_IFTYPE_WDS) {
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_WDS:
 		/* Create STA entry for the WDS peer */
 		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
 				     GFP_KERNEL);
@@ -697,8 +698,17 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		rate_control_rate_init(sta);
 		netif_carrier_on(dev);
-	} else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
 		rcu_assign_pointer(local->p2p_sdata, sdata);
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
+			break;
+		list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
+		break;
+	default:
+		break;
 	}
 
 	/*
@@ -817,6 +827,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	case NL80211_IFTYPE_AP:
 		cancel_work_sync(&sdata->u.ap.request_smps_work);
 		break;
+	case NL80211_IFTYPE_MONITOR:
+		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
+			break;
+		list_del_rcu(&sdata->u.mntr.list);
+		break;
 	default:
 		break;
 	}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 56fb47953b72420b3ceb4e3f5d27dc9c4d23928b..8aa1f5b6a05145b0838b494abdf2b2c5b0aa17ab 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -253,6 +253,7 @@ static void ieee80211_restart_work(struct work_struct *work)
 	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
 	     "%s called with hardware scan in progress\n", __func__);
 
+	flush_work(&local->radar_detected_work);
 	rtnl_lock();
 	list_for_each_entry(sdata, &local->interfaces, list)
 		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
@@ -603,6 +604,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 		 ARRAY_SIZE(local->ext_capa);
 
 	INIT_LIST_HEAD(&local->interfaces);
+	INIT_LIST_HEAD(&local->mon_list);
 
 	__hw_addr_init(&local->mc_list);
 
@@ -1186,6 +1188,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 	cancel_work_sync(&local->reconfig_filter);
 	cancel_work_sync(&local->tdls_chsw_work);
 	flush_work(&local->sched_scan_stopped_work);
+	flush_work(&local->radar_detected_work);
 
 	ieee80211_clear_tx_pending(local);
 	rate_control_deinitialize(local);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6e7b6a07b7d536a64f7e9ec296adf2bbba8d961c..737e1f082b0ddd55e03b30bf215a5fbb68943143 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -63,6 +63,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u32 basic_rates = 0;
 	struct cfg80211_chan_def sta_chan_def;
+	struct ieee80211_supported_band *sband;
 
 	/*
 	 * As support for each feature is added, check for matching
@@ -83,7 +84,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 	     (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
 		return false;
 
-	ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata),
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return false;
+
+	ieee80211_sta_get_rates(sdata, ie, sband->band,
 				&basic_rates);
 
 	if (sdata->vif.bss_conf.basic_rates != basic_rates)
@@ -399,12 +404,13 @@ static int mesh_add_ds_params_ie(struct ieee80211_sub_if_data *sdata,
 int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
 		       struct sk_buff *skb)
 {
-	struct ieee80211_local *local = sdata->local;
-	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 
-	sband = local->hw.wiphy->bands[band];
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return -EINVAL;
+
 	if (!sband->ht_cap.ht_supported ||
 	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
 	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
@@ -462,12 +468,13 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
 int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
 			struct sk_buff *skb)
 {
-	struct ieee80211_local *local = sdata->local;
-	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 
-	sband = local->hw.wiphy->bands[band];
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return -EINVAL;
+
 	if (!sband->vht_cap.vht_supported ||
 	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
 	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
@@ -916,12 +923,16 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 	struct cfg80211_csa_settings params;
 	struct ieee80211_csa_ie csa_ie;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
+	struct ieee80211_supported_band *sband;
 	int err;
 	u32 sta_flags;
 
 	sdata_assert_lock(sdata);
 
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return false;
+
 	sta_flags = IEEE80211_STA_DISABLE_VHT;
 	switch (sdata->vif.bss_conf.chandef.width) {
 	case NL80211_CHAN_WIDTH_20_NOHT:
@@ -935,7 +946,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 	memset(&params, 0, sizeof(params));
 	memset(&csa_ie, 0, sizeof(csa_ie));
-	err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
+	err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
 					   sta_flags, sdata->vif.addr,
 					   &csa_ie);
 	if (err < 0)
@@ -1100,8 +1111,14 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
 		return;
 
-	if (mesh_matches_local(sdata, &elems))
-		mesh_neighbour_update(sdata, mgmt->sa, &elems);
+	if (mesh_matches_local(sdata, &elems)) {
+		mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n",
+			sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal);
+		if (!sdata->u.mesh.user_mpm ||
+		    sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
+		    sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
+			mesh_neighbour_update(sdata, mgmt->sa, &elems);
+	}
 
 	if (ifmsh->sync_ops)
 		ifmsh->sync_ops->rx_bcn_presp(sdata,
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index b747c9645e432bffe5b9d266a51e7ffca3b75ec3..4005edd71fe86db5415bf0bb9803dac7248ae77a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -16,6 +16,7 @@
 #define TEST_FRAME_LEN	8192
 #define MAX_METRIC	0xffffffff
 #define ARITH_SHIFT	8
+#define LINK_FAIL_THRESH 95
 
 #define MAX_PREQ_QUEUE_LEN	64
 
@@ -307,10 +308,12 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
 
 	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
-	/* moving average, scaled to 100 */
-	sta->mesh->fail_avg =
-		((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
-	if (sta->mesh->fail_avg > 95)
+	/* moving average, scaled to 100.
+	 * feed failure as 100 and success as 0
+	 */
+	ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
+	if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
+			LINK_FAIL_THRESH)
 		mesh_plink_broken(sta);
 }
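The open-coded ((80 * avg + 5) / 100 + 20 * failed) average above is replaced by the generic EWMA helpers. A userspace model of how those helpers behave - the precision of 16 and weight of 8 are assumed demo parameters here; the real ones are fixed by the DECLARE_EWMA() declaration for mesh_fail_avg, which is not part of this hunk:

#include <stdio.h>

/* model of the kernel's DECLARE_EWMA() update: the average is kept
 * scaled by 2^precision so integer division doesn't stall it */
#define EWMA_PREC	16
#define EWMA_WEIGHT	8

static unsigned long ewma_add(unsigned long internal, unsigned long sample)
{
	return (internal * (EWMA_WEIGHT - 1) +
		(sample << EWMA_PREC)) / EWMA_WEIGHT;
}

static unsigned long ewma_read(unsigned long internal)
{
	return internal >> EWMA_PREC;
}

int main(void)
{
	/* init it at a low value - 0 start is tricky */
	unsigned long fail_avg = ewma_add(0, 1);
	int i;

	/* feed failure as 100 and success as 0 */
	for (i = 0; i < 30; i++)
		fail_avg = ewma_add(fail_avg, 100);

	/* once the average crosses LINK_FAIL_THRESH (95), the peer
	 * link would be reported broken via mesh_plink_broken() */
	printf("fail_avg=%lu broken=%d\n", ewma_read(fail_avg),
	       ewma_read(fail_avg) > 95);
	return 0;
}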
@@ -325,6 +328,8 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
 	int rate, err;
 	u32 tx_time, estimated_retx;
 	u64 result;
+	unsigned long fail_avg =
+		ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
 
 	/* Try to get rate based on HW/SW RC algorithm.
 	 * Rate is returned in units of Kbps, correct this
@@ -336,7 +341,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
 	if (rate) {
 		err = 0;
 	} else {
-		if (sta->mesh->fail_avg >= 100)
+		if (fail_avg > LINK_FAIL_THRESH)
 			return MAX_METRIC;
 
 		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
@@ -344,7 +349,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
 		if (WARN_ON(!rate))
 			return MAX_METRIC;
 
-		err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
+		err = (fail_avg << ARITH_SHIFT) / 100;
 	}
 
 	/* bitrate is in units of 100 Kbps, while we need rate in units of
@@ -484,6 +489,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 					  ? mpath->exp_time : exp_time;
 			mesh_path_activate(mpath);
 			spin_unlock_bh(&mpath->state_lock);
+			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
+			/* init it at a low value - 0 start is tricky */
+			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
 			mesh_path_tx_pending(mpath);
 			/* draft says preq_id should be saved to, but there does
 			 * not seem to be any use for it, skipping by now
@@ -522,6 +530,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 					  ? mpath->exp_time : exp_time;
 			mesh_path_activate(mpath);
 			spin_unlock_bh(&mpath->state_lock);
+			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
+			/* init it at a low value - 0 start is tricky */
+			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
 			mesh_path_tx_pending(mpath);
 		} else
 			spin_unlock_bh(&mpath->state_lock);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index f0e6175a9821f01d7aac2dfbda02c1ee5eeb31ec..97269caafecd7b52e644e9bb645d305fdfb67196 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -397,11 +397,10 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
 	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
-	new_mpath->timer.data = (unsigned long) new_mpath;
-	new_mpath->timer.function = mesh_path_timer;
 	new_mpath->exp_time = jiffies;
 	spin_lock_init(&new_mpath->state_lock);
-	init_timer(&new_mpath->timer);
+	setup_timer(&new_mpath->timer, mesh_path_timer,
+		    (unsigned long) new_mpath);
 
 	return new_mpath;
 }
@@ -829,6 +828,9 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
 	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
 	mesh_path_activate(mpath);
 	spin_unlock_bh(&mpath->state_lock);
+	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
+	/* init it at a low value - 0 start is tricky */
+	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
 	mesh_path_tx_pending(mpath);
 }
 
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 953d71e784a9ab71cb8494478e39eaf3b9464211..1131cd504a15ef5fa507c37c8a5464aac173321f 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -95,19 +95,23 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
-	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+	struct ieee80211_supported_band *sband;
 	struct sta_info *sta;
 	u32 erp_rates = 0, changed = 0;
 	int i;
 	bool short_slot = false;
 
-	if (band == NL80211_BAND_5GHZ) {
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return changed;
+
+	if (sband->band == NL80211_BAND_5GHZ) {
 		/* (IEEE 802.11-2012 19.4.5) */
 		short_slot = true;
 		goto out;
-	} else if (band != NL80211_BAND_2GHZ)
+	} else if (sband->band != NL80211_BAND_2GHZ) {
 		goto out;
+	}
 
 	for (i = 0; i < sband->n_bitrates; i++)
 		if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
@@ -123,7 +127,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
 			continue;
 
 		short_slot = false;
-		if (erp_rates & sta->sta.supp_rates[band])
+		if (erp_rates & sta->sta.supp_rates[sband->band])
 			short_slot = true;
 		else
 			break;
@@ -249,7 +253,15 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 	mgmt->u.action.u.self_prot.action_code = action;
 
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
-		enum nl80211_band band = ieee80211_get_sdata_band(sdata);
+		struct ieee80211_supported_band *sband;
+		enum nl80211_band band;
+
+		sband = ieee80211_get_sband(sdata);
+		if (!sband) {
+			err = -EINVAL;
+			goto free;
+		}
+		band = sband->band;
 
 		/* capability info */
 		pos = skb_put(skb, 2);
@@ -395,13 +407,16 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
 			       struct ieee802_11_elems *elems, bool insert)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u32 rates, basic_rates = 0, changed = 0;
 	enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
 
-	sband = local->hw.wiphy->bands[band];
-	rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return;
+
+	rates = ieee80211_sta_get_rates(sdata, elems, sband->band,
+					&basic_rates);
 
 	spin_lock_bh(&sta->mesh->plink_lock);
 	sta->rx_stats.last_rx = jiffies;
@@ -412,9 +427,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
 		goto out;
 	sta->mesh->processed_beacon = true;
 
-	if (sta->sta.supp_rates[band] != rates)
+	if (sta->sta.supp_rates[sband->band] != rates)
 		changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
-	sta->sta.supp_rates[band] = rates;
+	sta->sta.supp_rates[sband->band] = rates;
 
 	if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 					      elems->ht_cap_elem, sta))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6e90301154d5a6ba4a7e9bb612ac8243cf09a23c..89dff563b1ecf5eb938985143396cc1287bc9914 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6,7 +6,7 @@
 * Copyright 2006-2007	Jiri Benc
 * Copyright 2007, Michael Wu
 * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -1855,11 +1855,16 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
 				     u16 capab, bool erp_valid, u8 erp)
 {
 	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+	struct ieee80211_supported_band *sband;
 	u32 changed = 0;
 	bool use_protection;
 	bool use_short_preamble;
 	bool use_short_slot;
 
+	sband = ieee80211_get_sband(sdata);
+	if (!sband)
+		return changed;
+
 	if (erp_valid) {
 		use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;
 		use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0;
@@ -1869,7 +1874,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
 	}
 
 	use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-	if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_5GHZ)
+	if (sband->band == NL80211_BAND_5GHZ)
 		use_short_slot = true;
 
 	if (use_protection != bss_conf->use_cts_prot) {
@@ -1908,6 +1913,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 	sdata->u.mgd.associated = cbss;
 	memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
 
+	ieee80211_check_rate_mask(sdata);
+
 	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
 	if (sdata->vif.p2p ||
@@ -2797,8 +2804,9 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 
 	reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
 
-	sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
-		   mgmt->sa, reason_code);
+	sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
+		   mgmt->sa, reason_code,
+		   ieee80211_get_reason_code_string(reason_code));
 
 	ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
@@ -2822,15 +2830,15 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 			*have_higher_than_11mbit = true;
 
 		/*
-		 * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009
-		 * 7.3.2.2 as a magic value instead of a rate. Hence, skip it.
+		 * Skip HT and VHT BSS membership selectors since they're not
+		 * rates.
 		 *
-		 * Note: Even through the membership selector and the basic
+		 * Note: Even though the membership selector and the basic
 		 *	 rate flag share the same bit, they are not exactly
 		 *	 the same.
 		 */
-		if (!!(supp_rates[i] & 0x80) &&
-		    (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+		if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
+		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY))
 			continue;
 
 		for (j = 0; j < sband->n_bitrates; j++) {
@@ -3001,7 +3009,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 		goto out;
 	}
 
-	sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
+	sband = ieee80211_get_sband(sdata);
+	if (!sband) {
+		mutex_unlock(&sdata->local->sta_mtx);
+		ret = false;
+		goto out;
+	}
 
 	/* Set up internal HT/VHT capabilities */
 	if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
@@ -3085,6 +3098,18 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 	}
 	changed |= BSS_CHANGED_QOS;
 
+	if (elems.max_idle_period_ie) {
+		bss_conf->max_idle_period =
+			le16_to_cpu(elems.max_idle_period_ie->max_idle_period);
+		bss_conf->protected_keep_alive =
+			!!(elems.max_idle_period_ie->idle_options &
+			   WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE);
+		changed |= BSS_CHANGED_KEEP_ALIVE;
+	} else {
+		bss_conf->max_idle_period = 0;
+		bss_conf->protected_keep_alive = false;
+	}
+
 	/* set AID and assoc capability,
 	 * ieee80211_set_associated() will tell the driver */
 	bss_conf->aid = aid;
@@ -3430,6 +3455,30 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 		}
 	}
 
+	if (bss_conf->cqm_rssi_low &&
+	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+		int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
+		int last_event = ifmgd->last_cqm_event_signal;
+		int low = bss_conf->cqm_rssi_low;
+		int high = bss_conf->cqm_rssi_high;
+
+		if (sig < low &&
+		    (last_event == 0 || last_event >= low)) {
+			ifmgd->last_cqm_event_signal = sig;
+			ieee80211_cqm_rssi_notify(
+				&sdata->vif,
+				NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+				sig, GFP_KERNEL);
+		} else if (sig > high &&
+			   (last_event == 0 || last_event <= high)) {
+			ifmgd->last_cqm_event_signal = sig;
+			ieee80211_cqm_rssi_notify(
+				&sdata->vif,
+				NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+				sig, GFP_KERNEL);
+		}
+	}
+
 	if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
 		mlme_dbg_ratelimited(sdata,
 				     "cancelling AP probe due to a received beacon\n");
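The low/high range logic above only fires an event on a threshold crossing, using last_cqm_event_signal to suppress repeats while the signal stays on one side. A compact userspace model of that decision - the threshold values and signal trace are made-up examples:

#include <stdio.h>

int main(void)
{
	int low = -80, high = -70;	/* configured range (example) */
	int last_event = 0;		/* 0 = no event reported yet */
	int trace[] = { -75, -82, -85, -78, -72, -68, -67 };
	unsigned int i;

	for (i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		int sig = trace[i];

		if (sig < low && (last_event == 0 || last_event >= low)) {
			last_event = sig;
			printf("sig=%d: LOW event\n", sig);
		} else if (sig > high &&
			   (last_event == 0 || last_event <= high)) {
			last_event = sig;
			printf("sig=%d: HIGH event\n", sig);
		} else {
			printf("sig=%d: no event\n", sig);
		}
	}
	return 0;
}

Running it shows one LOW event at -82, the deeper -85 suppressed, then one HIGH event at -68 with -67 suppressed - the hysteresis the range API provides over the single-threshold variant.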
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 76a8bcd8ef11237cf8715bf6a10c9085a2152d4d..a87d195c4a615761ed6041296b417ec3c9a8f996 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,7 +10,7 @@ static void ieee80211_sched_scan_cancel(struct ieee80211_local *local)
 {
 	if (ieee80211_request_sched_scan_stop(local))
 		return;
-	cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
+	cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0);
 }
 
 int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 206698bc93f406939bb5d883b6ab2f04bc1a3bed..ea1f4315c521be976c822918d7d821cffd3cc0e9 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -2,6 +2,7 @@
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright (c) 2006 Jiri Benc
+ * Copyright 2017	Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -61,6 +62,28 @@ void rate_control_rate_init(struct sta_info *sta)
 	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
 }
 
+void rate_control_tx_status(struct ieee80211_local *local,
+			    struct ieee80211_supported_band *sband,
+			    struct ieee80211_tx_status *st)
+{
+	struct rate_control_ref *ref = local->rate_ctrl;
+	struct sta_info *sta = container_of(st->sta, struct sta_info, sta);
+	void *priv_sta = sta->rate_ctrl_priv;
+
+	if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+		return;
+
+	spin_lock_bh(&sta->rate_ctrl_lock);
+	if (ref->ops->tx_status_ext)
+		ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st);
+	else if (st->skb)
+		ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb);
+	else
+		WARN_ON_ONCE(1);
+
+	spin_unlock_bh(&sta->rate_ctrl_lock);
+}
+
 void rate_control_rate_update(struct ieee80211_local *local,
 				    struct ieee80211_supported_band *sband,
 				    struct sta_info *sta, u32 changed)
@@ -173,9 +196,11 @@ ieee80211_rate_control_ops_get(const char *name)
 	/* try default if specific alg requested but not found */
 	ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
 
-	/* try built-in one if specific alg requested but not found */
-	if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
+	/* Note: check for > 0 is intentional to avoid clang warning */
+	if (!ops && (strlen(CONFIG_MAC80211_RC_DEFAULT) > 0))
+		/* try built-in one if specific alg requested but not found */
 		ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
+
 	kernel_param_unlock(THIS_MODULE);
 
 	return ops;
@@ -208,7 +233,6 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
 	ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
 	if (!ref)
 		return NULL;
-	ref->local = local;
 	ref->ops = ieee80211_rate_control_ops_get(name);
 	if (!ref->ops)
 		goto free;
@@ -229,18 +253,45 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
 	return NULL;
 }
 
-static void rate_control_free(struct rate_control_ref *ctrl_ref)
+static void rate_control_free(struct ieee80211_local *local,
+			      struct rate_control_ref *ctrl_ref)
 {
 	ctrl_ref->ops->free(ctrl_ref->priv);
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-	debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir);
-	ctrl_ref->local->debugfs.rcdir = NULL;
+	debugfs_remove_recursive(local->debugfs.rcdir);
+	local->debugfs.rcdir = NULL;
 #endif
 
 	kfree(ctrl_ref);
 }
 
+void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_supported_band *sband;
+	u32 user_mask, basic_rates = sdata->vif.bss_conf.basic_rates;
+	enum nl80211_band band;
+
+	if (WARN_ON(!sdata->vif.bss_conf.chandef.chan))
+		return;
+
+	if (WARN_ON_ONCE(!basic_rates))
+		return;
+
+	band = sdata->vif.bss_conf.chandef.chan->band;
+	user_mask = sdata->rc_rateidx_mask[band];
+	sband = local->hw.wiphy->bands[band];
+
+	if (user_mask & basic_rates)
+		return;
+
+	sdata_dbg(sdata,
+		  "no overlap between basic rates (0x%x) and user mask (0x%x on band %d) - clearing the latter",
+		  basic_rates, user_mask, band);
+	sdata->rc_rateidx_mask[band] = (1 << sband->n_bitrates) - 1;
+}
+
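ieee80211_check_rate_mask() above is just a bitmask intersection with a reset fallback. The same logic modelled standalone - the bitmap values and rate count are arbitrary examples, not real rate tables:

#include <stdio.h>

int main(void)
{
	unsigned int basic_rates = 0x0f0;	/* AP's basic rate set (example) */
	unsigned int user_mask = 0x00f;		/* user's rc_rateidx_mask (example) */
	unsigned int n_bitrates = 12;		/* sband->n_bitrates (example) */

	if (!(user_mask & basic_rates)) {
		/* no overlap: clear the user mask back to "all rates",
		 * mirroring rc_rateidx_mask[band] = (1 << n) - 1 */
		user_mask = (1u << n_bitrates) - 1;
		printf("no overlap - reset mask to 0x%x\n", user_mask);
	} else {
		printf("mask 0x%x kept\n", user_mask);
	}
	return 0;
}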
 static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
 {
 	struct sk_buff *skb = txrc->skb;
@@ -875,7 +926,9 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
 	struct ieee80211_sta_rates *old;
 	struct ieee80211_supported_band *sband;
 
-	sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+	sband = ieee80211_get_sband(sta->sdata);
+	if (!sband)
+		return -EINVAL;
 	rate_control_apply_mask_ratetbl(sta, sband, rates);
 	/*
 	 * mac80211 guarantees that this function will not be called
@@ -936,6 +989,6 @@ void rate_control_deinitialize(struct ieee80211_local *local)
 		return;
 
 	local->rate_ctrl = NULL;
-	rate_control_free(ref);
+	rate_control_free(local, ref);
 }
 
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 8d3260785b940d3ea87679f86b98015ce1b4fbbd..8212bfeb71d6b382fda7e2efe243304c7475db2a 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -20,7 +20,6 @@
 #include "driver-ops.h"
 
 struct rate_control_ref {
-	struct ieee80211_local *local;
 	const struct rate_control_ops *ops;
 	void *priv;
 };
@@ -29,47 +28,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
 			   struct sta_info *sta,
 			   struct ieee80211_tx_rate_control *txrc);
 
-static inline void rate_control_tx_status(struct ieee80211_local *local,
-					  struct ieee80211_supported_band *sband,
-					  struct sta_info *sta,
-					  struct sk_buff *skb)
-{
-	struct rate_control_ref *ref = local->rate_ctrl;
-	struct ieee80211_sta *ista = &sta->sta;
-	void *priv_sta = sta->rate_ctrl_priv;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-	if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
-		return;
-
-	spin_lock_bh(&sta->rate_ctrl_lock);
-	if (ref->ops->tx_status)
-		ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
-	else
-		ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
-	spin_unlock_bh(&sta->rate_ctrl_lock);
-}
-
-static inline void
-rate_control_tx_status_noskb(struct ieee80211_local *local,
-			     struct ieee80211_supported_band *sband,
-			     struct sta_info *sta,
-			     struct ieee80211_tx_info *info)
-{
-	struct rate_control_ref *ref = local->rate_ctrl;
-	struct ieee80211_sta *ista = &sta->sta;
-	void *priv_sta = sta->rate_ctrl_priv;
-
-	if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
-		return;
-
-	if (WARN_ON_ONCE(!ref->ops->tx_status_noskb))
-		return;
-
-	spin_lock_bh(&sta->rate_ctrl_lock);
-	ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
-	spin_unlock_bh(&sta->rate_ctrl_lock);
-}
+void rate_control_tx_status(struct ieee80211_local *local,
+			    struct ieee80211_supported_band *sband,
+			    struct ieee80211_tx_status *st);
 
 void rate_control_rate_init(struct sta_info *sta);
 void rate_control_rate_update(struct ieee80211_local *local,
@@ -111,6 +72,8 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
 #endif
 }
 
+void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata);
+
 /* Get a reference to the rate control algorithm. If `name' is NULL, get the
  * first available algorithm.
 */
 int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 3ebe4405a2d43d66f76bdef3fd4f10cd3fa82b8f..9766c1cc4b0a5d59ae4c71aea92921391a1576b3 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -264,9 +264,9 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 
 static void
 minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
-		   struct ieee80211_sta *sta, void *priv_sta,
-		   struct ieee80211_tx_info *info)
+		   void *priv_sta, struct ieee80211_tx_status *st)
 {
+	struct ieee80211_tx_info *info = st->info;
 	struct minstrel_priv *mp = priv;
 	struct minstrel_sta_info *mi = priv_sta;
 	struct ieee80211_tx_rate *ar = info->status.rates;
@@ -726,7 +726,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
 
 const struct rate_control_ops mac80211_minstrel = {
 	.name = "minstrel",
-	.tx_status_noskb = minstrel_tx_status,
+	.tx_status_ext = minstrel_tx_status,
 	.get_rate = minstrel_get_rate,
 	.rate_init = minstrel_rate_init,
 	.alloc = minstrel_alloc,
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 8e783e197e935cd8bcd0b7b7d5e88b8d2c9210cc..4a5bdad9f303034f0aee90ed3ac00cb044458a8d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -678,9 +678,9 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
 
 static void
 minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
-                      struct ieee80211_sta *sta, void *priv_sta,
-                      struct ieee80211_tx_info *info)
+                      void *priv_sta, struct ieee80211_tx_status *st)
 {
+	struct ieee80211_tx_info *info = st->info;
 	struct minstrel_ht_sta_priv *msp = priv_sta;
 	struct minstrel_ht_sta *mi = &msp->ht;
 	struct ieee80211_tx_rate *ar = info->status.rates;
@@ -690,8 +690,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
 	int i;
 
 	if (!msp->is_ht)
-		return mac80211_minstrel.tx_status_noskb(priv, sband, sta,
-							 &msp->legacy, info);
+		return mac80211_minstrel.tx_status_ext(priv, sband,
+						       &msp->legacy, st);
 
 	/* This packet was aggregated but doesn't carry status info */
 	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -1374,7 +1374,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
 
 static const struct rate_control_ops mac80211_minstrel_ht = {
 	.name = "minstrel_ht",
-	.tx_status_noskb = minstrel_ht_tx_status,
+	.tx_status_ext = minstrel_ht_tx_status,
 	.get_rate = minstrel_ht_get_rate,
 	.rate_init = minstrel_ht_rate_init,
 	.rate_update = minstrel_ht_rate_update,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4d7543d1a62cce8d70e2d6894ffb86920c80c241..35f4c7d7a50089e3998ff9860e86f6d26e1da600 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -95,24 +95,13 @@ static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
-static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
-					   struct sk_buff *skb,
-					   unsigned int rtap_vendor_space)
+static void remove_monitor_info(struct sk_buff *skb,
+				unsigned int present_fcs_len,
+				unsigned int rtap_vendor_space)
 {
-	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
-		if (likely(skb->len > FCS_LEN))
-			__pskb_trim(skb, skb->len - FCS_LEN);
-		else {
-			/* driver bug */
-			WARN_ON(1);
-			dev_kfree_skb(skb);
-			return NULL;
-		}
-	}
-
+	if (present_fcs_len)
+		__pskb_trim(skb, skb->len - present_fcs_len);
 	__pskb_pull(skb, rtap_vendor_space);
-
-	return skb;
 }
 
 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
@@ -167,7 +156,7 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
 	/* padding for RX_FLAGS if necessary */
 	len = ALIGN(len, 2);
 
-	if (status->flag & RX_FLAG_HT) /* HT info */
+	if (status->encoding == RX_ENC_HT) /* HT info */
 		len += 3;
 
 	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
@@ -175,7 +164,7 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
 		len += 8;
 	}
 
-	if (status->flag & RX_FLAG_VHT) {
+	if (status->encoding == RX_ENC_VHT) {
 		len = ALIGN(len, 2);
 		len += 12;
 	}
@@ -340,12 +329,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 		*pos |= IEEE80211_RADIOTAP_F_FCS;
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
-	if (status->flag & RX_FLAG_SHORTPRE)
+	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
 		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
 	pos++;
 
 	/* IEEE80211_RADIOTAP_RATE */
-	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
+	if (!rate || status->encoding != RX_ENC_LEGACY) {
 		/*
 		 * Without rate information don't add it. If we have,
 		 * MCS information is a separate field in radiotap,
@@ -356,9 +345,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	} else {
 		int shift = 0;
 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
-		if (status->flag & RX_FLAG_10MHZ)
+		if (status->bw == RATE_INFO_BW_10)
 			shift = 1;
-		else if (status->flag & RX_FLAG_5MHZ)
+		else if (status->bw == RATE_INFO_BW_5)
 			shift = 2;
 		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
 	}
@@ -367,14 +356,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	/* IEEE80211_RADIOTAP_CHANNEL */
 	put_unaligned_le16(status->freq, pos);
 	pos += 2;
-	if (status->flag & RX_FLAG_10MHZ)
+	if (status->bw == RATE_INFO_BW_10)
 		channel_flags |= IEEE80211_CHAN_HALF;
-	else if (status->flag & RX_FLAG_5MHZ)
+	else if (status->bw == RATE_INFO_BW_5)
 		channel_flags |= IEEE80211_CHAN_QUARTER;
 
 	if (status->band == NL80211_BAND_5GHZ)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
-	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
+	else if (status->encoding != RX_ENC_LEGACY)
 		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
 	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
@@ -413,21 +402,21 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	put_unaligned_le16(rx_flags, pos);
 	pos += 2;
 
-	if (status->flag & RX_FLAG_HT) {
+	if (status->encoding == RX_ENC_HT) {
 		unsigned int stbc;
 
 		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
 		*pos++ = local->hw.radiotap_mcs_details;
 		*pos = 0;
-		if (status->flag & RX_FLAG_SHORT_GI)
+		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
 			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
-		if (status->flag & RX_FLAG_40MHZ)
+		if (status->bw == RATE_INFO_BW_40)
 			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
-		if (status->flag & RX_FLAG_HT_GF)
& RX_FLAG_HT_GF) + if (status->enc_flags & RX_ENC_FLAG_HT_GF) *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; - if (status->flag & RX_FLAG_LDPC) + if (status->enc_flags & RX_ENC_FLAG_LDPC) *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; - stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT; + stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; pos++; *pos++ = status->rate_idx; @@ -460,35 +449,40 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos++ = 0; } - if (status->flag & RX_FLAG_VHT) { + if (status->encoding == RX_ENC_VHT) { u16 known = local->hw.radiotap_vht_details; rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); put_unaligned_le16(known, pos); pos += 2; /* flags */ - if (status->flag & RX_FLAG_SHORT_GI) + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; /* in VHT, STBC is binary */ - if (status->flag & RX_FLAG_STBC_MASK) + if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; - if (status->vht_flag & RX_VHT_FLAG_BF) + if (status->enc_flags & RX_ENC_FLAG_BF) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; pos++; /* bandwidth */ - if (status->vht_flag & RX_VHT_FLAG_80MHZ) + switch (status->bw) { + case RATE_INFO_BW_80: *pos++ = 4; - else if (status->vht_flag & RX_VHT_FLAG_160MHZ) + break; + case RATE_INFO_BW_160: *pos++ = 11; - else if (status->flag & RX_FLAG_40MHZ) + break; + case RATE_INFO_BW_40: *pos++ = 1; - else /* 20 MHz */ + break; + default: *pos++ = 0; + } /* MCS/NSS */ - *pos = (status->rate_idx << 4) | status->vht_nss; + *pos = (status->rate_idx << 4) | status->nss; pos += 4; /* coding field */ - if (status->flag & RX_FLAG_LDPC) + if (status->enc_flags & RX_ENC_FLAG_LDPC) *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; pos++; /* group ID */ @@ -544,6 +538,59 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } } +static struct sk_buff * +ieee80211_make_monitor_skb(struct ieee80211_local *local, + struct sk_buff **origskb, + struct ieee80211_rate *rate, + int rtap_vendor_space, bool use_origskb) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); + int rt_hdrlen, needed_headroom; + struct sk_buff *skb; + + /* room for the radiotap header based on driver features */ + rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); + needed_headroom = rt_hdrlen - rtap_vendor_space; + + if (use_origskb) { + /* only need to expand headroom if necessary */ + skb = *origskb; + *origskb = NULL; + + /* + * This shouldn't trigger often because most devices have an + * RX header they pull before we get here, and that should + * be big enough for our radiotap information. We should + * probably export the length to drivers so that we can have + * them allocate enough headroom to start with. + */ + if (skb_headroom(skb) < needed_headroom && + pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return NULL; + } + } else { + /* + * Need to make a copy and possibly remove radiotap header + * and FCS from the original. 
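The VHT section above maps the new status->bw values onto the radiotap VHT bandwidth codes: 0 for 20 MHz, 1 for 40 MHz, 4 for 80 MHz and 11 for 160 MHz. A minimal sketch of that mapping, assuming the RATE_INFO_BW_* constants used throughout this series (the helper name is hypothetical, not part of the patch):

static u8 vht_bw_to_radiotap(int bw)
{
	switch (bw) {
	case RATE_INFO_BW_160:
		return 11;	/* radiotap: 160 MHz */
	case RATE_INFO_BW_80:
		return 4;	/* radiotap: 80 MHz */
	case RATE_INFO_BW_40:
		return 1;	/* radiotap: 40 MHz */
	default:
		return 0;	/* radiotap: 20 MHz */
	}
}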
+ */ + skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); + + if (!skb) + return NULL; + } + + /* prepend radiotap information */ + ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + return skb; +} + /* * This function copies a received frame to all monitor interfaces and * returns a cleaned-up SKB that no longer includes the FCS nor the @@ -555,13 +602,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); struct ieee80211_sub_if_data *sdata; - int rt_hdrlen, needed_headroom; - struct sk_buff *skb, *skb2; - struct net_device *prev_dev = NULL; + struct sk_buff *monskb = NULL; int present_fcs_len = 0; unsigned int rtap_vendor_space = 0; struct ieee80211_sub_if_data *monitor_sdata = rcu_dereference(local->monitor_sdata); + bool only_monitor = false; if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data; @@ -578,8 +624,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, * the SKB because it has a bad FCS/PLCP checksum. */ - if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) + if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { + if (unlikely(origskb->len <= FCS_LEN)) { + /* driver bug */ + WARN_ON(1); + dev_kfree_skb(origskb); + return NULL; + } present_fcs_len = FCS_LEN; + } /* ensure hdr->frame_control and vendor radiotap data are in skb head */ if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) { @@ -587,89 +640,62 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, return NULL; } + only_monitor = should_drop_frame(origskb, present_fcs_len, + rtap_vendor_space); + if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { - if (should_drop_frame(origskb, present_fcs_len, - rtap_vendor_space)) { + if (only_monitor) { dev_kfree_skb(origskb); return NULL; } - return remove_monitor_info(local, origskb, rtap_vendor_space); + remove_monitor_info(origskb, present_fcs_len, + rtap_vendor_space); + return origskb; } ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space); - /* room for the radiotap header based on driver features */ - rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb); - needed_headroom = rt_hdrlen - rtap_vendor_space; - - if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) { - /* only need to expand headroom if necessary */ - skb = origskb; - origskb = NULL; - - /* - * This shouldn't trigger often because most devices have an - * RX header they pull before we get here, and that should - * be big enough for our radiotap information. We should - * probably export the length to drivers so that we can have - * them allocate enough headroom to start with. - */ - if (skb_headroom(skb) < needed_headroom && - pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - return NULL; - } - } else { - /* - * Need to make a copy and possibly remove radiotap header - * and FCS from the original. 
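Both branches of ieee80211_make_monitor_skb() follow one ownership rule: when the frame is destined only for monitor interfaces, the original skb is taken over and its headroom grown in place; otherwise the original is left untouched and a copy is allocated with the radiotap headroom reserved up front. A condensed sketch of that rule (the helper name is hypothetical):

static struct sk_buff *monitor_skb_get(struct sk_buff **orig,
				       int needed_headroom,
				       bool consume_orig)
{
	struct sk_buff *skb;

	if (!consume_orig)
		/* leave *orig alone, copy with extra headroom */
		return skb_copy_expand(*orig, needed_headroom, 0,
				       GFP_ATOMIC);

	/* take ownership; expand headroom only if it is too small */
	skb = *orig;
	*orig = NULL;
	if (skb_headroom(skb) < needed_headroom &&
	    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}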
- */ - skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); - - origskb = remove_monitor_info(local, origskb, - rtap_vendor_space); - - if (!skb) - return origskb; - } - - /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); - - skb_reset_mac_header(skb); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); + list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { + bool last_monitor = list_is_last(&sdata->u.mntr.list, + &local->mon_list); - list_for_each_entry_rcu(sdata, &local->interfaces, list) { - if (sdata->vif.type != NL80211_IFTYPE_MONITOR) - continue; + if (!monskb) + monskb = ieee80211_make_monitor_skb(local, &origskb, + rate, + rtap_vendor_space, + only_monitor && + last_monitor); - if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) - continue; + if (monskb) { + struct sk_buff *skb; - if (!ieee80211_sdata_running(sdata)) - continue; + if (last_monitor) { + skb = monskb; + monskb = NULL; + } else { + skb = skb_clone(monskb, GFP_ATOMIC); + } - if (prev_dev) { - skb2 = skb_clone(skb, GFP_ATOMIC); - if (skb2) { - skb2->dev = prev_dev; - netif_receive_skb(skb2); + if (skb) { + skb->dev = sdata->dev; + ieee80211_rx_stats(skb->dev, skb->len); + netif_receive_skb(skb); } } - prev_dev = sdata->dev; - ieee80211_rx_stats(sdata->dev, skb->len); + if (last_monitor) + break; } - if (prev_dev) { - skb->dev = prev_dev; - netif_receive_skb(skb); - } else - dev_kfree_skb(skb); + /* this happens if last_monitor was erroneously false */ + dev_kfree_skb(monskb); + + /* ditto */ + if (!origskb) + return NULL; + remove_monitor_info(origskb, present_fcs_len, rtap_vendor_space); return origskb; } @@ -3315,8 +3341,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, status = IEEE80211_SKB_RXCB((rx->skb)); sband = rx->local->hw.wiphy->bands[status->band]; - if (!(status->flag & RX_FLAG_HT) && - !(status->flag & RX_FLAG_VHT)) + if (!(status->encoding == RX_ENC_HT) && + !(status->encoding == RX_ENC_VHT)) rate = &sband->bitrates[status->rate_idx]; ieee80211_rx_cooked_monitor(rx, rate); @@ -3553,7 +3579,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); - int multicast = is_multicast_ether_addr(hdr->addr1); + bool multicast = is_multicast_ether_addr(hdr->addr1); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: @@ -3577,7 +3603,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; if (!rx->sta) { int rate_idx; - if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) + if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT/VHT rates */ else rate_idx = status->rate_idx; @@ -3597,7 +3623,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) return false; if (!rx->sta) { int rate_idx; - if (status->flag & RX_FLAG_HT) + if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT rates */ else rate_idx = status->rate_idx; @@ -4260,7 +4286,8 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, * we probably can't have a valid rate here anyway. 
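The rewritten delivery loop above replaces the old prev_dev juggling with a simpler invariant: monskb is created lazily on the first running monitor, every monitor but the last gets an skb_clone(), and the last one consumes monskb itself, so only one clone per additional monitor is allocated. Stripped of the lazy-creation and stats details, the shape is:

list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
	bool last = list_is_last(&sdata->u.mntr.list, &local->mon_list);
	/* the last monitor consumes monskb, the others get clones */
	struct sk_buff *skb = last ? monskb : skb_clone(monskb, GFP_ATOMIC);

	if (skb) {
		skb->dev = sdata->dev;
		netif_receive_skb(skb);
	}
	if (last)
		break;
}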
*/ - if (status->flag & RX_FLAG_HT) { + switch (status->encoding) { + case RX_ENC_HT: /* * rate_idx is MCS index, which can be [0-76] * as documented on: @@ -4278,14 +4305,19 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, status->rate_idx, status->rate_idx)) goto drop; - } else if (status->flag & RX_FLAG_VHT) { + break; + case RX_ENC_VHT: if (WARN_ONCE(status->rate_idx > 9 || - !status->vht_nss || - status->vht_nss > 8, + !status->nss || + status->nss > 8, "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", - status->rate_idx, status->vht_nss)) + status->rate_idx, status->nss)) goto drop; - } else { + break; + default: + WARN_ON_ONCE(1); + /* fall through */ + case RX_ENC_LEGACY: if (WARN_ON(status->rate_idx >= sband->n_bitrates)) goto drop; rate = &sband->bitrates[status->rate_idx]; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index faab3c490d2b755f32ee09638eaf0d45067780cd..47d2ed5704700f110aa6a0f57ecb5af29e0c5db9 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -79,9 +79,9 @@ ieee80211_bss_info_update(struct ieee80211_local *local, bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal; bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20; - if (rx_status->flag & RX_FLAG_5MHZ) + if (rx_status->bw == RATE_INFO_BW_5) bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5; - if (rx_status->flag & RX_FLAG_10MHZ) + else if (rx_status->bw == RATE_INFO_BW_10) bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10; bss_meta.chan = channel; @@ -174,8 +174,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local, if (beacon) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[rx_status->band]; - if (!(rx_status->flag & RX_FLAG_HT) && - !(rx_status->flag & RX_FLAG_VHT)) + if (!(rx_status->encoding == RX_ENC_HT) && + !(rx_status->encoding == RX_ENC_VHT)) bss->beacon_rate = &sband->bitrates[rx_status->rate_idx]; } @@ -1219,7 +1219,7 @@ void ieee80211_sched_scan_results(struct ieee80211_hw *hw) trace_api_sched_scan_results(local); - cfg80211_sched_scan_results(hw->wiphy); + cfg80211_sched_scan_results(hw->wiphy, 0); } EXPORT_SYMBOL(ieee80211_sched_scan_results); @@ -1239,7 +1239,7 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local) mutex_unlock(&local->mtx); - cfg80211_sched_scan_stopped(local->hw.wiphy); + cfg80211_sched_scan_stopped(local->hw.wiphy, 0); } void ieee80211_sched_scan_stopped_work(struct work_struct *work) diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 97f4c9d6b54ced5bf9f337c55d334cd67646abe9..0782e486fe89306c3cd73d36f1f285dee130ddba 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -132,9 +132,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, struct ieee80211_vht_operation vht_oper = { .chan_width = wide_bw_chansw_ie->new_channel_width, - .center_freq_seg1_idx = + .center_freq_seg0_idx = wide_bw_chansw_ie->new_center_freq_seg0, - .center_freq_seg2_idx = + .center_freq_seg1_idx = wide_bw_chansw_ie->new_center_freq_seg1, /* .basic_mcs_set doesn't matter */ }; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 3323a2fb289bd035a1280043bd3d64c2509cb9e2..7cdf7a835bb01e8fade3b9d9bb6efaa19158f72b 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2,7 +2,7 @@ * Copyright 2002-2005, Instant802 Networks, Inc. 
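The validation switch above makes the per-encoding limits explicit: HT MCS indices may range from 0 to 76, VHT is capped at MCS 9 with 1 to 8 spatial streams, and a legacy rate_idx must index into the band's bitrate table. Restated as a predicate (a hypothetical helper; the limits are taken from the WARN conditions in the hunk above):

static bool rx_rate_valid(const struct ieee80211_rx_status *status,
			  const struct ieee80211_supported_band *sband)
{
	switch (status->encoding) {
	case RX_ENC_HT:
		return status->rate_idx <= 76;		/* MCS 0-76 */
	case RX_ENC_VHT:
		return status->rate_idx <= 9 &&		/* MCS 0-9 */
		       status->nss >= 1 && status->nss <= 8;
	case RX_ENC_LEGACY:
		return status->rate_idx < sband->n_bitrates;
	default:
		return false;
	}
}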
* Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -395,10 +395,15 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sta.smps_mode = IEEE80211_SMPS_OFF; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { - struct ieee80211_supported_band *sband = - hw->wiphy->bands[ieee80211_get_sdata_band(sdata)]; - u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> - IEEE80211_HT_CAP_SM_PS_SHIFT; + struct ieee80211_supported_band *sband; + u8 smps; + + sband = ieee80211_get_sband(sdata); + if (!sband) + goto free_txq; + + smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> + IEEE80211_HT_CAP_SM_PS_SHIFT; /* * Assume that hostapd advertises our caps in the beacon and * this is the known_smps_mode for a station that just assciated @@ -1957,24 +1962,32 @@ sta_get_last_rx_stats(struct sta_info *sta) static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, struct rate_info *rinfo) { - rinfo->bw = (rate & STA_STATS_RATE_BW_MASK) >> - STA_STATS_RATE_BW_SHIFT; + rinfo->bw = STA_STATS_GET(BW, rate); - if (rate & STA_STATS_RATE_VHT) { + switch (STA_STATS_GET(TYPE, rate)) { + case STA_STATS_RATE_TYPE_VHT: rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; - rinfo->mcs = rate & 0xf; - rinfo->nss = (rate & 0xf0) >> 4; - } else if (rate & STA_STATS_RATE_HT) { + rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); + rinfo->nss = STA_STATS_GET(VHT_NSS, rate); + if (STA_STATS_GET(SGI, rate)) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case STA_STATS_RATE_TYPE_HT: rinfo->flags = RATE_INFO_FLAGS_MCS; - rinfo->mcs = rate & 0xff; - } else if (rate & STA_STATS_RATE_LEGACY) { + rinfo->mcs = STA_STATS_GET(HT_MCS, rate); + if (STA_STATS_GET(SGI, rate)) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case STA_STATS_RATE_TYPE_LEGACY: { struct ieee80211_supported_band *sband; u16 brate; unsigned int shift; + int band = STA_STATS_GET(LEGACY_BAND, rate); + int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); rinfo->flags = 0; - sband = local->hw.wiphy->bands[(rate >> 4) & 0xf]; - brate = sband->bitrates[rate & 0xf].bitrate; + sband = local->hw.wiphy->bands[band]; + brate = sband->bitrates[rate_idx].bitrate; if (rinfo->bw == RATE_INFO_BW_5) shift = 2; else if (rinfo->bw == RATE_INFO_BW_10) @@ -1982,10 +1995,9 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, else shift = 0; rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); + break; + } } - - if (rate & STA_STATS_RATE_SGI) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; } static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index e65cda34d2bc000fb7e3738a6235ba5d53b8fde6..5609cacb20d5f31e02ba877f88a6f0290e18ce8b 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -1,7 +1,7 @@ /* * Copyright 2002-2005, Devicescape Software, Inc. 
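sta_stats_decode_rate() above is the read side of the packed rate format introduced in sta_info.h further down: every STA_STATS_GET() expands to a FIELD_GET() against a GENMASK()-defined field. A self-contained toy showing the round trip with <linux/bitfield.h>; the EX_* layout mirrors the VHT fields of the patch, but the names are illustrative only:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define EX_FIELD_VHT_MCS	GENMASK(3, 0)
#define EX_FIELD_VHT_NSS	GENMASK(7, 4)
#define EX_FIELD_BW		GENMASK(11, 8)

static u16 ex_encode_vht(u8 mcs, u8 nss, u8 bw)
{
	/* FIELD_PREP() shifts each value into its mask's position */
	return FIELD_PREP(EX_FIELD_VHT_MCS, mcs) |
	       FIELD_PREP(EX_FIELD_VHT_NSS, nss) |
	       FIELD_PREP(EX_FIELD_BW, bw);
}

static void ex_decode_vht(u16 rate, u8 *mcs, u8 *nss, u8 *bw)
{
	/* FIELD_GET() masks the field out and shifts it back down */
	*mcs = FIELD_GET(EX_FIELD_VHT_MCS, rate);
	*nss = FIELD_GET(EX_FIELD_VHT_NSS, rate);
	*bw  = FIELD_GET(EX_FIELD_BW, rate);
}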
* Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright(c) 2015-2016 Intel Deutschland GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -324,6 +325,9 @@ struct ieee80211_fast_rx { struct rcu_head rcu_head; }; +/* we use only values in the range 0-100, so pick a large precision */ +DECLARE_EWMA(mesh_fail_avg, 20, 8) + /** * struct mesh_sta - mesh STA information * @plink_lock: serialize access to plink fields @@ -369,7 +373,7 @@ struct mesh_sta { enum nl80211_mesh_power_mode nonpeer_pm; /* moving percentage of failed MSDUs */ - unsigned int fail_avg; + struct ewma_mesh_fail_avg fail_avg; }; DECLARE_EWMA(signal, 10, 8) @@ -724,40 +728,55 @@ void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); unsigned long ieee80211_sta_last_active(struct sta_info *sta); +enum sta_stats_type { + STA_STATS_RATE_TYPE_INVALID = 0, + STA_STATS_RATE_TYPE_LEGACY, + STA_STATS_RATE_TYPE_HT, + STA_STATS_RATE_TYPE_VHT, +}; + +#define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0) +#define STA_STATS_FIELD_LEGACY_IDX GENMASK( 3, 0) +#define STA_STATS_FIELD_LEGACY_BAND GENMASK( 7, 4) +#define STA_STATS_FIELD_VHT_MCS GENMASK( 3, 0) +#define STA_STATS_FIELD_VHT_NSS GENMASK( 7, 4) +#define STA_STATS_FIELD_BW GENMASK(11, 8) +#define STA_STATS_FIELD_SGI GENMASK(12, 12) +#define STA_STATS_FIELD_TYPE GENMASK(15, 13) + +#define STA_STATS_FIELD(_n, _v) FIELD_PREP(STA_STATS_FIELD_ ## _n, _v) +#define STA_STATS_GET(_n, _v) FIELD_GET(STA_STATS_FIELD_ ## _n, _v) + #define STA_STATS_RATE_INVALID 0 -#define STA_STATS_RATE_VHT 0x8000 -#define STA_STATS_RATE_HT 0x4000 -#define STA_STATS_RATE_LEGACY 0x2000 -#define STA_STATS_RATE_SGI 0x1000 -#define STA_STATS_RATE_BW_SHIFT 9 -#define STA_STATS_RATE_BW_MASK (0x7 << STA_STATS_RATE_BW_SHIFT) - -static inline u16 sta_stats_encode_rate(struct ieee80211_rx_status *s) + +static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s) { - u16 r = s->rate_idx; - - if (s->vht_flag & RX_VHT_FLAG_80MHZ) - r |= RATE_INFO_BW_80 << STA_STATS_RATE_BW_SHIFT; - else if (s->vht_flag & RX_VHT_FLAG_160MHZ) - r |= RATE_INFO_BW_160 << STA_STATS_RATE_BW_SHIFT; - else if (s->flag & RX_FLAG_40MHZ) - r |= RATE_INFO_BW_40 << STA_STATS_RATE_BW_SHIFT; - else if (s->flag & RX_FLAG_10MHZ) - r |= RATE_INFO_BW_10 << STA_STATS_RATE_BW_SHIFT; - else if (s->flag & RX_FLAG_5MHZ) - r |= RATE_INFO_BW_5 << STA_STATS_RATE_BW_SHIFT; - else - r |= RATE_INFO_BW_20 << STA_STATS_RATE_BW_SHIFT; - - if (s->flag & RX_FLAG_SHORT_GI) - r |= STA_STATS_RATE_SGI; - - if (s->flag & RX_FLAG_VHT) - r |= STA_STATS_RATE_VHT | (s->vht_nss << 4); - else if (s->flag & RX_FLAG_HT) - r |= STA_STATS_RATE_HT; - else - r |= STA_STATS_RATE_LEGACY | (s->band << 4); + u16 r; + + r = STA_STATS_FIELD(BW, s->bw); + + if (s->enc_flags & RX_ENC_FLAG_SHORT_GI) + r |= STA_STATS_FIELD(SGI, 1); + + switch (s->encoding) { + case RX_ENC_VHT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_VHT); + r |= STA_STATS_FIELD(VHT_NSS, s->nss); + r |= STA_STATS_FIELD(VHT_MCS, s->rate_idx); + break; + case RX_ENC_HT: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HT); + r |= STA_STATS_FIELD(HT_MCS, s->rate_idx); + break; + case RX_ENC_LEGACY: + r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_LEGACY); + r |= STA_STATS_FIELD(LEGACY_BAND, s->band); + r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx); + break; + default: + WARN_ON(1); + 
return STA_STATS_RATE_INVALID; + } return r; } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 83b8b11f24ea1dadc0501bd8a4c15598195524a2..be47ac5cd8c8dd44a23247979f0d1742cf089026 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -200,6 +200,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) } if (ieee80211_is_action(mgmt->frame_control) && + !ieee80211_has_protected(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_HT && mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && ieee80211_sdata_running(sdata)) { @@ -630,61 +631,6 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, return rates_idx; } -void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, - struct ieee80211_sta *pubsta, - struct ieee80211_tx_info *info) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_supported_band *sband; - int retry_count; - bool acked, noack_success; - - ieee80211_tx_get_rates(hw, info, &retry_count); - - sband = hw->wiphy->bands[info->band]; - - acked = !!(info->flags & IEEE80211_TX_STAT_ACK); - noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); - - if (pubsta) { - struct sta_info *sta; - - sta = container_of(pubsta, struct sta_info, sta); - - if (!acked) - sta->status_stats.retry_failed++; - sta->status_stats.retry_count += retry_count; - - if (acked) { - sta->status_stats.last_ack = jiffies; - - if (sta->status_stats.lost_packets) - sta->status_stats.lost_packets = 0; - - /* Track when last TDLS packet was ACKed */ - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->status_stats.last_tdls_pkt_time = jiffies; - } else { - ieee80211_lost_packet(sta, info); - } - - rate_control_tx_status_noskb(local, sband, sta, info); - } - - if (acked || noack_success) { - I802_DEBUG_INC(local->dot11TransmittedFrameCount); - if (!pubsta) - I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); - if (retry_count > 0) - I802_DEBUG_INC(local->dot11RetryCount); - if (retry_count > 1) - I802_DEBUG_INC(local->dot11MultipleRetryCount); - } else { - I802_DEBUG_INC(local->dot11FailedCount); - } -} -EXPORT_SYMBOL(ieee80211_tx_status_noskb); - void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_supported_band *sband, int retry_count, int shift, bool send_to_cooked) @@ -742,15 +688,16 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, dev_kfree_skb(skb); } -void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +static void __ieee80211_tx_status(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status) { + struct sk_buff *skb = status->skb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *info = status->info; + struct sta_info *sta; __le16 fc; struct ieee80211_supported_band *sband; - struct rhlist_head *tmp; - struct sta_info *sta; int retry_count; int rates_idx; bool send_to_cooked; @@ -761,16 +708,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); - rcu_read_lock(); - sband = local->hw.wiphy->bands[info->band]; fc = hdr->frame_control; - for_each_sta_info(local, hdr->addr1, sta, tmp) { - /* skip wrong virtual interface */ - if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) - continue; - + if (status->sta) { + sta = container_of(status->sta, struct 
sta_info, sta); shift = ieee80211_vif_get_shift(&sta->sdata->vif); if (info->flags & IEEE80211_TX_STATUS_EOSP) @@ -790,7 +732,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) * that this TX packet failed because of that. */ ieee80211_handle_filtered_frame(local, sta, skb); - rcu_read_unlock(); return; } @@ -840,7 +781,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { ieee80211_handle_filtered_frame(local, sta, skb); - rcu_read_unlock(); return; } else { if (!acked) @@ -856,7 +796,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } } - rate_control_tx_status(local, sband, sta, skb); + rate_control_tx_status(local, sband, status); if (ieee80211_vif_is_mesh(&sta->sdata->vif)) ieee80211s_update_metric(local, sta, skb); @@ -883,8 +823,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } } - rcu_read_unlock(); - ieee80211_led_tx(local); /* SNMP counters @@ -949,8 +887,96 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) /* send to monitor interfaces */ ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked); } + +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_status status = { + .skb = skb, + .info = IEEE80211_SKB_CB(skb), + }; + struct rhlist_head *tmp; + struct sta_info *sta; + + rcu_read_lock(); + + for_each_sta_info(local, hdr->addr1, sta, tmp) { + /* skip wrong virtual interface */ + if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) + continue; + + status.sta = &sta->sta; + break; + } + + __ieee80211_tx_status(hw, &status); + rcu_read_unlock(); +} EXPORT_SYMBOL(ieee80211_tx_status); +void ieee80211_tx_status_ext(struct ieee80211_hw *hw, + struct ieee80211_tx_status *status) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = status->info; + struct ieee80211_sta *pubsta = status->sta; + struct ieee80211_supported_band *sband; + int retry_count; + bool acked, noack_success; + + if (status->skb) + return __ieee80211_tx_status(hw, status); + + if (!status->sta) + return; + + ieee80211_tx_get_rates(hw, info, &retry_count); + + sband = hw->wiphy->bands[info->band]; + + acked = !!(info->flags & IEEE80211_TX_STAT_ACK); + noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); + + if (pubsta) { + struct sta_info *sta; + + sta = container_of(pubsta, struct sta_info, sta); + + if (!acked) + sta->status_stats.retry_failed++; + sta->status_stats.retry_count += retry_count; + + if (acked) { + sta->status_stats.last_ack = jiffies; + + if (sta->status_stats.lost_packets) + sta->status_stats.lost_packets = 0; + + /* Track when last TDLS packet was ACKed */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + sta->status_stats.last_tdls_pkt_time = jiffies; + } else { + ieee80211_lost_packet(sta, info); + } + + rate_control_tx_status(local, sband, status); + } + + if (acked || noack_success) { + I802_DEBUG_INC(local->dot11TransmittedFrameCount); + if (!pubsta) + I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount); + if (retry_count > 0) + I802_DEBUG_INC(local->dot11RetryCount); + if (retry_count > 1) + I802_DEBUG_INC(local->dot11MultipleRetryCount); + } else { + I802_DEBUG_INC(local->dot11FailedCount); + } +} +EXPORT_SYMBOL(ieee80211_tx_status_ext); + void 
ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index afca7d103684bbc8c953ae13bb0345934d8f5f35..f20dcf1b1830a2412ed96ebcc9ce3be65ef5d7e8 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -47,8 +47,7 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, NL80211_FEATURE_TDLS_CHANNEL_SWITCH; bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); - struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; + struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata); bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = (void *)skb_put(skb, 10); @@ -180,11 +179,14 @@ static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, u16 status_code) { + struct ieee80211_supported_band *sband; + /* The capability will be 0 when sending a failure code */ if (status_code != 0) return 0; - if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_2GHZ) { + sband = ieee80211_get_sband(sdata); + if (sband && sband->band == NL80211_BAND_2GHZ) { return WLAN_CAPABILITY_SHORT_SLOT_TIME | WLAN_CAPABILITY_SHORT_PREAMBLE; } @@ -358,17 +360,20 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, u8 action_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { - enum nl80211_band band = ieee80211_get_sdata_band(sdata); - struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; + struct ieee80211_local *local = sdata->local; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct sta_info *sta = NULL; size_t offset = 0, noffset; u8 *pos; - ieee80211_add_srates_ie(sdata, skb, false, band); - ieee80211_add_ext_srates_ie(sdata, skb, false, band); + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + + ieee80211_add_srates_ie(sdata, skb, false, sband->band); + ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band); ieee80211_tdls_add_supp_channels(sdata, skb); /* add any custom IEs that go before Extended Capabilities */ @@ -439,7 +444,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, * the same on all bands. The specification limits the setup to a * single HT-cap, so use the current band for now. 
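With ieee80211_tx_status_noskb() folded into the new ieee80211_tx_status_ext() in the status.c hunks above, a driver that has no skb to hand back fills in only the sta and info members of struct ieee80211_tx_status and leaves skb NULL, which selects the skb-less path. A minimal caller sketch; the drv_* wrapper is hypothetical:

static void drv_report_noskb_status(struct ieee80211_hw *hw,
				    struct ieee80211_sta *sta,
				    struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		/* .skb stays NULL: skb-less accounting path */
	};

	ieee80211_tx_status_ext(hw, &status);
}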
*/ - sband = local->hw.wiphy->bands[band]; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); if ((action_code == WLAN_TDLS_SETUP_REQUEST || @@ -545,9 +549,13 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t offset = 0, noffset; struct sta_info *sta, *ap_sta; - enum nl80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband; u8 *pos; + sband = ieee80211_get_sband(sdata); + if (!sband) + return; + mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); @@ -612,7 +620,8 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* only include VHT-operation if not on the 2.4GHz band */ - if (band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { + if (sband->band != NL80211_BAND_2GHZ && + sta->sta.vht_cap.vht_supported) { /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ba8d7db0a07165e7b33f71caa260f352771d434a..04b22f8982fe42b41364d8d7feee0d5bd036c64a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -682,10 +682,6 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) txrc.skb = tx->skb; txrc.reported_rate.idx = -1; txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; - if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) - txrc.max_rate_idx = -1; - else - txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; if (tx->sdata->rc_has_mcs_mask[info->band]) txrc.rate_idx_mcs_mask = @@ -4249,10 +4245,6 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, txrc.skb = skb; txrc.reported_rate.idx = -1; txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; - if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1) - txrc.max_rate_idx = -1; - else - txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; txrc.bss = true; rate_control_get_rate(sdata, NULL, &txrc); @@ -4305,7 +4297,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, return bcn; shift = ieee80211_vif_get_shift(vif); - sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))]; + sband = ieee80211_get_sband(vif_to_sdata(vif)); + if (!sband) + return bcn; + ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false); return bcn; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ac59fbd280dff8f712fb9dbc841147e4642ec607..ac9ac6c35594a4c67d570bb40cc4ad44f3d7bfa0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015-2016 Intel Deutschland GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -828,6 +828,7 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, case WLAN_EID_EXT_CAPABILITY: case WLAN_EID_CHAN_SWITCH_TIMING: case WLAN_EID_LINK_ID: + case WLAN_EID_BSS_MAX_IDLE_PERIOD: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once @@ -1089,6 +1090,10 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, else elem_parse_failed = true; break; + case WLAN_EID_BSS_MAX_IDLE_PERIOD: + if (elen >= sizeof(*elems->max_idle_period_ie)) + 
elems->max_idle_period_ie = (void *)pos; + break; default: break; } @@ -1590,14 +1595,14 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, size_t num_rates; u32 supp_rates, rate_flags; int i, j, shift; + sband = sdata->local->hw.wiphy->bands[band]; + if (WARN_ON(!sband)) + return 1; rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); shift = ieee80211_vif_get_shift(&sdata->vif); - if (WARN_ON(!sband)) - return 1; - num_rates = sband->n_bitrates; supp_rates = 0; for (i = 0; i < elems->supp_rates_len + @@ -1983,6 +1988,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->u.mgd.have_beacon) changed |= BSS_CHANGED_BEACON_INFO; + if (sdata->vif.bss_conf.max_idle_period || + sdata->vif.bss_conf.protected_keep_alive) + changed |= BSS_CHANGED_KEEP_ALIVE; + sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); sdata_unlock(sdata); @@ -2103,7 +2112,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->mtx); if (sched_scan_stopped) - cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy); + cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0); wake_up: if (local->in_reconfig) { @@ -2413,13 +2422,13 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, *pos++ = WLAN_EID_VHT_OPERATION; *pos++ = sizeof(struct ieee80211_vht_operation); vht_oper = (struct ieee80211_vht_operation *)pos; - vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel( + vht_oper->center_freq_seg0_idx = ieee80211_frequency_to_channel( chandef->center_freq1); if (chandef->center_freq2) - vht_oper->center_freq_seg2_idx = + vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel(chandef->center_freq2); else - vht_oper->center_freq_seg2_idx = 0x00; + vht_oper->center_freq_seg1_idx = 0x00; switch (chandef->width) { case NL80211_CHAN_WIDTH_160: @@ -2428,11 +2437,11 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, * workaround. */ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; - vht_oper->center_freq_seg2_idx = vht_oper->center_freq_seg1_idx; + vht_oper->center_freq_seg1_idx = vht_oper->center_freq_seg0_idx; if (chandef->chan->center_freq < chandef->center_freq1) - vht_oper->center_freq_seg1_idx -= 8; + vht_oper->center_freq_seg0_idx -= 8; else - vht_oper->center_freq_seg1_idx += 8; + vht_oper->center_freq_seg0_idx += 8; break; case NL80211_CHAN_WIDTH_80P80: /* @@ -2491,9 +2500,9 @@ bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper, if (!oper) return false; - cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx, + cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg0_idx, chandef->chan->band); - cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg2_idx, + cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx, chandef->chan->band); switch (oper->chan_width) { @@ -2503,11 +2512,11 @@ bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper, new.width = NL80211_CHAN_WIDTH_80; new.center_freq1 = cf1; /* If needed, adjust based on the newer interop workaround. 
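The ieee80211_ie_build_vht_oper() hunk above expresses the 160 MHz interop workaround in the newly renamed fields: the IE advertises 80 MHz, seg1 carries the true 160 MHz center, and seg0 is shifted by eight channels onto the center of the 80 MHz half containing the control channel. In isolation, the encode step looks roughly like this (a sketch over a chandef, not the literal patch code):

u8 seg0, seg1;

/* seg1: channel number of the real 160 MHz center */
seg1 = ieee80211_frequency_to_channel(chandef->center_freq1);
/* seg0: center of the 80 MHz half holding the control channel */
if (chandef->chan->center_freq < chandef->center_freq1)
	seg0 = seg1 - 8;
else
	seg0 = seg1 + 8;

The matching decode in ieee80211_chandef_vht_oper(), shown in the next hunk, recognizes the eight-channel gap between seg0 and seg1 and widens the chandef back to 160 MHz.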
*/ - if (oper->center_freq_seg2_idx) { + if (oper->center_freq_seg1_idx) { unsigned int diff; - diff = abs(oper->center_freq_seg2_idx - - oper->center_freq_seg1_idx); + diff = abs(oper->center_freq_seg1_idx - + oper->center_freq_seg0_idx); if (diff == 8) { new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf2; @@ -2715,42 +2724,39 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, memset(&ri, 0, sizeof(ri)); /* Fill cfg80211 rate info */ - if (status->flag & RX_FLAG_HT) { + switch (status->encoding) { + case RX_ENC_HT: ri.mcs = status->rate_idx; ri.flags |= RATE_INFO_FLAGS_MCS; - if (status->flag & RX_FLAG_40MHZ) - ri.bw = RATE_INFO_BW_40; - else - ri.bw = RATE_INFO_BW_20; - if (status->flag & RX_FLAG_SHORT_GI) + ri.bw = status->bw; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; - } else if (status->flag & RX_FLAG_VHT) { + break; + case RX_ENC_VHT: ri.flags |= RATE_INFO_FLAGS_VHT_MCS; ri.mcs = status->rate_idx; - ri.nss = status->vht_nss; - if (status->flag & RX_FLAG_40MHZ) - ri.bw = RATE_INFO_BW_40; - else if (status->vht_flag & RX_VHT_FLAG_80MHZ) - ri.bw = RATE_INFO_BW_80; - else if (status->vht_flag & RX_VHT_FLAG_160MHZ) - ri.bw = RATE_INFO_BW_160; - else - ri.bw = RATE_INFO_BW_20; - if (status->flag & RX_FLAG_SHORT_GI) + ri.nss = status->nss; + ri.bw = status->bw; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; - } else { + break; + default: + WARN_ON(1); + /* fall through */ + case RX_ENC_LEGACY: { struct ieee80211_supported_band *sband; int shift = 0; int bitrate; - if (status->flag & RX_FLAG_10MHZ) { + ri.bw = status->bw; + + switch (status->bw) { + case RATE_INFO_BW_10: shift = 1; - ri.bw = RATE_INFO_BW_10; - } else if (status->flag & RX_FLAG_5MHZ) { + break; + case RATE_INFO_BW_5: shift = 2; - ri.bw = RATE_INFO_BW_5; - } else { - ri.bw = RATE_INFO_BW_20; + break; } sband = local->hw.wiphy->bands[status->band]; @@ -2762,19 +2768,21 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, if (status->band == NL80211_BAND_5GHZ) { ts += 20 << shift; mpdu_offset += 2; - } else if (status->flag & RX_FLAG_SHORTPRE) { + } else if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) { ts += 96; } else { ts += 192; } } + break; + } } rate = cfg80211_calculate_bitrate(&ri); if (WARN_ONCE(!rate, "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n", (unsigned long long)status->flag, status->rate_idx, - status->vht_nss)) + status->nss)) return 0; /* rewind from end of MPDU */ @@ -2791,8 +2799,10 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) struct ieee80211_sub_if_data *sdata; struct cfg80211_chan_def chandef; + /* for interface list, to avoid linking iflist_mtx and chanctx_mtx */ + ASSERT_RTNL(); + mutex_lock(&local->mtx); - mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { /* it might be waiting for the local->mtx, but then * by the time it gets it, sdata->wdev.cac_started @@ -2809,7 +2819,6 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) GFP_KERNEL); } } - mutex_unlock(&local->iflist_mtx); mutex_unlock(&local->mtx); } @@ -2831,7 +2840,9 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work) } mutex_unlock(&local->chanctx_mtx); + rtnl_lock(); ieee80211_dfs_cac_cancel(local); + rtnl_unlock(); if (num_chanctx > 1) /* XXX: multi-channel is not supported yet */ @@ -2846,7 +2857,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw) trace_api_radar_detected(local); - ieee80211_queue_work(hw, 
&local->radar_detected_work); + schedule_work(&local->radar_detected_work); } EXPORT_SYMBOL(ieee80211_radar_detected); diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index 56ccffa3f2bfc7731adfaabb1026ef7e8af68d32..62141dcec2d66020fb4eb7bb698e880809878028 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -19,6 +19,7 @@ #ifndef __IEEE802154_I_H #define __IEEE802154_I_H +#include #include #include #include diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 6414079aa7297eee8fbd6320fb4392c6d8865e34..088e2b459d0f01a6d33b7a5166c3dc0a394523e9 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -24,6 +24,9 @@ #include #include "internal.h" +/* max memory we will use for mpls_route */ +#define MAX_MPLS_ROUTE_MEM 4096 + /* Maximum number of labels to look ahead at when selecting a path of * a multipath route */ @@ -32,7 +35,9 @@ #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1) static int zero = 0; +static int one = 1; static int label_limit = (1 << 20) - 1; +static int ttl_max = 255; static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt, struct nlmsghdr *nlh, struct net *net, u32 portid, @@ -58,10 +63,7 @@ EXPORT_SYMBOL_GPL(mpls_output_possible); static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh) { - u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN); - int nh_index = nh - rt->rt_nh; - - return nh0_via + rt->rt_max_alen * nh_index; + return (u8 *)nh + rt->rt_via_offset; } static const u8 *mpls_nh_via(const struct mpls_route *rt, @@ -187,21 +189,32 @@ static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb) return hash; } +static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index) +{ + return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size); +} + +/* number of alive nexthops (rt->rt_nhn_alive) and the flags for + * a next hop (nh->nh_flags) are modified by netdev event handlers. + * Since those fields can change at any moment, use READ_ONCE to + * access both. 
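mpls_get_nexthop() and the reworked __mpls_nh_via() above both lean on the new flat route layout: a route is a single allocation in which each nexthop occupies rt_nh_size bytes (the struct itself, its label array, then the aligned via address at rt_via_offset), so lookups reduce to byte arithmetic. A condensed restatement; the nh_at()/nh_via() names are illustrative duplicates of the patch helpers:

/* nexthop i starts i * rt_nh_size bytes into the nexthop array */
static struct mpls_nh *nh_at(struct mpls_route *rt, u8 index)
{
	return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
}

/* each slot keeps its via address at a fixed offset */
static u8 *nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	return (u8 *)nh + rt->rt_via_offset;
}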
+ */ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, struct sk_buff *skb) { - int alive = ACCESS_ONCE(rt->rt_nhn_alive); u32 hash = 0; int nh_index = 0; int n = 0; + u8 alive; /* No need to look further into packet if there's only * one path */ if (rt->rt_nhn == 1) - goto out; + return rt->rt_nh; - if (alive <= 0) + alive = READ_ONCE(rt->rt_nhn_alive); + if (alive == 0) return NULL; hash = mpls_multipath_hash(rt, skb); @@ -209,7 +222,9 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, if (alive == rt->rt_nhn) goto out; for_nexthops(rt) { - if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) + unsigned int nh_flags = READ_ONCE(nh->nh_flags); + + if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) continue; if (n == nh_index) return nh; @@ -217,11 +232,11 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, } endfor_nexthops(rt); out: - return &rt->rt_nh[nh_index]; + return mpls_get_nexthop(rt, nh_index); } -static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, - struct mpls_entry_decoded dec) +static bool mpls_egress(struct net *net, struct mpls_route *rt, + struct sk_buff *skb, struct mpls_entry_decoded dec) { enum mpls_payload_type payload_type; bool success = false; @@ -246,22 +261,46 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, switch (payload_type) { case MPT_IPV4: { struct iphdr *hdr4 = ip_hdr(skb); + u8 new_ttl; skb->protocol = htons(ETH_P_IP); + + /* If propagating TTL, take the decremented TTL from + * the incoming MPLS header, otherwise decrement the + * TTL, but only if not 0 to avoid underflow. + */ + if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED || + (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT && + net->mpls.ip_ttl_propagate)) + new_ttl = dec.ttl; + else + new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0; + csum_replace2(&hdr4->check, htons(hdr4->ttl << 8), - htons(dec.ttl << 8)); - hdr4->ttl = dec.ttl; + htons(new_ttl << 8)); + hdr4->ttl = new_ttl; success = true; break; } case MPT_IPV6: { struct ipv6hdr *hdr6 = ipv6_hdr(skb); skb->protocol = htons(ETH_P_IPV6); - hdr6->hop_limit = dec.ttl; + + /* If propagating TTL, take the decremented TTL from + * the incoming MPLS header, otherwise decrement the + * hop limit, but only if not 0 to avoid underflow. 
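The IPv4 and IPv6 arms of mpls_egress() above make the same TTL decision; factored out as a sketch (the helper is hypothetical), it reads:

static u8 mpls_egress_ttl(struct net *net, struct mpls_route *rt,
			  u8 mpls_ttl, u8 ip_ttl)
{
	/* propagate the already-decremented MPLS TTL if the route or
	 * the per-namespace sysctl enables propagation
	 */
	if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
	    (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
	     net->mpls.ip_ttl_propagate))
		return mpls_ttl;

	/* otherwise decrement the IP TTL/hop limit, never below 0 */
	return ip_ttl ? ip_ttl - 1 : 0;
}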
+ */ + if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED || + (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT && + net->mpls.ip_ttl_propagate)) + hdr6->hop_limit = dec.ttl; + else if (hdr6->hop_limit) + hdr6->hop_limit = hdr6->hop_limit - 1; success = true; break; } case MPT_UNSPEC: + /* Should have decided which protocol it is by now */ break; } @@ -361,7 +400,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, if (unlikely(!new_header_size && dec.bos)) { /* Penultimate hop popping */ - if (!mpls_egress(rt, skb, dec)) + if (!mpls_egress(dev_net(out_dev), rt, skb, dec)) goto err; } else { bool bos; @@ -412,6 +451,7 @@ static struct packet_type mpls_packet_type __read_mostly = { static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = { [RTA_DST] = { .type = NLA_U32 }, [RTA_OIF] = { .type = NLA_U32 }, + [RTA_TTL_PROPAGATE] = { .type = NLA_U8 }, }; struct mpls_route_config { @@ -421,6 +461,7 @@ struct mpls_route_config { u8 rc_via_alen; u8 rc_via[MAX_VIA_ALEN]; u32 rc_label; + u8 rc_ttl_propagate; u8 rc_output_labels; u32 rc_output_label[MAX_NEW_LABELS]; u32 rc_nlflags; @@ -430,20 +471,27 @@ struct mpls_route_config { int rc_mp_len; }; -static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen) +/* all nexthops within a route have the same size based on max + * number of labels and max via length for a hop + */ +static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels) { - u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN); + u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen); struct mpls_route *rt; + size_t size; - rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh), - VIA_ALEN_ALIGN) + - num_nh * max_alen_aligned, - GFP_KERNEL); - if (rt) { - rt->rt_nhn = num_nh; - rt->rt_nhn_alive = num_nh; - rt->rt_max_alen = max_alen_aligned; - } + size = sizeof(*rt) + num_nh * nh_size; + if (size > MAX_MPLS_ROUTE_MEM) + return ERR_PTR(-EINVAL); + + rt = kzalloc(size, GFP_KERNEL); + if (!rt) + return ERR_PTR(-ENOMEM); + + rt->rt_nhn = num_nh; + rt->rt_nhn_alive = num_nh; + rt->rt_nh_size = nh_size; + rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels); return rt; } @@ -648,9 +696,6 @@ static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg, return -ENOMEM; err = -EINVAL; - /* Ensure only a supported number of labels are present */ - if (cfg->rc_output_labels > MAX_NEW_LABELS) - goto errout; nh->nh_labels = cfg->rc_output_labels; for (i = 0; i < nh->nh_labels; i++) @@ -675,7 +720,7 @@ static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg, static int mpls_nh_build(struct net *net, struct mpls_route *rt, struct mpls_nh *nh, int oif, struct nlattr *via, - struct nlattr *newdst) + struct nlattr *newdst, u8 max_labels) { int err = -ENOMEM; @@ -683,7 +728,7 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt, goto errout; if (newdst) { - err = nla_get_labels(newdst, MAX_NEW_LABELS, + err = nla_get_labels(newdst, max_labels, &nh->nh_labels, nh->nh_label); if (err) goto errout; @@ -708,22 +753,20 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt, return err; } -static int mpls_count_nexthops(struct rtnexthop *rtnh, int len, - u8 cfg_via_alen, u8 *max_via_alen) +static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len, + u8 cfg_via_alen, u8 *max_via_alen, + u8 *max_labels) { - int nhs = 0; int remaining = len; - - if (!rtnh) { - *max_via_alen = cfg_via_alen; - return 1; - } + u8 nhs = 0; *max_via_alen = 0; + *max_labels = 0; while (rtnh_ok(rtnh, remaining)) { struct nlattr *nla, *attrs = 
rtnh_attrs(rtnh); int attrlen; + u8 n_labels = 0; attrlen = rtnh_attrlen(rtnh); nla = nla_find(attrs, attrlen, RTA_VIA); @@ -737,7 +780,20 @@ static int mpls_count_nexthops(struct rtnexthop *rtnh, int len, via_alen); } + nla = nla_find(attrs, attrlen, RTA_NEWDST); + if (nla && + nla_get_labels(nla, MAX_NEW_LABELS, &n_labels, NULL) != 0) + return 0; + + *max_labels = max_t(u8, *max_labels, n_labels); + + /* number of nexthops is tracked by a u8. + * Check for overflow. + */ + if (nhs == 255) + return 0; nhs++; + rtnh = rtnh_next(rtnh, &remaining); } @@ -746,13 +802,13 @@ static int mpls_count_nexthops(struct rtnexthop *rtnh, int len, } static int mpls_nh_build_multi(struct mpls_route_config *cfg, - struct mpls_route *rt) + struct mpls_route *rt, u8 max_labels) { struct rtnexthop *rtnh = cfg->rc_mp; struct nlattr *nla_via, *nla_newdst; int remaining = cfg->rc_mp_len; - int nhs = 0; int err = 0; + u8 nhs = 0; change_nexthops(rt) { int attrlen; @@ -779,7 +835,8 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, } err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, - rtnh->rtnh_ifindex, nla_via, nla_newdst); + rtnh->rtnh_ifindex, nla_via, nla_newdst, + max_labels); if (err) goto errout; @@ -806,7 +863,8 @@ static int mpls_route_add(struct mpls_route_config *cfg) int err = -EINVAL; u8 max_via_alen; unsigned index; - int nhs; + u8 max_labels; + u8 nhs; index = cfg->rc_label; @@ -844,21 +902,32 @@ static int mpls_route_add(struct mpls_route_config *cfg) goto errout; err = -EINVAL; - nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len, - cfg->rc_via_alen, &max_via_alen); + if (cfg->rc_mp) { + nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len, + cfg->rc_via_alen, &max_via_alen, + &max_labels); + } else { + max_via_alen = cfg->rc_via_alen; + max_labels = cfg->rc_output_labels; + nhs = 1; + } + if (nhs == 0) goto errout; err = -ENOMEM; - rt = mpls_rt_alloc(nhs, max_via_alen); - if (!rt) + rt = mpls_rt_alloc(nhs, max_via_alen, max_labels); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); goto errout; + } rt->rt_protocol = cfg->rc_protocol; rt->rt_payload_type = cfg->rc_payload_type; + rt->rt_ttl_propagate = cfg->rc_ttl_propagate; if (cfg->rc_mp) - err = mpls_nh_build_multi(cfg, rt); + err = mpls_nh_build_multi(cfg, rt, max_labels); else err = mpls_nh_build_from_cfg(cfg, rt); if (err) @@ -1011,8 +1080,8 @@ static int mpls_netconf_msgsize_devconf(int type) return size; } -static void mpls_netconf_notify_devconf(struct net *net, int type, - struct mpls_dev *mdev) +static void mpls_netconf_notify_devconf(struct net *net, int event, + int type, struct mpls_dev *mdev) { struct sk_buff *skb; int err = -ENOBUFS; @@ -1021,8 +1090,7 @@ static void mpls_netconf_notify_devconf(struct net *net, int type, if (!skb) goto errout; - err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF, - 0, type); + err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type); if (err < 0) { /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); @@ -1042,7 +1110,8 @@ static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = { }; static int mpls_netconf_get_devconf(struct sk_buff *in_skb, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX + 1]; @@ -1054,7 +1123,7 @@ static int mpls_netconf_get_devconf(struct sk_buff *in_skb, int err; err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, - devconf_mpls_policy); + devconf_mpls_policy, NULL); if (err < 0) goto 
errout; @@ -1155,9 +1224,8 @@ static int mpls_conf_proc(struct ctl_table *ctl, int write, if (i == offsetof(struct mpls_dev, input_enabled) && val != oval) { - mpls_netconf_notify_devconf(net, - NETCONFA_INPUT, - mdev); + mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, + NETCONFA_INPUT, mdev); } } @@ -1198,10 +1266,11 @@ static int mpls_dev_sysctl_register(struct net_device *dev, snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name); - mdev->sysctl = register_net_sysctl(dev_net(dev), path, table); + mdev->sysctl = register_net_sysctl(net, path, table); if (!mdev->sysctl) goto free; + mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev); return 0; free: @@ -1210,13 +1279,17 @@ static int mpls_dev_sysctl_register(struct net_device *dev, return -ENOBUFS; } -static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev) +static void mpls_dev_sysctl_unregister(struct net_device *dev, + struct mpls_dev *mdev) { + struct net *net = dev_net(dev); struct ctl_table *table; table = mdev->sysctl->ctl_table_arg; unregister_net_sysctl_table(mdev->sysctl); kfree(table); + + mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev); } static struct mpls_dev *mpls_add_dev(struct net_device *dev) @@ -1242,11 +1315,12 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) u64_stats_init(&mpls_stats->syncp); } + mdev->dev = dev; + err = mpls_dev_sysctl_register(dev, mdev); if (err) goto free; - mdev->dev = dev; rcu_assign_pointer(dev->mpls_ptr, mdev); return mdev; @@ -1269,8 +1343,7 @@ static void mpls_ifdown(struct net_device *dev, int event) { struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); - unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN; - unsigned int alive; + u8 alive, deleted; unsigned index; platform_label = rtnl_dereference(net->mpls.platform_label); @@ -1281,36 +1354,48 @@ static void mpls_ifdown(struct net_device *dev, int event) continue; alive = 0; + deleted = 0; change_nexthops(rt) { + unsigned int nh_flags = nh->nh_flags; + if (rtnl_dereference(nh->nh_dev) != dev) goto next; switch (event) { case NETDEV_DOWN: case NETDEV_UNREGISTER: - nh->nh_flags |= RTNH_F_DEAD; + nh_flags |= RTNH_F_DEAD; /* fall through */ case NETDEV_CHANGE: - nh->nh_flags |= RTNH_F_LINKDOWN; + nh_flags |= RTNH_F_LINKDOWN; break; } if (event == NETDEV_UNREGISTER) RCU_INIT_POINTER(nh->nh_dev, NULL); + + if (nh->nh_flags != nh_flags) + WRITE_ONCE(nh->nh_flags, nh_flags); next: - if (!(nh->nh_flags & nh_flags)) + if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))) alive++; + if (!rtnl_dereference(nh->nh_dev)) + deleted++; } endfor_nexthops(rt); WRITE_ONCE(rt->rt_nhn_alive, alive); + + /* if there are no more nexthops, delete the route */ + if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn) + mpls_route_update(net, index, NULL, NULL); } } -static void mpls_ifup(struct net_device *dev, unsigned int nh_flags) +static void mpls_ifup(struct net_device *dev, unsigned int flags) { struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); unsigned index; - int alive; + u8 alive; platform_label = rtnl_dereference(net->mpls.platform_label); for (index = 0; index < net->mpls.platform_labels; index++) { @@ -1321,20 +1406,22 @@ static void mpls_ifup(struct net_device *dev, unsigned int nh_flags) alive = 0; change_nexthops(rt) { + unsigned int nh_flags = nh->nh_flags; struct net_device *nh_dev = rtnl_dereference(nh->nh_dev); - if (!(nh->nh_flags & nh_flags)) { + if (!(nh_flags & flags)) { alive++; continue; } if (nh_dev != dev) continue; alive++; - 
nh->nh_flags &= ~nh_flags; + nh_flags &= ~flags; + WRITE_ONCE(nh->nh_flags, nh_flags); } endfor_nexthops(rt); - ACCESS_ONCE(rt->rt_nhn_alive) = alive; + WRITE_ONCE(rt->rt_nhn_alive, alive); } } @@ -1385,7 +1472,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, mpls_ifdown(dev, event); mdev = mpls_dev_get(dev); if (mdev) { - mpls_dev_sysctl_unregister(mdev); + mpls_dev_sysctl_unregister(dev, mdev); RCU_INIT_POINTER(dev->mpls_ptr, NULL); call_rcu(&mdev->rcu, mpls_dev_destroy_rcu); } @@ -1395,7 +1482,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, if (mdev) { int err; - mpls_dev_sysctl_unregister(mdev); + mpls_dev_sysctl_unregister(dev, mdev); err = mpls_dev_sysctl_register(dev, mdev); if (err) return notifier_from_errno(err); @@ -1455,16 +1542,18 @@ int nla_put_labels(struct sk_buff *skb, int attrtype, EXPORT_SYMBOL_GPL(nla_put_labels); int nla_get_labels(const struct nlattr *nla, - u32 max_labels, u8 *labels, u32 label[]) + u8 max_labels, u8 *labels, u32 label[]) { unsigned len = nla_len(nla); - unsigned nla_labels; struct mpls_shim_hdr *nla_label; + u8 nla_labels; bool bos; int i; - /* len needs to be an even multiple of 4 (the label size) */ - if (len & 3) + /* len needs to be an even multiple of 4 (the label size). Number + * of labels is a u8 so check for overflow. + */ + if (len & 3 || len / 4 > 255) return -EINVAL; /* Limit the number of new labels allowed */ @@ -1472,6 +1561,10 @@ int nla_get_labels(const struct nlattr *nla, if (nla_labels > max_labels) return -EINVAL; + /* when label == NULL, caller wants number of labels */ + if (!label) + goto out; + nla_label = nla_data(nla); bos = true; for (i = nla_labels - 1; i >= 0; i--, bos = false) { @@ -1495,6 +1588,7 @@ int nla_get_labels(const struct nlattr *nla, label[i] = dec.label; } +out: *labels = nla_labels; return 0; } @@ -1550,13 +1644,13 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, int index; int err; - err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy); + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy, + NULL); if (err < 0) goto errout; err = -EINVAL; rtm = nlmsg_data(nlh); - memset(cfg, 0, sizeof(*cfg)); if (rtm->rtm_family != AF_MPLS) goto errout; @@ -1584,6 +1678,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->rc_label = LABEL_NOT_SPECIFIED; cfg->rc_protocol = rtm->rtm_protocol; cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC; + cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT; cfg->rc_nlflags = nlh->nlmsg_flags; cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; cfg->rc_nlinfo.nlh = nlh; @@ -1630,6 +1725,17 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->rc_mp_len = nla_len(nla); break; } + case RTA_TTL_PROPAGATE: + { + u8 ttl_propagate = nla_get_u8(nla); + + if (ttl_propagate > 1) + goto errout; + cfg->rc_ttl_propagate = ttl_propagate ?
+ MPLS_TTL_PROP_ENABLED : + MPLS_TTL_PROP_DISABLED; + break; + } default: /* Unsupported attribute */ goto errout; @@ -1641,29 +1747,47 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } -static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { - struct mpls_route_config cfg; + struct mpls_route_config *cfg; int err; - err = rtm_to_route_config(skb, nlh, &cfg); + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + err = rtm_to_route_config(skb, nlh, cfg); if (err < 0) - return err; + goto out; + + err = mpls_route_del(cfg); +out: + kfree(cfg); - return mpls_route_del(&cfg); + return err; } -static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) +static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { - struct mpls_route_config cfg; + struct mpls_route_config *cfg; int err; - err = rtm_to_route_config(skb, nlh, &cfg); + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return -ENOMEM; + + err = rtm_to_route_config(skb, nlh, cfg); if (err < 0) - return err; + goto out; - return mpls_route_add(&cfg); + err = mpls_route_add(cfg); +out: + kfree(cfg); + + return err; } static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, @@ -1690,6 +1814,15 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, if (nla_put_labels(skb, RTA_DST, 1, &label)) goto nla_put_failure; + + if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) { + bool ttl_propagate = + rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED; + + if (nla_put_u8(skb, RTA_TTL_PROPAGATE, + ttl_propagate)) + goto nla_put_failure; + } if (rt->rt_nhn == 1) { const struct mpls_nh *nh = rt->rt_nh; @@ -1711,21 +1844,23 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, } else { struct rtnexthop *rtnh; struct nlattr *mp; - int dead = 0; - int linkdown = 0; + u8 linkdown = 0; + u8 dead = 0; mp = nla_nest_start(skb, RTA_MULTIPATH); if (!mp) goto nla_put_failure; for_nexthops(rt) { + dev = rtnl_dereference(nh->nh_dev); + if (!dev) + continue; + rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); if (!rtnh) goto nla_put_failure; - dev = rtnl_dereference(nh->nh_dev); - if (dev) - rtnh->rtnh_ifindex = dev->ifindex; + rtnh->rtnh_ifindex = dev->ifindex; if (nh->nh_flags & RTNH_F_LINKDOWN) { rtnh->rtnh_flags |= RTNH_F_LINKDOWN; linkdown++; @@ -1800,7 +1935,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) { size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) - + nla_total_size(4); /* RTA_DST */ + + nla_total_size(4) /* RTA_DST */ + + nla_total_size(1); /* RTA_TTL_PROPAGATE */ if (rt->rt_nhn == 1) { struct mpls_nh *nh = rt->rt_nh; @@ -1816,6 +1952,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) size_t nhsize = 0; for_nexthops(rt) { + if (!rtnl_dereference(nh->nh_dev)) + continue; nhsize += nla_total_size(sizeof(struct rtnexthop)); /* RTA_VIA */ if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) @@ -1878,12 +2016,13 @@ static int resize_platform_label_table(struct net *net, size_t limit) /* In case the predefined labels need to be populated */ if (limit > MPLS_LABEL_IPV4NULL) { struct net_device *lo = net->loopback_dev; - rt0 = mpls_rt_alloc(1, lo->addr_len); - if (!rt0) + rt0 = mpls_rt_alloc(1, lo->addr_len, 0); + if (IS_ERR(rt0)) goto nort0; RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo); rt0->rt_protocol = 
RTPROT_KERNEL; rt0->rt_payload_type = MPT_IPV4; + rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE; rt0->rt_nh->nh_via_alen = lo->addr_len; memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr, @@ -1891,12 +2030,13 @@ static int resize_platform_label_table(struct net *net, size_t limit) } if (limit > MPLS_LABEL_IPV6NULL) { struct net_device *lo = net->loopback_dev; - rt2 = mpls_rt_alloc(1, lo->addr_len); - if (!rt2) + rt2 = mpls_rt_alloc(1, lo->addr_len, 0); + if (IS_ERR(rt2)) goto nort2; RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo); rt2->rt_protocol = RTPROT_KERNEL; rt2->rt_payload_type = MPT_IPV6; + rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE; rt2->rt_nh->nh_via_alen = lo->addr_len; memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr, @@ -1978,6 +2118,9 @@ static int mpls_platform_labels(struct ctl_table *table, int write, return ret; } +#define MPLS_NS_SYSCTL_OFFSET(field) \ + (&((struct net *)0)->field) + static const struct ctl_table mpls_table[] = { { .procname = "platform_labels", @@ -1986,21 +2129,47 @@ static const struct ctl_table mpls_table[] = { .mode = 0644, .proc_handler = mpls_platform_labels, }, + { + .procname = "ip_ttl_propagate", + .data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate), + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { + .procname = "default_ttl", + .data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl), + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &ttl_max, + }, { } }; static int mpls_net_init(struct net *net) { struct ctl_table *table; + int i; net->mpls.platform_labels = 0; net->mpls.platform_label = NULL; + net->mpls.ip_ttl_propagate = 1; + net->mpls.default_ttl = 255; table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL); if (table == NULL) return -ENOMEM; - table[0].data = net; + /* Table data contains only offsets relative to the base of + * the mdev at this point, so make them absolute. 
+ */ + for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++) + table[i].data = (char *)net + (uintptr_t)table[i].data; + net->mpls.ctl = register_net_sysctl(net, "net/mpls", table); if (net->mpls.ctl == NULL) { kfree(table); diff --git a/net/mpls/internal.h b/net/mpls/internal.h index 76360d8b95798e148c5d6a48ce3375eeadcad5a5..4db6a59713220dcefac8be970ab06b1ae25bcd5c 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -2,6 +2,11 @@ #define MPLS_INTERNAL_H #include +/* put a reasonable limit on the number of labels + * we will accept from userspace + */ +#define MAX_NEW_LABELS 30 + struct mpls_entry_decoded { u32 label; u8 ttl; @@ -64,7 +69,6 @@ struct mpls_dev { struct sk_buff; #define LABEL_NOT_SPECIFIED (1 << 20) -#define MAX_NEW_LABELS 2 /* This maximum ha length copied from the definition of struct neighbour */ #define VIA_ALEN_ALIGN sizeof(unsigned long) @@ -83,11 +87,35 @@ enum mpls_payload_type { struct mpls_nh { /* next hop label forwarding entry */ struct net_device __rcu *nh_dev; + + /* nh_flags is accessed under RCU in the packet path; it is + * modified handling netdev events with rtnl lock held + */ unsigned int nh_flags; - u32 nh_label[MAX_NEW_LABELS]; u8 nh_labels; u8 nh_via_alen; u8 nh_via_table; + u8 nh_reserved1; + + u32 nh_label[0]; +}; + +/* offset of via from beginning of mpls_nh */ +#define MPLS_NH_VIA_OFF(num_labels) \ + ALIGN(sizeof(struct mpls_nh) + (num_labels) * sizeof(u32), \ + VIA_ALEN_ALIGN) + +/* all nexthops within a route have the same size based on the + * max number of labels and max via length across all nexthops + */ +#define MPLS_NH_SIZE(num_labels, max_via_alen) \ + (MPLS_NH_VIA_OFF((num_labels)) + \ + ALIGN((max_via_alen), VIA_ALEN_ALIGN)) + +enum mpls_ttl_propagation { + MPLS_TTL_PROP_DEFAULT, + MPLS_TTL_PROP_ENABLED, + MPLS_TTL_PROP_DISABLED, }; /* The route, nexthops and vias are stored together in the same memory @@ -98,16 +126,16 @@ struct mpls_nh { /* next hop label forwarding entry */ * +----------------------+ * | mpls_nh 0 | * +----------------------+ - * | ... | - * +----------------------+ - * | mpls_nh n-1 | - * +----------------------+ - * | alignment padding | + * | alignment padding | 4 bytes for odd number of labels * +----------------------+ * | via[rt_max_alen] 0 | * +----------------------+ + * | alignment padding | via's aligned on sizeof(unsigned long) + * +----------------------+ * | ... 
| * +----------------------+ + * | mpls_nh n-1 | + * +----------------------+ * | via[rt_max_alen] n-1 | * +----------------------+ */ @@ -116,22 +144,30 @@ struct mpls_route { /* next hop label forwarding entry */ u8 rt_protocol; u8 rt_payload_type; u8 rt_max_alen; - unsigned int rt_nhn; - unsigned int rt_nhn_alive; + u8 rt_ttl_propagate; + u8 rt_nhn; + /* rt_nhn_alive is accessed under RCU in the packet path; it + * is modified handling netdev events with rtnl lock held + */ + u8 rt_nhn_alive; + u8 rt_nh_size; + u8 rt_via_offset; + u8 rt_reserved1; struct mpls_nh rt_nh[0]; }; #define for_nexthops(rt) { \ - int nhsel; struct mpls_nh *nh; \ - for (nhsel = 0, nh = (rt)->rt_nh; \ + int nhsel; struct mpls_nh *nh; u8 *__nh; \ + for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh); \ nhsel < (rt)->rt_nhn; \ - nh++, nhsel++) + __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++) #define change_nexthops(rt) { \ - int nhsel; struct mpls_nh *nh; \ - for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh); \ + int nhsel; struct mpls_nh *nh; u8 *__nh; \ + for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh), \ + __nh = (u8 *)((rt)->rt_nh); \ nhsel < (rt)->rt_nhn; \ - nh++, nhsel++) + __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++) #define endfor_nexthops(rt) } @@ -166,7 +202,7 @@ static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev) int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]); -int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels, +int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels, u32 label[]); int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table, u8 via[]); diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index e4e4424f9eb1f5531d22463687d74c2e2ca971a6..369c7a23c86c1407702917488c90e0f3e7f8ee1a 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -29,6 +29,7 @@ static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = { [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 }, + [MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 }, }; static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en) @@ -49,6 +50,7 @@ static int mpls_xmit(struct sk_buff *skb) struct rtable *rt = NULL; struct rt6_info *rt6 = NULL; struct mpls_dev *out_mdev; + struct net *net; int err = 0; bool bos; int i; @@ -56,17 +58,7 @@ static int mpls_xmit(struct sk_buff *skb) /* Find the output device */ out_dev = dst->dev; - - /* Obtain the ttl */ - if (dst->ops->family == AF_INET) { - ttl = ip_hdr(skb)->ttl; - rt = (struct rtable *)dst; - } else if (dst->ops->family == AF_INET6) { - ttl = ipv6_hdr(skb)->hop_limit; - rt6 = (struct rt6_info *)dst; - } else { - goto drop; - } + net = dev_net(out_dev); skb_orphan(skb); @@ -78,6 +70,38 @@ static int mpls_xmit(struct sk_buff *skb) tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate); + /* Obtain the ttl using the following set of rules. 
+ * + * LWT ttl propagation setting: + * - disabled => use default TTL value from LWT + * - enabled => use TTL value from IPv4/IPv6 header + * - default => + * Global ttl propagation setting: + * - disabled => use default TTL value from global setting + * - enabled => use TTL value from IPv4/IPv6 header + */ + if (dst->ops->family == AF_INET) { + if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED) + ttl = tun_encap_info->default_ttl; + else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT && + !net->mpls.ip_ttl_propagate) + ttl = net->mpls.default_ttl; + else + ttl = ip_hdr(skb)->ttl; + rt = (struct rtable *)dst; + } else if (dst->ops->family == AF_INET6) { + if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED) + ttl = tun_encap_info->default_ttl; + else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT && + !net->mpls.ip_ttl_propagate) + ttl = net->mpls.default_ttl; + else + ttl = ipv6_hdr(skb)->hop_limit; + rt6 = (struct rt6_info *)dst; + } else { + goto drop; + } + /* Verify the destination can hold the packet */ new_header_size = mpls_encap_size(tun_encap_info); mtu = mpls_dev_mtu(out_dev); @@ -140,10 +164,11 @@ static int mpls_build_state(struct nlattr *nla, struct mpls_iptunnel_encap *tun_encap_info; struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1]; struct lwtunnel_state *newts; + u8 n_labels; int ret; ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla, - mpls_iptunnel_policy); + mpls_iptunnel_policy, NULL); if (ret < 0) return ret; @@ -151,15 +176,32 @@ static int mpls_build_state(struct nlattr *nla, return -EINVAL; - newts = lwtunnel_state_alloc(sizeof(*tun_encap_info)); + /* determine number of labels */ + if (nla_get_labels(tb[MPLS_IPTUNNEL_DST], + MAX_NEW_LABELS, &n_labels, NULL)) + return -EINVAL; + + newts = lwtunnel_state_alloc(sizeof(*tun_encap_info) + + n_labels * sizeof(u32)); if (!newts) return -ENOMEM; tun_encap_info = mpls_lwtunnel_encap(newts); - ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS, + ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels, &tun_encap_info->labels, tun_encap_info->label); if (ret) goto errout; + + tun_encap_info->ttl_propagate = MPLS_TTL_PROP_DEFAULT; + + if (tb[MPLS_IPTUNNEL_TTL]) { + tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]); + /* TTL 0 implies propagate from IP header */ + tun_encap_info->ttl_propagate = tun_encap_info->default_ttl ? 
+ MPLS_TTL_PROP_DISABLED : + MPLS_TTL_PROP_ENABLED; + } + newts->type = LWTUNNEL_ENCAP_MPLS; newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT; newts->headroom = mpls_encap_size(tun_encap_info); @@ -186,6 +228,10 @@ static int mpls_fill_encap_info(struct sk_buff *skb, tun_encap_info->label)) goto nla_put_failure; + if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT && + nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -195,10 +241,16 @@ static int mpls_fill_encap_info(struct sk_buff *skb, static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate) { struct mpls_iptunnel_encap *tun_encap_info; + int nlsize; tun_encap_info = mpls_lwtunnel_encap(lwtstate); - return nla_total_size(tun_encap_info->labels * 4); + nlsize = nla_total_size(tun_encap_info->labels * 4); + + if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT) + nlsize += nla_total_size(1); + + return nlsize; } static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) @@ -207,10 +259,12 @@ static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b); int l; - if (a_hdr->labels != b_hdr->labels) + if (a_hdr->labels != b_hdr->labels || + a_hdr->ttl_propagate != b_hdr->ttl_propagate || + a_hdr->default_ttl != b_hdr->default_ttl) return 1; - for (l = 0; l < MAX_NEW_LABELS; l++) + for (l = 0; l < a_hdr->labels; l++) if (a_hdr->label[l] != b_hdr->label[l]) return 1; return 0; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index a87a6f8a74d8c90b2e6801a9d391ced7084dd15f..552d606e57ca4aac6bfe5ad26c42c385a431ab68 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -126,14 +126,15 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) } EXPORT_SYMBOL(nf_register_net_hook); -void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) +static struct nf_hook_entry * +__nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) { struct nf_hook_entry __rcu **pp; struct nf_hook_entry *p; pp = nf_hook_entry_head(net, reg); if (WARN_ON_ONCE(!pp)) - return; + return NULL; mutex_lock(&nf_hook_mutex); for (; (p = nf_entry_dereference(*pp)) != NULL; pp = &p->next) { @@ -145,7 +146,7 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) mutex_unlock(&nf_hook_mutex); if (!p) { WARN(1, "nf_unregister_net_hook: hook not found!\n"); - return; + return NULL; } #ifdef CONFIG_NETFILTER_INGRESS if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) @@ -154,10 +155,24 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) #ifdef HAVE_JUMP_LABEL static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); #endif + + return p; +} + +void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) +{ + struct nf_hook_entry *p = __nf_unregister_net_hook(net, reg); + unsigned int nfq; + + if (!p) + return; + synchronize_net(); - nf_queue_nf_hook_drop(net, p); + /* other cpu might still process nfqueue verdict that used reg */ - synchronize_net(); + nfq = nf_queue_nf_hook_drop(net); + if (nfq) + synchronize_net(); kfree(p); } EXPORT_SYMBOL(nf_unregister_net_hook); @@ -183,10 +198,32 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, EXPORT_SYMBOL(nf_register_net_hooks); void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, - unsigned int n) + unsigned int hookcount) { - while (n-- > 0) - 
nf_unregister_net_hook(net, &reg[n]); + struct nf_hook_entry *to_free[16]; + unsigned int i, n, nfq; + + do { + n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free)); + + for (i = 0; i < n; i++) + to_free[i] = __nf_unregister_net_hook(net, &reg[i]); + + synchronize_net(); + + /* need 2nd synchronize_net() if nfqueue is used, skb + * can get reinjected right before nf_queue_hook_drop() + */ + nfq = nf_queue_nf_hook_drop(net); + if (nfq) + synchronize_net(); + + for (i = 0; i < n; i++) + kfree(to_free[i]); + + reg += n; + hookcount -= n; + } while (hookcount > 0); } EXPORT_SYMBOL(nf_unregister_net_hooks); diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index 6f09a99298cdf6aaf18c1ff947c76db2d4985865..8ad2b52a0b328249688656c1fa80a7135b932c02 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -232,7 +232,7 @@ mtype_list(const struct ip_set *set, if (!test_bit(id, map->members) || (SET_WITH_TIMEOUT(set) && #ifdef IP_SET_BITMAP_STORED_TIMEOUT - mtype_is_filled((const struct mtype_elem *)x) && + mtype_is_filled(x) && #endif ip_set_timeout_expired(ext_timeout(x, set)))) continue; @@ -248,8 +248,7 @@ mtype_list(const struct ip_set *set, } if (mtype_do_list(skb, map, id, set->dsize)) goto nla_put_failure; - if (ip_set_put_extensions(skb, set, x, - mtype_is_filled((const struct mtype_elem *)x))) + if (ip_set_put_extensions(skb, set, x, mtype_is_filled(x))) goto nla_put_failure; ipset_nest_end(skb, nested); } diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index c296f9b606d495d59c50ee46a04e7e17b21c1530..ba6a5516dc7c724b172ee13c0456b461c47f7281 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -295,7 +295,8 @@ ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr) if (unlikely(!flag_nested(nla))) return -IPSET_ERR_PROTOCOL; - if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy)) + if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, + ipaddr_policy, NULL)) return -IPSET_ERR_PROTOCOL; if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4))) return -IPSET_ERR_PROTOCOL; @@ -313,7 +314,8 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr) if (unlikely(!flag_nested(nla))) return -IPSET_ERR_PROTOCOL; - if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy)) + if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, + ipaddr_policy, NULL)) return -IPSET_ERR_PROTOCOL; if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6))) return -IPSET_ERR_PROTOCOL; @@ -500,14 +502,6 @@ __ip_set_put(struct ip_set *set) /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need * a separate reference counter */ -static inline void -__ip_set_get_netlink(struct ip_set *set) -{ - write_lock_bh(&ip_set_ref_lock); - set->ref_netlink++; - write_unlock_bh(&ip_set_ref_lock); -} - static inline void __ip_set_put_netlink(struct ip_set *set) { @@ -769,7 +763,7 @@ start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags, struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - nlh = nlmsg_put(skb, portid, seq, cmd | (NFNL_SUBSYS_IPSET << 8), + nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), sizeof(*nfmsg), flags); if (!nlh) return NULL; @@ -906,7 +900,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl, /* Without holding any locks, create private part. 
*/ if (attr[IPSET_ATTR_DATA] && nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], - set->type->create_policy)) { + set->type->create_policy, NULL)) { ret = -IPSET_ERR_PROTOCOL; goto put_out; } @@ -1257,8 +1251,8 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst) ip_set_id_t index; /* Second pass, so parser can't fail */ - nla_parse(cda, IPSET_ATTR_CMD_MAX, - attr, nlh->nlmsg_len - min_len, ip_set_setname_policy); + nla_parse(cda, IPSET_ATTR_CMD_MAX, attr, nlh->nlmsg_len - min_len, + ip_set_setname_policy, NULL); if (cda[IPSET_ATTR_SETNAME]) { struct ip_set *set; @@ -1305,7 +1299,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb) * manually :-( */ if (nlh->nlmsg_flags & NLM_F_ACK) - netlink_ack(cb->skb, nlh, ret); + netlink_ack(cb->skb, nlh, ret, NULL); return ret; } } @@ -1501,9 +1495,8 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, memcpy(&errmsg->msg, nlh, nlh->nlmsg_len); cmdattr = (void *)&errmsg->msg + min_len; - nla_parse(cda, IPSET_ATTR_CMD_MAX, - cmdattr, nlh->nlmsg_len - min_len, - ip_set_adt_policy); + nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr, + nlh->nlmsg_len - min_len, ip_set_adt_policy, NULL); errline = nla_data(cda[IPSET_ATTR_LINENO]); @@ -1549,7 +1542,7 @@ static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb, if (attr[IPSET_ATTR_DATA]) { if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], - set->type->adt_policy)) + set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags, use_lineno); @@ -1561,7 +1554,7 @@ static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb, if (nla_type(nla) != IPSET_ATTR_DATA || !flag_nested(nla) || nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, - set->type->adt_policy)) + set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags, use_lineno); @@ -1603,7 +1596,7 @@ static int ip_set_udel(struct net *net, struct sock *ctnl, struct sk_buff *skb, if (attr[IPSET_ATTR_DATA]) { if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], - set->type->adt_policy)) + set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags, use_lineno); @@ -1615,7 +1608,7 @@ static int ip_set_udel(struct net *net, struct sock *ctnl, struct sk_buff *skb, if (nla_type(nla) != IPSET_ATTR_DATA || !flag_nested(nla) || nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, - set->type->adt_policy)) + set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags, use_lineno); @@ -1646,7 +1639,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb, return -ENOENT; if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], - set->type->adt_policy)) + set->type->adt_policy, NULL)) return -IPSET_ERR_PROTOCOL; rcu_read_lock_bh(); @@ -1915,7 +1908,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) ret = -EFAULT; goto done; } - op = (unsigned int *)data; + op = data; if (*op < IP_SET_OP_VERSION) { /* Check the version at the beginning of operations */ @@ -2013,7 +2006,7 @@ static struct nf_sockopt_ops so_set __read_mostly = { .pf = PF_INET, .get_optmin = SO_IP_SET, .get_optmax = SO_IP_SET + 1, - .get = &ip_set_sockfn_get, + .get = ip_set_sockfn_get, .owner = THIS_MODULE, }; diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 
e6a2753dff9e91dac406e657ad0d49875f052503..3d2ac71a83ec411294361037e7b1d77b6d4bb7e2 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -181,7 +181,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) if (!(cp->flags & IP_VS_CONN_F_HASHED)) { cp->flags |= IP_VS_CONN_F_HASHED; - atomic_inc(&cp->refcnt); + refcount_inc(&cp->refcnt); hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]); ret = 1; } else { @@ -215,7 +215,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) if (cp->flags & IP_VS_CONN_F_HASHED) { hlist_del_rcu(&cp->c_list); cp->flags &= ~IP_VS_CONN_F_HASHED; - atomic_dec(&cp->refcnt); + refcount_dec(&cp->refcnt); ret = 1; } else ret = 0; @@ -242,13 +242,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp) if (cp->flags & IP_VS_CONN_F_HASHED) { ret = false; /* Decrease refcnt and unlink conn only if we are last user */ - if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) { + if (refcount_dec_if_one(&cp->refcnt)) { hlist_del_rcu(&cp->c_list); cp->flags &= ~IP_VS_CONN_F_HASHED; ret = true; } } else - ret = atomic_read(&cp->refcnt) ? false : true; + ret = refcount_read(&cp->refcnt) ? false : true; spin_unlock(&cp->lock); ct_write_unlock_bh(hash); @@ -475,7 +475,7 @@ static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp) void ip_vs_conn_put(struct ip_vs_conn *cp) { if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && - (atomic_read(&cp->refcnt) == 1) && + (refcount_read(&cp->refcnt) == 1) && !timer_pending(&cp->timer)) /* expire connection immediately */ __ip_vs_conn_put_notimer(cp); @@ -617,8 +617,8 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_fwd_tag(cp), cp->state, - cp->flags, atomic_read(&cp->refcnt), - atomic_read(&dest->refcnt)); + cp->flags, refcount_read(&cp->refcnt), + refcount_read(&dest->refcnt)); /* Update the connection counters */ if (!(flags & IP_VS_CONN_F_TEMPLATE)) { @@ -714,8 +714,8 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_fwd_tag(cp), cp->state, - cp->flags, atomic_read(&cp->refcnt), - atomic_read(&dest->refcnt)); + cp->flags, refcount_read(&cp->refcnt), + refcount_read(&dest->refcnt)); /* Update the connection counters */ if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { @@ -863,10 +863,10 @@ static void ip_vs_conn_expire(unsigned long data) expire_later: IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n", - atomic_read(&cp->refcnt), + refcount_read(&cp->refcnt), atomic_read(&cp->n_control)); - atomic_inc(&cp->refcnt); + refcount_inc(&cp->refcnt); cp->timeout = 60*HZ; if (ipvs->sync_state & IP_VS_STATE_MASTER) @@ -941,7 +941,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, * it in the table, so that other thread run ip_vs_random_dropentry * but cannot drop this entry. 
*/ - atomic_set(&cp->refcnt, 1); + refcount_set(&cp->refcnt, 1); cp->control = NULL; atomic_set(&cp->n_control, 0); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index db40050f8785eb9205a7bf493a71c9b956b93ab8..d2d7bdf1d5104b6e68284bbbb533c30f159844e2 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -542,7 +542,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), - cp->flags, atomic_read(&cp->refcnt)); + cp->flags, refcount_read(&cp->refcnt)); ip_vs_conn_stats(cp, svc); return cp; @@ -1193,7 +1193,7 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), - cp->flags, atomic_read(&cp->refcnt)); + cp->flags, refcount_read(&cp->refcnt)); LeaveFunction(12); return cp; } @@ -2200,6 +2200,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { static int __net_init __ip_vs_init(struct net *net) { struct netns_ipvs *ipvs; + int ret; ipvs = net_generic(net, ip_vs_net_id); if (ipvs == NULL) @@ -2231,13 +2232,17 @@ static int __net_init __ip_vs_init(struct net *net) if (ip_vs_sync_net_init(ipvs) < 0) goto sync_fail; - printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", - sizeof(struct netns_ipvs), ipvs->gen); + ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); + if (ret < 0) + goto hook_fail; + return 0; /* * Error handling */ +hook_fail: + ip_vs_sync_net_cleanup(ipvs); sync_fail: ip_vs_conn_net_cleanup(ipvs); conn_fail: @@ -2257,6 +2262,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */ ip_vs_conn_net_cleanup(ipvs); ip_vs_app_net_cleanup(ipvs); @@ -2317,24 +2323,16 @@ static int __init ip_vs_init(void) if (ret < 0) goto cleanup_sub; - ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); - if (ret < 0) { - pr_err("can't register hooks.\n"); - goto cleanup_dev; - } - ret = ip_vs_register_nl_ioctl(); if (ret < 0) { pr_err("can't register netlink/ioctl.\n"); - goto cleanup_hooks; + goto cleanup_dev; } pr_info("ipvs loaded.\n"); return ret; -cleanup_hooks: - nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); cleanup_dev: unregister_pernet_device(&ipvs_core_dev_ops); cleanup_sub: @@ -2351,7 +2349,6 @@ static int __init ip_vs_init(void) static void __exit ip_vs_cleanup(void) { ip_vs_unregister_nl_ioctl(); - nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); unregister_pernet_device(&ipvs_core_dev_ops); unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ ip_vs_conn_cleanup(); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 5aeb0dde6ccc5e525e740ca5fde3ba2c58c50070..668d9643f0cc7a9e410a73f961ec1730fc57033d 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -699,7 +699,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af, dest->vfwmark, IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), - atomic_read(&dest->refcnt)); + refcount_read(&dest->refcnt)); if (dest->af == dest_af && ip_vs_addr_equal(dest_af, &dest->addr, daddr) && dest->port == dport && @@ -934,7 +934,7 @@ 
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, atomic_set(&dest->activeconns, 0); atomic_set(&dest->inactconns, 0); atomic_set(&dest->persistconns, 0); - atomic_set(&dest->refcnt, 1); + refcount_set(&dest->refcnt, 1); INIT_HLIST_NODE(&dest->d_list); spin_lock_init(&dest->dst_lock); @@ -998,7 +998,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " "dest->refcnt=%d, service %u/%s:%u\n", IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport), - atomic_read(&dest->refcnt), + refcount_read(&dest->refcnt), dest->vfwmark, IP_VS_DBG_ADDR(svc->af, &dest->vaddr), ntohs(dest->vport)); @@ -1074,7 +1074,7 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest, spin_lock_bh(&ipvs->dest_trash_lock); IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), - atomic_read(&dest->refcnt)); + refcount_read(&dest->refcnt)); if (list_empty(&ipvs->dest_trash) && !cleanup) mod_timer(&ipvs->dest_trash_timer, jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); @@ -1157,7 +1157,7 @@ static void ip_vs_dest_trash_expire(unsigned long data) spin_lock(&ipvs->dest_trash_lock); list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { - if (atomic_read(&dest->refcnt) > 1) + if (refcount_read(&dest->refcnt) > 1) continue; if (dest->idle_start) { if (time_before(now, dest->idle_start + @@ -1545,7 +1545,7 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev) dev->name, IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), - atomic_read(&dest->refcnt)); + refcount_read(&dest->refcnt)); __ip_vs_dst_cache_reset(dest); } spin_unlock_bh(&dest->dst_lock); @@ -1774,13 +1774,13 @@ static struct ctl_table vs_vars[] = { .procname = "sync_version", .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_do_sync_mode, + .proc_handler = proc_do_sync_mode, }, { .procname = "sync_ports", .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_do_sync_ports, + .proc_handler = proc_do_sync_ports, }, { .procname = "sync_persist_mode", @@ -2130,8 +2130,8 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v) /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Total Incoming Outgoing Incoming Outgoing\n"); - seq_printf(seq, - " Conns Packets Packets Bytes Bytes\n"); + seq_puts(seq, + " Conns Packets Packets Bytes Bytes\n"); ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats); seq_printf(seq, "%8LX %8LX %8LX %16LX %16LX\n\n", @@ -2178,8 +2178,8 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Total Incoming Outgoing Incoming Outgoing\n"); - seq_printf(seq, - "CPU Conns Packets Packets Bytes Bytes\n"); + seq_puts(seq, + "CPU Conns Packets Packets Bytes Bytes\n"); for_each_possible_cpu(i) { struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i); @@ -3089,7 +3089,8 @@ static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs, /* Parse mandatory identifying service fields first */ if (nla == NULL || - nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy)) + nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, + ip_vs_svc_policy, NULL)) return -EINVAL; nla_af = attrs[IPVS_SVC_ATTR_AF]; @@ -3251,8 +3252,8 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb, mutex_lock(&__ip_vs_mutex); /* Try to find the service for which to dump destinations */ - if (nlmsg_parse(cb->nlh, 
GENL_HDRLEN, attrs, - IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy)) + if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX, + ip_vs_cmd_policy, NULL)) goto out_err; @@ -3288,7 +3289,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, /* Parse mandatory identifying destination fields first */ if (nla == NULL || - nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy)) + nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, + ip_vs_dest_policy, NULL)) return -EINVAL; nla_addr = attrs[IPVS_DEST_ATTR_ADDR]; @@ -3530,7 +3532,7 @@ static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, info->attrs[IPVS_CMD_ATTR_DAEMON], - ip_vs_daemon_policy)) + ip_vs_daemon_policy, info->extack)) goto out; if (cmd == IPVS_CMD_NEW_DAEMON) diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index d30c327bb578981c0868086cfddb6c304419b2e8..fb780be76d15a05c0d66e8a7f5ea58ee32dacdb9 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -260,7 +260,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, buf_len = strlen(buf); ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) { + if (ct && (ct->status & IPS_NAT_MASK)) { + bool mangled; + /* If mangling fails this function will return 0 * which will cause the packet to be dropped. * Mangling can only fail under memory pressure, @@ -268,12 +270,13 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, * packet. */ rcu_read_lock(); - ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, - iph->ihl * 4, - start-data, end-start, - buf, buf_len); + mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, + iph->ihl * 4, + start - data, + end - start, + buf, buf_len); rcu_read_unlock(); - if (ret) { + if (mangled) { ip_vs_nfct_expect_related(skb, ct, n_cp, IPPROTO_TCP, 0, 0); if (skb->ip_summed == CHECKSUM_COMPLETE) @@ -482,11 +485,8 @@ static struct pernet_operations ip_vs_ftp_ops = { static int __init ip_vs_ftp_init(void) { - int rv; - - rv = register_pernet_subsys(&ip_vs_ftp_ops); /* rcu_barrier() is called by netns on error */ - return rv; + return register_pernet_subsys(&ip_vs_ftp_ops); } /* diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 5824927cf8e02b7fa02f319177d96219c9427033..b6aa4a970c6e97678e8c88e8fe0e0e0b4e4d476d 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -448,7 +448,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc) IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), - atomic_read(&least->refcnt), + refcount_read(&least->refcnt), atomic_read(&least->weight), loh); return least; diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 703f11877beece84cb56ec62d4bd13e87c0d67c3..c13ff575f9f73ab9fb53837ff1b01cd279156c9f 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -204,7 +204,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), - atomic_read(&least->refcnt), + refcount_read(&least->refcnt), atomic_read(&least->weight), loh); return least; } @@ -249,7 +249,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) __func__, IP_VS_DBG_ADDR(most->af, &most->addr), 
ntohs(most->port), atomic_read(&most->activeconns), - atomic_read(&most->refcnt), + refcount_read(&most->refcnt), atomic_read(&most->weight), moh); return most; } @@ -612,7 +612,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc) IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), - atomic_read(&least->refcnt), + refcount_read(&least->refcnt), atomic_read(&least->weight), loh); return least; diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index fc230d99aa3b0143f198d7a9ec798344886058ad..6cf3fd81a5eca887ccefe628c2b9d627b40cf055 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c @@ -85,7 +85,7 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conntrack_tuple new_tuple; - if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) || + if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct)) return; @@ -232,7 +232,7 @@ void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, { struct nf_conntrack_expect *exp; - if (ct == NULL || nf_ct_is_untracked(ct)) + if (ct == NULL) return; exp = nf_ct_expect_alloc(ct); diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c index a8b63401e7731e6c8fef37b62c43425e2f96b43c..7d9d4ac596ca5809a9322aa3c1276f6dc08f146c 100644 --- a/net/netfilter/ipvs/ip_vs_nq.c +++ b/net/netfilter/ipvs/ip_vs_nq.c @@ -110,7 +110,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), - atomic_read(&least->refcnt), + refcount_read(&least->refcnt), atomic_read(&least->weight), loh); return least; diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 8ae480715ceae79ae07fd4dbfc9bacf424b429b3..ca880a3ad033aec3f55641c3cd485098983a8722 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -193,28 +193,6 @@ ip_vs_create_timeout_table(int *table, int size) } -/* - * Set timeout value for state specified by name - */ -int -ip_vs_set_state_timeout(int *table, int num, const char *const *names, - const char *name, int to) -{ - int i; - - if (!table || !name || !to) - return -EINVAL; - - for (i = 0; i < num; i++) { - if (strcmp(names[i], name)) - continue; - table[i] = to * HZ; - return 0; - } - return -ENOENT; -} - - const char * ip_vs_state_name(__u16 proto, int state) { struct ip_vs_protocol *pp = ip_vs_proto_get(proto); diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index d952d67f904d1124ed0c5adfa20a51f82207181c..56f8e4b204ffcc4840a1042097a0f7d0f004df18 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -447,7 +447,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, ntohs(cp->cport), sctp_state_name(cp->state), sctp_state_name(next_state), - atomic_read(&cp->refcnt)); + refcount_read(&cp->refcnt)); if (dest) { if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && (next_state != IP_VS_SCTP_S_ESTABLISHED)) { diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 5117bcb7d2f00604d5ab73f0610246a7d4fef755..12dc8d5bc37d7ea03ba8448514a6d3caf03a62b0 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -557,7 +557,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, 
ntohs(cp->cport), tcp_state_name(cp->state), tcp_state_name(new_state), - atomic_read(&cp->refcnt)); + refcount_read(&cp->refcnt)); if (dest) { if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c index 58bacfc461ee6a1d6df4e6e024032fb52a044e35..ee0530d14c5f9d468e199bcb7cd53aad022b748b 100644 --- a/net/netfilter/ipvs/ip_vs_rr.c +++ b/net/netfilter/ipvs/ip_vs_rr.c @@ -97,7 +97,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, "activeconns %d refcnt %d weight %d\n", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->activeconns), - atomic_read(&dest->refcnt), atomic_read(&dest->weight)); + refcount_read(&dest->refcnt), atomic_read(&dest->weight)); return dest; } diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c index f8e2d00f528b945e774564854fc66f53dbc61970..ab23cf203437772407f800861ea68687d78f8ff6 100644 --- a/net/netfilter/ipvs/ip_vs_sed.c +++ b/net/netfilter/ipvs/ip_vs_sed.c @@ -111,7 +111,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), - atomic_read(&least->refcnt), + refcount_read(&least->refcnt), atomic_read(&least->weight), loh); return least; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index b03c28084f814a9f0b805357e5164ef7b00b0985..0e5b64a75da0521be40831f791eb8f3d7303f70e 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -520,7 +520,7 @@ static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && pkts % sync_period != sysctl_sync_threshold(ipvs)) return 0; - } else if (sync_refresh_period <= 0 && + } else if (!sync_refresh_period && pkts != sysctl_sync_threshold(ipvs)) return 0; @@ -1849,7 +1849,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, if (state == IP_VS_STATE_MASTER) { struct ipvs_master_sync_state *ms; - ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); + ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL); if (!ipvs->ms) goto out; ms = ipvs->ms; @@ -1862,7 +1862,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ms->ipvs = ipvs; } } else { - array = kzalloc(count * sizeof(struct task_struct *), + array = kcalloc(count, sizeof(struct task_struct *), GFP_KERNEL); if (!array) goto out; diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c index 6b366fd905542ff086a36da4111bd11646d274d8..6add39e0ec20d61d21883082e5079d16cab6942c 100644 --- a/net/netfilter/ipvs/ip_vs_wlc.c +++ b/net/netfilter/ipvs/ip_vs_wlc.c @@ -83,7 +83,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), - atomic_read(&least->refcnt), + refcount_read(&least->refcnt), atomic_read(&least->weight), loh); return least; diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index 17e6d4406ca7c32657eff5e103d0aa1b9317e813..62258dd457ac9825aa14f26bfebb08bb2f2f1755 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c @@ -218,7 +218,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, "activeconns %d refcnt %d weight %d\n", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->activeconns), - atomic_read(&dest->refcnt), + 
refcount_read(&dest->refcnt), atomic_read(&dest->weight)); mark->cl = dest; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 4e1a98fcc8c3faa3fd037cc0516fd8a3108ab2a9..2eab1e0400f48a5816239cbb95a6b192169137bf 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -775,7 +775,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct)) { + if (ct) { IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off, "ip_vs_nat_xmit(): " "stopping DNAT to local address"); @@ -866,7 +866,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct)) { + if (ct) { IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off, "ip_vs_nat_xmit_v6(): " "stopping DNAT to local address"); @@ -1338,7 +1338,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct)) { + if (ct) { IP_VS_DBG(10, "%s(): " "stopping DNAT to local address %pI4\n", __func__, &cp->daddr.ip); @@ -1429,7 +1429,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (ct && !nf_ct_is_untracked(ct)) { + if (ct) { IP_VS_DBG(10, "%s(): " "stopping DNAT to local address %pI6\n", __func__, &cp->daddr.in6); diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index 45da11afa785b2779857b3786881158c821263b3..86691671290519809bf8b39b150c7b040a3c0fe1 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c @@ -55,7 +55,7 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) }; EXPORT_SYMBOL_GPL(seq_print_acct); -static struct nf_ct_ext_type acct_extend __read_mostly = { +static const struct nf_ct_ext_type acct_extend = { .len = sizeof(struct nf_conn_acct), .align = __alignof__(struct nf_conn_acct), .id = NF_CT_EXT_ACCT, diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 57a26cc90c9fada2be251d6718f8b814e059beea..03d2ccffa9fa3c1eecfbc07dcadc420a0ab5e315 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c @@ -207,6 +207,8 @@ static int __init nf_conntrack_amanda_init(void) { int ret, i; + NF_CT_HELPER_BUILD_BUG_ON(0); + for (i = 0; i < ARRAY_SIZE(search); i++) { search[i].ts = textsearch_prepare(ts_algo, search[i].string, search[i].len, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index ffb78e5f7b70912a2bba608a7f32f0a2bc486adc..f9245dbfe4356da65cd9f4e0aaf7fb07ea32b14a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -76,6 +76,7 @@ struct conntrack_gc_work { struct delayed_work dwork; u32 last_bucket; bool exiting; + bool early_drop; long next_gc_run; }; @@ -180,14 +181,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; seqcount_t nf_conntrack_generation __read_mostly; - -/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used - * for the nfctinfo. We cheat by (ab)using the PER CPU cache line - * alignment to enforce this. 
- */ -DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); -EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); - static unsigned int nf_conntrack_hash_rnd __read_mostly; static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, @@ -706,7 +699,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto->allow_clash && - !nfct_nat(ct) && + ((ct->status & IPS_NAT_DONE_MASK) == 0) && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { enum ip_conntrack_info oldinfo; @@ -959,10 +952,30 @@ static noinline int early_drop(struct net *net, unsigned int _hash) return false; } +static bool gc_worker_skip_ct(const struct nf_conn *ct) +{ + return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct); +} + +static bool gc_worker_can_early_drop(const struct nf_conn *ct) +{ + const struct nf_conntrack_l4proto *l4proto; + + if (!test_bit(IPS_ASSURED_BIT, &ct->status)) + return true; + + l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); + if (l4proto->can_early_drop && l4proto->can_early_drop(ct)) + return true; + + return false; +} + static void gc_worker(struct work_struct *work) { unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); unsigned int i, goal, buckets = 0, expired_count = 0; + unsigned int nf_conntrack_max95 = 0; struct conntrack_gc_work *gc_work; unsigned int ratio, scanned = 0; unsigned long next_run; @@ -971,6 +984,8 @@ static void gc_worker(struct work_struct *work) goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; i = gc_work->last_bucket; + if (gc_work->early_drop) + nf_conntrack_max95 = nf_conntrack_max / 100u * 95u; do { struct nf_conntrack_tuple_hash *h; @@ -987,6 +1002,8 @@ static void gc_worker(struct work_struct *work) i = 0; hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) { + struct net *net; + tmp = nf_ct_tuplehash_to_ctrack(h); scanned++; @@ -995,6 +1012,27 @@ static void gc_worker(struct work_struct *work) expired_count++; continue; } + + if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp)) + continue; + + net = nf_ct_net(tmp); + if (atomic_read(&net->ct.count) < nf_conntrack_max95) + continue; + + /* need to take reference to avoid possible races */ + if (!atomic_inc_not_zero(&tmp->ct_general.use)) + continue; + + if (gc_worker_skip_ct(tmp)) { + nf_ct_put(tmp); + continue; + } + + if (gc_worker_can_early_drop(tmp)) + nf_ct_kill(tmp); + + nf_ct_put(tmp); } /* could check get_nulls_value() here and restart if ct @@ -1040,6 +1078,7 @@ static void gc_worker(struct work_struct *work) next_run = gc_work->next_gc_run; gc_work->last_bucket = i; + gc_work->early_drop = false; queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); } @@ -1065,6 +1104,8 @@ __nf_conntrack_alloc(struct net *net, if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { if (!early_drop(net, hash)) { + if (!conntrack_gc_work.early_drop) + conntrack_gc_work.early_drop = true; atomic_dec(&net->ct.count); net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); return ERR_PTR(-ENOMEM); @@ -1133,7 +1174,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free); /* Allocate a new conntrack: we return -ENOMEM if classification failed due to stress. Otherwise it really is unclassifiable. 
*/ -static struct nf_conntrack_tuple_hash * +static noinline struct nf_conntrack_tuple_hash * init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, @@ -1241,21 +1282,20 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; } -/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */ -static inline struct nf_conn * +/* On success, returns 0, sets skb->_nfct | ctinfo */ +static int resolve_normal_ct(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, struct nf_conntrack_l3proto *l3proto, - struct nf_conntrack_l4proto *l4proto, - int *set_reply, - enum ip_conntrack_info *ctinfo) + struct nf_conntrack_l4proto *l4proto) { const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; + enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; struct nf_conn *ct; u32 hash; @@ -1264,7 +1304,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, dataoff, l3num, protonum, net, &tuple, l3proto, l4proto)) { pr_debug("Can't get tuple\n"); - return NULL; + return 0; } /* look for tuple match */ @@ -1275,33 +1315,30 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, skb, dataoff, hash); if (!h) - return NULL; + return 0; if (IS_ERR(h)) - return (void *)h; + return PTR_ERR(h); } ct = nf_ct_tuplehash_to_ctrack(h); /* It exists; we have (non-exclusive) reference. */ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { - *ctinfo = IP_CT_ESTABLISHED_REPLY; - /* Please set reply bit if this packet OK */ - *set_reply = 1; + ctinfo = IP_CT_ESTABLISHED_REPLY; } else { /* Once we've had two way comms, always ESTABLISHED. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { pr_debug("normal packet for %p\n", ct); - *ctinfo = IP_CT_ESTABLISHED; + ctinfo = IP_CT_ESTABLISHED; } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { pr_debug("related packet for %p\n", ct); - *ctinfo = IP_CT_RELATED; + ctinfo = IP_CT_RELATED; } else { pr_debug("new packet for %p\n", ct); - *ctinfo = IP_CT_NEW; + ctinfo = IP_CT_NEW; } - *set_reply = 0; } - nf_ct_set(skb, ct, *ctinfo); - return ct; + nf_ct_set(skb, ct, ctinfo); + return 0; } unsigned int @@ -1315,13 +1352,13 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts; unsigned int dataoff; u_int8_t protonum; - int set_reply = 0; int ret; tmpl = nf_ct_get(skb, &ctinfo); - if (tmpl) { + if (tmpl || ctinfo == IP_CT_UNTRACKED) { /* Previously seen (loopback or untracked)? Ignore. */ - if (!nf_ct_is_template(tmpl)) { + if ((tmpl && !nf_ct_is_template(tmpl)) || + ctinfo == IP_CT_UNTRACKED) { NF_CT_STAT_INC_ATOMIC(net, ignore); return NF_ACCEPT; } @@ -1358,23 +1395,22 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, goto out; } repeat: - ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, - l3proto, l4proto, &set_reply, &ctinfo); - if (!ct) { - /* Not valid part of a connection */ - NF_CT_STAT_INC_ATOMIC(net, invalid); - ret = NF_ACCEPT; - goto out; - } - - if (IS_ERR(ct)) { + ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, + l3proto, l4proto); + if (ret < 0) { /* Too stressed to deal. 
*/ NF_CT_STAT_INC_ATOMIC(net, drop); ret = NF_DROP; goto out; } - NF_CT_ASSERT(skb_nfct(skb)); + ct = nf_ct_get(skb, &ctinfo); + if (!ct) { + /* Not valid part of a connection */ + NF_CT_STAT_INC_ATOMIC(net, invalid); + ret = NF_ACCEPT; + goto out; + } /* Decide what timeout policy we want to apply to this flow. */ timeouts = nf_ct_timeout_lookup(net, ct, l4proto); @@ -1399,7 +1435,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, goto out; } - if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) + if (ctinfo == IP_CT_ESTABLISHED_REPLY && + !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_REPLY, ct); out: if (tmpl) @@ -1634,18 +1671,6 @@ void nf_ct_free_hashtable(void *hash, unsigned int size) } EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); -static int untrack_refs(void) -{ - int cnt = 0, cpu; - - for_each_possible_cpu(cpu) { - struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); - - cnt += atomic_read(&ct->ct_general.use) - 1; - } - return cnt; -} - void nf_conntrack_cleanup_start(void) { conntrack_gc_work.exiting = true; @@ -1655,8 +1680,6 @@ void nf_conntrack_cleanup_start(void) void nf_conntrack_cleanup_end(void) { RCU_INIT_POINTER(nf_ct_destroy, NULL); - while (untrack_refs() > 0) - schedule(); cancel_delayed_work_sync(&conntrack_gc_work.dwork); nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); @@ -1830,20 +1853,44 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, &nf_conntrack_htable_size, 0600); -void nf_ct_untracked_status_or(unsigned long bits) +static unsigned int total_extension_size(void) { - int cpu; + /* remember to add new extensions below */ + BUILD_BUG_ON(NF_CT_EXT_NUM > 9); - for_each_possible_cpu(cpu) - per_cpu(nf_conntrack_untracked, cpu).status |= bits; -} -EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); + return sizeof(struct nf_ct_ext) + + sizeof(struct nf_conn_help) +#if IS_ENABLED(CONFIG_NF_NAT) + + sizeof(struct nf_conn_nat) +#endif + + sizeof(struct nf_conn_seqadj) + + sizeof(struct nf_conn_acct) +#ifdef CONFIG_NF_CONNTRACK_EVENTS + + sizeof(struct nf_conntrack_ecache) +#endif +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + + sizeof(struct nf_conn_tstamp) +#endif +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT + + sizeof(struct nf_conn_timeout) +#endif +#ifdef CONFIG_NF_CONNTRACK_LABELS + + sizeof(struct nf_conn_labels) +#endif +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) + + sizeof(struct nf_conn_synproxy) +#endif + ; +}; int nf_conntrack_init_start(void) { int max_factor = 8; int ret = -ENOMEM; - int i, cpu; + int i; + + /* struct nf_ct_ext uses u8 to store offsets/size */ + BUILD_BUG_ON(total_extension_size() > 255u); seqcount_init(&nf_conntrack_generation); @@ -1926,15 +1973,6 @@ int nf_conntrack_init_start(void) if (ret < 0) goto err_proto; - /* Set up fake conntrack: to never be deleted, not in any hashes */ - for_each_possible_cpu(cpu) { - struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); - write_pnet(&ct->ct_net, &init_net); - atomic_set(&ct->ct_general.use, 1); - } - /* - and look it like as a confirmed connection */ - nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); - conntrack_gc_work_init(&conntrack_gc_work); queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ); @@ -1982,6 +2020,7 @@ int nf_conntrack_init_net(struct net *net) int ret = -ENOMEM; int cpu; + BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER); atomic_set(&net->ct.count, 0); net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); diff 
--git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 22fc32143e9c4ae17bc48edc68eb0decf11d931c..caac41ad9483ed36333884f8e584521778e76937 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -195,7 +195,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct) events = xchg(&e->cache, 0); - if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events) + if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct)) goto out_unlock; /* We make a copy of the missed event cache without taking @@ -212,7 +212,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct) ret = notify->fcn(events | missed, &item); - if (likely(ret >= 0 && !missed)) + if (likely(ret == 0 && !missed)) goto out_unlock; spin_lock_bh(&ct->lock); @@ -347,7 +347,7 @@ static struct ctl_table event_sysctl_table[] = { }; #endif /* CONFIG_SYSCTL */ -static struct nf_ct_ext_type event_extend __read_mostly = { +static const struct nf_ct_ext_type event_extend = { .len = sizeof(struct nf_conntrack_ecache), .align = __alignof__(struct nf_conntrack_ecache), .id = NF_CT_EXT_ECACHE, @@ -420,6 +420,9 @@ int nf_conntrack_ecache_init(void) int ret = nf_ct_extend_register(&event_extend); if (ret < 0) pr_err("nf_ct_event: Unable to register event extension.\n"); + + BUILD_BUG_ON(__IPCT_MAX >= 16); /* ctmask, missed use u16 */ + return ret; } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index d80073037856db9081e57b7c88e482a61b94a45f..e03d16ed550d6bf07a537f4b150148a3cd967f59 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -103,6 +103,17 @@ nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple, nf_ct_zone_equal_any(i->master, zone); } +bool nf_ct_remove_expect(struct nf_conntrack_expect *exp) +{ + if (del_timer(&exp->timeout)) { + nf_ct_unlink_expect(exp); + nf_ct_expect_put(exp); + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(nf_ct_remove_expect); + struct nf_conntrack_expect * __nf_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone, @@ -133,7 +144,7 @@ nf_ct_expect_find_get(struct net *net, rcu_read_lock(); i = __nf_ct_expect_find(net, zone, tuple); - if (i && !atomic_inc_not_zero(&i->use)) + if (i && !refcount_inc_not_zero(&i->use)) i = NULL; rcu_read_unlock(); @@ -186,7 +197,7 @@ nf_ct_find_expectation(struct net *net, return NULL; if (exp->flags & NF_CT_EXPECT_PERMANENT) { - atomic_inc(&exp->use); + refcount_inc(&exp->use); return exp; } else if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); @@ -211,10 +222,7 @@ void nf_ct_remove_expectations(struct nf_conn *ct) spin_lock_bh(&nf_conntrack_expect_lock); hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) { - if (del_timer(&exp->timeout)) { - nf_ct_unlink_expect(exp); - nf_ct_expect_put(exp); - } + nf_ct_remove_expect(exp); } spin_unlock_bh(&nf_conntrack_expect_lock); } @@ -255,10 +263,7 @@ static inline int expect_matches(const struct nf_conntrack_expect *a, void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) { spin_lock_bh(&nf_conntrack_expect_lock); - if (del_timer(&exp->timeout)) { - nf_ct_unlink_expect(exp); - nf_ct_expect_put(exp); - } + nf_ct_remove_expect(exp); spin_unlock_bh(&nf_conntrack_expect_lock); } EXPORT_SYMBOL_GPL(nf_ct_unexpect_related); @@ -275,7 +280,7 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me) return NULL; new->master = me; - atomic_set(&new->use, 1); + refcount_set(&new->use, 1); return new; } EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); @@ 
-348,7 +353,7 @@ static void nf_ct_expect_free_rcu(struct rcu_head *head) void nf_ct_expect_put(struct nf_conntrack_expect *exp) { - if (atomic_dec_and_test(&exp->use)) + if (refcount_dec_and_test(&exp->use)) call_rcu(&exp->rcu, nf_ct_expect_free_rcu); } EXPORT_SYMBOL_GPL(nf_ct_expect_put); @@ -361,7 +366,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple); /* two references : one for hash insert, one for the timer */ - atomic_add(2, &exp->use); + refcount_add(2, &exp->use); hlist_add_head_rcu(&exp->lnode, &master_help->expectations); master_help->expecting[exp->class]++; @@ -394,10 +399,8 @@ static void evict_oldest_expect(struct nf_conn *master, last = exp; } - if (last && del_timer(&last->timeout)) { - nf_ct_unlink_expect(last); - nf_ct_expect_put(last); - } + if (last) + nf_ct_remove_expect(last); } static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) @@ -419,11 +422,8 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) h = nf_ct_expect_dst_hash(net, &expect->tuple); hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { if (expect_matches(i, expect)) { - if (del_timer(&i->timeout)) { - nf_ct_unlink_expect(i); - nf_ct_expect_put(i); + if (nf_ct_remove_expect(i)) break; - } } else if (expect_clash(i, expect)) { ret = -EBUSY; goto out; @@ -549,7 +549,7 @@ static int exp_seq_show(struct seq_file *s, void *v) seq_printf(s, "%ld ", timer_pending(&expect->timeout) ? (long)(expect->timeout.expires - jiffies)/HZ : 0); else - seq_printf(s, "- "); + seq_puts(s, "- "); seq_printf(s, "l3proto = %u proto=%u ", expect->tuple.src.l3num, expect->tuple.dst.protonum); @@ -559,7 +559,7 @@ static int exp_seq_show(struct seq_file *s, void *v) expect->tuple.dst.protonum)); if (expect->flags & NF_CT_EXPECT_PERMANENT) { - seq_printf(s, "PERMANENT"); + seq_puts(s, "PERMANENT"); delim = ","; } if (expect->flags & NF_CT_EXPECT_INACTIVE) { diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 008299b7f78fe3754946cf0a58029090234ad905..6c605e88ebae238ce618b9e808a4da0dc697d95c 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -18,17 +18,14 @@ static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM]; static DEFINE_MUTEX(nf_ct_ext_type_mutex); +#define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */ -void __nf_ct_ext_destroy(struct nf_conn *ct) +void nf_ct_ext_destroy(struct nf_conn *ct) { unsigned int i; struct nf_ct_ext_type *t; - struct nf_ct_ext *ext = ct->ext; for (i = 0; i < NF_CT_EXT_NUM; i++) { - if (!__nf_ct_ext_exist(ext, i)) - continue; - rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[i]); @@ -41,54 +38,26 @@ void __nf_ct_ext_destroy(struct nf_conn *ct) rcu_read_unlock(); } } -EXPORT_SYMBOL(__nf_ct_ext_destroy); - -static void * -nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, - size_t var_alloc_len, gfp_t gfp) -{ - unsigned int off, len; - struct nf_ct_ext_type *t; - size_t alloc_size; - - rcu_read_lock(); - t = rcu_dereference(nf_ct_ext_types[id]); - if (!t) { - rcu_read_unlock(); - return NULL; - } - - off = ALIGN(sizeof(struct nf_ct_ext), t->align); - len = off + t->len + var_alloc_len; - alloc_size = t->alloc_size + var_alloc_len; - rcu_read_unlock(); - - *ext = kzalloc(alloc_size, gfp); - if (!*ext) - return NULL; - - (*ext)->offset[id] = off; - (*ext)->len = len; - - return (void *)(*ext) + off; -} +EXPORT_SYMBOL(nf_ct_ext_destroy);
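The nf_conntrack_extend.c rewrite below drops the two-path allocation scheme (nf_ct_ext_create() for the first extension, __krealloc() for later ones) along with the alloc_size/NF_CT_EXT_F_PREALLOC bookkeeping: every extension is appended to one buffer by krealloc, padded up to a flat NF_CT_EXT_PREALLOC floor of 128 bytes so the common case allocates only once. A minimal userspace sketch of the same append-by-realloc layout; EXT_NUM, EXT_PREALLOC, struct ext and ext_add() are invented names, and RCU publication of the reallocated buffer is deliberately omitted:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define EXT_NUM      4		/* stand-in for NF_CT_EXT_NUM */
#define EXT_PREALLOC 128u	/* mirrors NF_CT_EXT_PREALLOC */

struct ext {
	uint8_t offset[EXT_NUM];	/* offset of each extension, 0 = absent */
	uint8_t len;			/* bytes used so far */
	char data[];
};

static size_t align_up(size_t v, size_t a)	/* a must be a power of two */
{
	return (v + a - 1) & ~(a - 1);
}

static void *ext_add(struct ext **pext, unsigned int id,
		     uint8_t size, uint8_t align)
{
	struct ext *old = *pext, *new;
	size_t oldlen = old ? old->len : sizeof(struct ext);
	size_t newoff = align_up(oldlen, align);
	size_t newlen = newoff + size;
	size_t alloc = newlen > EXT_PREALLOC ? newlen : EXT_PREALLOC;

	if (id >= EXT_NUM || (old && old->offset[id]))
		return NULL;		/* unknown id or already present */
	if (newlen > 255)
		return NULL;		/* the kernel enforces this at build time */

	new = realloc(old, alloc);	/* kernel: __krealloc + kfree_rcu(old) */
	if (!new)
		return NULL;
	if (!old)
		memset(new->offset, 0, sizeof(new->offset));
	new->offset[id] = (uint8_t)newoff;
	new->len = (uint8_t)newlen;
	memset((char *)new + newoff, 0, newlen - newoff);
	*pext = new;
	return (char *)new + newoff;
}

The 128-byte floor trades a little memory per conntrack for avoiding one reallocation per extension, which pays off because events are enabled by default and nearly every conntrack gains at least one extension.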
-void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, - size_t var_alloc_len, gfp_t gfp) +void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) { + unsigned int newlen, newoff, oldlen, alloc; struct nf_ct_ext *old, *new; - int newlen, newoff; struct nf_ct_ext_type *t; /* Conntrack must not be confirmed to avoid races on reallocation. */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); old = ct->ext; - if (!old) - return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp); - if (__nf_ct_ext_exist(old, id)) - return NULL; + if (old) { + if (__nf_ct_ext_exist(old, id)) + return NULL; + oldlen = old->len; + } else { + oldlen = sizeof(*new); + } rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); @@ -97,15 +66,19 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, return NULL; } - newoff = ALIGN(old->len, t->align); - newlen = newoff + t->len + var_alloc_len; + newoff = ALIGN(oldlen, t->align); + newlen = newoff + t->len; rcu_read_unlock(); - new = __krealloc(old, newlen, gfp); + alloc = max(newlen, NF_CT_EXT_PREALLOC); + new = __krealloc(old, alloc, gfp); if (!new) return NULL; - if (new != old) { + if (!old) { + memset(new->offset, 0, sizeof(new->offset)); + ct->ext = new; + } else if (new != old) { kfree_rcu(old, rcu); rcu_assign_pointer(ct->ext, new); } @@ -115,45 +88,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, memset((void *)new + newoff, 0, newlen - newoff); return (void *)new + newoff; } -EXPORT_SYMBOL(__nf_ct_ext_add_length); - -static void update_alloc_size(struct nf_ct_ext_type *type) -{ - int i, j; - struct nf_ct_ext_type *t1, *t2; - enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1; - - /* unnecessary to update all types */ - if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) { - min = type->id; - max = type->id; - } - - /* This assumes that extended areas in conntrack for the types - whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */ - for (i = min; i <= max; i++) { - t1 = rcu_dereference_protected(nf_ct_ext_types[i], - lockdep_is_held(&nf_ct_ext_type_mutex)); - if (!t1) - continue; - - t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) + - t1->len; - for (j = 0; j < NF_CT_EXT_NUM; j++) { - t2 = rcu_dereference_protected(nf_ct_ext_types[j], - lockdep_is_held(&nf_ct_ext_type_mutex)); - if (t2 == NULL || t2 == t1 || - (t2->flags & NF_CT_EXT_F_PREALLOC) == 0) - continue; - - t1->alloc_size = ALIGN(t1->alloc_size, t2->align) - + t2->len; - } - } -} +EXPORT_SYMBOL(nf_ct_ext_add); /* This MUST be called in process context. */ -int nf_ct_extend_register(struct nf_ct_ext_type *type) +int nf_ct_extend_register(const struct nf_ct_ext_type *type) { int ret = 0; @@ -163,12 +101,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type) goto out; } - /* This ensures that nf_ct_ext_create() can allocate enough area - before updating alloc_size */ - type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align) - + type->len; rcu_assign_pointer(nf_ct_ext_types[type->id], type); - update_alloc_size(type); out: mutex_unlock(&nf_ct_ext_type_mutex); return ret; @@ -176,11 +109,10 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type) EXPORT_SYMBOL_GPL(nf_ct_extend_register); /* This MUST be called in process context. 
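* It takes nf_ct_ext_type_mutex and, after unpublishing the type, waits out concurrent readers with synchronize_rcu().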
*/ -void nf_ct_extend_unregister(struct nf_ct_ext_type *type) +void nf_ct_extend_unregister(const struct nf_ct_ext_type *type) { mutex_lock(&nf_ct_ext_type_mutex); RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); - update_alloc_size(type); mutex_unlock(&nf_ct_ext_type_mutex); synchronize_rcu(); } diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 4aecef4a89fb135e5b50ac382f19645aafaaff94..f0e9a7511e1ac4915ed9bc43492d214417b3c1bd 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -577,6 +577,8 @@ static int __init nf_conntrack_ftp_init(void) { int i, ret = 0; + NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_ftp_master)); + ftp_buffer = kmalloc(65536, GFP_KERNEL); if (!ftp_buffer) return -ENOMEM; @@ -589,12 +591,10 @@ static int __init nf_conntrack_ftp_init(void) for (i = 0; i < ports_c; i++) { nf_ct_helper_init(&ftp[2 * i], AF_INET, IPPROTO_TCP, "ftp", FTP_PORT, ports[i], ports[i], &ftp_exp_policy, - 0, sizeof(struct nf_ct_ftp_master), help, - nf_ct_ftp_from_nlattr, THIS_MODULE); + 0, help, nf_ct_ftp_from_nlattr, THIS_MODULE); nf_ct_helper_init(&ftp[2 * i + 1], AF_INET6, IPPROTO_TCP, "ftp", FTP_PORT, ports[i], ports[i], &ftp_exp_policy, - 0, sizeof(struct nf_ct_ftp_master), help, - nf_ct_ftp_from_nlattr, THIS_MODULE); + 0, help, nf_ct_ftp_from_nlattr, THIS_MODULE); } ret = nf_conntrack_helpers_register(ftp, ports_c * 2); diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index f65d93639d12595884081726808f4c1e17caec03..3bcdc718484e4c5352ab2de17c5a8aaf227af2f3 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -637,7 +637,6 @@ static const struct nf_conntrack_expect_policy h245_exp_policy = { static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { .name = "H.245", .me = THIS_MODULE, - .data_len = sizeof(struct nf_ct_h323_master), .tuple.src.l3num = AF_UNSPEC, .tuple.dst.protonum = IPPROTO_UDP, .help = h245_help, @@ -1215,7 +1214,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { { .name = "Q.931", .me = THIS_MODULE, - .data_len = sizeof(struct nf_ct_h323_master), .tuple.src.l3num = AF_INET, .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), .tuple.dst.protonum = IPPROTO_TCP, @@ -1800,7 +1798,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { { .name = "RAS", .me = THIS_MODULE, - .data_len = sizeof(struct nf_ct_h323_master), .tuple.src.l3num = AF_INET, .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), .tuple.dst.protonum = IPPROTO_UDP, @@ -1810,7 +1807,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { { .name = "RAS", .me = THIS_MODULE, - .data_len = sizeof(struct nf_ct_h323_master), .tuple.src.l3num = AF_INET6, .tuple.src.u.udp.port = cpu_to_be16(RAS_PORT), .tuple.dst.protonum = IPPROTO_UDP, @@ -1836,6 +1832,8 @@ static int __init nf_conntrack_h323_init(void) { int ret; + NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_h323_master)); + h323_buffer = kmalloc(65536, GFP_KERNEL); if (!h323_buffer) return -ENOMEM; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 4eeb3418366ad5473945395346b6e4e5fc213fbc..4b9dfe3eef6241756d6474e1f6caad568a942b63 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -187,8 +187,7 @@ nf_ct_helper_ext_add(struct nf_conn *ct, { struct nf_conn_help *help; - help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER, - 
helper->data_len, gfp); + help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp); if (help) INIT_HLIST_HEAD(&help->expectations); else @@ -392,6 +391,9 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); + if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) + return -EINVAL; + mutex_lock(&nf_ct_helper_mutex); hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) { @@ -455,11 +457,8 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) if ((rcu_dereference_protected( help->helper, lockdep_is_held(&nf_conntrack_expect_lock) - ) == me || exp->helper == me) && - del_timer(&exp->timeout)) { - nf_ct_unlink_expect(exp); - nf_ct_expect_put(exp); - } + ) == me || exp->helper == me)) + nf_ct_remove_expect(exp); } } spin_unlock_bh(&nf_conntrack_expect_lock); @@ -491,7 +490,7 @@ void nf_ct_helper_init(struct nf_conntrack_helper *helper, u16 l3num, u16 protonum, const char *name, u16 default_port, u16 spec_port, u32 id, const struct nf_conntrack_expect_policy *exp_pol, - u32 expect_class_max, u32 data_len, + u32 expect_class_max, int (*help)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo), @@ -504,7 +503,6 @@ void nf_ct_helper_init(struct nf_conntrack_helper *helper, helper->tuple.src.u.all = htons(spec_port); helper->expect_policy = exp_pol; helper->expect_class_max = expect_class_max; - helper->data_len = data_len; helper->help = help; helper->from_nlattr = from_nlattr; helper->me = module; @@ -544,7 +542,7 @@ void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *helper, } EXPORT_SYMBOL_GPL(nf_conntrack_helpers_unregister); -static struct nf_ct_ext_type helper_extend __read_mostly = { +static const struct nf_ct_ext_type helper_extend = { .len = sizeof(struct nf_conn_help), .align = __alignof__(struct nf_conn_help), .id = NF_CT_EXT_HELPER, diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 1972a149f958350c6f6dd5f466d36fed152b76fc..5523acce9d6993dc71e467bbdf7b718ab2b25bf7 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -243,6 +243,12 @@ static int __init nf_conntrack_irc_init(void) return -EINVAL; } + if (max_dcc_channels > NF_CT_EXPECT_MAX_CNT) { + pr_err("max_dcc_channels must not be more than %u\n", + NF_CT_EXPECT_MAX_CNT); + return -EINVAL; + } + irc_exp_policy.max_expected = max_dcc_channels; irc_exp_policy.timeout = dcc_timeout; @@ -257,7 +263,7 @@ static int __init nf_conntrack_irc_init(void) for (i = 0; i < ports_c; i++) { nf_ct_helper_init(&irc[i], AF_INET, IPPROTO_TCP, "irc", IRC_PORT, ports[i], i, &irc_exp_policy, - 0, 0, help, NULL, THIS_MODULE); + 0, help, NULL, THIS_MODULE); } ret = nf_conntrack_helpers_register(&irc[0], ports_c); diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c index bcab8bde73128f9f9f8b4dba31a1c283b07f8eb1..adf2198599018ca6ec4377ffa4b92bb0a2dcd169 100644 --- a/net/netfilter/nf_conntrack_labels.c +++ b/net/netfilter/nf_conntrack_labels.c @@ -82,7 +82,7 @@ void nf_connlabels_put(struct net *net) } EXPORT_SYMBOL_GPL(nf_connlabels_put); -static struct nf_ct_ext_type labels_extend __read_mostly = { +static const struct nf_ct_ext_type labels_extend = { .len = sizeof(struct nf_conn_labels), .align = __alignof__(struct nf_conn_labels), .id = NF_CT_EXT_LABELS, diff --git 
a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 4c8f30a3d6d2762e69604c4b1f6cd6821663bc64..496ce173f0c1937c73fb360e402c9c3e3936e1e9 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c @@ -58,6 +58,8 @@ static struct nf_conntrack_helper helper __read_mostly = { static int __init nf_conntrack_netbios_ns_init(void) { + NF_CT_HELPER_BUILD_BUG_ON(0); + exp_policy.timeout = timeout; return nf_conntrack_helper_register(&helper); } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index dc7dfd68fafe5d8db341488ac7d9ad5bf8f80b46..5f6f2f388928130a2717911fef15c72325ebe2f3 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -467,7 +467,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct nlattr *nest_parms; unsigned int flags = portid ? NLM_F_MULTI : 0, event; - event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW); + event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -627,10 +627,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) unsigned int flags = 0, group; int err; - /* ignore our fake conntrack entry */ - if (nf_ct_is_untracked(ct)) - return 0; - if (events & (1 << IPCT_DESTROY)) { type = IPCTNL_MSG_CT_DELETE; group = NFNLGRP_CONNTRACK_DESTROY; @@ -652,7 +648,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) if (skb == NULL) goto errout; - type |= NFNL_SUBSYS_CTNETLINK << 8; + type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type); nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -908,7 +904,7 @@ static int ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_l3proto *l3proto; int ret = 0; - ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL); + ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL, NULL); if (ret < 0) return ret; @@ -917,7 +913,7 @@ static int ctnetlink_parse_tuple_ip(struct nlattr *attr, if (likely(l3proto->nlattr_to_tuple)) { ret = nla_validate_nested(attr, CTA_IP_MAX, - l3proto->nla_policy); + l3proto->nla_policy, NULL); if (ret == 0) ret = l3proto->nlattr_to_tuple(tb, tuple); } @@ -938,7 +934,8 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr, struct nf_conntrack_l4proto *l4proto; int ret = 0; - ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy); + ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy, + NULL); if (ret < 0) return ret; @@ -951,7 +948,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr, if (likely(l4proto->nlattr_to_tuple)) { ret = nla_validate_nested(attr, CTA_PROTO_MAX, - l4proto->nla_policy); + l4proto->nla_policy, NULL); if (ret == 0) ret = l4proto->nlattr_to_tuple(tb, tuple); } @@ -1015,7 +1012,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], memset(tuple, 0, sizeof(*tuple)); - err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy); + err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy, + NULL); if (err < 0) return err; @@ -1065,7 +1063,7 @@ static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name, int err; struct nlattr *tb[CTA_HELP_MAX+1]; - err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy); + err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy, NULL); if (err < 0) return err; 
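Two mechanical conversions recur through the ctnetlink hunks here and the nf_tables hunks further down: the open-coded (NFNL_SUBSYS_* << 8 | message) packing is replaced by the nfnl_msg_type() helper, and nla_parse_nested()/nlmsg_parse() gain a trailing struct netlink_ext_ack * argument (NULL at all of these call sites) for reporting parse errors back to userspace. The helper is equivalent to:

static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
	return subsys << 8 | msg_type;
}

Taking typed u8 arguments documents the width of both fields and keeps the shift-and-or in one place instead of repeating it at every call site.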
@@ -1571,7 +1569,8 @@ static int ctnetlink_change_protoinfo(struct nf_conn *ct, struct nf_conntrack_l4proto *l4proto; int err = 0; - err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy); + err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy, + NULL); if (err < 0) return err; @@ -1596,7 +1595,7 @@ static int change_seq_adj(struct nf_ct_seqadj *seq, int err; struct nlattr *cda[CTA_SEQADJ_MAX+1]; - err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy); + err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy, NULL); if (err < 0) return err; @@ -1988,7 +1987,8 @@ ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0, event; - event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU); + event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, + IPCTNL_MSG_CT_GET_STATS_CPU); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -2071,7 +2071,7 @@ ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, unsigned int flags = portid ? NLM_F_MULTI : 0, event; unsigned int nr_conntracks = atomic_read(&net->ct.count); - event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS); + event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -2177,13 +2177,7 @@ ctnetlink_glue_build_size(const struct nf_conn *ct) static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) { - struct nf_conn *ct; - - ct = nf_ct_get(skb, ctinfo); - if (ct && nf_ct_is_untracked(ct)) - ct = NULL; - - return ct; + return nf_ct_get(skb, ctinfo); } static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) @@ -2353,7 +2347,7 @@ ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct) struct nlattr *cda[CTA_MAX+1]; int ret; - ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); + ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy, NULL); if (ret < 0) return ret; @@ -2390,7 +2384,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, struct nf_conntrack_expect *exp; int err; - err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy); + err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy, + NULL); if (err < 0) return err; @@ -2581,7 +2576,7 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq, struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
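/* dump replies directed at a specific portid are multipart messages */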
NLM_F_MULTI : 0; - event |= NFNL_SUBSYS_CTNETLINK_EXP << 8; + event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -2632,7 +2627,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) if (skb == NULL) goto errout; - type |= NFNL_SUBSYS_CTNETLINK_EXP << 8; + type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type); nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -2698,7 +2693,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) cb->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp) < 0) { - if (!atomic_inc_not_zero(&exp->use)) + if (!refcount_inc_not_zero(&exp->use)) continue; cb->args[1] = (unsigned long)exp; goto out; @@ -2744,7 +2739,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) cb->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp) < 0) { - if (!atomic_inc_not_zero(&exp->use)) + if (!refcount_inc_not_zero(&exp->use)) continue; cb->args[1] = (unsigned long)exp; goto out; @@ -3015,7 +3010,8 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr, struct nf_conntrack_tuple nat_tuple = {}; int err; - err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy); + err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, + exp_nat_nla_policy, NULL); if (err < 0) return err; @@ -3049,6 +3045,10 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, struct nf_conn_help *help; int err; + help = nfct_help(ct); + if (!help) + return ERR_PTR(-EOPNOTSUPP); + if (cda[CTA_EXPECT_CLASS] && helper) { class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); if (class > helper->expect_class_max) @@ -3058,26 +3058,11 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, if (!exp) return ERR_PTR(-ENOMEM); - help = nfct_help(ct); - if (!help) { - if (!cda[CTA_EXPECT_TIMEOUT]) { - err = -EINVAL; - goto err_out; - } - exp->timeout.expires = - jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; - - exp->flags = NF_CT_EXPECT_USERSPACE; - if (cda[CTA_EXPECT_FLAGS]) { - exp->flags |= - ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); - } + if (cda[CTA_EXPECT_FLAGS]) { + exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); + exp->flags &= ~NF_CT_EXPECT_USERSPACE; } else { - if (cda[CTA_EXPECT_FLAGS]) { - exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); - exp->flags &= ~NF_CT_EXPECT_USERSPACE; - } else - exp->flags = 0; + exp->flags = 0; } if (cda[CTA_EXPECT_FN]) { const char *name = nla_data(cda[CTA_EXPECT_FN]); @@ -3240,7 +3225,8 @@ ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; - event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU); + event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, + IPCTNL_MSG_EXP_GET_STATS_CPU); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index f60a4755d71e2b0763be35e07976e02169e1b01e..6959e93063d4c957017b97e08dfca27b775461c2 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -263,7 +263,7 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) goto out_put_both; } -static inline int +static int pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq, @@ -391,7 +391,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, return NF_ACCEPT; } -static inline int +static int pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq, @@ -523,6 +523,14 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff, int ret; u_int16_t msg; +#if IS_ENABLED(CONFIG_NF_NAT) + if (!nf_ct_is_confirmed(ct) && (ct->status & IPS_NAT_MASK)) { + struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT); + + if (!nat && !nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC)) + return NF_DROP; + } +#endif /* don't do any tracking before tcp handshake complete */ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; @@ -596,7 +604,6 @@ static const struct nf_conntrack_expect_policy pptp_exp_policy = { static struct nf_conntrack_helper pptp __read_mostly = { .name = "pptp", .me = THIS_MODULE, - .data_len = sizeof(struct nf_ct_pptp_master), .tuple.src.l3num = AF_INET, .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT), .tuple.dst.protonum = IPPROTO_TCP, @@ -607,6 +614,8 @@ static struct nf_conntrack_helper pptp __read_mostly = { static int __init nf_conntrack_pptp_init(void) { + NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_pptp_master)); + return nf_conntrack_helper_register(&pptp); } diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 2d6ee18034159d12603e50c694541a9bdda147fc..2de6c1fe326149c5ab46f06b9ca7a3db992c4e7f 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -202,7 +202,7 @@ static int kill_l3proto(struct nf_conn *i, void *data) static int kill_l4proto(struct nf_conn *i, void *data) { struct nf_conntrack_l4proto *l4proto; - l4proto = (struct nf_conntrack_l4proto *)data; + l4proto = data; return nf_ct_protonum(i) == l4proto->l4proto && nf_ct_l3num(i) == l4proto->l3proto; } @@ -441,9 +441,8 @@ EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one); void nf_ct_l4proto_pernet_unregister_one(struct net *net, struct nf_conntrack_l4proto *l4proto) { - struct nf_proto_net *pn = NULL; + struct nf_proto_net *pn = nf_ct_l4proto_net(net, l4proto); - pn = nf_ct_l4proto_net(net, l4proto); if (pn == NULL) return; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 93dd1c5b7bff9e5285530a446bba6811ec26ead4..b553fdd68816b98a0f6f58cba3bd7363959479cf 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -609,6 +609,20 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl, return -NF_ACCEPT; } +static bool dccp_can_early_drop(const struct nf_conn *ct) +{ + switch (ct->proto.dccp.state) { + case 
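/* the close sequence has begun; this entry may be early-dropped when the conntrack table is full */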
CT_DCCP_CLOSEREQ: + case CT_DCCP_CLOSING: + case CT_DCCP_TIMEWAIT: + return true; + default: + break; + } + + return false; +} + static void dccp_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { @@ -665,7 +679,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) return 0; err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr, - dccp_nla_policy); + dccp_nla_policy, NULL); if (err < 0) return err; @@ -868,6 +882,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = { .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, .error = dccp_error, + .can_early_drop = dccp_can_early_drop, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -902,6 +917,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = { .packet = dccp_packet, .get_timeouts = dccp_get_timeouts, .error = dccp_error, + .can_early_drop = dccp_can_early_drop, .print_tuple = dccp_print_tuple, .print_conntrack = dccp_print_conntrack, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 33279aab583d5eac3016b4a58f6bf2ea8b457395..13875d599a85713bbfeb2fee7b8978fa498a10ed 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -535,6 +535,20 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb, return -NF_ACCEPT; } +static bool sctp_can_early_drop(const struct nf_conn *ct) +{ + switch (ct->proto.sctp.state) { + case SCTP_CONNTRACK_SHUTDOWN_SENT: + case SCTP_CONNTRACK_SHUTDOWN_RECD: + case SCTP_CONNTRACK_SHUTDOWN_ACK_SENT: + return true; + default: + break; + } + + return false; +} + #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include @@ -584,10 +598,8 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) if (!attr) return 0; - err = nla_parse_nested(tb, - CTA_PROTOINFO_SCTP_MAX, - attr, - sctp_nla_policy); + err = nla_parse_nested(tb, CTA_PROTOINFO_SCTP_MAX, attr, + sctp_nla_policy, NULL); if (err < 0) return err; @@ -783,6 +795,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .get_timeouts = sctp_get_timeouts, .new = sctp_new, .error = sctp_error, + .can_early_drop = sctp_can_early_drop, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = sctp_to_nlattr, @@ -818,6 +831,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { .get_timeouts = sctp_get_timeouts, .new = sctp_new, .error = sctp_error, + .can_early_drop = sctp_can_early_drop, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = sctp_to_nlattr, diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index b122e9dacfed06e27aecab3fbc71203772d7b427..9758a7dfd83ef95c0bcab1257d63cb38d9faed88 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -419,10 +419,9 @@ static void tcp_options(const struct sk_buff *skb, && opsize == TCPOLEN_WINDOW) { state->td_scale = *(u_int8_t *)ptr; - if (state->td_scale > 14) { - /* See RFC1323 */ - state->td_scale = 14; - } + if (state->td_scale > TCP_MAX_WSCALE) + state->td_scale = TCP_MAX_WSCALE; + state->flags |= IP_CT_TCP_FLAG_WINDOW_SCALE; } @@ -1172,6 +1171,22 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, return true; } +static bool tcp_can_early_drop(const struct nf_conn *ct) +{ + switch (ct->proto.tcp.state) { + case 
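/* connection already closing: eligible for early drop when the table is full */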
TCP_CONNTRACK_FIN_WAIT: + case TCP_CONNTRACK_LAST_ACK: + case TCP_CONNTRACK_TIME_WAIT: + case TCP_CONNTRACK_CLOSE: + case TCP_CONNTRACK_CLOSE_WAIT: + return true; + default: + break; + } + + return false; +} + #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include @@ -1234,7 +1249,8 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) if (!pattr) return 0; - err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, tcp_nla_policy); + err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, + tcp_nla_policy, NULL); if (err < 0) return err; @@ -1549,6 +1565,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = .get_timeouts = tcp_get_timeouts, .new = tcp_new, .error = tcp_error, + .can_early_drop = tcp_can_early_drop, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = tcp_to_nlattr, .nlattr_size = tcp_nlattr_size, @@ -1586,6 +1603,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly = .get_timeouts = tcp_get_timeouts, .new = tcp_new, .error = tcp_error, + .can_early_drop = tcp_can_early_drop, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = tcp_to_nlattr, .nlattr_size = tcp_nlattr_size, diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index 9dcb9ee9b97d53361c73df05aa5fe9923e933bda..ae457f39d5ceb804aa60120c7d897dd441e49d2a 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c @@ -184,6 +184,8 @@ static int __init nf_conntrack_sane_init(void) { int i, ret = 0; + NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sane_master)); + sane_buffer = kmalloc(65536, GFP_KERNEL); if (!sane_buffer) return -ENOMEM; @@ -196,13 +198,11 @@ static int __init nf_conntrack_sane_init(void) for (i = 0; i < ports_c; i++) { nf_ct_helper_init(&sane[2 * i], AF_INET, IPPROTO_TCP, "sane", SANE_PORT, ports[i], ports[i], - &sane_exp_policy, 0, - sizeof(struct nf_ct_sane_master), help, NULL, + &sane_exp_policy, 0, help, NULL, THIS_MODULE); nf_ct_helper_init(&sane[2 * i + 1], AF_INET6, IPPROTO_TCP, "sane", SANE_PORT, ports[i], ports[i], - &sane_exp_policy, 0, - sizeof(struct nf_ct_sane_master), help, NULL, + &sane_exp_policy, 0, help, NULL, THIS_MODULE); } diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index ef7063eced7c490dd6813d3e54ee72fdad9a8565..a975efd6b8c3e3baac2a80334237be9201ace130 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c @@ -231,7 +231,7 @@ s32 nf_ct_seq_offset(const struct nf_conn *ct, } EXPORT_SYMBOL_GPL(nf_ct_seq_offset); -static struct nf_ct_ext_type nf_ct_seqadj_extend __read_mostly = { +static const struct nf_ct_ext_type nf_ct_seqadj_extend = { .len = sizeof(struct nf_conn_seqadj), .align = __alignof__(struct nf_conn_seqadj), .id = NF_CT_EXT_SEQADJ, diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 0d17894798b5caea7540b3bf7d341a0a2f623fd6..d38af4274335b9bed6de1edb4b0c4ae6fdf96656 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -829,10 +829,8 @@ static void flush_expectations(struct nf_conn *ct, bool media) hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) { if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media) continue; - if (!del_timer(&exp->timeout)) + if (!nf_ct_remove_expect(exp)) continue; - nf_ct_unlink_expect(exp); - nf_ct_expect_put(exp); if (!media) break; } @@ -1624,29 +1622,27 @@ static int __init nf_conntrack_sip_init(void) { int i, ret; + NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sip_master)); + if (ports_c 
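/* no ports given at module load time: fall back to the default */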
== 0) ports[ports_c++] = SIP_PORT; for (i = 0; i < ports_c; i++) { nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip", SIP_PORT, ports[i], i, sip_exp_policy, - SIP_EXPECT_MAX, - sizeof(struct nf_ct_sip_master), sip_help_udp, + SIP_EXPECT_MAX, sip_help_udp, NULL, THIS_MODULE); nf_ct_helper_init(&sip[4 * i + 1], AF_INET, IPPROTO_TCP, "sip", SIP_PORT, ports[i], i, sip_exp_policy, - SIP_EXPECT_MAX, - sizeof(struct nf_ct_sip_master), sip_help_tcp, + SIP_EXPECT_MAX, sip_help_tcp, NULL, THIS_MODULE); nf_ct_helper_init(&sip[4 * i + 2], AF_INET6, IPPROTO_UDP, "sip", SIP_PORT, ports[i], i, sip_exp_policy, - SIP_EXPECT_MAX, - sizeof(struct nf_ct_sip_master), sip_help_udp, + SIP_EXPECT_MAX, sip_help_udp, NULL, THIS_MODULE); nf_ct_helper_init(&sip[4 * i + 3], AF_INET6, IPPROTO_TCP, "sip", SIP_PORT, ports[i], i, sip_exp_policy, - SIP_EXPECT_MAX, - sizeof(struct nf_ct_sip_master), sip_help_tcp, + SIP_EXPECT_MAX, sip_help_tcp, NULL, THIS_MODULE); } diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 2256147dcaad80ea982868607b2e66b233c27c6f..ccb5cb9043e0e769ba463759a69f4d9f36e122e0 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -250,7 +250,7 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release; if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) - seq_printf(s, "[UNREPLIED] "); + seq_puts(s, "[UNREPLIED] "); print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l3proto, l4proto); @@ -261,7 +261,7 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release; if (test_bit(IPS_ASSURED_BIT, &ct->status)) - seq_printf(s, "[ASSURED] "); + seq_puts(s, "[ASSURED] "); if (seq_has_overflowed(s)) goto release; @@ -350,7 +350,7 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v) const struct ip_conntrack_stat *st = v; if (v == SEQ_START_TOKEN) { - seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); + seq_puts(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); return 0; } diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c index b1227dc6f75e115c8cb2e88e488c9349686891a0..0ec6779fd5d944406eb67ff6e3256698ec40d20c 100644 --- a/net/netfilter/nf_conntrack_tftp.c +++ b/net/netfilter/nf_conntrack_tftp.c @@ -113,16 +113,18 @@ static int __init nf_conntrack_tftp_init(void) { int i, ret; + NF_CT_HELPER_BUILD_BUG_ON(0); + if (ports_c == 0) ports[ports_c++] = TFTP_PORT; for (i = 0; i < ports_c; i++) { nf_ct_helper_init(&tftp[2 * i], AF_INET, IPPROTO_UDP, "tftp", TFTP_PORT, ports[i], i, &tftp_exp_policy, - 0, 0, tftp_help, NULL, THIS_MODULE); + 0, tftp_help, NULL, THIS_MODULE); nf_ct_helper_init(&tftp[2 * i + 1], AF_INET6, IPPROTO_UDP, "tftp", TFTP_PORT, ports[i], i, &tftp_exp_policy, - 0, 0, tftp_help, NULL, THIS_MODULE); + 0, tftp_help, NULL, THIS_MODULE); } ret = nf_conntrack_helpers_register(tftp, ports_c * 2); diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c index 26e742006c48c944ba16aea8d8a2ed541ad06ca1..46aee65f339bea3e39e6fcdd94d82d4646b991b4 100644 --- a/net/netfilter/nf_conntrack_timeout.c +++ b/net/netfilter/nf_conntrack_timeout.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook); void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) 
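/* assigned by the nfnetlink_cttimeout module when it is loaded */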
__read_mostly; EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook); -static struct nf_ct_ext_type timeout_extend __read_mostly = { +static const struct nf_ct_ext_type timeout_extend = { .len = sizeof(struct nf_conn_timeout), .align = __alignof__(struct nf_conn_timeout), .id = NF_CT_EXT_TIMEOUT, diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index 7a394df0deb7686fa7fe4da162ff77d3c03435c5..4c4734b78318962360bd07265dc8179f95fc5135 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c @@ -33,7 +33,7 @@ static struct ctl_table tstamp_sysctl_table[] = { }; #endif /* CONFIG_SYSCTL */ -static struct nf_ct_ext_type tstamp_extend __read_mostly = { +static const struct nf_ct_ext_type tstamp_extend = { .len = sizeof(struct nf_conn_tstamp), .align = __alignof__(struct nf_conn_tstamp), .id = NF_CT_EXT_TSTAMP, diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index c46d214d532355b7294a543d7c68843a82fb489d..bfa742da83aff37dfc8b426e1d193f0de056fb6f 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h @@ -14,7 +14,7 @@ /* nf_queue.c */ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, struct nf_hook_entry **entryp, unsigned int verdict); -void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry); +unsigned int nf_queue_nf_hook_drop(struct net *net); int __init netfilter_queue_init(void); /* nf_log.c */ diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 8d85a0598b603c5f7d1f2f6f0c0ba68bb96b4d58..8bb152a7cca499d8e9b349dcd9fe0693d6599fbf 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -71,7 +71,6 @@ void nf_log_unset(struct net *net, const struct nf_logger *logger) RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL); } mutex_unlock(&nf_log_mutex); - synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unset); @@ -376,13 +375,13 @@ static int seq_show(struct seq_file *s, void *v) logger = nft_log_dereference(loggers[*pos][i]); seq_printf(s, "%s", logger->name); if (i == 0 && loggers[*pos][i + 1] != NULL) - seq_printf(s, ","); + seq_puts(s, ","); if (seq_has_overflowed(s)) return -ENOSPC; } - seq_printf(s, ")\n"); + seq_puts(s, ")\n"); if (seq_has_overflowed(s)) return -ENOSPC; diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c index eb772380a202f0037745a4357cf2c00658181bcd..e4d61a7a52589d85d23185bba1919dfd188e62c2 100644 --- a/net/netfilter/nf_nat_amanda.c +++ b/net/netfilter/nf_nat_amanda.c @@ -33,7 +33,6 @@ static unsigned int help(struct sk_buff *skb, { char buffer[sizeof("65535")]; u_int16_t port; - unsigned int ret; /* Connection comes from client. 
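* The chosen port is printed back into the Amanda command payload below; if the packet cannot be mangled, the expectation is removed and the packet is dropped.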
*/ exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; @@ -63,14 +62,14 @@ static unsigned int help(struct sk_buff *skb, } sprintf(buffer, "%u", port); - ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo, - protoff, matchoff, matchlen, - buffer, strlen(buffer)); - if (ret != NF_ACCEPT) { + if (!nf_nat_mangle_udp_packet(skb, exp->master, ctinfo, + protoff, matchoff, matchlen, + buffer, strlen(buffer))) { nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); nf_ct_unexpect_related(exp); + return NF_DROP; } - return ret; + return NF_ACCEPT; } static void __exit nf_nat_amanda_fini(void) diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 82802e4a6640817e64eb3f3a6ffcb875ad14a747..b48d6b5aae8a87d4ea69cae0e025739ebe3f1658 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -71,11 +71,10 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl) if (ct == NULL) return; - family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; - rcu_read_lock(); + family = nf_ct_l3num(ct); l3proto = __nf_nat_l3proto_find(family); if (l3proto == NULL) - goto out; + return; dir = CTINFO2DIR(ctinfo); if (dir == IP_CT_DIR_ORIGINAL) @@ -84,8 +83,6 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl) statusbit = IPS_SRC_NAT; l3proto->decode_session(skb, ct, dir, statusbit, fl); -out: - rcu_read_unlock(); } int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) @@ -411,12 +408,6 @@ nf_nat_setup_info(struct nf_conn *ct, enum nf_nat_manip_type maniptype) { struct nf_conntrack_tuple curr_tuple, new_tuple; - struct nf_conn_nat *nat; - - /* nat helper or nfctnetlink also setup binding */ - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || maniptype == NF_NAT_MANIP_DST); @@ -549,10 +540,6 @@ struct nf_nat_proto_clean { static int nf_nat_proto_remove(struct nf_conn *i, void *data) { const struct nf_nat_proto_clean *clean = data; - struct nf_conn_nat *nat = nfct_nat(i); - - if (!nat) - return 0; if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) @@ -563,12 +550,10 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data) static int nf_nat_proto_clean(struct nf_conn *ct, void *data) { - struct nf_conn_nat *nat = nfct_nat(ct); - if (nf_nat_proto_remove(ct, data)) return 1; - if (!nat) + if ((ct->status & IPS_SRC_NAT_DONE) == 0) return 0; /* This netns is being destroyed, and conntrack has nat null binding. @@ -716,13 +701,9 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister); /* No one using conntrack by the time this called. 
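* Only entries that completed source NAT setup (IPS_SRC_NAT_DONE) have a bysource hash entry to remove, now that the NAT extension is no longer preallocated for every conntrack.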
*/ static void nf_nat_cleanup_conntrack(struct nf_conn *ct) { - struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT); - - if (!nat) - return; - - rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, - nf_nat_bysource_params); + if (ct->status & IPS_SRC_NAT_DONE) + rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, + nf_nat_bysource_params); } static struct nf_ct_ext_type nat_extend __read_mostly = { @@ -730,7 +711,6 @@ static struct nf_ct_ext_type nat_extend __read_mostly = { .align = __alignof__(struct nf_conn_nat), .destroy = nf_nat_cleanup_conntrack, .id = NF_CT_EXT_NAT, - .flags = NF_CT_EXT_F_PREALLOC, }; #if IS_ENABLED(CONFIG_NF_CT_NETLINK) @@ -751,7 +731,8 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr, const struct nf_nat_l4proto *l4proto; int err; - err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy); + err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, + protonat_nla_policy, NULL); if (err < 0) return err; @@ -780,7 +761,7 @@ nfnetlink_parse_nat(const struct nlattr *nat, memset(range, 0, sizeof(*range)); - err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy); + err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL); if (err < 0) return err; @@ -819,7 +800,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, /* No NAT information has been passed, allocate the null-binding */ if (attr == NULL) - return __nf_nat_alloc_null_binding(ct, manip); + return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0; err = nfnetlink_parse_nat(attr, ct, &range, l3proto); if (err < 0) @@ -874,9 +855,6 @@ static int __init nf_nat_init(void) nf_ct_helper_expectfn_register(&follow_master_nat); - /* Initialize fake conntrack so that NAT will skip it */ - nf_ct_untracked_status_or(IPS_NAT_DONE_MASK); - BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, nfnetlink_parse_nat_setup); diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index 211661cb2c90a5d25a57d5ce41c9c5f9898c5302..607a373379b40cf5cef2d4bba1bfefa58a8dfa33 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -70,15 +70,15 @@ static void mangle_contents(struct sk_buff *skb, } /* Unusual, but possible case. */ -static int enlarge_skb(struct sk_buff *skb, unsigned int extra) +static bool enlarge_skb(struct sk_buff *skb, unsigned int extra) { if (skb->len + extra > 65535) - return 0; + return false; if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC)) - return 0; + return false; - return 1; + return true; } /* Generic function for mangling variable-length address changes inside @@ -89,26 +89,26 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra) * skb enlargement, ... 
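* Returns false if the packet could not be made writable or could not be enlarged to fit the replacement text; callers are expected to drop the packet in that case.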
* * */ -int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, - unsigned int match_offset, - unsigned int match_len, - const char *rep_buffer, - unsigned int rep_len, bool adjust) +bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned int match_offset, + unsigned int match_len, + const char *rep_buffer, + unsigned int rep_len, bool adjust) { const struct nf_nat_l3proto *l3proto; struct tcphdr *tcph; int oldlen, datalen; if (!skb_make_writable(skb, skb->len)) - return 0; + return false; if (rep_len > match_len && rep_len - match_len > skb_tailroom(skb) && !enlarge_skb(skb, rep_len - match_len)) - return 0; + return false; SKB_LINEAR_ASSERT(skb); @@ -128,7 +128,7 @@ int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, nf_ct_seqadj_set(ct, ctinfo, tcph->seq, (int)rep_len - (int)match_len); - return 1; + return true; } EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet); @@ -142,7 +142,7 @@ EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet); * XXX - This function could be merged with nf_nat_mangle_tcp_packet which * should be fairly easy to do. */ -int +bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -157,12 +157,12 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, int datalen, oldlen; if (!skb_make_writable(skb, skb->len)) - return 0; + return false; if (rep_len > match_len && rep_len - match_len > skb_tailroom(skb) && !enlarge_skb(skb, rep_len - match_len)) - return 0; + return false; udph = (void *)skb->data + protoff; @@ -176,13 +176,13 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, /* fix udp checksum if udp checksum was previously calculated */ if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL) - return 1; + return true; l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check, datalen, oldlen); - return 1; + return true; } EXPORT_SYMBOL(nf_nat_mangle_udp_packet); diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index 1fb2258c35357c8c6f5072ae6593de2d53ab9f03..0648cb096bd87b2df3b84db5da8ff8fdbffc7018 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c @@ -37,7 +37,6 @@ static unsigned int help(struct sk_buff *skb, struct nf_conn *ct = exp->master; union nf_inet_addr newaddr; u_int16_t port; - unsigned int ret; /* Reply comes from server. 
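* The address and port in the DCC command are rewritten to the externally reachable address taken from the reply tuple; if mangling fails, the expectation is removed and the packet is dropped.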
*/ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; @@ -83,14 +82,14 @@ static unsigned int help(struct sk_buff *skb, pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &newaddr.ip, port); - ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, - matchlen, buffer, strlen(buffer)); - if (ret != NF_ACCEPT) { + if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, + matchlen, buffer, strlen(buffer))) { nf_ct_helper_log(skb, ct, "cannot mangle packet"); nf_ct_unexpect_related(exp); + return NF_DROP; } - return ret; + return NF_ACCEPT; } static void __exit nf_nat_irc_fini(void) diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 4a7662486f44bae527ed83e7e952c4d7195f3871..043850c9d154dcf128903d7dba6b17ea5ff20370 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -96,15 +96,18 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry) } EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); -void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry) +unsigned int nf_queue_nf_hook_drop(struct net *net) { const struct nf_queue_handler *qh; + unsigned int count = 0; rcu_read_lock(); qh = rcu_dereference(net->nf.queue_handler); if (qh) - qh->nf_hook_drop(net, entry); + count = qh->nf_hook_drop(net); rcu_read_unlock(); + + return count; } static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 7c6d1fbe38b9aafc668ca39ee6e096d64f79bc7f..a504e87c6ddff1b1266a901549256f29dc1973d1 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -66,8 +66,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW) { opts->wscale = *ptr; - if (opts->wscale > 14) - opts->wscale = 14; + if (opts->wscale > TCP_MAX_WSCALE) + opts->wscale = TCP_MAX_WSCALE; opts->options |= XT_SYNPROXY_OPT_WSCALE; } break; @@ -287,9 +287,9 @@ static int synproxy_cpu_seq_show(struct seq_file *seq, void *v) struct synproxy_stats *stats = v; if (v == SEQ_START_TOKEN) { - seq_printf(seq, "entries\t\tsyn_received\t" - "cookie_invalid\tcookie_valid\t" - "cookie_retrans\tconn_reopened\n"); + seq_puts(seq, "entries\t\tsyn_received\t" + "cookie_invalid\tcookie_valid\t" + "cookie_retrans\tconn_reopened\n"); return 0; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 434c739dfecaa8727dc193f8ad86e18133932660..1c6482d2c4dcfe9299a8094a138d446fc0d9e5c8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -144,7 +144,7 @@ static int nf_tables_register_hooks(struct net *net, unsigned int hook_nops) { if (table->flags & NFT_TABLE_F_DORMANT || - !(chain->flags & NFT_BASE_CHAIN)) + !nft_is_base_chain(chain)) return 0; return nf_register_net_hooks(net, nft_base_chain(chain)->ops, @@ -157,7 +157,7 @@ static void nf_tables_unregister_hooks(struct net *net, unsigned int hook_nops) { if (table->flags & NFT_TABLE_F_DORMANT || - !(chain->flags & NFT_BASE_CHAIN)) + !nft_is_base_chain(chain)) return; nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops); @@ -438,7 +438,7 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - event |= NFNL_SUBSYS_NFTABLES << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); if (nlh == NULL) goto nla_put_failure; @@ 
-587,7 +587,7 @@ static void _nf_tables_table_disable(struct net *net, list_for_each_entry(chain, &table->chains, list) { if (!nft_is_active_next(net, chain)) continue; - if (!(chain->flags & NFT_BASE_CHAIN)) + if (!nft_is_base_chain(chain)) continue; if (cnt && i++ == cnt) @@ -608,7 +608,7 @@ static int nf_tables_table_enable(struct net *net, list_for_each_entry(chain, &table->chains, list) { if (!nft_is_active_next(net, chain)) continue; - if (!(chain->flags & NFT_BASE_CHAIN)) + if (!nft_is_base_chain(chain)) continue; err = nf_register_net_hooks(net, nft_base_chain(chain)->ops, @@ -989,7 +989,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - event |= NFNL_SUBSYS_NFTABLES << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); if (nlh == NULL) goto nla_put_failure; @@ -1007,7 +1007,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name)) goto nla_put_failure; - if (chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); const struct nf_hook_ops *ops = &basechain->ops[0]; struct nlattr *nest; @@ -1182,7 +1182,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr) struct nft_stats *stats; int err; - err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy); + err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy, + NULL); if (err < 0) return ERR_PTR(err); @@ -1226,7 +1227,7 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) { BUG_ON(chain->use > 0); - if (chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(chain)) { struct nft_base_chain *basechain = nft_base_chain(chain); module_put(basechain->type->owner); @@ -1257,7 +1258,7 @@ static int nft_chain_parse_hook(struct net *net, int err; err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK], - nft_hook_policy); + nft_hook_policy, NULL); if (err < 0) return err; @@ -1364,8 +1365,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, } if (nla[NFTA_CHAIN_POLICY]) { - if ((chain != NULL && - !(chain->flags & NFT_BASE_CHAIN))) + if (chain != NULL && + !nft_is_base_chain(chain)) return -EOPNOTSUPP; if (chain == NULL && @@ -1396,7 +1397,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, struct nft_chain_hook hook; struct nf_hook_ops *ops; - if (!(chain->flags & NFT_BASE_CHAIN)) + if (!nft_is_base_chain(chain)) return -EBUSY; err = nft_chain_parse_hook(net, nla, afi, &hook, @@ -1433,7 +1434,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, } if (nla[NFTA_CHAIN_COUNTERS]) { - if (!(chain->flags & NFT_BASE_CHAIN)) + if (!nft_is_base_chain(chain)) return -EOPNOTSUPP; stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); @@ -1724,7 +1725,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, struct nlattr *tb[NFTA_EXPR_MAX + 1]; int err; - err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy); + err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy, NULL); if (err < 0) return err; @@ -1734,7 +1735,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, if (tb[NFTA_EXPR_DATA]) { err = nla_parse_nested(info->tb, type->maxattr, - tb[NFTA_EXPR_DATA], type->policy); + tb[NFTA_EXPR_DATA], type->policy, NULL); if (err < 0) goto err1; } else @@ -1772,8 +1773,19 @@ static int 
nf_tables_newexpr(const struct nft_ctx *ctx, goto err1; } + if (ops->validate) { + const struct nft_data *data = NULL; + + err = ops->validate(ctx, expr, &data); + if (err < 0) + goto err2; + } + return 0; +err2: + if (ops->destroy) + ops->destroy(ctx, expr); err1: expr->ops = NULL; return err; @@ -1874,10 +1886,9 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, const struct nft_expr *expr, *next; struct nlattr *list; const struct nft_rule *prule; - int type = event | NFNL_SUBSYS_NFTABLES << 8; + u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), - flags); + nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags); if (nlh == NULL) goto nla_put_failure; @@ -1895,7 +1906,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, goto nla_put_failure; if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) { - prule = list_entry(rule->list.prev, struct nft_rule, list); + prule = list_prev_entry(rule, list); if (nla_put_be64(skb, NFTA_RULE_POSITION, cpu_to_be64(prule->handle), NFTA_RULE_PAD)) @@ -2523,8 +2534,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, return 0; } -struct nft_set *nf_tables_set_lookup(const struct nft_table *table, - const struct nlattr *nla, u8 genmask) +static struct nft_set *nf_tables_set_lookup(const struct nft_table *table, + const struct nlattr *nla, u8 genmask) { struct nft_set *set; @@ -2538,11 +2549,10 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table, } return ERR_PTR(-ENOENT); } -EXPORT_SYMBOL_GPL(nf_tables_set_lookup); -struct nft_set *nf_tables_set_lookup_byid(const struct net *net, - const struct nlattr *nla, - u8 genmask) +static struct nft_set *nf_tables_set_lookup_byid(const struct net *net, + const struct nlattr *nla, + u8 genmask) { struct nft_trans *trans; u32 id = ntohl(nla_get_be32(nla)); @@ -2557,7 +2567,25 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net, } return ERR_PTR(-ENOENT); } -EXPORT_SYMBOL_GPL(nf_tables_set_lookup_byid); + +struct nft_set *nft_set_lookup(const struct net *net, + const struct nft_table *table, + const struct nlattr *nla_set_name, + const struct nlattr *nla_set_id, + u8 genmask) +{ + struct nft_set *set; + + set = nf_tables_set_lookup(table, nla_set_name, genmask); + if (IS_ERR(set)) { + if (!nla_set_id) + return set; + + set = nf_tables_set_lookup_byid(net, nla_set_id, genmask); + } + return set; +} +EXPORT_SYMBOL_GPL(nft_set_lookup); static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, const char *name) @@ -2617,7 +2645,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, u32 portid = ctx->portid; u32 seq = ctx->seq; - event |= NFNL_SUBSYS_NFTABLES << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); if (nlh == NULL) @@ -2851,7 +2879,8 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, struct nlattr *da[NFTA_SET_DESC_MAX + 1]; int err; - err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, nft_set_desc_policy); + err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, + nft_set_desc_policy, NULL); if (err < 0) return err; @@ -3353,7 +3382,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) int event, err; err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla, - NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy); + 
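
/*
 * [Editorial aside.] nf_tables_set_lookup() and nf_tables_set_lookup_byid()
 * lose their EXPORT_SYMBOL_GPL and become static above; the new exported
 * nft_set_lookup() wraps the "by name, else by transaction id" fallback
 * that nft_dynset, nft_lookup and nft_objref each used to open-code (their
 * hunks appear further down). A caller now reads simply:
 */
	set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET],
			     tb[NFTA_LOOKUP_SET_ID], genmask);
	if (IS_ERR(set))
		return PTR_ERR(set);
/*
 * Note also that nf_tables_newexpr() now invokes ops->validate() right
 * after ->init(), which is why several expression init routines later in
 * this diff (masq, meta, nat, redir, reject) drop their own open-coded
 * validate calls.
 */
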
NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy, + NULL); if (err < 0) return err; @@ -3367,8 +3397,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) if (IS_ERR(set)) return PTR_ERR(set); - event = NFT_MSG_NEWSETELEM; - event |= NFNL_SUBSYS_NFTABLES << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM); portid = NETLINK_CB(cb->skb).portid; seq = cb->nlh->nlmsg_seq; @@ -3453,7 +3482,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb, struct nlattr *nest; int err; - event |= NFNL_SUBSYS_NFTABLES << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); if (nlh == NULL) @@ -3612,7 +3641,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, int err; err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, - nft_set_elem_policy); + nft_set_elem_policy, NULL); if (err < 0) return err; @@ -3842,7 +3871,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, int err; err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, - nft_set_elem_policy); + nft_set_elem_policy, NULL); if (err < 0) goto err1; @@ -4064,7 +4093,8 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, }; -static struct nft_object *nft_obj_init(const struct nft_object_type *type, +static struct nft_object *nft_obj_init(const struct nft_ctx *ctx, + const struct nft_object_type *type, const struct nlattr *attr) { struct nlattr *tb[type->maxattr + 1]; @@ -4072,7 +4102,8 @@ static struct nft_object *nft_obj_init(const struct nft_object_type *type, int err; if (attr) { - err = nla_parse_nested(tb, type->maxattr, attr, type->policy); + err = nla_parse_nested(tb, type->maxattr, attr, type->policy, + NULL); if (err < 0) goto err1; } else { @@ -4084,7 +4115,7 @@ static struct nft_object *nft_obj_init(const struct nft_object_type *type, if (obj == NULL) goto err1; - err = type->init((const struct nlattr * const *)tb, obj); + err = type->init(ctx, (const struct nlattr * const *)tb, obj); if (err < 0) goto err2; @@ -4192,7 +4223,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, if (IS_ERR(type)) return PTR_ERR(type); - obj = nft_obj_init(type, nla[NFTA_OBJ_DATA]); + obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto err1; @@ -4224,7 +4255,7 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net, struct nfgenmsg *nfmsg; struct nlmsghdr *nlh; - event |= NFNL_SUBSYS_NFTABLES << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); if (nlh == NULL) goto nla_put_failure; @@ -4406,8 +4437,6 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, err: kfree_skb(skb2); return err; - - return 0; } static void nft_obj_destroy(struct nft_object *obj) @@ -4497,7 +4526,7 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWGEN; + int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0); if (nlh == NULL) @@ -4679,7 +4708,7 @@ static void nft_chain_commit_update(struct nft_trans *trans) if (nft_trans_chain_name(trans)[0]) strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); - if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN)) + if 
(!nft_is_base_chain(trans->ctx.chain)) return; basechain = nft_base_chain(trans->ctx.chain); @@ -4993,7 +5022,7 @@ int nft_chain_validate_dependency(const struct nft_chain *chain, { const struct nft_base_chain *basechain; - if (chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(chain)) { basechain = nft_base_chain(chain); if (basechain->type->type != type) return -EOPNOTSUPP; @@ -5007,7 +5036,7 @@ int nft_chain_validate_hooks(const struct nft_chain *chain, { struct nft_base_chain *basechain; - if (chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(chain)) { basechain = nft_base_chain(chain); if ((1 << basechain->ops[0].hooknum) & hook_flags) @@ -5285,7 +5314,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_chain *chain; int err; - err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy); + err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy, + NULL); if (err < 0) return err; @@ -5316,7 +5346,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, tb[NFTA_VERDICT_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); - if (chain->flags & NFT_BASE_CHAIN) + if (nft_is_base_chain(chain)) return -EOPNOTSUPP; chain->use++; @@ -5415,7 +5445,7 @@ int nft_data_init(const struct nft_ctx *ctx, struct nlattr *tb[NFTA_DATA_MAX + 1]; int err; - err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy); + err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy, NULL); if (err < 0) return err; @@ -5489,7 +5519,7 @@ int __nft_release_basechain(struct nft_ctx *ctx) { struct nft_rule *rule, *nr; - BUG_ON(!(ctx->chain->flags & NFT_BASE_CHAIN)); + BUG_ON(!nft_is_base_chain(ctx->chain)); nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain, ctx->afi->nops); diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c index 9e2ae424b64005555d5a7132ea1aa469dc4d0fc1..403432988313567358871d5f85a5d71fc0385051 100644 --- a/net/netfilter/nf_tables_netdev.c +++ b/net/netfilter/nf_tables_netdev.c @@ -128,7 +128,7 @@ static int nf_tables_netdev_event(struct notifier_block *this, list_for_each_entry(table, &afi->tables, list) { ctx.table = table; list_for_each_entry_safe(chain, nr, &table->chains, list) { - if (!(chain->flags & NFT_BASE_CHAIN)) + if (!nft_is_base_chain(chain)) continue; ctx.chain = chain; diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index 12eb9041dca284a398a2c1ac64924ba773a18489..e1b15e7a5793f6b877f63a95dc20ce2626caf7f5 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -169,7 +169,7 @@ void nft_trace_notify(struct nft_traceinfo *info) struct nlmsghdr *nlh; struct sk_buff *skb; unsigned int size; - int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_TRACE; + u16 event; if (!nfnetlink_has_listeners(nft_net(pkt), NFNLGRP_NFTRACE)) return; @@ -198,6 +198,7 @@ void nft_trace_notify(struct nft_traceinfo *info) if (!skb) return; + event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_TRACE); nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct nfgenmsg), 0); if (!nlh) goto nla_put_failure; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 68eda920160e12f3cb904dad08b7f6cc9422571e..80f5ecf2c3d7096f579361663da5d2e651eebd3e 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -148,7 +148,8 @@ int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, EXPORT_SYMBOL_GPL(nfnetlink_unicast); /* Process one complete nfnetlink message. 
*/ -static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); const struct nfnl_callback *nc; @@ -191,8 +192,8 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) int attrlen = nlh->nlmsg_len - min_len; __u8 subsys_id = NFNL_SUBSYS_ID(type); - err = nla_parse(cda, ss->cb[cb_id].attr_count, - attr, attrlen, ss->cb[cb_id].policy); + err = nla_parse(cda, ss->cb[cb_id].attr_count, attr, attrlen, + ss->cb[cb_id].policy, extack); if (err < 0) { rcu_read_unlock(); return err; @@ -261,7 +262,7 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb) struct nfnl_err *nfnl_err, *next; list_for_each_entry_safe(nfnl_err, next, err_list, head) { - netlink_ack(skb, nfnl_err->nlh, nfnl_err->err); + netlink_ack(skb, nfnl_err->nlh, nfnl_err->err, NULL); nfnl_err_del(nfnl_err); } } @@ -284,13 +285,13 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, int err; if (subsys_id >= NFNL_SUBSYS_COUNT) - return netlink_ack(skb, nlh, -EINVAL); + return netlink_ack(skb, nlh, -EINVAL, NULL); replay: status = 0; skb = netlink_skb_clone(oskb, GFP_KERNEL); if (!skb) - return netlink_ack(oskb, nlh, -ENOMEM); + return netlink_ack(oskb, nlh, -ENOMEM, NULL); nfnl_lock(subsys_id); ss = nfnl_dereference_protected(subsys_id); @@ -304,20 +305,20 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, #endif { nfnl_unlock(subsys_id); - netlink_ack(oskb, nlh, -EOPNOTSUPP); + netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL); return kfree_skb(skb); } } if (!ss->commit || !ss->abort) { nfnl_unlock(subsys_id); - netlink_ack(oskb, nlh, -EOPNOTSUPP); + netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL); return kfree_skb(skb); } if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) { nfnl_unlock(subsys_id); - netlink_ack(oskb, nlh, -ERESTART); + netlink_ack(oskb, nlh, -ERESTART, NULL); return kfree_skb(skb); } @@ -376,8 +377,8 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *attr = (void *)nlh + min_len; int attrlen = nlh->nlmsg_len - min_len; - err = nla_parse(cda, ss->cb[cb_id].attr_count, - attr, attrlen, ss->cb[cb_id].policy); + err = nla_parse(cda, ss->cb[cb_id].attr_count, attr, + attrlen, ss->cb[cb_id].policy, NULL); if (err < 0) goto ack; @@ -407,7 +408,8 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, * pointing to the batch header. 
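
/*
 * [Editorial aside.] The NULL argument threaded through netlink_ack() and
 * nla_parse() in these hunks is the new extended-ACK descriptor
 * (struct netlink_ext_ack). nfnetlink does not report anything through it
 * yet, hence all the NULLs, but the plumbing lets a parser attach a
 * human-readable reason to a nack. A hypothetical user of the new
 * parameter (function name and message invented for illustration):
 */
static int example_parse(const struct nlattr *attr,
			 struct netlink_ext_ack *extack)
{
	if (!attr) {
		NL_SET_ERR_MSG(extack, "missing mandatory attribute");
		return -EINVAL;
	}
	return 0;
}
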
				 */
 				nfnl_err_reset(&err_list);
-				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
+				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
+					    NULL);
 				status |= NFNL_BATCH_FAILURE;
 				goto done;
 			}
@@ -465,9 +467,10 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
 	    skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
 		return;
 
-	err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy);
+	err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
+			NULL);
 	if (err < 0) {
-		netlink_ack(skb, nlh, err);
+		netlink_ack(skb, nlh, err, NULL);
 		return;
 	}
 	if (cda[NFNL_BATCH_GENID])
@@ -493,14 +496,14 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 		return;
 
 	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
-		netlink_ack(skb, nlh, -EPERM);
+		netlink_ack(skb, nlh, -EPERM, NULL);
 		return;
 	}
 
 	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
 		nfnetlink_rcv_skb_batch(skb, nlh);
 	else
-		netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
 }
 
 #ifdef CONFIG_MODULES
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index d44d89b561275e25bb31fe2b0ed198d13995533c..9898fb4d0512ce80d0bf970340e5da43db25ce15 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <linux/refcount.h>
 #include
 #include
 #include
@@ -32,7 +33,7 @@ struct nf_acct {
 	atomic64_t		bytes;
 	unsigned long		flags;
 	struct list_head	head;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
 	char			name[NFACCT_NAME_MAX];
 	struct rcu_head		rcu_head;
 	char			data[0];
@@ -123,7 +124,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
 		atomic64_set(&nfacct->pkts,
 			     be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
 	}
-	atomic_set(&nfacct->refcnt, 1);
+	refcount_set(&nfacct->refcnt, 1);
 	list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
 	return 0;
 }
@@ -138,7 +139,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
 	u64 pkts, bytes;
 	u32 old_flags;
 
-	event |= NFNL_SUBSYS_ACCT << 8;
+	event = nfnl_msg_type(NFNL_SUBSYS_ACCT, event);
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
 	if (nlh == NULL)
 		goto nlmsg_failure;
@@ -166,7 +167,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
 			 NFACCT_PAD) ||
 	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
 			 NFACCT_PAD) ||
-	    nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
+	    nla_put_be32(skb, NFACCT_USE, htonl(refcount_read(&acct->refcnt))))
 		goto nla_put_failure;
 	if (acct->flags & NFACCT_F_QUOTA) {
 		u64 *quota = (u64 *)acct->data;
@@ -243,7 +244,8 @@ nfacct_filter_alloc(const struct nlattr * const attr)
 	struct nlattr *tb[NFACCT_FILTER_MAX + 1];
 	int err;
 
-	err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy);
+	err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy,
+			       NULL);
 	if (err < 0)
 		return ERR_PTR(err);
 
@@ -329,7 +331,7 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
 	/* We want to avoid races with nfnl_acct_put. So only when the current
	 * refcnt is 1, we decrease it to 0.
 	 */
-	if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) {
+	if (refcount_dec_if_one(&cur->refcnt)) {
 		/* We are protected by nfnl mutex.
*/ list_del_rcu(&cur->head); kfree_rcu(cur, rcu_head); @@ -413,7 +415,7 @@ struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name) if (!try_module_get(THIS_MODULE)) goto err; - if (!atomic_inc_not_zero(&cur->refcnt)) { + if (!refcount_inc_not_zero(&cur->refcnt)) { module_put(THIS_MODULE); goto err; } @@ -429,7 +431,7 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get); void nfnl_acct_put(struct nf_acct *acct) { - if (atomic_dec_and_test(&acct->refcnt)) + if (refcount_dec_and_test(&acct->refcnt)) kfree_rcu(acct, rcu_head); module_put(THIS_MODULE); @@ -502,7 +504,7 @@ static void __net_exit nfnl_acct_net_exit(struct net *net) list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) { list_del_rcu(&cur->head); - if (atomic_dec_and_test(&cur->refcnt)) + if (refcount_dec_and_test(&cur->refcnt)) kfree_rcu(cur, rcu_head); } } diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index d45558178da5b62a8ad7c896e096c1862c512091..950bf6eadc6578516ac92b50427fe682cba3976d 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -77,7 +77,8 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, int err; struct nlattr *tb[NFCTH_TUPLE_MAX+1]; - err = nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol); + err = nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, + nfnl_cthelper_tuple_pol, NULL); if (err < 0) return err; @@ -104,7 +105,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) if (help->helper->data_len == 0) return -EINVAL; - memcpy(help->data, nla_data(attr), help->helper->data_len); + nla_memcpy(help->data, nla_data(attr), sizeof(help->data)); return 0; } @@ -137,7 +138,8 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy, int err; struct nlattr *tb[NFCTH_POLICY_MAX+1]; - err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol); + err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, + nfnl_cthelper_expect_pol, NULL); if (err < 0) return err; @@ -150,6 +152,9 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy, nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); expect_policy->max_expected = ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); + if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) + return -EINVAL; + expect_policy->timeout = ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); @@ -171,7 +176,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, unsigned int class_max; ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, - nfnl_cthelper_expect_policy_set); + nfnl_cthelper_expect_policy_set, NULL); if (ret < 0) return ret; @@ -213,6 +218,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[], { struct nf_conntrack_helper *helper; struct nfnl_cthelper *nfcth; + unsigned int size; int ret; if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) @@ -228,7 +234,12 @@ nfnl_cthelper_create(const struct nlattr * const tb[], goto err1; strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); - helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); + size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); + if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { + ret = -ENOMEM; + goto err2; + } + helper->flags |= NF_CT_HELPER_F_USERSPACE; memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple)); @@ -276,7 +287,7 @@ nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy, int err; err = 
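
/*
 * [Editorial aside.] nfnetlink_acct above -- and nfnetlink_cttimeout and
 * nfnetlink_log further down -- convert their object use counts from
 * atomic_t to refcount_t, which saturates instead of wrapping and so turns
 * a refcount overflow or use-after-free into a loud warning. The
 * open-coded idioms map one-to-one onto the dedicated API:
 */
static bool obj_try_delete(refcount_t *r)
{
	/* was: atomic_cmpxchg(r, 1, 0) == 1 */
	return refcount_dec_if_one(r);
}

static bool obj_try_get(refcount_t *r)
{
	/* was: atomic_inc_not_zero(r) */
	return refcount_inc_not_zero(r);
}
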
nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, - nfnl_cthelper_expect_pol); + nfnl_cthelper_expect_pol, NULL); if (err < 0) return err; @@ -290,6 +301,9 @@ nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy, new_policy->max_expected = ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); + if (new_policy->max_expected > NF_CT_EXPECT_MAX_CNT) + return -EINVAL; + new_policy->timeout = ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); @@ -336,7 +350,7 @@ static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper, int err; err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, - nfnl_cthelper_expect_policy_set); + nfnl_cthelper_expect_policy_set, NULL); if (err < 0) return err; @@ -501,7 +515,7 @@ nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, unsigned int flags = portid ? NLM_F_MULTI : 0; int status; - event |= NFNL_SUBSYS_CTHELPER << 8; + event = nfnl_msg_type(NFNL_SUBSYS_CTHELPER, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 47d6656c9119fd5c75fb1fb5f0bfd4bb49f1c38b..a3e7bb54d96acf2e7ac570077d57502d5b23625b 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -56,7 +56,8 @@ ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto, struct nlattr *tb[l4proto->ctnl_timeout.nlattr_max+1]; ret = nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max, - attr, l4proto->ctnl_timeout.nla_policy); + attr, l4proto->ctnl_timeout.nla_policy, + NULL); if (ret < 0) return ret; @@ -138,7 +139,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl, strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME])); timeout->l3num = l3num; timeout->l4proto = l4proto; - atomic_set(&timeout->refcnt, 1); + refcount_set(&timeout->refcnt, 1); list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list); return 0; @@ -158,7 +159,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, unsigned int flags = portid ? NLM_F_MULTI : 0; struct nf_conntrack_l4proto *l4proto = timeout->l4proto; - event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8; + event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; @@ -172,7 +173,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || nla_put_be32(skb, CTA_TIMEOUT_USE, - htonl(atomic_read(&timeout->refcnt)))) + htonl(refcount_read(&timeout->refcnt)))) goto nla_put_failure; if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { @@ -339,7 +340,7 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout) /* We want to avoid races with ctnl_timeout_put. So only when the * current refcnt is 1, we decrease it to 0. */ - if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) { + if (refcount_dec_if_one(&timeout->refcnt)) { /* We are protected by nfnl mutex. */ list_del_rcu(&timeout->head); nf_ct_l4proto_put(timeout->l4proto); @@ -431,7 +432,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
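
/*
 * [Editorial aside.] Note the hardening added to nfnl_cthelper in these
 * hunks: userspace-supplied sizes and counts are validated before they
 * reach fixed-size kernel storage, and the helper-data copy is bounded by
 * the destination (nla_memcpy) rather than by an attacker-controlled
 * length field. The two range checks, distilled from the hunks above:
 */
	size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
	if (size > FIELD_SIZEOF(struct nf_conn_help, data))
		return -ENOMEM;		/* private data must fit nf_conn_help */

	if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
		return -EINVAL;		/* cap expectations per helper policy */
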
NLM_F_MULTI : 0;
 
-	event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
 	if (nlh == NULL)
 		goto nlmsg_failure;
@@ -536,7 +537,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
 		if (!try_module_get(THIS_MODULE))
 			goto err;
 
-		if (!atomic_inc_not_zero(&timeout->refcnt)) {
+		if (!refcount_inc_not_zero(&timeout->refcnt)) {
 			module_put(THIS_MODULE);
 			goto err;
 		}
@@ -550,7 +551,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
 
 static void ctnl_timeout_put(struct ctnl_timeout *timeout)
 {
-	if (atomic_dec_and_test(&timeout->refcnt))
+	if (refcount_dec_and_test(&timeout->refcnt))
 		kfree_rcu(timeout, rcu_head);
 
 	module_put(THIS_MODULE);
@@ -601,7 +602,7 @@ static void __net_exit cttimeout_net_exit(struct net *net)
 		list_del_rcu(&cur->head);
 		nf_ct_l4proto_put(cur->l4proto);
 
-		if (atomic_dec_and_test(&cur->refcnt))
+		if (refcount_dec_and_test(&cur->refcnt))
 			kfree_rcu(cur, rcu_head);
 	}
 }
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 08247bf7d7b836828c8151ed07f438e05973c2c0..da9704971a83cd2f1f2808bd99061c880d9349f7 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -40,6 +40,8 @@
 #include
 #include
+#include <linux/refcount.h>
+
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 #include "../bridge/br_private.h"
@@ -57,7 +59,7 @@ struct nfulnl_instance {
 	struct hlist_node hlist;	/* global list of instances */
 	spinlock_t lock;
-	atomic_t use;			/* use count */
+	refcount_t use;			/* use count */
 
 	unsigned int qlen;		/* number of nlmsgs in skb */
 	struct sk_buff *skb;		/* pre-allocatd skb */
@@ -115,7 +117,7 @@ __instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
 static inline void
 instance_get(struct nfulnl_instance *inst)
 {
-	atomic_inc(&inst->use);
+	refcount_inc(&inst->use);
 }
 
 static struct nfulnl_instance *
@@ -125,7 +127,7 @@ instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
 	rcu_read_lock_bh();
 	inst = __instance_lookup(log, group_num);
-	if (inst && !atomic_inc_not_zero(&inst->use))
+	if (inst && !refcount_inc_not_zero(&inst->use))
 		inst = NULL;
 	rcu_read_unlock_bh();
 
@@ -145,7 +147,7 @@ static void nfulnl_instance_free_rcu(struct rcu_head *head)
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-	if (inst && atomic_dec_and_test(&inst->use))
+	if (inst && refcount_dec_and_test(&inst->use))
 		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
@@ -180,7 +182,7 @@ instance_create(struct net *net, u_int16_t group_num,
 	INIT_HLIST_NODE(&inst->hlist);
 	spin_lock_init(&inst->lock);
 	/* needs to be two, since we _put() after creation */
-	atomic_set(&inst->use, 2);
+	refcount_set(&inst->use, 2);
 
 	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
@@ -409,7 +411,7 @@ __build_packet_message(struct nfnl_log_net *log,
 	const unsigned char *hwhdrp;
 
 	nlh = nlmsg_put(inst->skb, 0, 0,
-			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
+			nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
 			sizeof(struct nfgenmsg), 0);
 	if (!nlh)
 		return -1;
@@ -801,7 +803,7 @@ static int nfulnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static struct nf_logger nfulnl_logger __read_mostly = {
 	.name	= "nfnetlink_log",
 	.type	= NF_LOG_TYPE_ULOG,
-	.logfn	= &nfulnl_log_packet,
+	.logfn	= nfulnl_log_packet,
 	.me	= THIS_MODULE,
 };
 
@@ -1031,7 +1033,7 @@ static int seq_show(struct seq_file *s, void *v)
 		   inst->group_num,
 		   inst->peer_portid, inst->qlen,
 		   inst->copy_mode, inst->copy_range,
-		   inst->flushtimeout, atomic_read(&inst->use));
+
inst->flushtimeout, refcount_read(&inst->use)); return 0; } @@ -1138,10 +1140,10 @@ static int __init nfnetlink_log_init(void) static void __exit nfnetlink_log_fini(void) { - nf_log_unregister(&nfulnl_logger); nfnetlink_subsys_unregister(&nfulnl_subsys); netlink_unregister_notifier(&nfulnl_rtnl_notifier); unregister_pernet_subsys(&nfnl_log_net_ops); + nf_log_unregister(&nfulnl_logger); } MODULE_DESCRIPTION("netfilter userspace logging"); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 933509ebf3d3e2e84aecd55fd7f19f21f69e46d4..8a0f218b79380bb66176b54a7bcd54e43e909a0a 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -447,7 +447,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, } nlh = nlmsg_put(skb, 0, 0, - NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, + nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET), sizeof(struct nfgenmsg), 0); if (!nlh) { skb_tx_error(entskb); @@ -922,16 +922,10 @@ static struct notifier_block nfqnl_dev_notifier = { .notifier_call = nfqnl_rcv_dev_event, }; -static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long entry_ptr) -{ - return rcu_access_pointer(entry->hook) == - (struct nf_hook_entry *)entry_ptr; -} - -static void nfqnl_nf_hook_drop(struct net *net, - const struct nf_hook_entry *hook) +static unsigned int nfqnl_nf_hook_drop(struct net *net) { struct nfnl_queue_net *q = nfnl_queue_pernet(net); + unsigned int instances = 0; int i; rcu_read_lock(); @@ -939,10 +933,14 @@ static void nfqnl_nf_hook_drop(struct net *net, struct nfqnl_instance *inst; struct hlist_head *head = &q->instance_table[i]; - hlist_for_each_entry_rcu(inst, head, hlist) - nfqnl_flush(inst, nf_hook_cmp, (unsigned long)hook); + hlist_for_each_entry_rcu(inst, head, hlist) { + nfqnl_flush(inst, NULL, 0); + instances++; + } } rcu_read_unlock(); + + return instances; } static int @@ -1109,7 +1107,7 @@ static int nfqa_parse_bridge(struct nf_queue_entry *entry, int err; err = nla_parse_nested(tb, NFQA_VLAN_MAX, nfqa[NFQA_VLAN], - nfqa_vlan_policy); + nfqa_vlan_policy, NULL); if (err < 0) return err; @@ -1213,8 +1211,8 @@ static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { }; static const struct nf_queue_handler nfqh = { - .outfn = &nfqnl_enqueue_packet, - .nf_hook_drop = &nfqnl_nf_hook_drop, + .outfn = nfqnl_enqueue_packet, + .nf_hook_drop = nfqnl_nf_hook_drop, }; static int nfqnl_recv_config(struct net *net, struct sock *ctnl, diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index c21e7eb8dce02a6b73c5a466300ae793a2787b26..f753ec69f7902b80c9f448f764ecc1e48f6a2680 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -42,7 +42,8 @@ static int nft_compat_chain_validate_dependency(const char *tablename, { const struct nft_base_chain *basechain; - if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) + if (!tablename || + !nft_is_base_chain(chain)) return 0; basechain = nft_base_chain(chain); @@ -165,7 +166,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, par->entryinfo = entry; par->target = target; par->targinfo = info; - if (ctx->chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); const struct nf_hook_ops *ops = &basechain->ops[0]; @@ -200,7 +201,7 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) int err; err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr, - nft_rule_compat_policy); + 
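
/*
 * [Editorial aside.] nfqnl_nf_hook_drop() above no longer matches queued
 * entries against the single hook being removed (nf_hook_cmp() is
 * deleted); it flushes every instance in the netns and reports how many it
 * touched. Presumably this lets the hook-unregister path skip its
 * expensive RCU wait when nothing was queued. A sketch of the assumed
 * caller contract, matching the new nf_queue_nf_hook_drop() signature:
 */
	unsigned int queued;

	queued = nf_queue_nf_hook_drop(net);	/* drop everything queued here */
	if (queued)
		synchronize_net();	/* assumption: only wait if work was done */
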
nft_rule_compat_policy, NULL); if (err < 0) return err; @@ -230,10 +231,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union nft_entry e = {}; int ret; - ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); - if (ret < 0) - goto err; - target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); if (ctx->nla[NFTA_RULE_COMPAT]) { @@ -302,7 +299,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, unsigned int hook_mask = 0; int ret; - if (ctx->chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); const struct nf_hook_ops *ops = &basechain->ops[0]; @@ -383,7 +380,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, par->entryinfo = entry; par->match = match; par->matchinfo = info; - if (ctx->chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); const struct nf_hook_ops *ops = &basechain->ops[0]; @@ -419,10 +416,6 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union nft_entry e = {}; int ret; - ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); - if (ret < 0) - goto err; - match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); if (ctx->nla[NFTA_RULE_COMPAT]) { @@ -485,7 +478,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, unsigned int hook_mask = 0; int ret; - if (ctx->chain->flags & NFT_BASE_CHAIN) { + if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); const struct nf_hook_ops *ops = &basechain->ops[0]; @@ -511,7 +504,7 @@ nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
NLM_F_MULTI : 0; - event |= NFNL_SUBSYS_NFT_COMPAT << 8; + event = nfnl_msg_type(NFNL_SUBSYS_NFT_COMPAT, event); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); if (nlh == NULL) goto nlmsg_failure; diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c index 7f84222133414d4a65ed96508c83dea711295c9f..67a710ebde09da21464da4bb22a78a36c791274d 100644 --- a/net/netfilter/nft_counter.c +++ b/net/netfilter/nft_counter.c @@ -82,7 +82,8 @@ static int nft_counter_do_init(const struct nlattr * const tb[], return 0; } -static int nft_counter_obj_init(const struct nlattr * const tb[], +static int nft_counter_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], struct nft_object *obj) { struct nft_counter_percpu_priv *priv = nft_obj_data(obj); diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 0264258c46feb5071a8eebcf9299b4b717fd0a32..a34ceb38fc55681962daad2c323f0011a5fe683d 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -32,6 +32,12 @@ struct nft_ct { }; }; +struct nft_ct_helper_obj { + struct nf_conntrack_helper *helper4; + struct nf_conntrack_helper *helper6; + u8 l4proto; +}; + #ifdef CONFIG_NF_CONNTRACK_ZONES static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template); static unsigned int nft_ct_pcpu_template_refcnt __read_mostly; @@ -66,12 +72,12 @@ static void nft_ct_get_eval(const struct nft_expr *expr, switch (priv->key) { case NFT_CT_STATE: - if (ct == NULL) - state = NF_CT_STATE_INVALID_BIT; - else if (nf_ct_is_untracked(ct)) + if (ct) + state = NF_CT_STATE_BIT(ctinfo); + else if (ctinfo == IP_CT_UNTRACKED) state = NF_CT_STATE_UNTRACKED_BIT; else - state = NF_CT_STATE_BIT(ctinfo); + state = NF_CT_STATE_INVALID_BIT; *dest = state; return; default: @@ -258,7 +264,7 @@ static void nft_ct_set_eval(const struct nft_expr *expr, struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL) + if (ct == NULL || nf_ct_is_template(ct)) return; switch (priv->key) { @@ -277,6 +283,22 @@ static void nft_ct_set_eval(const struct nft_expr *expr, ®s->data[priv->sreg], NF_CT_LABELS_MAX_SIZE / sizeof(u32)); break; +#endif +#ifdef CONFIG_NF_CONNTRACK_EVENTS + case NFT_CT_EVENTMASK: { + struct nf_conntrack_ecache *e = nf_ct_ecache_find(ct); + u32 ctmask = regs->data[priv->sreg]; + + if (e) { + if (e->ctmask != ctmask) + e->ctmask = ctmask; + break; + } + + if (ctmask && !nf_ct_is_confirmed(ct)) + nf_ct_ecache_ext_add(ct, ctmask, 0, GFP_ATOMIC); + break; + } #endif default: break; @@ -532,6 +554,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, nft_ct_pcpu_template_refcnt++; len = sizeof(u16); break; +#endif +#ifdef CONFIG_NF_CONNTRACK_EVENTS + case NFT_CT_EVENTMASK: + if (tb[NFTA_CT_DIRECTION]) + return -EINVAL; + len = sizeof(u32); + break; #endif default: return -EOPNOTSUPP; @@ -696,7 +725,7 @@ nft_ct_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_ct_type __read_mostly = { .name = "ct", - .select_ops = &nft_ct_select_ops, + .select_ops = nft_ct_select_ops, .policy = nft_ct_policy, .maxattr = NFTA_CT_MAX, .owner = THIS_MODULE, @@ -712,12 +741,10 @@ static void nft_notrack_eval(const struct nft_expr *expr, ct = nf_ct_get(pkt->skb, &ctinfo); /* Previously seen (loopback or untracked)? Ignore. 
*/ - if (ct) + if (ct || ctinfo == IP_CT_UNTRACKED) return; - ct = nf_ct_untracked_get(); - atomic_inc(&ct->ct_general.use); - nf_ct_set(skb, ct, IP_CT_NEW); + nf_ct_set(skb, ct, IP_CT_UNTRACKED); } static struct nft_expr_type nft_notrack_type; @@ -733,6 +760,162 @@ static struct nft_expr_type nft_notrack_type __read_mostly = { .owner = THIS_MODULE, }; +static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], + struct nft_object *obj) +{ + struct nft_ct_helper_obj *priv = nft_obj_data(obj); + struct nf_conntrack_helper *help4, *help6; + char name[NF_CT_HELPER_NAME_LEN]; + int family = ctx->afi->family; + + if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO]) + return -EINVAL; + + priv->l4proto = nla_get_u8(tb[NFTA_CT_HELPER_L4PROTO]); + if (!priv->l4proto) + return -ENOENT; + + nla_strlcpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name)); + + if (tb[NFTA_CT_HELPER_L3PROTO]) + family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO])); + + help4 = NULL; + help6 = NULL; + + switch (family) { + case NFPROTO_IPV4: + if (ctx->afi->family == NFPROTO_IPV6) + return -EINVAL; + + help4 = nf_conntrack_helper_try_module_get(name, family, + priv->l4proto); + break; + case NFPROTO_IPV6: + if (ctx->afi->family == NFPROTO_IPV4) + return -EINVAL; + + help6 = nf_conntrack_helper_try_module_get(name, family, + priv->l4proto); + break; + case NFPROTO_NETDEV: /* fallthrough */ + case NFPROTO_BRIDGE: /* same */ + case NFPROTO_INET: + help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4, + priv->l4proto); + help6 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV6, + priv->l4proto); + break; + default: + return -EAFNOSUPPORT; + } + + /* && is intentional; only error if INET found neither ipv4 or ipv6 */ + if (!help4 && !help6) + return -ENOENT; + + priv->helper4 = help4; + priv->helper6 = help6; + + return 0; +} + +static void nft_ct_helper_obj_destroy(struct nft_object *obj) +{ + struct nft_ct_helper_obj *priv = nft_obj_data(obj); + + if (priv->helper4) + module_put(priv->helper4->me); + if (priv->helper6) + module_put(priv->helper6->me); +} + +static void nft_ct_helper_obj_eval(struct nft_object *obj, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_ct_helper_obj *priv = nft_obj_data(obj); + struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); + struct nf_conntrack_helper *to_assign = NULL; + struct nf_conn_help *help; + + if (!ct || + nf_ct_is_confirmed(ct) || + nf_ct_is_template(ct) || + priv->l4proto != nf_ct_protonum(ct)) + return; + + switch (nf_ct_l3num(ct)) { + case NFPROTO_IPV4: + to_assign = priv->helper4; + break; + case NFPROTO_IPV6: + to_assign = priv->helper6; + break; + default: + WARN_ON_ONCE(1); + return; + } + + if (!to_assign) + return; + + if (test_bit(IPS_HELPER_BIT, &ct->status)) + return; + + help = nf_ct_helper_ext_add(ct, to_assign, GFP_ATOMIC); + if (help) { + rcu_assign_pointer(help->helper, to_assign); + set_bit(IPS_HELPER_BIT, &ct->status); + } +} + +static int nft_ct_helper_obj_dump(struct sk_buff *skb, + struct nft_object *obj, bool reset) +{ + const struct nft_ct_helper_obj *priv = nft_obj_data(obj); + const struct nf_conntrack_helper *helper = priv->helper4; + u16 family; + + if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) + return -1; + + if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) + return -1; + + if (priv->helper4 && priv->helper6) + family = NFPROTO_INET; + else if (priv->helper6) + family = NFPROTO_IPV6; + else + family = NFPROTO_IPV4; + + if 
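
/*
 * [Editorial aside.] The block above introduces NFT_OBJECT_CT_HELPER, a
 * named object that resolves conntrack helper modules at init time (both
 * the IPv4 and IPv6 variants for family inet) and attaches one to
 * unconfirmed, untemplated conntracks in the eval path. Assuming the
 * nft(8) syntax of the same era, a ruleset would use it roughly like:
 *
 *	nft add ct helper inet filter ftp-std { type "ftp" protocol tcp \; }
 *	nft add rule inet filter prerouting tcp dport 21 ct helper set "ftp-std"
 */
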
(nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) + return -1; + + return 0; +} + +static const struct nla_policy nft_ct_helper_policy[NFTA_CT_HELPER_MAX + 1] = { + [NFTA_CT_HELPER_NAME] = { .type = NLA_STRING, + .len = NF_CT_HELPER_NAME_LEN - 1 }, + [NFTA_CT_HELPER_L3PROTO] = { .type = NLA_U16 }, + [NFTA_CT_HELPER_L4PROTO] = { .type = NLA_U8 }, +}; + +static struct nft_object_type nft_ct_helper_obj __read_mostly = { + .type = NFT_OBJECT_CT_HELPER, + .size = sizeof(struct nft_ct_helper_obj), + .maxattr = NFTA_CT_HELPER_MAX, + .policy = nft_ct_helper_policy, + .eval = nft_ct_helper_obj_eval, + .init = nft_ct_helper_obj_init, + .destroy = nft_ct_helper_obj_destroy, + .dump = nft_ct_helper_obj_dump, + .owner = THIS_MODULE, +}; + static int __init nft_ct_module_init(void) { int err; @@ -747,7 +930,14 @@ static int __init nft_ct_module_init(void) if (err < 0) goto err1; + err = nft_register_obj(&nft_ct_helper_obj); + if (err < 0) + goto err2; + return 0; + +err2: + nft_unregister_expr(&nft_notrack_type); err1: nft_unregister_expr(&nft_ct_type); return err; @@ -755,6 +945,7 @@ static int __init nft_ct_module_init(void) static void __exit nft_ct_module_exit(void) { + nft_unregister_obj(&nft_ct_helper_obj); nft_unregister_expr(&nft_notrack_type); nft_unregister_expr(&nft_ct_type); } @@ -766,3 +957,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_EXPR("ct"); MODULE_ALIAS_NFT_EXPR("notrack"); +MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER); diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 049ad2d9ee66959367a051903563dca6ba654edb..3948da380259538c2fd4823f65a9407241f8af4e 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -133,16 +133,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, priv->invert = true; } - set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME], - genmask); - if (IS_ERR(set)) { - if (tb[NFTA_DYNSET_SET_ID]) - set = nf_tables_set_lookup_byid(ctx->net, - tb[NFTA_DYNSET_SET_ID], - genmask); - if (IS_ERR(set)) - return PTR_ERR(set); - } + set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_DYNSET_SET_NAME], + tb[NFTA_DYNSET_SET_ID], genmask); + if (IS_ERR(set)) + return PTR_ERR(set); if (set->ops->update == NULL) return -EOPNOTSUPP; diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index c308920b194cdbe5e3a2e9a09cfb8aab7267f588..1ec49fe5845f1ec8f289f411d456a5765303e244 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -98,14 +98,21 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr, goto err; offset = i + priv->offset; - dest[priv->len / NFT_REG32_SIZE] = 0; - memcpy(dest, opt + offset, priv->len); + if (priv->flags & NFT_EXTHDR_F_PRESENT) { + *dest = 1; + } else { + dest[priv->len / NFT_REG32_SIZE] = 0; + memcpy(dest, opt + offset, priv->len); + } return; } err: - regs->verdict.code = NFT_BREAK; + if (priv->flags & NFT_EXTHDR_F_PRESENT) + *dest = 0; + else + regs->verdict.code = NFT_BREAK; } static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { @@ -225,7 +232,7 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_exthdr_type __read_mostly = { .name = "exthdr", - .select_ops = &nft_exthdr_select_ops, + .select_ops = nft_exthdr_select_ops, .policy = nft_exthdr_policy, .maxattr = NFTA_EXTHDR_MAX, .owner = THIS_MODULE, diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c index 29a4906adc277cd3a2cf55242224e4c99fd070e7..21df8cccea6582e56d7bfbb6fba21f821b7c56d9 100644 --- 
a/net/netfilter/nft_fib.c +++ b/net/netfilter/nft_fib.c @@ -24,7 +24,8 @@ const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = { EXPORT_SYMBOL(nft_fib_policy); #define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \ - NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF) + NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF | \ + NFTA_FIB_F_PRESENT) int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) @@ -112,7 +113,7 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (err < 0) return err; - return nft_fib_validate(ctx, expr, NULL); + return 0; } EXPORT_SYMBOL_GPL(nft_fib_init); @@ -133,19 +134,22 @@ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr) } EXPORT_SYMBOL_GPL(nft_fib_dump); -void nft_fib_store_result(void *reg, enum nft_fib_result r, +void nft_fib_store_result(void *reg, const struct nft_fib *priv, const struct nft_pktinfo *pkt, int index) { struct net_device *dev; u32 *dreg = reg; - switch (r) { + switch (priv->result) { case NFT_FIB_RESULT_OIF: - *dreg = index; + *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index; break; case NFT_FIB_RESULT_OIFNAME: dev = dev_get_by_index_rcu(nft_net(pkt), index); - strncpy(reg, dev ? dev->name : "", IFNAMSIZ); + if (priv->flags & NFTA_FIB_F_PRESENT) + *dreg = !!dev; + else + strncpy(reg, dev ? dev->name : "", IFNAMSIZ); break; default: WARN_ON_ONCE(1); diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index c4dad1254ead01818fb5b2c0474b26f74762fee2..24f2f7567ddb779af44ee5050df36292f413d4d6 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -17,7 +17,7 @@ #include #include -struct nft_hash { +struct nft_jhash { enum nft_registers sreg:8; enum nft_registers dreg:8; u8 len; @@ -27,11 +27,11 @@ struct nft_hash { u32 offset; }; -static void nft_hash_eval(const struct nft_expr *expr, - struct nft_regs *regs, - const struct nft_pktinfo *pkt) +static void nft_jhash_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) { - struct nft_hash *priv = nft_expr_priv(expr); + struct nft_jhash *priv = nft_expr_priv(expr); const void *data = ®s->data[priv->sreg]; u32 h; @@ -39,6 +39,25 @@ static void nft_hash_eval(const struct nft_expr *expr, regs->data[priv->dreg] = h + priv->offset; } +struct nft_symhash { + enum nft_registers dreg:8; + u32 modulus; + u32 offset; +}; + +static void nft_symhash_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nft_symhash *priv = nft_expr_priv(expr); + struct sk_buff *skb = pkt->skb; + u32 h; + + h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus); + + regs->data[priv->dreg] = h + priv->offset; +} + static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { [NFTA_HASH_SREG] = { .type = NLA_U32 }, [NFTA_HASH_DREG] = { .type = NLA_U32 }, @@ -46,13 +65,14 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, [NFTA_HASH_SEED] = { .type = NLA_U32 }, [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, + [NFTA_HASH_TYPE] = { .type = NLA_U32 }, }; -static int nft_hash_init(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nlattr * const tb[]) +static int nft_jhash_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) { - struct nft_hash *priv = nft_expr_priv(expr); + struct nft_jhash *priv = nft_expr_priv(expr); u32 len; int err; @@ -95,10 +115,36 @@ 
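
/*
 * [Editorial aside.] The hash expression is split above: the original
 * Jenkins-hash op keeps hashing explicit register data with a seed, while
 * the new "symmetric" op reuses the kernel's symmetric flow hash, so both
 * directions of a flow land in the same bucket. Distilled side by side
 * (the jhash() call is inferred from the pre-existing nft_hash_eval body,
 * which this diff only renames):
 */
	h = reciprocal_scale(jhash(data, priv->len, priv->seed),
			     priv->modulus);		/* NFT_HASH_JENKINS */
	h = reciprocal_scale(__skb_get_hash_symmetric(skb),
			     priv->modulus);		/* NFT_HASH_SYM */
	regs->data[priv->dreg] = h + priv->offset;
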
static int nft_hash_init(const struct nft_ctx *ctx, NFT_DATA_VALUE, sizeof(u32)); } -static int nft_hash_dump(struct sk_buff *skb, - const struct nft_expr *expr) +static int nft_symhash_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_symhash *priv = nft_expr_priv(expr); + + if (!tb[NFTA_HASH_DREG] || + !tb[NFTA_HASH_MODULUS]) + return -EINVAL; + + if (tb[NFTA_HASH_OFFSET]) + priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET])); + + priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); + + priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); + if (priv->modulus <= 1) + return -ERANGE; + + if (priv->offset + priv->modulus - 1 < priv->offset) + return -EOVERFLOW; + + return nft_validate_register_store(ctx, priv->dreg, NULL, + NFT_DATA_VALUE, sizeof(u32)); +} + +static int nft_jhash_dump(struct sk_buff *skb, + const struct nft_expr *expr) { - const struct nft_hash *priv = nft_expr_priv(expr); + const struct nft_jhash *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg)) goto nla_put_failure; @@ -114,6 +160,28 @@ static int nft_hash_dump(struct sk_buff *skb, if (priv->offset != 0) if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) goto nla_put_failure; + if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_JENKINS))) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -1; +} + +static int nft_symhash_dump(struct sk_buff *skb, + const struct nft_expr *expr) +{ + const struct nft_symhash *priv = nft_expr_priv(expr); + + if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg)) + goto nla_put_failure; + if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus))) + goto nla_put_failure; + if (priv->offset != 0) + if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) + goto nla_put_failure; + if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_SYM))) + goto nla_put_failure; return 0; nla_put_failure: @@ -121,17 +189,46 @@ static int nft_hash_dump(struct sk_buff *skb, } static struct nft_expr_type nft_hash_type; -static const struct nft_expr_ops nft_hash_ops = { +static const struct nft_expr_ops nft_jhash_ops = { .type = &nft_hash_type, - .size = NFT_EXPR_SIZE(sizeof(struct nft_hash)), - .eval = nft_hash_eval, - .init = nft_hash_init, - .dump = nft_hash_dump, + .size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)), + .eval = nft_jhash_eval, + .init = nft_jhash_init, + .dump = nft_jhash_dump, }; +static const struct nft_expr_ops nft_symhash_ops = { + .type = &nft_hash_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)), + .eval = nft_symhash_eval, + .init = nft_symhash_init, + .dump = nft_symhash_dump, +}; + +static const struct nft_expr_ops * +nft_hash_select_ops(const struct nft_ctx *ctx, + const struct nlattr * const tb[]) +{ + u32 type; + + if (!tb[NFTA_HASH_TYPE]) + return &nft_jhash_ops; + + type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE])); + switch (type) { + case NFT_HASH_SYM: + return &nft_symhash_ops; + case NFT_HASH_JENKINS: + return &nft_jhash_ops; + default: + break; + } + return ERR_PTR(-EOPNOTSUPP); +} + static struct nft_expr_type nft_hash_type __read_mostly = { .name = "hash", - .ops = &nft_hash_ops, + .select_ops = nft_hash_select_ops, .policy = nft_hash_policy, .maxattr = NFTA_HASH_MAX, .owner = THIS_MODULE, diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index c6baf412236d662b0d165fec3b4ff26579c9c6d8..18dd57a526513bd726944fa7ad7a9e41fcfb0251 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c @@ -17,9 
+17,8 @@ #include #include -static DEFINE_SPINLOCK(limit_lock); - struct nft_limit { + spinlock_t lock; u64 last; u64 tokens; u64 tokens_max; @@ -34,7 +33,7 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost) u64 now, tokens; s64 delta; - spin_lock_bh(&limit_lock); + spin_lock_bh(&limit->lock); now = ktime_get_ns(); tokens = limit->tokens + now - limit->last; if (tokens > limit->tokens_max) @@ -44,11 +43,11 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost) delta = tokens - cost; if (delta >= 0) { limit->tokens = delta; - spin_unlock_bh(&limit_lock); + spin_unlock_bh(&limit->lock); return limit->invert; } limit->tokens = tokens; - spin_unlock_bh(&limit_lock); + spin_unlock_bh(&limit->lock); return !limit->invert; } @@ -86,6 +85,7 @@ static int nft_limit_init(struct nft_limit *limit, limit->invert = true; } limit->last = ktime_get_ns(); + spin_lock_init(&limit->lock); return 0; } diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index e21aea7e5ec8f141ea3155d1da3c491484c00a73..475570e89ede710b323868792bb16fd5f09d1b09 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -71,16 +71,10 @@ static int nft_lookup_init(const struct nft_ctx *ctx, tb[NFTA_LOOKUP_SREG] == NULL) return -EINVAL; - set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET], genmask); - if (IS_ERR(set)) { - if (tb[NFTA_LOOKUP_SET_ID]) { - set = nf_tables_set_lookup_byid(ctx->net, - tb[NFTA_LOOKUP_SET_ID], - genmask); - } - if (IS_ERR(set)) - return PTR_ERR(set); - } + set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET], + tb[NFTA_LOOKUP_SET_ID], genmask); + if (IS_ERR(set)) + return PTR_ERR(set); if (set->flags & NFT_SET_EVAL) return -EOPNOTSUPP; diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 11ce016cd47948f3a2902402e0cbbda5e22d9438..6ac03d4266c9038cb4ffd3de412a216b769d07a1 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -46,10 +46,6 @@ int nft_masq_init(const struct nft_ctx *ctx, struct nft_masq *priv = nft_expr_priv(expr); int err; - err = nft_masq_validate(ctx, expr, NULL); - if (err) - return err; - if (tb[NFTA_MASQ_FLAGS]) { priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); if (priv->flags & ~NF_NAT_RANGE_MASK) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 7b60e01f38ff9f2f9fa7d28f6f99b4f889d190d7..5a60eb23a7ed8cc82cc2500c7f8c116ee6df2f30 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -372,10 +372,6 @@ int nft_meta_set_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - err = nft_meta_set_validate(ctx, expr, NULL); - if (err < 0) - return err; - priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); err = nft_validate_register_load(priv->sreg, len); if (err < 0) @@ -471,7 +467,7 @@ nft_meta_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_meta_type __read_mostly = { .name = "meta", - .select_ops = &nft_meta_select_ops, + .select_ops = nft_meta_select_ops, .policy = nft_meta_policy, .maxattr = NFTA_META_MAX, .owner = THIS_MODULE, diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 439e0bd152a004c98664a19ae6c920458fa6160a..ed548d06b6dda9a98888bb83f2baa6b45c965c15 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -138,10 +138,6 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return -EINVAL; } - err = nft_nat_validate(ctx, expr, NULL); - if (err < 0) - return err; - if (tb[NFTA_NAT_FAMILY] == NULL) return -EINVAL; diff --git 
a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c index a66b36097b8f4f3cc1d230d9943be852866fcfbd..5a3a52c71545ac15ae5c906dbf53d6d425c3c557 100644 --- a/net/netfilter/nft_numgen.c +++ b/net/netfilter/nft_numgen.c @@ -188,7 +188,7 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) static struct nft_expr_type nft_ng_type __read_mostly = { .name = "numgen", - .select_ops = &nft_ng_select_ops, + .select_ops = nft_ng_select_ops, .policy = nft_ng_policy, .maxattr = NFTA_NG_MAX, .owner = THIS_MODULE, diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 1ae8c49ca4a1fac06f69c41f68a36b7e85593adb..1dd428fbaaa3f836e7bcecb50355f60c14d8faad 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -116,16 +116,10 @@ static int nft_objref_map_init(const struct nft_ctx *ctx, struct nft_set *set; int err; - set = nf_tables_set_lookup(ctx->table, tb[NFTA_OBJREF_SET_NAME], genmask); - if (IS_ERR(set)) { - if (tb[NFTA_OBJREF_SET_ID]) { - set = nf_tables_set_lookup_byid(ctx->net, - tb[NFTA_OBJREF_SET_ID], - genmask); - } - if (IS_ERR(set)) - return PTR_ERR(set); - } + set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_OBJREF_SET_NAME], + tb[NFTA_OBJREF_SET_ID], genmask); + if (IS_ERR(set)) + return PTR_ERR(set); if (!(set->flags & NFT_SET_OBJECT)) return -EINVAL; diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c index dbb6aaff67ec5c151f8c8c6333721421f8e85076..98613658d4ac5ce359e946efca4c93d9df216817 100644 --- a/net/netfilter/nft_queue.c +++ b/net/netfilter/nft_queue.c @@ -197,7 +197,7 @@ nft_queue_select_ops(const struct nft_ctx *ctx, static struct nft_expr_type nft_queue_type __read_mostly = { .name = "queue", - .select_ops = &nft_queue_select_ops, + .select_ops = nft_queue_select_ops, .policy = nft_queue_policy, .maxattr = NFTA_QUEUE_MAX, .owner = THIS_MODULE, diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c index 2d6fe3559912674385e7679557fc31ddeb901b38..25e33159be57882fcf9875725079188dfaa7d113 100644 --- a/net/netfilter/nft_quota.c +++ b/net/netfilter/nft_quota.c @@ -99,7 +99,8 @@ static int nft_quota_do_init(const struct nlattr * const tb[], return 0; } -static int nft_quota_obj_init(const struct nlattr * const tb[], +static int nft_quota_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], struct nft_object *obj) { struct nft_quota *priv = nft_obj_data(obj); diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 40dcd05146d5fb0f346e6e170d16de9813e92804..1e66538bf0ff24e3286ec6312e4d593c6197bd9b 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -47,10 +47,6 @@ int nft_redir_init(const struct nft_ctx *ctx, unsigned int plen; int err; - err = nft_redir_validate(ctx, expr, NULL); - if (err < 0) - return err; - plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); if (tb[NFTA_REDIR_REG_PROTO_MIN]) { priv->sreg_proto_min = diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index c64de3f7379df551fa413a4af186f3c16886f112..29f5bd2377b0deaf7ede8ec0573bf71cfeef7478 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -42,11 +42,6 @@ int nft_reject_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_reject *priv = nft_expr_priv(expr); - int err; - - err = nft_reject_validate(ctx, expr, NULL); - if (err < 0) - return err; if (tb[NFTA_REJECT_TYPE] == NULL) return -EINVAL; diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index 
9e90a02cb104dad81daf84208902e90390a8f504..5a7fb5ff867d382f04633a2fab53997d0ca1f2b2 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c @@ -66,11 +66,7 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_reject *priv = nft_expr_priv(expr); - int icmp_code, err; - - err = nft_reject_validate(ctx, expr, NULL); - if (err < 0) - return err; + int icmp_code; if (tb[NFTA_REJECT_TYPE] == NULL) return -EINVAL; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 5f652720fc78e6de437c100d31524588e729001c..8ec086b6b56b742485e34511b38a77af848d9f99 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -352,7 +352,7 @@ static int nft_hash_init(const struct nft_set *set, static void nft_hash_elem_destroy(void *ptr, void *arg) { - nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); + nft_set_elem_destroy(arg, ptr, true); } static void nft_hash_destroy(const struct nft_set *set) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 78dfbf9588b368107bdc385c7ef208a9abd3d297..e97e2fb53f0a107b0361322be10f16b4ab4b5d32 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -18,9 +18,8 @@ #include #include -static DEFINE_SPINLOCK(nft_rbtree_lock); - struct nft_rbtree { + rwlock_t lock; struct rb_root root; }; @@ -44,14 +43,14 @@ static bool nft_rbtree_equal(const struct nft_set *set, const void *this, static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, const u32 *key, const struct nft_set_ext **ext) { - const struct nft_rbtree *priv = nft_set_priv(set); + struct nft_rbtree *priv = nft_set_priv(set); const struct nft_rbtree_elem *rbe, *interval = NULL; u8 genmask = nft_genmask_cur(net); const struct rb_node *parent; const void *this; int d; - spin_lock_bh(&nft_rbtree_lock); + read_lock_bh(&priv->lock); parent = priv->root.rb_node; while (parent != NULL) { rbe = rb_entry(parent, struct nft_rbtree_elem, node); @@ -75,7 +74,7 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, } if (nft_rbtree_interval_end(rbe)) goto out; - spin_unlock_bh(&nft_rbtree_lock); + read_unlock_bh(&priv->lock); *ext = &rbe->ext; return true; @@ -85,12 +84,12 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && !nft_rbtree_interval_end(interval)) { - spin_unlock_bh(&nft_rbtree_lock); + read_unlock_bh(&priv->lock); *ext = &interval->ext; return true; } out: - spin_unlock_bh(&nft_rbtree_lock); + read_unlock_bh(&priv->lock); return false; } @@ -140,12 +139,13 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, struct nft_set_ext **ext) { + struct nft_rbtree *priv = nft_set_priv(set); struct nft_rbtree_elem *rbe = elem->priv; int err; - spin_lock_bh(&nft_rbtree_lock); + write_lock_bh(&priv->lock); err = __nft_rbtree_insert(net, set, rbe, ext); - spin_unlock_bh(&nft_rbtree_lock); + write_unlock_bh(&priv->lock); return err; } @@ -157,9 +157,9 @@ static void nft_rbtree_remove(const struct net *net, struct nft_rbtree *priv = nft_set_priv(set); struct nft_rbtree_elem *rbe = elem->priv; - spin_lock_bh(&nft_rbtree_lock); + write_lock_bh(&priv->lock); rb_erase(&rbe->node, &priv->root); - spin_unlock_bh(&nft_rbtree_lock); + write_unlock_bh(&priv->lock); } static void nft_rbtree_activate(const struct 
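
/*
 * [Editorial aside.] The rbtree set backend drops the single global
 * nft_rbtree_lock in favour of a per-set rwlock embedded in struct
 * nft_rbtree. Lookups and walks are read-mostly, so they now run
 * concurrently on the read side while insertions and removals serialize on
 * the write side (shown side by side; each pairs with its unlock):
 */
	read_lock_bh(&priv->lock);	/* nft_rbtree_lookup(), nft_rbtree_walk() */
	write_lock_bh(&priv->lock);	/* nft_rbtree_insert(), nft_rbtree_remove() */
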
net *net, @@ -224,12 +224,12 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { - const struct nft_rbtree *priv = nft_set_priv(set); + struct nft_rbtree *priv = nft_set_priv(set); struct nft_rbtree_elem *rbe; struct nft_set_elem elem; struct rb_node *node; - spin_lock_bh(&nft_rbtree_lock); + read_lock_bh(&priv->lock); for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { rbe = rb_entry(node, struct nft_rbtree_elem, node); @@ -242,13 +242,13 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, iter->err = iter->fn(ctx, set, iter, &elem); if (iter->err < 0) { - spin_unlock_bh(&nft_rbtree_lock); + read_unlock_bh(&priv->lock); return; } cont: iter->count++; } - spin_unlock_bh(&nft_rbtree_lock); + read_unlock_bh(&priv->lock); } static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[]) @@ -262,6 +262,7 @@ static int nft_rbtree_init(const struct nft_set *set, { struct nft_rbtree *priv = nft_set_priv(set); + rwlock_init(&priv->lock); priv->root = RB_ROOT; return 0; } diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index b008db0184b8a2a1679d5d4737427a672c36b560..3cbe1bcf6a742c6d74fa455daa7f1cb7bf56ba57 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -26,11 +26,12 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) if (skb->_nfct != 0) return XT_CONTINUE; - /* special case the untracked ct : we want the percpu object */ - if (!ct) - ct = nf_ct_untracked_get(); - atomic_inc(&ct->ct_general.use); - nf_ct_set(skb, ct, IP_CT_NEW); + if (ct) { + atomic_inc(&ct->ct_general.use); + nf_ct_set(skb, ct, IP_CT_NEW); + } else { + nf_ct_set(skb, ct, IP_CT_UNTRACKED); + } return XT_CONTINUE; } @@ -335,7 +336,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par, struct nf_conn *ct = info->ct; struct nf_conn_help *help; - if (ct && !nf_ct_is_untracked(ct)) { + if (ct) { help = nfct_help(ct); if (help) module_put(help->helper->me); @@ -412,8 +413,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) if (skb->_nfct != 0) return XT_CONTINUE; - nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); - nf_conntrack_get(skb_nfct(skb)); + nf_ct_set(skb, NULL, IP_CT_UNTRACKED); return XT_CONTINUE; } diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 02afaf48a7290b15e6f2d56b2ca2c5cb07455b4d..60e6dbe124605569f249f0e15c7304b231fe1500 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c @@ -84,7 +84,7 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, struct nf_conntrack_tuple *otuple; struct nf_conntrack_tuple *rtuple; - if (ct == NULL || nf_ct_is_untracked(ct)) + if (ct == NULL) return -1; otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 9a9884a39c0e9cc806a3a6b0b16bbd387a26d220..57ef175dfbfaa86db8368c10fbbd51ba3b2a691c 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c @@ -121,9 +121,6 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) if (ct == NULL) return false; - if (nf_ct_is_untracked(ct)) - return false; - if (ct->master) hash = xt_cluster_hash(ct->master, info); else diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c index 7827128d5a95f5faf699d49c5dac796d03744a51..23372879e6e3004398454f6c96944422db8e1cc7 100644 --- a/net/netfilter/xt_connlabel.c +++ b/net/netfilter/xt_connlabel.c @@ -29,7 +29,7 @@ connlabel_mt(const struct sk_buff *skb, struct 
xt_action_param *par) bool invert = info->options & XT_CONNLABEL_OP_INVERT; ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL || nf_ct_is_untracked(ct)) + if (ct == NULL) return invert; labels = nf_ct_labels_find(ct); diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 9935d5029b0e52735b45bca017e52dd72f593c1e..ec377cc6a369c71025136fc4b8b3cf3fce177eea 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) u_int32_t newmark; ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL || nf_ct_is_untracked(ct)) + if (ct == NULL) return XT_CONTINUE; switch (info->mode) { @@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) const struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL || nf_ct_is_untracked(ct)) + if (ct == NULL) return false; return ((ct->mark & info->mask) == info->mark) ^ info->invert; diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index c0fb217bc64969bd0da2c656d4418f8c80b29d14..39cf1d019240e61f504bc8ced96a63c41c9bd839 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c @@ -172,12 +172,11 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, ct = nf_ct_get(skb, &ctinfo); - if (ct) { - if (nf_ct_is_untracked(ct)) - statebit = XT_CONNTRACK_STATE_UNTRACKED; - else - statebit = XT_CONNTRACK_STATE_BIT(ctinfo); - } else + if (ct) + statebit = XT_CONNTRACK_STATE_BIT(ctinfo); + else if (ctinfo == IP_CT_UNTRACKED) + statebit = XT_CONNTRACK_STATE_UNTRACKED; + else statebit = XT_CONNTRACK_STATE_INVALID; if (info->match_flags & XT_CONNTRACK_STATE) { diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 2a6dfe8b74d37f7d3d8ed0569650f6417b76d58a..762e1874f28b7b1faba5b48e94f743c0414689f3 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -119,7 +119,7 @@ static int cfg_copy(struct hashlimit_cfg2 *to, void *from, int revision) { if (revision == 1) { - struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from; + struct hashlimit_cfg1 *cfg = from; to->mode = cfg->mode; to->avg = cfg->avg; @@ -895,7 +895,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct xt_hashlimit_htable *htable = s->private; - unsigned int *bucket = (unsigned int *)v; + unsigned int *bucket = v; *pos = ++(*bucket); if (*pos >= htable->cfg.size) { @@ -909,7 +909,7 @@ static void dl_seq_stop(struct seq_file *s, void *v) __releases(htable->lock) { struct xt_hashlimit_htable *htable = s->private; - unsigned int *bucket = (unsigned int *)v; + unsigned int *bucket = v; if (!IS_ERR(bucket)) kfree(bucket); @@ -980,7 +980,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, static int dl_seq_show_v1(struct seq_file *s, void *v) { struct xt_hashlimit_htable *htable = s->private; - unsigned int *bucket = (unsigned int *)v; + unsigned int *bucket = v; struct dsthash_ent *ent; if (!hlist_empty(&htable->hash[*bucket])) { @@ -994,7 +994,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v) static int dl_seq_show(struct seq_file *s, void *v) { struct xt_hashlimit_htable *htable = s->private; - unsigned int *bucket = (unsigned int *)v; + unsigned int *bucket = v; struct dsthash_ent *ent; if (!hlist_empty(&htable->hash[*bucket])) { diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c index 
0fdc89064488050bf4e667b1c09a26169a326872..42540d26c2b8ec34ab62422fe5894588df2306b6 100644 --- a/net/netfilter/xt_ipvs.c +++ b/net/netfilter/xt_ipvs.c @@ -116,7 +116,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par) enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL || nf_ct_is_untracked(ct)) { + if (ct == NULL) { match = false; goto out_put_cp; } diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index dab962df178795612580a1c8e22257213bdab07d..d27b5f1ea619f9696912b58bd5012358206725d7 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -18,6 +18,7 @@ #include struct xt_limit_priv { + spinlock_t lock; unsigned long prev; uint32_t credit; }; @@ -32,8 +33,6 @@ MODULE_ALIAS("ip6t_limit"); * see net/sched/sch_tbf.c in the linux source tree */ -static DEFINE_SPINLOCK(limit_lock); - /* Rusty: This is my (non-mathematically-inclined) understanding of this algorithm. The `average rate' in jiffies becomes your initial amount of credit `credit' and the most credit you can ever have @@ -72,7 +71,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) struct xt_limit_priv *priv = r->master; unsigned long now = jiffies; - spin_lock_bh(&limit_lock); + spin_lock_bh(&priv->lock); priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY; if (priv->credit > r->credit_cap) priv->credit = r->credit_cap; @@ -80,11 +79,11 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) if (priv->credit >= r->cost) { /* We're not limited. */ priv->credit -= r->cost; - spin_unlock_bh(&limit_lock); + spin_unlock_bh(&priv->lock); return true; } - spin_unlock_bh(&limit_lock); + spin_unlock_bh(&priv->lock); return false; } @@ -126,6 +125,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par) r->credit_cap = priv->credit; /* Credits full. */ r->cost = user2credits(r->avg); } + spin_lock_init(&priv->lock); + return 0; } diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 1d89a4eaf841a33e95247badfba064ce7dad0f63..37d581a31cffcee58cb73209dd53d0a18e3fc68f 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -532,7 +532,7 @@ static int recent_seq_show(struct seq_file *seq, void *v) &e->addr.in6, e->ttl, e->stamps[i], e->index); for (i = 0; i < e->nstamps; i++) seq_printf(seq, "%s %lu", i ? 
"," : "", e->stamps[i]); - seq_printf(seq, "\n"); + seq_putc(seq, '\n'); return 0; } diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c index 5746a33789a50ced02746d57403970872f2cf29f..5fbd79194d21ee1f26fa669162b8de92d965f8c8 100644 --- a/net/netfilter/xt_state.c +++ b/net/netfilter/xt_state.c @@ -28,14 +28,13 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par) unsigned int statebit; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - if (!ct) + if (ct) + statebit = XT_STATE_BIT(ctinfo); + else if (ctinfo == IP_CT_UNTRACKED) + statebit = XT_STATE_UNTRACKED; + else statebit = XT_STATE_INVALID; - else { - if (nf_ct_is_untracked(ct)) - statebit = XT_STATE_UNTRACKED; - else - statebit = XT_STATE_BIT(ctinfo); - } + return (sinfo->statemask & statebit); } diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 4149d3e6358976f093dbcb25dee79d10f6275231..9aacf2da3d98feaf94319f7315b255323f4416f2 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -101,7 +101,7 @@ static int netlbl_cipsov4_add_common(struct genl_info *info, if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_TAGLST], NLBL_CIPSOV4_A_MAX, - netlbl_cipsov4_genl_policy) != 0) + netlbl_cipsov4_genl_policy, NULL) != 0) return -EINVAL; nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) @@ -148,7 +148,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], NLBL_CIPSOV4_A_MAX, - netlbl_cipsov4_genl_policy) != 0) + netlbl_cipsov4_genl_policy, NULL) != 0) return -EINVAL; doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); @@ -170,10 +170,10 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSLVL) { - if (nla_validate_nested(nla_a, - NLBL_CIPSOV4_A_MAX, - netlbl_cipsov4_genl_policy) != 0) - goto add_std_failure; + if (nla_validate_nested(nla_a, NLBL_CIPSOV4_A_MAX, + netlbl_cipsov4_genl_policy, + NULL) != 0) + goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) switch (nla_type(nla_b)) { case NLBL_CIPSOV4_A_MLSLVLLOC: @@ -236,7 +236,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, if (info->attrs[NLBL_CIPSOV4_A_MLSCATLST]) { if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_MLSCATLST], NLBL_CIPSOV4_A_MAX, - netlbl_cipsov4_genl_policy) != 0) + netlbl_cipsov4_genl_policy, NULL) != 0) goto add_std_failure; nla_for_each_nested(nla_a, @@ -244,8 +244,9 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, nla_a_rem) if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSCAT) { if (nla_validate_nested(nla_a, - NLBL_CIPSOV4_A_MAX, - netlbl_cipsov4_genl_policy) != 0) + NLBL_CIPSOV4_A_MAX, + netlbl_cipsov4_genl_policy, + NULL) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) switch (nla_type(nla_b)) { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 596eaff66649e5955d6c0f349f062b6d8360dc2d..ee841f00a6ec715914fb14bb31dc8405f8dd4c4e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -78,14 +78,6 @@ struct listeners { /* state bits */ #define NETLINK_S_CONGESTED 0x0 -/* flags */ -#define NETLINK_F_KERNEL_SOCKET 0x1 -#define NETLINK_F_RECV_PKTINFO 0x2 -#define NETLINK_F_BROADCAST_SEND_ERROR 0x4 -#define NETLINK_F_RECV_NO_ENOBUFS 0x8 -#define NETLINK_F_LISTEN_ALL_NSID 0x10 -#define NETLINK_F_CAP_ACK 0x20 - static inline int netlink_is_kernel(struct sock *sk) { return nlk_sk(sk)->flags & 
NETLINK_F_KERNEL_SOCKET; @@ -1660,6 +1652,13 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, nlk->flags &= ~NETLINK_F_CAP_ACK; err = 0; break; + case NETLINK_EXT_ACK: + if (val) + nlk->flags |= NETLINK_F_EXT_ACK; + else + nlk->flags &= ~NETLINK_F_EXT_ACK; + err = 0; + break; default: err = -ENOPROTOOPT; } @@ -1744,6 +1743,15 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname, return -EFAULT; err = 0; break; + case NETLINK_EXT_ACK: + if (len < sizeof(int)) + return -EINVAL; + len = sizeof(int); + val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0; + if (put_user(len, optlen) || put_user(val, optval)) + return -EFAULT; + err = 0; + break; default: err = -ENOPROTOOPT; } @@ -2275,21 +2283,44 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, } EXPORT_SYMBOL(__netlink_dump_start); -void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) +void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack) { struct sk_buff *skb; struct nlmsghdr *rep; struct nlmsgerr *errmsg; size_t payload = sizeof(*errmsg); + size_t tlvlen = 0; struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); + unsigned int flags = 0; /* Error messages get the original request appened, unless the user - * requests to cap the error message. + * requests to cap the error message, and get extra error data if + * requested. */ - if (!(nlk->flags & NETLINK_F_CAP_ACK) && err) - payload += nlmsg_len(nlh); + if (err) { + if (!(nlk->flags & NETLINK_F_CAP_ACK)) + payload += nlmsg_len(nlh); + else + flags |= NLM_F_CAPPED; + if (nlk->flags & NETLINK_F_EXT_ACK && extack) { + if (extack->_msg) + tlvlen += nla_total_size(strlen(extack->_msg) + 1); + if (extack->bad_attr) + tlvlen += nla_total_size(sizeof(u32)); + } + } else { + flags |= NLM_F_CAPPED; + + if (nlk->flags & NETLINK_F_EXT_ACK && + extack && extack->cookie_len) + tlvlen += nla_total_size(extack->cookie_len); + } + + if (tlvlen) + flags |= NLM_F_ACK_TLVS; - skb = nlmsg_new(payload, GFP_KERNEL); + skb = nlmsg_new(payload + tlvlen, GFP_KERNEL); if (!skb) { struct sock *sk; @@ -2305,17 +2336,42 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) } rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, - NLMSG_ERROR, payload, 0); + NLMSG_ERROR, payload, flags); errmsg = nlmsg_data(rep); errmsg->error = err; memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? 
nlh->nlmsg_len : sizeof(*nlh)); + + if (nlk->flags & NETLINK_F_EXT_ACK && extack) { + if (err) { + if (extack->_msg) + WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, + extack->_msg)); + if (extack->bad_attr && + !WARN_ON((u8 *)extack->bad_attr < in_skb->data || + (u8 *)extack->bad_attr >= in_skb->data + + in_skb->len)) + WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, + (u8 *)extack->bad_attr - + in_skb->data)); + } else { + if (extack->cookie_len) + WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, + extack->cookie_len, + extack->cookie)); + } + } + + nlmsg_end(skb, rep); + netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT); } EXPORT_SYMBOL(netlink_ack); int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, - struct nlmsghdr *)) + struct nlmsghdr *, + struct netlink_ext_ack *)) { + struct netlink_ext_ack extack = {}; struct nlmsghdr *nlh; int err; @@ -2336,13 +2392,13 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; - err = cb(skb, nlh); + err = cb(skb, nlh, &extack); if (err == -EINTR) goto skip; ack: if (nlh->nlmsg_flags & NLM_F_ACK || err) - netlink_ack(skb, nlh, err); + netlink_ack(skb, nlh, err, &extack); skip: msglen = NLMSG_ALIGN(nlh->nlmsg_len); diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 4fdb3831897775547f77c069a8018c0d2a253c8c..3490f2430532cf753118e14883b52c4af151b18c 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -6,6 +6,15 @@ #include #include +/* flags */ +#define NETLINK_F_KERNEL_SOCKET 0x1 +#define NETLINK_F_RECV_PKTINFO 0x2 +#define NETLINK_F_BROADCAST_SEND_ERROR 0x4 +#define NETLINK_F_RECV_NO_ENOBUFS 0x8 +#define NETLINK_F_LISTEN_ALL_NSID 0x10 +#define NETLINK_F_CAP_ACK 0x20 +#define NETLINK_F_EXT_ACK 0x40 + #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) #define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) diff --git a/net/netlink/diag.c b/net/netlink/diag.c index a5546249fb1022b52144a40717b8a4268755b972..8faa20b4d4573f3e2b510f03f3d4a85a70352559 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -19,6 +19,27 @@ static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb) nlk->groups); } +static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb) +{ + struct netlink_sock *nlk = nlk_sk(sk); + u32 flags = 0; + + if (nlk->cb_running) + flags |= NDIAG_FLAG_CB_RUNNING; + if (nlk->flags & NETLINK_F_RECV_PKTINFO) + flags |= NDIAG_FLAG_PKTINFO; + if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR) + flags |= NDIAG_FLAG_BROADCAST_ERROR; + if (nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) + flags |= NDIAG_FLAG_NO_ENOBUFS; + if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID) + flags |= NDIAG_FLAG_LISTEN_ALL_NSID; + if (nlk->flags & NETLINK_F_CAP_ACK) + flags |= NDIAG_FLAG_CAP_ACK; + + return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags); +} + static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct netlink_diag_req *req, u32 portid, u32 seq, u32 flags, int sk_ino) @@ -52,6 +73,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO)) goto out_nlmsg_trim; + if ((req->ndiag_show & NDIAG_SHOW_FLAGS) && + sk_diag_put_flags(sk, skb)) + goto out_nlmsg_trim; + nlmsg_end(skb, nlh); return 0; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 92e0981f74040d7029b65863167b459322612024..10f8b4cff40accd99898ea4d0696a2ba5d90080c 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -497,7 +497,8 @@ static int 
genl_lock_done(struct netlink_callback *cb) static int genl_family_rcv_msg(const struct genl_family *family, struct sk_buff *skb, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { const struct genl_ops *ops; struct net *net = sock_net(skb->sk); @@ -573,7 +574,7 @@ static int genl_family_rcv_msg(const struct genl_family *family, if (attrbuf) { err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr, - ops->policy); + ops->policy, extack); if (err < 0) goto out; } @@ -584,6 +585,7 @@ static int genl_family_rcv_msg(const struct genl_family *family, info.genlhdr = nlmsg_data(nlh); info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN; info.attrs = attrbuf; + info.extack = extack; genl_info_net_set(&info, net); memset(&info.user_ptr, 0, sizeof(info.user_ptr)); @@ -605,7 +607,8 @@ static int genl_family_rcv_msg(const struct genl_family *family, return err; } -static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { const struct genl_family *family; int err; @@ -617,7 +620,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (!family->parallel_ops) genl_lock(); - err = genl_family_rcv_msg(family, skb, nlh); + err = genl_family_rcv_msg(family, skb, nlh, extack); if (!family->parallel_ops) genl_unlock(); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 03f3d5c7beb8d173fae25456b278ee9cf3e04c5e..6b0850e63e09cf747a498f913f7eabe11e59c380 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -119,7 +119,8 @@ static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb) u32 idx; rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, - attrbuf, nfc_genl_family.maxattr, nfc_genl_policy); + attrbuf, nfc_genl_family.maxattr, nfc_genl_policy, + NULL); if (rc < 0) return ERR_PTR(rc); @@ -303,6 +304,17 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev) return -EMSGSIZE; } +static int nfc_genl_setup_device_added(struct nfc_dev *dev, struct sk_buff *msg) +{ + if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || + nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || + nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || + nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) + return -1; + return 0; +} + int nfc_genl_device_added(struct nfc_dev *dev) { struct sk_buff *msg; @@ -317,10 +329,7 @@ int nfc_genl_device_added(struct nfc_dev *dev) if (!hdr) goto free_msg; - if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || - nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || - nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || - nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) + if (nfc_genl_setup_device_added(dev, msg)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -596,11 +605,7 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, if (cb) genl_dump_check_consistent(cb, hdr, &nfc_genl_family); - if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || - nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || - nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || - nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || - nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) + if (nfc_genl_setup_device_added(dev, msg)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -918,7 +923,7 @@ static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info 
*info) rc = nfc_activate_target(dev, target_idx, protocol); nfc_put_device(dev); - return 0; + return rc; } static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info) @@ -1161,7 +1166,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(attr, info->attrs[NFC_ATTR_LLC_SDP], rem) { rc = nla_parse_nested(sdp_attrs, NFC_SDP_ATTR_MAX, attr, - nfc_sdp_genl_policy); + nfc_sdp_genl_policy, info->extack); if (rc != 0) { rc = -EINVAL; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index c82301ce3fffb6caeb41a9882a53289ec7b63c8d..e4610676299bcdac626db1a30cd4da44ccc62c0b 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2014 Nicira, Inc. + * Copyright (c) 2007-2017 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -44,13 +44,10 @@ #include "conntrack.h" #include "vport.h" -static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, - struct sw_flow_key *key, - const struct nlattr *attr, int len); - struct deferred_action { struct sk_buff *skb; const struct nlattr *actions; + int actions_len; /* Store pkt_key clone when creating deferred action. */ struct sw_flow_key pkt_key; @@ -82,14 +79,31 @@ struct action_fifo { struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE]; }; -struct recirc_keys { +struct action_flow_keys { struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD]; }; static struct action_fifo __percpu *action_fifos; -static struct recirc_keys __percpu *recirc_keys; +static struct action_flow_keys __percpu *flow_keys; static DEFINE_PER_CPU(int, exec_actions_level); +/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys' + * space. Return NULL if out of key spaces. 
+ */ +static struct sw_flow_key *clone_key(const struct sw_flow_key *key_) +{ + struct action_flow_keys *keys = this_cpu_ptr(flow_keys); + int level = this_cpu_read(exec_actions_level); + struct sw_flow_key *key = NULL; + + if (level <= OVS_DEFERRED_ACTION_THRESHOLD) { + key = &keys->key[level - 1]; + *key = *key_; + } + + return key; +} + static void action_fifo_init(struct action_fifo *fifo) { fifo->head = 0; @@ -119,8 +133,9 @@ static struct deferred_action *action_fifo_put(struct action_fifo *fifo) /* Return true if fifo is not full */ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, - const struct sw_flow_key *key, - const struct nlattr *attr) + const struct sw_flow_key *key, + const struct nlattr *actions, + const int actions_len) { struct action_fifo *fifo; struct deferred_action *da; @@ -129,7 +144,8 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, da = action_fifo_put(fifo); if (da) { da->skb = skb; - da->actions = attr; + da->actions = actions; + da->actions_len = actions_len; da->pkt_key = *key; } @@ -146,6 +162,12 @@ static bool is_flow_key_valid(const struct sw_flow_key *key) return !(key->mac_proto & SW_FLOW_KEY_INVALID); } +static int clone_execute(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, + u32 recirc_id, + const struct nlattr *actions, int len, + bool last, bool clone_flow_key); + static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, __be16 ethertype) { @@ -908,72 +930,35 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, return ovs_dp_upcall(dp, skb, key, &upcall, cutlen); } +/* When 'last' is true, sample() should always consume the 'skb'. + * Otherwise, sample() should keep 'skb' intact regardless of what + * actions are executed within sample(). + */ static int sample(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr, - const struct nlattr *actions, int actions_len) + bool last) { - const struct nlattr *acts_list = NULL; - const struct nlattr *a; - int rem; - u32 cutlen = 0; - - for (a = nla_data(attr), rem = nla_len(attr); rem > 0; - a = nla_next(a, &rem)) { - u32 probability; - - switch (nla_type(a)) { - case OVS_SAMPLE_ATTR_PROBABILITY: - probability = nla_get_u32(a); - if (!probability || prandom_u32() > probability) - return 0; - break; - - case OVS_SAMPLE_ATTR_ACTIONS: - acts_list = a; - break; - } - } - - rem = nla_len(acts_list); - a = nla_data(acts_list); - - /* Actions list is empty, do nothing */ - if (unlikely(!rem)) + struct nlattr *actions; + struct nlattr *sample_arg; + int rem = nla_len(attr); + const struct sample_arg *arg; + bool clone_flow_key; + + /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */ + sample_arg = nla_data(attr); + arg = nla_data(sample_arg); + actions = nla_next(sample_arg, &rem); + + if ((arg->probability != U32_MAX) && + (!arg->probability || prandom_u32() > arg->probability)) { + if (last) + consume_skb(skb); return 0; - - /* The only known usage of sample action is having a single user-space - * action, or having a truncate action followed by a single user-space - * action. Treat this usage as a special case. - * The output_userspace() should clone the skb to be sent to the - * user space. This skb will be consumed by its caller.
- */ - if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) { - struct ovs_action_trunc *trunc = nla_data(a); - - if (skb->len > trunc->max_len) - cutlen = skb->len - trunc->max_len; - - a = nla_next(a, &rem); } - if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && - nla_is_last(a, rem))) - return output_userspace(dp, skb, key, a, actions, - actions_len, cutlen); - - skb = skb_clone(skb, GFP_ATOMIC); - if (!skb) - /* Skip the sample action when out of memory. */ - return 0; - - if (!add_deferred_actions(skb, key, a)) { - if (net_ratelimit()) - pr_warn("%s: deferred actions limit reached, dropping sample action\n", - ovs_dp_name(dp)); - - kfree_skb(skb); - } - return 0; + clone_flow_key = !arg->exec; + return clone_execute(dp, skb, key, 0, actions, rem, last, + clone_flow_key); } static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, @@ -1084,10 +1069,9 @@ static int execute_masked_set_action(struct sk_buff *skb, static int execute_recirc(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, - const struct nlattr *a, int rem) + const struct nlattr *a, bool last) { - struct deferred_action *da; - int level; + u32 recirc_id; if (!is_flow_key_valid(key)) { int err; @@ -1098,43 +1082,8 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb, } BUG_ON(!is_flow_key_valid(key)); - if (!nla_is_last(a, rem)) { - /* Recirc action is the not the last action - * of the action list, need to clone the skb. - */ - skb = skb_clone(skb, GFP_ATOMIC); - - /* Skip the recirc action when out of memory, but - * continue on with the rest of the action list. - */ - if (!skb) - return 0; - } - - level = this_cpu_read(exec_actions_level); - if (level <= OVS_DEFERRED_ACTION_THRESHOLD) { - struct recirc_keys *rks = this_cpu_ptr(recirc_keys); - struct sw_flow_key *recirc_key = &rks->key[level - 1]; - - *recirc_key = *key; - recirc_key->recirc_id = nla_get_u32(a); - ovs_dp_process_packet(skb, recirc_key); - - return 0; - } - - da = add_deferred_actions(skb, key, NULL); - if (da) { - da->pkt_key.recirc_id = nla_get_u32(a); - } else { - kfree_skb(skb); - - if (net_ratelimit()) - pr_warn("%s: deferred action limit reached, drop recirc action\n", - ovs_dp_name(dp)); - } - - return 0; + recirc_id = nla_get_u32(a); + return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true); } /* Execute a list of actions against 'skb'. */ @@ -1206,9 +1155,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, err = pop_vlan(skb, key); break; - case OVS_ACTION_ATTR_RECIRC: - err = execute_recirc(dp, skb, key, a, rem); - if (nla_is_last(a, rem)) { + case OVS_ACTION_ATTR_RECIRC: { + bool last = nla_is_last(a, rem); + + err = execute_recirc(dp, skb, key, a, last); + if (last) { /* If this is the last action, the skb has * been consumed or freed. * Return immediately. 
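The recirc and sample hunks above share one ownership rule: do_execute_actions() keeps 'skb' alive only until the last action, so when 'last' is true the callee may consume the original skb instead of cloning it. A minimal userspace sketch of that control flow follows; pkt, pkt_clone() and run_actions() are hypothetical stand-ins for the kernel's skb machinery, not the actual OVS code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { char data[16]; };

/* Hypothetical stand-in for skb_clone(). */
static struct pkt *pkt_clone(const struct pkt *p)
{
	struct pkt *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, p, sizeof(*c));
	return c;
}

/* Consumes 'p', the way ovs_dp_process_packet() and
 * do_execute_actions() consume the skb handed to them. */
static void run_actions(struct pkt *p)
{
	printf("actions on %s\n", p->data);
	free(p);
}

/* Mirrors clone_execute(): clone only when more actions follow. */
static void clone_execute(struct pkt *p, bool last)
{
	struct pkt *work = last ? p : pkt_clone(p);

	if (!work)		/* out of memory: skip this action */
		return;
	run_actions(work);
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	strcpy(p->data, "pkt0");
	clone_execute(p, false);	/* not last: works on a clone */
	clone_execute(p, true);		/* last: consumes 'p' itself */
	return 0;
}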
@@ -1216,6 +1167,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, return err; } break; + } case OVS_ACTION_ATTR_SET: err = execute_set_action(skb, key, nla_data(a)); @@ -1226,9 +1178,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, err = execute_masked_set_action(skb, key, nla_data(a)); break; - case OVS_ACTION_ATTR_SAMPLE: - err = sample(dp, skb, key, a, attr, len); + case OVS_ACTION_ATTR_SAMPLE: { + bool last = nla_is_last(a, rem); + + err = sample(dp, skb, key, a, last); + if (last) + return err; + + break; + } case OVS_ACTION_ATTR_CT: if (!is_flow_key_valid(key)) { @@ -1264,6 +1222,79 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, return 0; } +/* Execute the actions on the clone of the packet. The effect of the + * execution does not affect the original 'skb' nor the original 'key'. + * + * The execution may be deferred in case the actions cannot be executed + * immediately. + */ +static int clone_execute(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, u32 recirc_id, + const struct nlattr *actions, int len, + bool last, bool clone_flow_key) +{ + struct deferred_action *da; + struct sw_flow_key *clone; + + skb = last ? skb : skb_clone(skb, GFP_ATOMIC); + if (!skb) { + /* Out of memory, skip this action. + */ + return 0; + } + + /* When clone_flow_key is false, the 'key' will not be changed + * by the actions, so the 'key' can be used directly. + * Otherwise, try to clone key from the next recursion level of + * 'flow_keys'. If clone is successful, execute the actions + * without deferring. + */ + clone = clone_flow_key ? clone_key(key) : key; + if (clone) { + int err = 0; + + if (actions) { /* Sample action */ + if (clone_flow_key) + __this_cpu_inc(exec_actions_level); + + err = do_execute_actions(dp, skb, clone, + actions, len); + + if (clone_flow_key) + __this_cpu_dec(exec_actions_level); + } else { /* Recirc action */ + clone->recirc_id = recirc_id; + ovs_dp_process_packet(skb, clone); + } + return err; + } + + /* Out of 'flow_keys' space. Defer actions */ + da = add_deferred_actions(skb, key, actions, len); + if (da) { + if (!actions) { /* Recirc action */ + key = &da->pkt_key; + key->recirc_id = recirc_id; + } + } else { + /* Out of per CPU action FIFO space. Drop the 'skb' and + * log an error.
+ */ + kfree_skb(skb); + + if (net_ratelimit()) { + if (actions) { /* Sample action */ + pr_warn("%s: deferred action limit reached, drop sample action\n", + ovs_dp_name(dp)); + } else { /* Recirc action */ + pr_warn("%s: deferred action limit reached, drop recirc action\n", + ovs_dp_name(dp)); + } + } + } + return 0; +} + static void process_deferred_actions(struct datapath *dp) { struct action_fifo *fifo = this_cpu_ptr(action_fifos); @@ -1278,10 +1309,10 @@ static void process_deferred_actions(struct datapath *dp) struct sk_buff *skb = da->skb; struct sw_flow_key *key = &da->pkt_key; const struct nlattr *actions = da->actions; + int actions_len = da->actions_len; if (actions) - do_execute_actions(dp, skb, key, actions, - nla_len(actions)); + do_execute_actions(dp, skb, key, actions, actions_len); else ovs_dp_process_packet(skb, key); } while (!action_fifo_is_empty(fifo)); @@ -1323,8 +1354,8 @@ int action_fifos_init(void) if (!action_fifos) return -ENOMEM; - recirc_keys = alloc_percpu(struct recirc_keys); - if (!recirc_keys) { + flow_keys = alloc_percpu(struct action_flow_keys); + if (!flow_keys) { free_percpu(action_fifos); return -ENOMEM; } @@ -1335,5 +1366,5 @@ int action_fifos_init(void) void action_fifos_exit(void) { free_percpu(action_fifos); - free_percpu(recirc_keys); + free_percpu(flow_keys); } diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 7b2c2fce408a02d4251f03a2e3f0b4d9e7fccb80..42a95919df094ba0f21cf4e4c1ade6c570f10284 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -66,7 +66,9 @@ struct ovs_conntrack_info { u8 commit : 1; u8 nat : 3; /* enum ovs_ct_nat */ u8 force : 1; + u8 have_eventmask : 1; u16 family; + u32 eventmask; /* Mask of 1 << IPCT_*. */ struct md_mark mark; struct md_labels labels; #ifdef CONFIG_NF_NAT_NEEDED @@ -373,7 +375,7 @@ static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, } /* Labels are included in the IPCTNL_MSG_CT_NEW event only if the - * IPCT_LABEL bit it set in the event cache. + * IPCT_LABEL bit is set in the event cache. */ nf_conntrack_event_cache(IPCT_LABEL, ct); @@ -795,11 +797,6 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, enum nf_nat_manip_type maniptype; int err; - if (nf_ct_is_untracked(ct)) { - /* A NAT action may only be performed on tracked packets. */ - return NF_ACCEPT; - } - /* Add NAT extension if not confirmed yet. */ if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct)) return NF_ACCEPT; /* Can't NAT. */ @@ -1007,6 +1004,20 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, if (!ct) return 0; + /* Set the conntrack event mask if given. NEW and DELETE events have + * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener + * typically would receive many kinds of updates. Setting the event + * mask allows those events to be filtered. The set event mask will + * remain in effect for the lifetime of the connection unless changed + * by a further CT action with both the commit flag and the eventmask + * option. */ + if (info->have_eventmask) { + struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct); + + if (cache) + cache->ctmask = info->eventmask; + } + /* Apply changes before confirming the connection so that the initial * conntrack NEW netlink event carries the values given in the CT * action. @@ -1238,6 +1249,8 @@ static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = { /* NAT length is checked when parsing the nested attributes. 
*/ [OVS_CT_ATTR_NAT] = { .minlen = 0, .maxlen = INT_MAX }, #endif + [OVS_CT_ATTR_EVENTMASK] = { .minlen = sizeof(u32), + .maxlen = sizeof(u32) }, }; static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, @@ -1316,6 +1329,11 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, break; } #endif + case OVS_CT_ATTR_EVENTMASK: + info->have_eventmask = true; + info->eventmask = nla_get_u32(a); + break; + default: OVS_NLERR(log, "Unknown conntrack attr (%d)", type); @@ -1515,6 +1533,10 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info, ct_info->helper->name)) return -EMSGSIZE; } + if (ct_info->have_eventmask && + nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask)) + return -EMSGSIZE; + #ifdef CONFIG_NF_NAT_NEEDED if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb)) return -EMSGSIZE; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 9c62b6325f7adc92420b605cf4cdc3b26da437fc..7b17da9a94a0c15ab8ec28205e025d9554c798fb 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1353,7 +1353,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int err; err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a, - OVS_FLOW_ATTR_MAX, flow_policy); + OVS_FLOW_ATTR_MAX, flow_policy, NULL); if (err) return err; ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 1c6e9377436df1e93081c825c142b712a811277b..da931bdef8a7b5f25c189a5fe76e5fc2c4c1efdf 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -34,8 +34,6 @@ #define DP_MAX_PORTS USHRT_MAX #define DP_VPORT_HASH_BUCKETS 1024 -#define SAMPLE_ACTION_DEPTH 3 - /** * struct dp_stats_percpu - per-cpu packet processing statistics for a given * datapath. diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 1105a838bab83f9fe647423060d08e4a415bab61..7e1d8a2afa63687aeb87121dbae614bad2ec34a3 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2014 Nicira, Inc. + * Copyright (c) 2007-2017 Nicira, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -59,6 +59,39 @@ struct ovs_len_tbl { #define OVS_ATTR_NESTED -1 #define OVS_ATTR_VARIABLE -2 +static bool actions_may_change_flow(const struct nlattr *actions) +{ + struct nlattr *nla; + int rem; + + nla_for_each_nested(nla, actions, rem) { + u16 action = nla_type(nla); + + switch (action) { + case OVS_ACTION_ATTR_OUTPUT: + case OVS_ACTION_ATTR_RECIRC: + case OVS_ACTION_ATTR_TRUNC: + case OVS_ACTION_ATTR_USERSPACE: + break; + + case OVS_ACTION_ATTR_CT: + case OVS_ACTION_ATTR_HASH: + case OVS_ACTION_ATTR_POP_ETH: + case OVS_ACTION_ATTR_POP_MPLS: + case OVS_ACTION_ATTR_POP_VLAN: + case OVS_ACTION_ATTR_PUSH_ETH: + case OVS_ACTION_ATTR_PUSH_MPLS: + case OVS_ACTION_ATTR_PUSH_VLAN: + case OVS_ACTION_ATTR_SAMPLE: + case OVS_ACTION_ATTR_SET: + case OVS_ACTION_ATTR_SET_MASKED: + default: + return true; + } + } + return false; +} + static void update_range(struct sw_flow_match *match, size_t offset, size_t size, bool is_mask) { @@ -2023,18 +2056,20 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa, static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, const struct sw_flow_key *key, - int depth, struct sw_flow_actions **sfa, + struct sw_flow_actions **sfa, __be16 eth_type, __be16 vlan_tci, bool log); static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, - const struct sw_flow_key *key, int depth, + const struct sw_flow_key *key, struct sw_flow_actions **sfa, - __be16 eth_type, __be16 vlan_tci, bool log) + __be16 eth_type, __be16 vlan_tci, + bool log, bool last) { const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; const struct nlattr *probability, *actions; const struct nlattr *a; - int rem, start, err, st_acts; + int rem, start, err; + struct sample_arg arg; memset(attrs, 0, sizeof(attrs)); nla_for_each_nested(a, attr, rem) { @@ -2058,20 +2093,32 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log); if (start < 0) return start; - err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, - nla_data(probability), sizeof(u32), log); + + /* When both skb and flow may be changed, put the sample + * into a deferred fifo. On the other hand, if only skb + * may be modified, the actions can be executed in place. + * + * Do this analysis at the flow installation time. + * Set 'clone_action->exec' to true if the actions can be + * executed without being deferred. + * + * If the sample is the last action, it can always be executed + * rather than deferred.
+ */ + arg.exec = last || !actions_may_change_flow(actions); + arg.probability = nla_get_u32(probability); + + err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg), + log); if (err) return err; - st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log); - if (st_acts < 0) - return st_acts; - err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa, + err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type, vlan_tci, log); + if (err) return err; - add_nested_action_end(*sfa, st_acts); add_nested_action_end(*sfa, start); return 0; @@ -2380,8 +2427,8 @@ static int validate_userspace(const struct nlattr *attr) struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; - error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, - attr, userspace_policy); + error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, + userspace_policy, NULL); if (error) return error; @@ -2408,16 +2455,13 @@ static int copy_action(const struct nlattr *from, static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, const struct sw_flow_key *key, - int depth, struct sw_flow_actions **sfa, + struct sw_flow_actions **sfa, __be16 eth_type, __be16 vlan_tci, bool log) { u8 mac_proto = ovs_key_mac_proto(key); const struct nlattr *a; int rem, err; - if (depth >= SAMPLE_ACTION_DEPTH) - return -EOVERFLOW; - nla_for_each_nested(a, attr, rem) { /* Expected argument lengths, (u32)-1 for variable length. */ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { @@ -2555,13 +2599,17 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, return err; break; - case OVS_ACTION_ATTR_SAMPLE: - err = validate_and_copy_sample(net, a, key, depth, sfa, - eth_type, vlan_tci, log); + case OVS_ACTION_ATTR_SAMPLE: { + bool last = nla_is_last(a, rem); + + err = validate_and_copy_sample(net, a, key, sfa, + eth_type, vlan_tci, + log, last); if (err) return err; skip_copy = true; break; + } case OVS_ACTION_ATTR_CT: err = ovs_ct_copy_action(net, a, key, sfa, log); @@ -2615,7 +2663,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, return PTR_ERR(*sfa); (*sfa)->orig_len = nla_len(attr); - err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type, + err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type, key->eth.vlan.tci, log); if (err) ovs_nla_free_flow_actions(*sfa); @@ -2623,39 +2671,44 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, return err; } -static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) +static int sample_action_to_attr(const struct nlattr *attr, + struct sk_buff *skb) { - const struct nlattr *a; - struct nlattr *start; - int err = 0, rem; + struct nlattr *start, *ac_start = NULL, *sample_arg; + int err = 0, rem = nla_len(attr); + const struct sample_arg *arg; + struct nlattr *actions; start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE); if (!start) return -EMSGSIZE; - nla_for_each_nested(a, attr, rem) { - int type = nla_type(a); - struct nlattr *st_sample; + sample_arg = nla_data(attr); + arg = nla_data(sample_arg); + actions = nla_next(sample_arg, &rem); - switch (type) { - case OVS_SAMPLE_ATTR_PROBABILITY: - if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, - sizeof(u32), nla_data(a))) - return -EMSGSIZE; - break; - case OVS_SAMPLE_ATTR_ACTIONS: - st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS); - if (!st_sample) - return -EMSGSIZE; - err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb); - if (err) - return err; - nla_nest_end(skb, st_sample); - break; 
- } + if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) { + err = -EMSGSIZE; + goto out; + } + + ac_start = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS); + if (!ac_start) { + err = -EMSGSIZE; + goto out; + } + + err = ovs_nla_put_actions(actions, rem, skb); + +out: + if (err) { + nla_nest_cancel(skb, ac_start); + nla_nest_cancel(skb, start); + } else { + nla_nest_end(skb, ac_start); + nla_nest_end(skb, start); } - nla_nest_end(skb, start); return err; } diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 7eb955e453e6d657d13d0aa7b35ce7c8b7de2f15..869acb3b3d3f25a75ee4912359ee0fea31fa7c2d 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -70,7 +70,8 @@ static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr, if (nla_len(attr) < sizeof(struct nlattr)) return -EINVAL; - err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy); + err = nla_parse_nested(exts, OVS_VXLAN_EXT_MAX, attr, exts_policy, + NULL); if (err < 0) return err; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ea81ccf3c7d6a53095b4922329c25b566d5b5940..f4001763134da35ed5f0f04bc2836015bb15a4af 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1496,6 +1496,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, DEFINE_MUTEX(fanout_mutex); EXPORT_SYMBOL_GPL(fanout_mutex); static LIST_HEAD(fanout_list); +static u16 fanout_next_id; static void __fanout_link(struct sock *sk, struct packet_sock *po) { @@ -1629,6 +1630,36 @@ static void fanout_release_data(struct packet_fanout *f) }; } +static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) +{ + struct packet_fanout *f; + + list_for_each_entry(f, &fanout_list, list) { + if (f->id == candidate_id && + read_pnet(&f->net) == sock_net(sk)) { + return false; + } + } + return true; +} + +static bool fanout_find_new_id(struct sock *sk, u16 *new_id) +{ + u16 id = fanout_next_id; + + do { + if (__fanout_id_is_free(sk, id)) { + *new_id = id; + fanout_next_id = id + 1; + return true; + } + + id++; + } while (id != fanout_next_id); + + return false; +} + static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_rollover *rollover = NULL; @@ -1676,6 +1707,19 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) po->rollover = rollover; } + if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { + if (id != 0) { + err = -EINVAL; + goto out; + } + if (!fanout_find_new_id(sk, &id)) { + err = -ENOMEM; + goto out; + } + /* ephemeral flag for the first socket in the group: drop it */ + flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); + } + match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index bc5ee5fbe6ae3845b6ec67eaea2beaec2d12ba08..45b3af3080d8349f9e9c6e7f424f0548a79a42b0 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -61,7 +61,8 @@ static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { [IFA_LOCAL] = { .type = NLA_U8 }, }; -static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) +static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; @@ -78,7 +79,8 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy); + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, 
ifa_phonet_policy, + extack); if (err < 0) return err; @@ -226,7 +228,8 @@ static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = { [RTA_OIF] = { .type = NLA_U32 }, }; -static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh) +static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[RTA_MAX+1]; @@ -243,7 +246,8 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); - err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); + err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy, + extack); if (err < 0) return err; diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig index b83c6807a5ae5cedc63b073c7f928b1776f42524..326fd97444f5bed5c82af7d632d8a4424f9908d3 100644 --- a/net/qrtr/Kconfig +++ b/net/qrtr/Kconfig @@ -16,7 +16,7 @@ if QRTR config QRTR_SMD tristate "SMD IPC Router channels" - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG || (COMPILE_TEST && RPMSG=n) ---help--- Say Y here to support SMD based ipcrouter channels. SMD is the most common transport for IPC Router. diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 9da7368b0140f9e4a96794e21d66f487881987ba..a9a8c7d5a4a983b9be12fe85a7f71f3e6a825f19 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -945,7 +945,8 @@ static const struct nla_policy qrtr_policy[IFA_MAX + 1] = { [IFA_LOCAL] = { .type = NLA_U32 }, }; -static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) +static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct nlattr *tb[IFA_MAX + 1]; struct ifaddrmsg *ifm; @@ -959,7 +960,7 @@ static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) ASSERT_RTNL(); - rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy); + rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack); if (rc < 0) return rc; diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c index 0d11132b3370a4024be644dede74a77d188a4f64..50615d5efac1529a0fd617c2692b1e9419da7137 100644 --- a/net/qrtr/smd.c +++ b/net/qrtr/smd.c @@ -14,21 +14,21 @@ #include #include -#include +#include #include "qrtr.h" struct qrtr_smd_dev { struct qrtr_endpoint ep; - struct qcom_smd_channel *channel; + struct rpmsg_endpoint *channel; struct device *dev; }; /* from smd to qrtr */ -static int qcom_smd_qrtr_callback(struct qcom_smd_channel *channel, - const void *data, size_t len) +static int qcom_smd_qrtr_callback(struct rpmsg_device *rpdev, + void *data, int len, void *priv, u32 addr) { - struct qrtr_smd_dev *qdev = qcom_smd_get_drvdata(channel); + struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev); int rc; if (!qdev) @@ -54,7 +54,7 @@ static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) if (rc) goto out; - rc = qcom_smd_send(qdev->channel, skb->data, skb->len); + rc = rpmsg_send(qdev->channel, skb->data, skb->len); out: if (rc) @@ -64,57 +64,55 @@ static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) return rc; } -static int qcom_smd_qrtr_probe(struct qcom_smd_device *sdev) +static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev) { struct qrtr_smd_dev *qdev; int rc; - qdev = devm_kzalloc(&sdev->dev, sizeof(*qdev), GFP_KERNEL); + qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL); if (!qdev) return -ENOMEM; - qdev->channel = sdev->channel; - qdev->dev = &sdev->dev; + qdev->channel = rpdev->ept; + qdev->dev = &rpdev->dev; qdev->ep.xmit = 
qcom_smd_qrtr_send; rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO); if (rc) return rc; - qcom_smd_set_drvdata(sdev->channel, qdev); - dev_set_drvdata(&sdev->dev, qdev); + dev_set_drvdata(&rpdev->dev, qdev); - dev_dbg(&sdev->dev, "Qualcomm SMD QRTR driver probed\n"); + dev_dbg(&rpdev->dev, "Qualcomm SMD QRTR driver probed\n"); return 0; } -static void qcom_smd_qrtr_remove(struct qcom_smd_device *sdev) +static void qcom_smd_qrtr_remove(struct rpmsg_device *rpdev) { - struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev); + struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev); qrtr_endpoint_unregister(&qdev->ep); - dev_set_drvdata(&sdev->dev, NULL); + dev_set_drvdata(&rpdev->dev, NULL); } -static const struct qcom_smd_id qcom_smd_qrtr_smd_match[] = { +static const struct rpmsg_device_id qcom_smd_qrtr_smd_match[] = { { "IPCRTR" }, {} }; -static struct qcom_smd_driver qcom_smd_qrtr_driver = { +static struct rpmsg_driver qcom_smd_qrtr_driver = { .probe = qcom_smd_qrtr_probe, .remove = qcom_smd_qrtr_remove, .callback = qcom_smd_qrtr_callback, - .smd_match_table = qcom_smd_qrtr_smd_match, - .driver = { + .id_table = qcom_smd_qrtr_smd_match, + .drv = { .name = "qcom_smd_qrtr", - .owner = THIS_MODULE, }, }; -module_qcom_smd_driver(qcom_smd_qrtr_driver); +module_rpmsg_driver(qcom_smd_qrtr_driver); MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); MODULE_LICENSE("GPL v2"); diff --git a/net/rds/connection.c b/net/rds/connection.c index 1fa75ab7b733230585666abcb7279ba691365256..6a5ebdea7d2e9eb3b624a01a8a87ef601e3b2f13 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -333,11 +333,19 @@ void rds_conn_shutdown(struct rds_conn_path *cp) rds_conn_path_reset(cp); if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING, + RDS_CONN_DOWN) && + !rds_conn_path_transition(cp, RDS_CONN_ERROR, RDS_CONN_DOWN)) { /* This can happen - eg when we're in the middle of tearing * down the connection, and someone unloads the rds module. - * Quite reproduceable with loopback connections. + * Quite reproducible with loopback connections. * Mostly harmless. + * + * Note that this also happens with rds-tcp because + * we could have triggered rds_conn_path_drop in irq + * mode from rds_tcp_state change on the receipt of + * a FIN, thus we need to recheck for RDS_CONN_ERROR + * here. 
*/ rds_conn_path_error(cp, "%s: failed to transition " "to state DOWN, current state " diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 1c38d2c7caa8e955585b45f0c9218a0775013b4d..80fb6f63e768d3461c47533615c875526bb8bab9 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -702,9 +702,8 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, event->param.conn.initiator_depth); /* rdma_accept() calls rdma_reject() internally if it fails */ - err = rdma_accept(cm_id, &conn_param); - if (err) - rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err); + if (rdma_accept(cm_id, &conn_param)) + rds_ib_conn_error(conn, "rdma_accept failed\n"); out: if (conn) diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c index 4fe8f4fec4eee66c826b5beb5b02ae61d19b483a..86ef907067bb084e01ac4f8d5f00d0c17f40ac55 100644 --- a/net/rds/ib_fmr.c +++ b/net/rds/ib_fmr.c @@ -78,17 +78,15 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages) return ibmr; out_no_cigar: - if (ibmr) { - if (fmr->fmr) - ib_dealloc_fmr(fmr->fmr); - kfree(ibmr); - } + kfree(ibmr); atomic_dec(&pool->item_count); + return ERR_PTR(err); } -int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, - struct scatterlist *sg, unsigned int nents) +static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, + struct rds_ib_mr *ibmr, struct scatterlist *sg, + unsigned int nents) { struct ib_device *dev = rds_ibdev->dev; struct rds_ib_fmr *fmr = &ibmr->u.fmr; @@ -114,29 +112,39 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); if (dma_addr & ~PAGE_MASK) { - if (i > 0) + if (i > 0) { + ib_dma_unmap_sg(dev, sg, nents, + DMA_BIDIRECTIONAL); return -EINVAL; - else + } else { ++page_cnt; + } } if ((dma_addr + dma_len) & ~PAGE_MASK) { - if (i < sg_dma_len - 1) + if (i < sg_dma_len - 1) { + ib_dma_unmap_sg(dev, sg, nents, + DMA_BIDIRECTIONAL); return -EINVAL; - else + } else { ++page_cnt; + } } len += dma_len; } page_cnt += len >> PAGE_SHIFT; - if (page_cnt > ibmr->pool->fmr_attr.max_pages) + if (page_cnt > ibmr->pool->fmr_attr.max_pages) { + ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL); return -EINVAL; + } dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC, rdsibdev_to_node(rds_ibdev)); - if (!dma_pages) + if (!dma_pages) { + ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL); return -ENOMEM; + } page_cnt = 0; for (i = 0; i < sg_dma_len; ++i) { @@ -149,8 +157,10 @@ int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, } ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr); - if (ret) + if (ret) { + ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL); goto out; + } /* Success - we successfully remapped the MR, so we can * safely tear down the old mapping. 
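The net/rds/ib_fmr.c hunk above enforces one invariant in rds_ib_map_fmr(): once ib_dma_map_sg() has succeeded, every failure path must call ib_dma_unmap_sg() before returning. The patch repeats the unmap at each early return; the same invariant is often written with a single unwind label instead. A standalone sketch of that alternative structure follows, with map_sg(), unmap_sg() and validate() as hypothetical stand-ins for the InfiniBand DMA calls.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for ib_dma_map_sg()/ib_dma_unmap_sg(). */
static bool map_sg(void)   { puts("map");   return true; }
static void unmap_sg(void) { puts("unmap"); }

/* Pretend entry 2 violates an alignment check. */
static bool validate(int i) { return i != 2; }

static int map_and_check(int nents)
{
	int i, err = 0;

	if (!map_sg())
		return -1;		/* nothing mapped yet */

	for (i = 0; i < nents; i++) {
		if (!validate(i)) {
			err = -1;
			goto out_unmap;	/* one unwind point for all failures */
		}
	}
	return 0;			/* success: caller owns the mapping */

out_unmap:
	unmap_sg();			/* the mapping never leaks on error */
	return err;
}

int main(void)
{
	printf("ret=%d\n", map_and_check(4));
	return 0;
}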
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 5d6e98a79a5e4b3de1f472c5fc513fce545bf6f9..0ea4ab017a8cc3f807931e1194cddb5048a82956 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -125,8 +125,6 @@ void rds_ib_mr_exit(void); void __rds_ib_teardown_mr(struct rds_ib_mr *); void rds_ib_teardown_mr(struct rds_ib_mr *); struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int); -int rds_ib_map_fmr(struct rds_ib_device *, struct rds_ib_mr *, - struct scatterlist *, unsigned int); struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *); int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **); struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *, diff --git a/net/rds/threads.c b/net/rds/threads.c index e36e333a0aa0d7430c852ce419ac7ed85094bbec..3e447d056d092a405311265a06a8596b2ce8dc87 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -156,7 +156,7 @@ void rds_connect_worker(struct work_struct *work) struct rds_connection *conn = cp->cp_conn; int ret; - if (cp->cp_index > 1 && cp->cp_conn->c_laddr > cp->cp_conn->c_faddr) + if (cp->cp_index > 0 && cp->cp_conn->c_laddr > cp->cp_conn->c_faddr) return; clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 26a7b1db1361e554733b0ff40a54d8e68e59af09..7486926e60a88a56a364b727800f3cd1a671d808 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -739,6 +739,25 @@ static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call, return ret; } +/* + * Abort a call due to a protocol error. + */ +static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, + struct sk_buff *skb, + const char *eproto_why, + const char *why, + u32 abort_code) +{ + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why); + return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO); +} + +#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \ + __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \ + (abort_why), (abort_code)) + /* * conn_client.c */ diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 0ed181f53f32a0145c03b0006b92de5c7a0101aa..1752fcf8e8f1dd85866004a2ee38c8bbaa040138 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -413,11 +413,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, case RXRPC_CONN_REMOTELY_ABORTED: rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, - conn->remote_abort, ECONNABORTED); + conn->remote_abort, -ECONNABORTED); break; case RXRPC_CONN_LOCALLY_ABORTED: rxrpc_abort_call("CON", call, sp->hdr.seq, - conn->local_abort, ECONNABORTED); + conn->local_abort, -ECONNABORTED); break; default: BUG(); @@ -600,7 +600,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx) write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_SERVER_ACCEPTING: - __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED); + __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED); abort = true; /* fall through */ case RXRPC_CALL_COMPLETE: diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 97a17ada4431d58b7a0f9c07be3b13b0230a6390..7a77844aab16be5f32a16b7edd3a7f440006c221 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -386,7 +386,7 @@ void rxrpc_process_call(struct work_struct *work) now = ktime_get_real(); if 
(ktime_before(call->expire_at, now)) { - rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME); + rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME); set_bit(RXRPC_CALL_EV_ABORT, &call->events); goto recheck_state; } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index d79cd36987a95b86f2af9fac4688ab86e20f41d5..47f7f4205653aa0643c77f7381688df79b51cbbe 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -486,7 +486,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) call = list_entry(rx->to_be_accepted.next, struct rxrpc_call, accept_link); list_del(&call->accept_link); - rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, ECONNRESET); + rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET); rxrpc_put_call(call, rxrpc_call_put); } @@ -494,7 +494,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) call = list_entry(rx->sock_calls.next, struct rxrpc_call, sock_link); rxrpc_get_call(call, rxrpc_call_got); - rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET); + rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET); rxrpc_send_abort_packet(call); rxrpc_release_call(rx, call); rxrpc_put_call(call, rxrpc_call_put); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index c3be03e8d098213e4956bb644827865206470e19..e8dea0d49e7fedf2951616c1aed8e93ec693a1dd 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -550,6 +550,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, call->cid = conn->proto.cid | channel; call->call_id = call_id; + trace_rxrpc_connect_call(call); _net("CONNECT call %08x:%08x as call %d on conn %d", call->cid, call->call_id, call->debug_id, conn->debug_id); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index b099b64366f356c27dea0a4dd215cc1034e61b55..46babcf82ce8648190e018ee69ad16701e2e969a 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -168,7 +168,7 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, * generate a connection-level abort */ static int rxrpc_abort_connection(struct rxrpc_connection *conn, - u32 error, u32 abort_code) + int error, u32 abort_code) { struct rxrpc_wire_header whdr; struct msghdr msg; @@ -281,14 +281,17 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, case RXRPC_PACKET_TYPE_ABORT: if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), - &wtmp, sizeof(wtmp)) < 0) + &wtmp, sizeof(wtmp)) < 0) { + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, + tracepoint_string("bad_abort")); return -EPROTO; + } abort_code = ntohl(wtmp); _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); conn->state = RXRPC_CONN_REMOTELY_ABORTED; rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, - abort_code, ECONNABORTED); + abort_code, -ECONNABORTED); return -ECONNABORTED; case RXRPC_PACKET_TYPE_CHALLENGE: @@ -327,7 +330,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, return 0; default: - _leave(" = -EPROTO [%u]", sp->hdr.type); + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, + tracepoint_string("bad_conn_pkt")); return -EPROTO; } } @@ -370,7 +374,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) abort: _debug("abort %d, %d", ret, abort_code); - rxrpc_abort_connection(conn, -ret, abort_code); + rxrpc_abort_connection(conn, ret, abort_code); _leave(" [aborted]"); } @@ -419,9 +423,8 @@ void rxrpc_process_connection(struct work_struct *work) goto out; protocol_error: - if (rxrpc_abort_connection(conn, -ret, abort_code) < 0) + if 
(rxrpc_abort_connection(conn, ret, abort_code) < 0) goto requeue_and_leave; rxrpc_free_skb(skb, rxrpc_skb_rx_freed); - _leave(" [EPROTO]"); goto out; } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 18b2ad8be8e2b57dd57ef846287add68b027b08e..45dba732a3b4743ba37c46d05b02125f16eac23d 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -30,7 +30,7 @@ static void rxrpc_proto_abort(const char *why, struct rxrpc_call *call, rxrpc_seq_t seq) { - if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) { + if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) { set_bit(RXRPC_CALL_EV_ABORT, &call->events); rxrpc_queue_call(call); } @@ -665,6 +665,8 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (rwind > call->tx_winsize) wake = true; + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, + ntohl(ackinfo->rwind), wake); call->tx_winsize = rwind; } @@ -877,7 +879,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb) } /* - * Process an ABORT packet. + * Process an ABORT packet directed at a call. */ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb) { @@ -892,10 +894,12 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb) &wtmp, sizeof(wtmp)) >= 0) abort_code = ntohl(wtmp); + trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code); + _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code); if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, - abort_code, ECONNABORTED)) + abort_code, -ECONNABORTED)) rxrpc_notify_socket(call); } @@ -958,7 +962,7 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, case RXRPC_CALL_COMPLETE: break; default: - if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, ESHUTDOWN)) { + if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) { set_bit(RXRPC_CALL_EV_ABORT, &call->events); rxrpc_queue_call(call); } @@ -1017,8 +1021,11 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) struct rxrpc_wire_header whdr; /* dig out the RxRPC connection details */ - if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) + if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) { + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, + tracepoint_string("bad_hdr")); return -EBADMSG; + } memset(sp, 0, sizeof(*sp)); sp->hdr.epoch = ntohl(whdr.epoch); diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c index 7d4375e557e6ee4a50961ad25d9e6f01a34290db..af276f173b10ebb26a8b68d2560ad958b8f3d09a 100644 --- a/net/rxrpc/insecure.c +++ b/net/rxrpc/insecure.c @@ -46,7 +46,10 @@ static int none_respond_to_challenge(struct rxrpc_connection *conn, struct sk_buff *skb, u32 *_abort_code) { - *_abort_code = RX_PROTOCOL_ERROR; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, + tracepoint_string("chall_none")); return -EPROTO; } @@ -54,7 +57,10 @@ static int none_verify_response(struct rxrpc_connection *conn, struct sk_buff *skb, u32 *_abort_code) { - *_abort_code = RX_PROTOCOL_ERROR; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, + tracepoint_string("resp_none")); return -EPROTO; } diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index bf13b8470c9ad51783325d7f47c32414b07f0c40..1ed9c0c2e94f1c70ab0ca61fa16e87f76530f78a 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -296,7 +296,7 @@ void rxrpc_peer_error_distributor(struct work_struct *work) 
hlist_del_init(&call->error_link); rxrpc_see_call(call); - if (rxrpc_set_call_completion(call, compl, 0, error)) + if (rxrpc_set_call_completion(call, compl, 0, -error)) rxrpc_notify_socket(call); } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 3e2f1a8e9c5b51bf90ce8679c06b6ec8a8958ea9..f9caf3b775097f62e8cb27fe12f3e925a3f8c323 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -83,11 +83,11 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg) ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp); break; case RXRPC_CALL_NETWORK_ERROR: - tmp = call->error; + tmp = -call->error; ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp); break; case RXRPC_CALL_LOCAL_ERROR: - tmp = call->error; + tmp = -call->error; ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp); break; default: @@ -682,14 +682,16 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, return ret; short_data: + trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data")); ret = -EBADMSG; goto out; excess_data: + trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data")); ret = -EMSGSIZE; goto out; call_complete: *_abort = call->abort_code; - ret = -call->error; + ret = call->error; if (call->completion == RXRPC_CALL_SUCCEEDED) { ret = 1; if (size > 0) diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 4374e7b9c7bff9fdd1d36a5a9fc8f135414ed360..1bb9b2ccc2673714367d2ec4356d39b5a3839895 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -148,15 +148,13 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, u32 data_size, void *sechdr) { - struct rxrpc_skb_priv *sp; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxkad_level1_hdr hdr; struct rxrpc_crypt iv; struct scatterlist sg; u16 check; - sp = rxrpc_skb(skb); - _enter(""); check = sp->hdr.seq ^ call->call_id; @@ -323,6 +321,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_crypt iv; struct scatterlist sg[16]; struct sk_buff *trailer; + bool aborted; u32 data_size, buf; u16 check; int nsg; @@ -330,7 +329,8 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, _enter(""); if (len < 8) { - rxrpc_abort_call("V1H", call, seq, RXKADSEALEDINCON, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H", + RXKADSEALEDINCON); goto protocol_error; } @@ -355,7 +355,8 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, /* Extract the decrypted packet length */ if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) { - rxrpc_abort_call("XV1", call, seq, RXKADDATALEN, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1", + RXKADDATALEN); goto protocol_error; } offset += sizeof(sechdr); @@ -368,12 +369,14 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, check ^= seq ^ call->call_id; check &= 0xffff; if (check != 0) { - rxrpc_abort_call("V1C", call, seq, RXKADSEALEDINCON, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C", + RXKADSEALEDINCON); goto protocol_error; } if (data_size > len) { - rxrpc_abort_call("V1L", call, seq, RXKADDATALEN, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L", + RXKADDATALEN); goto protocol_error; } @@ -381,8 +384,8 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, return 0; protocol_error: - rxrpc_send_abort_packet(call); - _leave(" = 
-EPROTO"); + if (aborted) + rxrpc_send_abort_packet(call); return -EPROTO; nomem: @@ -403,6 +406,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_crypt iv; struct scatterlist _sg[4], *sg; struct sk_buff *trailer; + bool aborted; u32 data_size, buf; u16 check; int nsg; @@ -410,7 +414,8 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, _enter(",{%d}", skb->len); if (len < 8) { - rxrpc_abort_call("V2H", call, seq, RXKADSEALEDINCON, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H", + RXKADSEALEDINCON); goto protocol_error; } @@ -445,7 +450,8 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, /* Extract the decrypted packet length */ if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) { - rxrpc_abort_call("XV2", call, seq, RXKADDATALEN, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2", + RXKADDATALEN); goto protocol_error; } offset += sizeof(sechdr); @@ -458,12 +464,14 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, check ^= seq ^ call->call_id; check &= 0xffff; if (check != 0) { - rxrpc_abort_call("V2C", call, seq, RXKADSEALEDINCON, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C", + RXKADSEALEDINCON); goto protocol_error; } if (data_size > len) { - rxrpc_abort_call("V2L", call, seq, RXKADDATALEN, EPROTO); + aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L", + RXKADDATALEN); goto protocol_error; } @@ -471,8 +479,8 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, return 0; protocol_error: - rxrpc_send_abort_packet(call); - _leave(" = -EPROTO"); + if (aborted) + rxrpc_send_abort_packet(call); return -EPROTO; nomem: @@ -491,6 +499,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher); struct rxrpc_crypt iv; struct scatterlist sg; + bool aborted; u16 cksum; u32 x, y; @@ -522,10 +531,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, cksum = 1; /* zero checksums are not permitted */ if (cksum != expected_cksum) { - rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO); - rxrpc_send_abort_packet(call); - _leave(" = -EPROTO [csum failed]"); - return -EPROTO; + aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK", + RXKADSEALEDINCON); + goto protocol_error; } switch (call->conn->params.security_level) { @@ -538,6 +546,11 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, default: return -ENOANO; } + +protocol_error: + if (aborted) + rxrpc_send_abort_packet(call); + return -EPROTO; } /* @@ -754,22 +767,23 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, struct rxkad_response resp __attribute__((aligned(8))); /* must be aligned for crypto */ struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + const char *eproto; u32 version, nonce, min_level, abort_code; int ret; _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); - if (!conn->params.key) { - _leave(" = -EPROTO [no key]"); - return -EPROTO; - } + eproto = tracepoint_string("chall_no_key"); + abort_code = RX_PROTOCOL_ERROR; + if (!conn->params.key) + goto protocol_error; + abort_code = RXKADEXPIRED; ret = key_validate(conn->params.key); - if (ret < 0) { - *_abort_code = RXKADEXPIRED; - return ret; - } + if (ret < 0) + goto other_error; + eproto = tracepoint_string("chall_short"); abort_code = 
RXKADPACKETSHORT; if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), &challenge, sizeof(challenge)) < 0) @@ -782,13 +796,15 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }", sp->hdr.serial, version, nonce, min_level); + eproto = tracepoint_string("chall_ver"); abort_code = RXKADINCONSISTENCY; if (version != RXKAD_VERSION) goto protocol_error; abort_code = RXKADLEVELFAIL; + ret = -EACCES; if (conn->params.security_level < min_level) - goto protocol_error; + goto other_error; token = conn->params.key->payload.data[0]; @@ -815,28 +831,34 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, return rxkad_send_response(conn, &sp->hdr, &resp, token->kad); protocol_error: + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto); + ret = -EPROTO; +other_error: *_abort_code = abort_code; - _leave(" = -EPROTO [%d]", abort_code); - return -EPROTO; + return ret; } /* * decrypt the kerberos IV ticket in the response */ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, + struct sk_buff *skb, void *ticket, size_t ticket_len, struct rxrpc_crypt *_session_key, time_t *_expiry, u32 *_abort_code) { struct skcipher_request *req; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_crypt iv, key; struct scatterlist sg[1]; struct in_addr addr; unsigned int life; + const char *eproto; time_t issue, now; bool little_endian; int ret; + u32 abort_code; u8 *p, *q, *name, *end; _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key)); @@ -847,11 +869,11 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, if (ret < 0) { switch (ret) { case -EKEYEXPIRED: - *_abort_code = RXKADEXPIRED; - goto error; + abort_code = RXKADEXPIRED; + goto other_error; default: - *_abort_code = RXKADNOAUTH; - goto error; + abort_code = RXKADNOAUTH; + goto other_error; } } @@ -860,13 +882,11 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, memcpy(&iv, &conn->server_key->payload.data[2], sizeof(iv)); + ret = -ENOMEM; req = skcipher_request_alloc(conn->server_key->payload.data[0], GFP_NOFS); - if (!req) { - *_abort_code = RXKADNOAUTH; - ret = -ENOMEM; - goto error; - } + if (!req) + goto temporary_error; sg_init_one(&sg[0], ticket, ticket_len); skcipher_request_set_callback(req, 0, NULL, NULL); @@ -877,11 +897,12 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, p = ticket; end = p + ticket_len; -#define Z(size) \ +#define Z(field) \ ({ \ u8 *__str = p; \ + eproto = tracepoint_string("rxkad_bad_"#field); \ q = memchr(p, 0, end - p); \ - if (!q || q - p > (size)) \ + if (!q || q - p > (field##_SZ)) \ goto bad_ticket; \ for (; p < q; p++) \ if (!isprint(*p)) \ @@ -896,17 +917,18 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, p++; /* extract the authentication name */ - name = Z(ANAME_SZ); + name = Z(ANAME); _debug("KIV ANAME: %s", name); /* extract the principal's instance */ - name = Z(INST_SZ); + name = Z(INST); _debug("KIV INST : %s", name); /* extract the principal's authentication domain */ - name = Z(REALM_SZ); + name = Z(REALM); _debug("KIV REALM: %s", name); + eproto = tracepoint_string("rxkad_bad_len"); if (end - p < 4 + 8 + 4 + 2) goto bad_ticket; @@ -941,36 +963,37 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, /* check the ticket is in date */ if (issue > now) { - *_abort_code = RXKADNOAUTH; + abort_code = RXKADNOAUTH; ret = -EKEYREJECTED; - goto error; + goto other_error; } if (issue < now - life) { - *_abort_code = RXKADEXPIRED; + 
abort_code = RXKADEXPIRED; ret = -EKEYEXPIRED; - goto error; + goto other_error; } *_expiry = issue + life; /* get the service name */ - name = Z(SNAME_SZ); + name = Z(SNAME); _debug("KIV SNAME: %s", name); /* get the service instance name */ - name = Z(INST_SZ); + name = Z(INST); _debug("KIV SINST: %s", name); - - ret = 0; -error: - _leave(" = %d", ret); - return ret; + return 0; bad_ticket: - *_abort_code = RXKADBADTICKET; - ret = -EBADMSG; - goto error; + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto); + abort_code = RXKADBADTICKET; + ret = -EPROTO; +other_error: + *_abort_code = abort_code; + return ret; +temporary_error: + return ret; } /* @@ -1020,6 +1043,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, __attribute__((aligned(8))); /* must be aligned for crypto */ struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_crypt session_key; + const char *eproto; time_t expiry; void *ticket; u32 abort_code, version, kvno, ticket_len, level; @@ -1028,6 +1052,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); + eproto = tracepoint_string("rxkad_rsp_short"); abort_code = RXKADPACKETSHORT; if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), &response, sizeof(response)) < 0) @@ -1041,40 +1066,43 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }", sp->hdr.serial, version, kvno, ticket_len); + eproto = tracepoint_string("rxkad_rsp_ver"); abort_code = RXKADINCONSISTENCY; if (version != RXKAD_VERSION) goto protocol_error; + eproto = tracepoint_string("rxkad_rsp_tktlen"); abort_code = RXKADTICKETLEN; if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) goto protocol_error; + eproto = tracepoint_string("rxkad_rsp_unkkey"); abort_code = RXKADUNKNOWNKEY; if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) goto protocol_error; /* extract the kerberos ticket and decrypt and decode it */ + ret = -ENOMEM; ticket = kmalloc(ticket_len, GFP_NOFS); if (!ticket) - return -ENOMEM; + goto temporary_error; + eproto = tracepoint_string("rxkad_tkt_short"); abort_code = RXKADPACKETSHORT; if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), ticket, ticket_len) < 0) goto protocol_error_free; - ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key, - &expiry, &abort_code); - if (ret < 0) { - *_abort_code = abort_code; - kfree(ticket); - return ret; - } + ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, + &expiry, _abort_code); + if (ret < 0) + goto temporary_error_free; /* use the session key from inside the ticket to decrypt the * response */ rxkad_decrypt_response(conn, &response, &session_key); + eproto = tracepoint_string("rxkad_rsp_param"); abort_code = RXKADSEALEDINCON; if (ntohl(response.encrypted.epoch) != conn->proto.epoch) goto protocol_error_free; @@ -1085,6 +1113,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, csum = response.encrypted.checksum; response.encrypted.checksum = 0; rxkad_calc_response_checksum(&response); + eproto = tracepoint_string("rxkad_rsp_csum"); if (response.encrypted.checksum != csum) goto protocol_error_free; @@ -1093,11 +1122,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, struct rxrpc_call *call; u32 call_id = ntohl(response.encrypted.call_id[i]); + eproto = tracepoint_string("rxkad_rsp_callid"); if (call_id > INT_MAX) goto protocol_error_unlock; + eproto = tracepoint_string("rxkad_rsp_callctr"); if (call_id < 
conn->channels[i].call_counter) goto protocol_error_unlock; + + eproto = tracepoint_string("rxkad_rsp_callst"); if (call_id > conn->channels[i].call_counter) { call = rcu_dereference_protected( conn->channels[i].call, @@ -1109,10 +1142,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, } spin_unlock(&conn->channel_lock); + eproto = tracepoint_string("rxkad_rsp_seq"); abort_code = RXKADOUTOFSEQUENCE; if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1) goto protocol_error_free; + eproto = tracepoint_string("rxkad_rsp_level"); abort_code = RXKADLEVELFAIL; level = ntohl(response.encrypted.level); if (level > RXRPC_SECURITY_ENCRYPT) @@ -1123,10 +1158,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, * this the connection security can be handled in exactly the same way * as for a client connection */ ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno); - if (ret < 0) { - kfree(ticket); - return ret; - } + if (ret < 0) + goto temporary_error_free; kfree(ticket); _leave(" = 0"); @@ -1137,9 +1170,18 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, protocol_error_free: kfree(ticket); protocol_error: + trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto); *_abort_code = abort_code; - _leave(" = -EPROTO [%d]", abort_code); return -EPROTO; + +temporary_error_free: + kfree(ticket); +temporary_error: + /* Ignore the response packet if we got a temporary error such as + * ENOMEM. We just want to send the challenge again. Note that we + * also come out this way if the ticket decryption fails. + */ + return ret; } /* diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 97ab214ca4118d7a451a4e56a916bd5809ae81f3..96ffa5d5733bd73249e32e2c10a420af9946e9b2 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -556,7 +556,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = -ESHUTDOWN; } else if (cmd == RXRPC_CMD_SEND_ABORT) { ret = 0; - if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED)) + if (rxrpc_abort_call("CMD", call, 0, abort_code, -ECONNABORTED)) ret = rxrpc_send_abort_packet(call); } else if (cmd != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; @@ -623,7 +623,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, read_unlock_bh(&call->state_lock); break; default: - /* Request phase complete for this client call */ + /* Request phase complete for this client call */ + trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send")); ret = -EPROTO; break; } @@ -642,20 +643,24 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data); * @error: Local error value * @why: 3-char string indicating why. * - * Allow a kernel service to abort a call, if it's still in an abortable state. + * Allow a kernel service to abort a call if it's still in an abortable state, + * and return true if the call was aborted, false if it was already complete.
*/ -void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, +bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, u32 abort_code, int error, const char *why) { + bool aborted; + _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why); mutex_lock(&call->user_mutex); - if (rxrpc_abort_call(why, call, 0, abort_code, error)) + aborted = rxrpc_abort_call(why, call, 0, abort_code, error); + if (aborted) rxrpc_send_abort_packet(call); mutex_unlock(&call->user_mutex); - _leave(""); + return aborted; } EXPORT_SYMBOL(rxrpc_kernel_abort_call); diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 403790cce7d2324054aa48b49c70f7d4deff8a2a..9fb84f0de6af0c3a75f190ed66cab0dc771db5e4 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -352,6 +352,51 @@ config NET_SCH_PLUG To compile this code as a module, choose M here: the module will be called sch_plug. +menuconfig NET_SCH_DEFAULT + bool "Allow overriding the default queue discipline" + ---help--- + Support for selecting the default queuing discipline. + + Nearly all users can safely say no here, and the default + of pfifo_fast will be used. Many distributions already set + the default value via /proc/sys/net/core/default_qdisc. + + If unsure, say N. + +if NET_SCH_DEFAULT + +choice + prompt "Default queuing discipline" + default DEFAULT_PFIFO_FAST + help + Select the queueing discipline that will be used by default + for all network devices. + + config DEFAULT_FQ + bool "Fair Queue" if NET_SCH_FQ + + config DEFAULT_CODEL + bool "Controlled Delay" if NET_SCH_CODEL + + config DEFAULT_FQ_CODEL + bool "Fair Queue Controlled Delay" if NET_SCH_FQ_CODEL + + config DEFAULT_SFQ + bool "Stochastic Fair Queue" if NET_SCH_SFQ + + config DEFAULT_PFIFO_FAST + bool "Priority FIFO Fast" +endchoice + +config DEFAULT_NET_SCH + string + default "pfifo_fast" if DEFAULT_PFIFO_FAST + default "fq" if DEFAULT_FQ + default "fq_codel" if DEFAULT_FQ_CODEL + default "sfq" if DEFAULT_SFQ + default "pfifo_fast" +endif + comment "Classification" config NET_CLS diff --git a/net/sched/act_api.c b/net/sched/act_api.c index e05b924618a03e6a58655873700ee66e0688b7ad..a90e8f355c00e9a39b4a17403b6c7459b6ea9c2f 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -428,24 +428,49 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) return res; } +/* TCA_ACT_MAX_PRIO is 32; jump counts can range up to 32 */ +#define TCA_ACT_MAX_PRIO_MASK 0x1FF int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res) { int ret = -1, i; + u32 jmp_prgcnt = 0; + u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */ if (skb_skip_tc_classify(skb)) return TC_ACT_OK; +restart_act_graph: for (i = 0; i < nr_actions; i++) { const struct tc_action *a = actions[i]; + if (jmp_prgcnt > 0) { + jmp_prgcnt -= 1; + continue; + } repeat: ret = a->ops->act(skb, a, res); if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ + + if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) { + jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK; + if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) { + /* faulty opcode, stop pipeline */ + return TC_ACT_OK; + } else { + jmp_ttl -= 1; + if (jmp_ttl > 0) + goto restart_act_graph; + else /* faulty graph, stop pipeline */ + return TC_ACT_OK; + } + } + if (ret != TC_ACT_PIPE) break; } + return ret; } EXPORT_SYMBOL(tcf_action_exec); @@ -558,7 +583,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla, int err; if (name == NULL) { - err = nla_parse_nested(tb, TCA_ACT_MAX,
nla, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); if (err < 0) goto err_out; err = -EINVAL; @@ -663,7 +688,7 @@ int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est, int err; int i; - err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL); if (err < 0) return err; @@ -795,7 +820,7 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, int index; int err; - err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); if (err < 0) goto err_out; @@ -844,7 +869,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, b = skb_tail_pointer(skb); - err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); + err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); if (err < 0) goto err_out; @@ -930,7 +955,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, struct tc_action *act; LIST_HEAD(actions); - ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); + ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL); if (ret < 0) return ret; @@ -1002,7 +1027,8 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, return tcf_add_notify(net, n, &actions, portid); } -static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) +static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_ACT_MAX + 1]; @@ -1013,7 +1039,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; - ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); + ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL, + extack); if (ret < 0) return ret; @@ -1060,19 +1087,20 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n) struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *kind; - if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0) + if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, + NULL, NULL) < 0) return NULL; tb1 = nla[TCA_ACT_TAB]; if (tb1 == NULL) return NULL; if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), - NLMSG_ALIGN(nla_len(tb1)), NULL) < 0) + NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0) return NULL; if (tb[1] == NULL) return NULL; - if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0) + if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0) return NULL; kind = tb2[TCA_ACT_KIND]; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 520baa41cba3a8d24a659b67c5a5ed8a44129f37..d33947d6e9d017a05b704d233ec97f73abb2ab74 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -283,7 +283,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, if (!nla) return -EINVAL; - ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy); + ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy, NULL); if (ret < 0) return ret; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index f9bb43c25697e70d18fe9bbba90f6e98dfe05759..2155bc6c6a1e79049de9dc9c8e611b592e42a77f 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -109,7 +109,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, if (!nla) return -EINVAL; - ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy); + ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy, + NULL); if (ret < 0) return 
ret; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index e978ccd4402cbc68ba1c46e20909a047978df1c2..ab6fdbd34db774bdb9d8a958ead2c808a3405d13 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -59,7 +59,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy); + err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL); if (err < 0) return err; @@ -181,6 +181,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl, struct tcphdr *tcph; const struct iphdr *iph; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + return 1; + tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; @@ -202,6 +205,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl, struct tcphdr *tcph; const struct ipv6hdr *ip6h; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + return 1; + tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; @@ -225,6 +231,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, const struct iphdr *iph; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, @@ -278,6 +287,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, const struct ipv6hdr *ip6h; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index e6c874a2b283f6e265b61a39f1fcd053389b9051..99afe8b1f1fb015584f303e7352da8b6171774eb 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -73,7 +73,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy); + err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 71e7ff22f7c92a86cacad9a1b8d18d3d726f52fb..c5dec308b8b1eb29505fed1b9a71b3ef70f5e91a 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -443,7 +443,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, int ret = 0; int err; - err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); + err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL); if (err < 0) return err; @@ -514,7 +514,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (tb[TCA_IFE_METALST]) { err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], - NULL); + NULL, NULL); if (err) { metadata_parse_err: if (exists) @@ -603,8 +603,8 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, return -1; } -int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, - u16 metaid, u16 mlen, void *mdata) +static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, + u16 metaid, u16 mlen, void *mdata) { struct tcf_meta_info *e; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 992ef8d624f11819e032411e72ea60aa6aa93ba1..36f0ced9e60c03297e195135ca5a8a53d1a3a27b 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -107,7 +107,7 @@ static int __tcf_ipt_init(struct tc_action_net 
*tn, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy); + err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index af49c7dca8608cebbaa0224976ae7df104e5526f..1b5549ababd4690617b5ff0c3842c6cbb4806a41 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -87,7 +87,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); + ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy, NULL); if (ret < 0) return ret; if (tb[TCA_MIRRED_PARMS] == NULL) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 9b6aec665495992fd6d63773890b5e897ba51819..9016ab8a0649780a02d8b29f76f98f19c0283e11 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -50,7 +50,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy); + err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index c1310472f620fd44b9ae8a6cfeefef5419cad54e..164b5ac094be6d8bbd7a04aa8aa962a3c693ab44 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -72,7 +72,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, } err = nla_parse_nested(tb, TCA_PEDIT_KEY_EX_MAX, ka, - pedit_key_ex_policy); + pedit_key_ex_policy, NULL); if (err) goto err_out; @@ -147,7 +147,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy); + err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 0ba91d1ce99480c4817684dbe0b4fde61811e03e..f42008b293112d1a6da2bcf6ef8af564b382ba39 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -90,7 +90,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy); + err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 0b8217b4763f59d2d4a71684080536b080ab27e1..59d6645a4007818656376b02a9c5a458e028420b 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -50,7 +50,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!nla) return -EINVAL; - ret = nla_parse_nested(tb, TCA_SAMPLE_MAX, nla, sample_policy); + ret = nla_parse_nested(tb, TCA_SAMPLE_MAX, nla, sample_policy, NULL); if (ret < 0) return ret; if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] || diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 823a73ad0c602b8e079be04934f8a57d53480da1..43605e7ce05107adb79bd1dbc562f4961eb20e51 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -94,7 +94,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_DEF_MAX, nla, simple_policy); + err = nla_parse_nested(tb, TCA_DEF_MAX, nla, simple_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 
06ccae3c12eecf85383105489320537d52c4d948..6b3e65d7de0c2e81240618e4500ee894819df833 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -82,7 +82,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, if (nla == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy); + err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index c736627f8f4a0e0ff86db535ec95459a417e4ada..a73c4bbcada293b2923a41572104d6f9362d37d9 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -103,7 +103,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (!nla) return -EINVAL; - err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy); + err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy, NULL); if (err < 0) return err; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index e3a58e02119877d237f9aa18c5f9d9fdfb44d0c9..b9a2f241a5b3adca04a5d162569f7908c01c2912 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -89,7 +89,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, if (!nla) return -EINVAL; - err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy); + err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy, + NULL); if (err < 0) return err; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 19e0dba305ce8101d9db33ff0a1eaab849a730d0..13ba3a89f675d7d5ddaeab893a9d548be735e39e 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -121,7 +121,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, if (!nla) return -EINVAL; - err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy); + err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy, NULL); if (err < 0) return err; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 732f7cae459d4656aa8d69020f44a66abb501f34..22f88b35a5463e1d896271f1e1b05675bcaa1955 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -178,14 +178,11 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, return ERR_PTR(err); } -static bool tcf_proto_destroy(struct tcf_proto *tp, bool force) +static void tcf_proto_destroy(struct tcf_proto *tp) { - if (tp->ops->destroy(tp, force)) { - module_put(tp->ops->owner); - kfree_rcu(tp, rcu); - return true; - } - return false; + tp->ops->destroy(tp); + module_put(tp->ops->owner); + kfree_rcu(tp, rcu); } void tcf_destroy_chain(struct tcf_proto __rcu **fl) @@ -194,14 +191,15 @@ void tcf_destroy_chain(struct tcf_proto __rcu **fl) while ((tp = rtnl_dereference(*fl)) != NULL) { RCU_INIT_POINTER(*fl, tp->next); - tcf_proto_destroy(tp, true); + tcf_proto_destroy(tp); } } EXPORT_SYMBOL(tcf_destroy_chain); /* Add/change/delete/get a filter node */ -static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) +static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; @@ -229,7 +227,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) replay: tp_created = 0; - err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); if (err < 0) return err; @@ -360,7 +358,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) RCU_INIT_POINTER(*back, next); tfilter_notify(net, skb, n, tp, fh, 
RTM_DELTFILTER, false); - tcf_proto_destroy(tp, true); + tcf_proto_destroy(tp); err = 0; goto errout; } @@ -371,24 +369,28 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) goto errout; } } else { + bool last; + switch (n->nlmsg_type) { case RTM_NEWTFILTER: if (n->nlmsg_flags & NLM_F_EXCL) { if (tp_created) - tcf_proto_destroy(tp, true); + tcf_proto_destroy(tp); err = -EEXIST; goto errout; } break; case RTM_DELTFILTER: - err = tp->ops->delete(tp, fh); + err = tp->ops->delete(tp, fh, &last); if (err) goto errout; next = rtnl_dereference(tp->next); tfilter_notify(net, skb, n, tp, t->tcm_handle, RTM_DELTFILTER, false); - if (tcf_proto_destroy(tp, false)) + if (last) { RCU_INIT_POINTER(*back, next); + tcf_proto_destroy(tp); + } goto errout; case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, @@ -410,7 +412,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false); } else { if (tp_created) - tcf_proto_destroy(tp, true); + tcf_proto_destroy(tp); } errout: diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 5877f6061b57589ce9ba9226281e85f47eafb523..c4fd63a068f98b1288bacebd5f352af6037362b9 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -93,30 +93,28 @@ static void basic_delete_filter(struct rcu_head *head) kfree(f); } -static bool basic_destroy(struct tcf_proto *tp, bool force) +static void basic_destroy(struct tcf_proto *tp) { struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f, *n; - if (!force && !list_empty(&head->flist)) - return false; - list_for_each_entry_safe(f, n, &head->flist, link) { list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, basic_delete_filter); } kfree_rcu(head, rcu); - return true; } -static int basic_delete(struct tcf_proto *tp, unsigned long arg) +static int basic_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f = (struct basic_filter *) arg; list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, basic_delete_filter); + *last = list_empty(&head->flist); return 0; } @@ -174,7 +172,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; err = nla_parse_nested(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS], - basic_policy); + basic_policy, NULL); if (err < 0) return err; diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 80f688436dd70bae84f0c2fffd3d16c4b1d2c4da..5ebeae996e6327023bfce0307cd6b6a07c0860c2 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -274,25 +274,24 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog) call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu); } -static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) +static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { + struct cls_bpf_head *head = rtnl_dereference(tp->root); + __cls_bpf_delete(tp, (struct cls_bpf_prog *) arg); + *last = list_empty(&head->plist); return 0; } -static bool cls_bpf_destroy(struct tcf_proto *tp, bool force) +static void cls_bpf_destroy(struct tcf_proto *tp) { struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *tmp; - if (!force && !list_empty(&head->plist)) - return false; - list_for_each_entry_safe(prog, tmp, &head->plist, link) __cls_bpf_delete(tp, prog); kfree_rcu(head, rcu); - return true; } static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) 
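The classifier conversions above all follow one shape: ->destroy() becomes unconditional, and ->delete() gains a bool *last out-parameter so cls_api.c, rather than each classifier, decides when the now-empty tcf_proto may be unlinked and torn down. Here is a sketch of the new ->delete() contract, modelled on basic_delete(); the example_* names are hypothetical, not part of this patch:

#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>

/* Hypothetical classifier state, mirroring the list-based classifiers
 * converted above (cls_basic, cls_bpf, cls_flow, ...).
 */
struct example_head {
	struct list_head flist;
	struct rcu_head rcu;
};

struct example_filter {
	struct list_head link;
	struct tcf_result res;
	struct rcu_head rcu;
};

static void example_free_filter(struct rcu_head *head)
{
	kfree(container_of(head, struct example_filter, rcu));
}

static int example_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
	struct example_head *head = rtnl_dereference(tp->root);
	struct example_filter *f = (struct example_filter *)arg;

	list_del_rcu(&f->link);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, example_free_filter);
	/* Tell cls_api whether this was the final filter; only then will
	 * it unlink the tcf_proto and call ->destroy() on it.
	 */
	*last = list_empty(&head->flist);
	return 0;
}

Classifiers with no per-filter state to remove, such as cls_cgroup and cls_matchall in the hunks nearby, simply keep returning -EOPNOTSUPP from ->delete().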
@@ -478,7 +477,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, if (tca[TCA_OPTIONS] == NULL) return -EINVAL; - ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy); + ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy, + NULL); if (ret < 0) return ret; diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index c1f20077837f06bb1998f5621c797e575e10ca21..12ce547eea04dfb57d3f47540b57e8150a8ebca4 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -99,7 +99,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, new->handle = handle; new->tp = tp; err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], - cgroup_policy); + cgroup_policy, NULL); if (err < 0) goto errout; @@ -131,20 +131,16 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, return err; } -static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force) +static void cls_cgroup_destroy(struct tcf_proto *tp) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); - if (!force) - return false; /* Head can still be NULL due to cls_cgroup_init(). */ if (head) call_rcu(&head->rcu, cls_cgroup_destroy_rcu); - - return true; } -static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg) +static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { return -EOPNOTSUPP; } diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 3d6b9286c203f298b14b5254e5c12cb4781eb4b1..3065752b9cda59e4cb2555acfe2ae26722152ffc 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -400,7 +400,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy); + err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy, NULL); if (err < 0) return err; @@ -508,9 +508,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, get_random_bytes(&fnew->hashrnd, 4); } - fnew->perturb_timer.function = flow_perturbation; - fnew->perturb_timer.data = (unsigned long)fnew; - init_timer_deferrable(&fnew->perturb_timer); + setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation, + (unsigned long)fnew); tcf_exts_change(tp, &fnew->exts, &e); tcf_em_tree_change(tp, &fnew->ematches, &t); @@ -563,12 +562,14 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, return err; } -static int flow_delete(struct tcf_proto *tp, unsigned long arg) +static int flow_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f = (struct flow_filter *)arg; list_del_rcu(&f->list); call_rcu(&f->rcu, flow_destroy_filter); + *last = list_empty(&head->filters); return 0; } @@ -584,20 +585,16 @@ static int flow_init(struct tcf_proto *tp) return 0; } -static bool flow_destroy(struct tcf_proto *tp, bool force) +static void flow_destroy(struct tcf_proto *tp) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f, *next; - if (!force && !list_empty(&head->filters)) - return false; - list_for_each_entry_safe(f, next, &head->filters, list) { list_del_rcu(&f->list); call_rcu(&f->rcu, flow_destroy_filter); } kfree_rcu(head, rcu); - return true; } static unsigned long flow_get(struct tcf_proto *tp, u32 handle) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9d0c99d2e9fbcc35aee5a51274cf9fe7f542a14f..ca526c0881bd6b4bcff19835befb537f45c6d5ff 100644 --- a/net/sched/cls_flower.c +++ 
b/net/sched/cls_flower.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,7 @@ struct fl_flow_key { struct flow_dissector_key_ipv6_addrs enc_ipv6; }; struct flow_dissector_key_ports enc_tp; + struct flow_dissector_key_mpls mpls; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ struct fl_flow_mask_range { @@ -328,21 +330,16 @@ static void fl_destroy_rcu(struct rcu_head *rcu) schedule_work(&head->work); } -static bool fl_destroy(struct tcf_proto *tp, bool force) +static void fl_destroy(struct tcf_proto *tp) { struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *f, *next; - if (!force && !list_empty(&head->filters)) - return false; - list_for_each_entry_safe(f, next, &head->filters, list) __fl_delete(tp, f); __module_get(THIS_MODULE); call_rcu(&head->rcu, fl_destroy_rcu); - - return true; } static unsigned long fl_get(struct tcf_proto *tp, u32 handle) @@ -423,6 +420,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, + [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, }; static void fl_set_key_val(struct nlattr **tb, @@ -438,6 +439,41 @@ static void fl_set_key_val(struct nlattr **tb, memcpy(mask, nla_data(tb[mask_type]), len); } +static int fl_set_key_mpls(struct nlattr **tb, + struct flow_dissector_key_mpls *key_val, + struct flow_dissector_key_mpls *key_mask) +{ + if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { + key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); + key_mask->mpls_ttl = MPLS_TTL_MASK; + } + if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { + u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); + + if (bos & ~MPLS_BOS_MASK) + return -EINVAL; + key_val->mpls_bos = bos; + key_mask->mpls_bos = MPLS_BOS_MASK; + } + if (tb[TCA_FLOWER_KEY_MPLS_TC]) { + u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); + + if (tc & ~MPLS_TC_MASK) + return -EINVAL; + key_val->mpls_tc = tc; + key_mask->mpls_tc = MPLS_TC_MASK; + } + if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { + u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); + + if (label & ~MPLS_LABEL_MASK) + return -EINVAL; + key_val->mpls_label = label; + key_mask->mpls_label = MPLS_LABEL_MASK; + } + return 0; +} + static void fl_set_key_vlan(struct nlattr **tb, struct flow_dissector_key_vlan *key_val, struct flow_dissector_key_vlan *key_mask) @@ -594,6 +630,11 @@ static int fl_set_key(struct net *net, struct nlattr **tb, &mask->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE_MASK, sizeof(key->icmp.code)); + } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || + key->basic.n_proto == htons(ETH_P_MPLS_MC)) { + ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls); + if (ret) + return ret; } else if (key->basic.n_proto == htons(ETH_P_ARP) || key->basic.n_proto == htons(ETH_P_RARP)) { fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, @@ -729,6 +770,8 @@ static void fl_init_dissector(struct cls_fl_head *head, FLOW_DISSECTOR_KEY_ICMP, icmp); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_ARP, arp); + FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, + FLOW_DISSECTOR_KEY_MPLS, mpls); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, FLOW_DISSECTOR_KEY_VLAN, vlan); FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, @@ -848,7 +891,8 @@ static int fl_change(struct net *net, 
struct sk_buff *in_skb, if (!tb) return -ENOBUFS; - err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy); + err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], + fl_policy, NULL); if (err < 0) goto errout_tb; @@ -946,7 +990,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, return err; } -static int fl_delete(struct tcf_proto *tp, unsigned long arg) +static int fl_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *f = (struct cls_fl_filter *) arg; @@ -955,6 +999,7 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg) rhashtable_remove_fast(&head->ht, &f->ht_node, head->ht_params); __fl_delete(tp, f); + *last = list_empty(&head->filters); return 0; } @@ -994,6 +1039,41 @@ static int fl_dump_key_val(struct sk_buff *skb, return 0; } +static int fl_dump_key_mpls(struct sk_buff *skb, + struct flow_dissector_key_mpls *mpls_key, + struct flow_dissector_key_mpls *mpls_mask) +{ + int err; + + if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask))) + return 0; + if (mpls_mask->mpls_ttl) { + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, + mpls_key->mpls_ttl); + if (err) + return err; + } + if (mpls_mask->mpls_tc) { + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, + mpls_key->mpls_tc); + if (err) + return err; + } + if (mpls_mask->mpls_label) { + err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, + mpls_key->mpls_label); + if (err) + return err; + } + if (mpls_mask->mpls_bos) { + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, + mpls_key->mpls_bos); + if (err) + return err; + } + return 0; +} + static int fl_dump_key_vlan(struct sk_buff *skb, struct flow_dissector_key_vlan *vlan_key, struct flow_dissector_key_vlan *vlan_mask) @@ -1099,6 +1179,9 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, sizeof(key->basic.n_proto))) goto nla_put_failure; + if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) + goto nla_put_failure; + if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan)) goto nla_put_failure; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 9dc63d54e1672e82cfae5f677a1ba811c72df337..d3885362e017e1b477c6c2e4d8abea4b1e021c53 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -127,20 +127,14 @@ static void fw_delete_filter(struct rcu_head *head) kfree(f); } -static bool fw_destroy(struct tcf_proto *tp, bool force) +static void fw_destroy(struct tcf_proto *tp) { struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; int h; if (head == NULL) - return true; - - if (!force) { - for (h = 0; h < HTSIZE; h++) - if (rcu_access_pointer(head->ht[h])) - return false; - } + return; for (h = 0; h < HTSIZE; h++) { while ((f = rtnl_dereference(head->ht[h])) != NULL) { @@ -150,17 +144,17 @@ static bool fw_destroy(struct tcf_proto *tp, bool force) call_rcu(&f->rcu, fw_delete_filter); } } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); - return true; } -static int fw_delete(struct tcf_proto *tp, unsigned long arg) +static int fw_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *)arg; struct fw_filter __rcu **fp; struct fw_filter *pfp; + int ret = -EINVAL; + int h; if (head == NULL || f == NULL) goto out; @@ -173,11 +167,21 @@ static int fw_delete(struct tcf_proto *tp, unsigned long arg) RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, fw_delete_filter); - return 
0; + ret = 0; + break; } } + + *last = true; + for (h = 0; h < HTSIZE; h++) { + if (rcu_access_pointer(head->ht[h])) { + *last = false; + break; + } + } + out: - return -EINVAL; + return ret; } static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = { @@ -250,7 +254,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (!opt) return handle ? -EINVAL : 0; /* Succeed if it is old method. */ - err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); + err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy, NULL); if (err < 0) return err; diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 224eb2c143462a39f3a05e78203344e44e6eb578..2efb36c08f2a57051f5412fcb479bc7e531e7fa3 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -90,19 +90,18 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, &offload); } -static bool mall_destroy(struct tcf_proto *tp, bool force) +static void mall_destroy(struct tcf_proto *tp) { struct cls_mall_head *head = rtnl_dereference(tp->root); struct net_device *dev = tp->q->dev_queue->dev; if (!head) - return true; + return; if (tc_should_offload(dev, tp, head->flags)) mall_destroy_hw_filter(tp, head, (unsigned long) head); call_rcu(&head->rcu, mall_destroy_rcu); - return true; } static unsigned long mall_get(struct tcf_proto *tp, u32 handle) @@ -161,8 +160,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (head) return -EEXIST; - err = nla_parse_nested(tb, TCA_MATCHALL_MAX, - tca[TCA_OPTIONS], mall_policy); + err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS], + mall_policy, NULL); if (err < 0) return err; @@ -216,7 +215,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, return err; } -static int mall_delete(struct tcf_proto *tp, unsigned long arg) +static int mall_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { return -EOPNOTSUPP; } diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 455fc8f83d0ae0ab48ba97fc2a11ce99e8e00784..d63d5502ee020350e72b818c5607000a21df915a 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -140,8 +140,6 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, goto failure; id = dst->tclassid; - if (head == NULL) - goto old_method; iif = inet_iif(skb); @@ -194,15 +192,6 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, route4_set_fastmap(head, id, iif, ROUTE4_FAILURE); failure: return -1; - -old_method: - if (id && (TC_H_MAJ(id) == 0 || - !(TC_H_MAJ(id^tp->q->handle)))) { - res->classid = id; - res->class = 0; - return 0; - } - return -1; } static inline u32 to_hash(u32 id) @@ -234,9 +223,6 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) struct route4_filter *f; unsigned int h1, h2; - if (!head) - return 0; - h1 = to_hash(handle); if (h1 > 256) return 0; @@ -276,20 +262,13 @@ static void route4_delete_filter(struct rcu_head *head) kfree(f); } -static bool route4_destroy(struct tcf_proto *tp, bool force) +static void route4_destroy(struct tcf_proto *tp) { struct route4_head *head = rtnl_dereference(tp->root); int h1, h2; if (head == NULL) - return true; - - if (!force) { - for (h1 = 0; h1 <= 256; h1++) { - if (rcu_access_pointer(head->table[h1])) - return false; - } - } + return; for (h1 = 0; h1 <= 256; h1++) { struct route4_bucket *b; @@ -312,12 +291,10 @@ static bool route4_destroy(struct tcf_proto *tp, bool force) kfree_rcu(b, rcu); } } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); - return true; } -static 
int route4_delete(struct tcf_proto *tp, unsigned long arg) +static int route4_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { struct route4_head *head = rtnl_dereference(tp->root); struct route4_filter *f = (struct route4_filter *)arg; @@ -325,7 +302,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) struct route4_filter *nf; struct route4_bucket *b; unsigned int h = 0; - int i; + int i, h1; if (!head || !f) return -EINVAL; @@ -356,16 +333,25 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) rt = rtnl_dereference(b->ht[i]); if (rt) - return 0; + goto out; } /* OK, session has no flows */ RCU_INIT_POINTER(head->table[to_hash(h)], NULL); kfree_rcu(b, rcu); + break; + } + } - return 0; +out: + *last = true; + for (h1 = 0; h1 <= 256; h1++) { + if (rcu_access_pointer(head->table[h1])) { + *last = false; + break; } } + return 0; } @@ -489,7 +475,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (opt == NULL) return handle ? -EINVAL : 0; - err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy); + err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL); if (err < 0) return err; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 322438fb3ffcb426194be6c1dd05893b27cdf51c..0d9d077986992926a79af6236b8840181517679d 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -152,8 +152,6 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; nhptr = ip_hdr(skb); #endif - if (unlikely(!head)) - return -1; restart: #if RSVP_DST_LEN == 4 @@ -302,22 +300,13 @@ static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) call_rcu(&f->rcu, rsvp_delete_filter_rcu); } -static bool rsvp_destroy(struct tcf_proto *tp, bool force) +static void rsvp_destroy(struct tcf_proto *tp) { struct rsvp_head *data = rtnl_dereference(tp->root); int h1, h2; if (data == NULL) - return true; - - if (!force) { - for (h1 = 0; h1 < 256; h1++) { - if (rcu_access_pointer(data->ht[h1])) - return false; - } - } - - RCU_INIT_POINTER(tp->root, NULL); + return; for (h1 = 0; h1 < 256; h1++) { struct rsvp_session *s; @@ -337,10 +326,9 @@ static bool rsvp_destroy(struct tcf_proto *tp, bool force) } } kfree_rcu(data, rcu); - return true; } -static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) +static int rsvp_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { struct rsvp_head *head = rtnl_dereference(tp->root); struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg; @@ -348,7 +336,7 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) unsigned int h = f->handle; struct rsvp_session __rcu **sp; struct rsvp_session *nsp, *s = f->sess; - int i; + int i, h1; fp = &s->ht[(h >> 8) & 0xFF]; for (nfp = rtnl_dereference(*fp); nfp; @@ -361,7 +349,7 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) for (i = 0; i <= 16; i++) if (s->ht[i]) - return 0; + goto out; /* OK, session has no flows */ sp = &head->ht[h & 0xFF]; @@ -370,13 +358,23 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) if (nsp == s) { RCU_INIT_POINTER(*sp, s->next); kfree_rcu(s, rcu); - return 0; + goto out; } } - return 0; + break; } } + +out: + *last = true; + for (h1 = 0; h1 < 256; h1++) { + if (rcu_access_pointer(head->ht[h1])) { + *last = false; + break; + } + } + return 0; } @@ -484,7 +482,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, if (opt == NULL) return handle ? 
-EINVAL : 0; - err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy); + err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy, NULL); if (err < 0) return err; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 0751245a6aced60163601b94ab1386e0c5e30565..8a8a58357c39b8e38c2d3740aaa93ed67866fe54 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -150,7 +150,7 @@ static void tcindex_destroy_fexts(struct rcu_head *head) kfree(f); } -static int tcindex_delete(struct tcf_proto *tp, unsigned long arg) +static int tcindex_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg; @@ -186,6 +186,8 @@ static int tcindex_delete(struct tcf_proto *tp, unsigned long arg) call_rcu(&f->rcu, tcindex_destroy_fexts); else call_rcu(&r->rcu, tcindex_destroy_rexts); + + *last = false; return 0; } @@ -193,7 +195,9 @@ static int tcindex_destroy_element(struct tcf_proto *tp, unsigned long arg, struct tcf_walker *walker) { - return tcindex_delete(tp, arg); + bool last; + + return tcindex_delete(tp, arg, &last); } static void __tcindex_destroy(struct rcu_head *head) @@ -482,7 +486,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb, if (!opt) return 0; - err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy); + err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy, NULL); if (err < 0) return err; @@ -529,14 +533,11 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) } } -static bool tcindex_destroy(struct tcf_proto *tp, bool force) +static void tcindex_destroy(struct tcf_proto *tp) { struct tcindex_data *p = rtnl_dereference(tp->root); struct tcf_walker walker; - if (!force) - return false; - pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); walker.count = 0; walker.skip = 0; @@ -544,7 +545,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force) tcindex_walk(tp, &walker); call_rcu(&p->rcu, __tcindex_destroy); - return true; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4dbe0c680fe6363a88ca47cb167dcae9327920d1..d20e72a095d578e65eda0dafc62217146aeea1c1 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -585,37 +585,13 @@ static bool ht_empty(struct tc_u_hnode *ht) return true; } -static bool u32_destroy(struct tcf_proto *tp, bool force) +static void u32_destroy(struct tcf_proto *tp) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); WARN_ON(root_ht == NULL); - if (!force) { - if (root_ht) { - if (root_ht->refcnt > 1) - return false; - if (root_ht->refcnt == 1) { - if (!ht_empty(root_ht)) - return false; - } - } - - if (tp_c->refcnt > 1) - return false; - - if (tp_c->refcnt == 1) { - struct tc_u_hnode *ht; - - for (ht = rtnl_dereference(tp_c->hlist); - ht; - ht = rtnl_dereference(ht->next)) - if (!ht_empty(ht)) - return false; - } - } - if (root_ht && --root_ht->refcnt == 0) u32_destroy_hnode(tp, root_ht); @@ -640,20 +616,22 @@ static bool u32_destroy(struct tcf_proto *tp, bool force) } tp->data = NULL; - return true; } -static int u32_delete(struct tcf_proto *tp, unsigned long arg) +static int u32_delete(struct tcf_proto *tp, unsigned long arg, bool *last) { struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); + struct tc_u_common *tp_c = tp->data; + int ret = 0; if (ht == NULL) - return 0; + goto out; if (TC_U32_KEY(ht->handle)) { u32_remove_hw_knode(tp, 
ht->handle); - return u32_delete_key(tp, (struct tc_u_knode *)ht); + ret = u32_delete_key(tp, (struct tc_u_knode *)ht); + goto out; } if (root_ht == ht) @@ -666,7 +644,40 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg) return -EBUSY; } - return 0; +out: + *last = true; + if (root_ht) { + if (root_ht->refcnt > 1) { + *last = false; + goto ret; + } + if (root_ht->refcnt == 1) { + if (!ht_empty(root_ht)) { + *last = false; + goto ret; + } + } + } + + if (tp_c->refcnt > 1) { + *last = false; + goto ret; + } + + if (tp_c->refcnt == 1) { + struct tc_u_hnode *ht; + + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) + if (!ht_empty(ht)) { + *last = false; + break; + } + } + +ret: + return ret; } #define NR_U32_NODE (1<<12) @@ -860,7 +871,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (opt == NULL) return handle ? -EINVAL : 0; - err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy); + err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, NULL); if (err < 0) return err; diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index ae7e4f5b348b86ad352e0011c8e0d0227a44f1e3..eb0e9bab54c175d706b6f89f354f2dd5b8793a50 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -912,7 +912,7 @@ static int em_meta_change(struct net *net, void *data, int len, struct tcf_meta_hdr *hdr; struct meta_match *meta = NULL; - err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy); + err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy, NULL); if (err < 0) goto errout; diff --git a/net/sched/ematch.c b/net/sched/ematch.c index fbb7ebfc58c6761f6afb58e62646908b10e2bf09..03b677bc07005c7f864077edad6f22ca9dadbfdd 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -314,7 +314,7 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, if (!nla) return 0; - err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy); + err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy, NULL); if (err < 0) goto errout; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index bcf49cd2278670197f2a7e9d4e9a62ae8d117468..bbe57d57b67fd498692bd41db49147511f1bb091 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -251,6 +251,15 @@ int qdisc_set_default(const char *name) return ops ? 0 : -ENOENT; } +#ifdef CONFIG_NET_SCH_DEFAULT +/* Set default value from kernel config */ +static int __init sch_default_qdisc(void) +{ + return qdisc_set_default(CONFIG_DEFAULT_NET_SCH); +} +late_initcall(sch_default_qdisc); +#endif + /* We know handle. Find qdisc among all qdisc's attached to device * (root qdisc, all its children, children of children etc.) 
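
The classifier hunks above all implement one new contract: ->delete() removes a single filter and reports through *last whether the tcf_proto is now empty, replacing the old ->destroy(tp, force) emptiness probe that each classifier used to duplicate. A minimal sketch of how a caller might consume the flag, assuming RTNL is held; the function name and the *back unlink pointer are illustrative, not the exact cls_api.c code:

	#include <net/pkt_cls.h>
	#include <net/sch_generic.h>

	/* Illustrative caller: delete one filter, then tear the whole
	 * tcf_proto down if ->delete() reported it was the last one.
	 */
	static int example_delete_filter(struct tcf_proto *tp,
					 struct tcf_proto __rcu **back,
					 unsigned long fh)
	{
		bool last;
		int err;

		err = tp->ops->delete(tp, fh, &last);
		if (err)
			return err;

		if (last) {
			RCU_INIT_POINTER(*back, rtnl_dereference(tp->next));
			tp->ops->destroy(tp);	/* returns void after this series */
			module_put(tp->ops->owner);
			kfree_rcu(tp, rcu);
		}
		return 0;
	}
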
* Note: caller either uses rtnl or rcu_read_lock() @@ -274,7 +283,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) return NULL; } -void qdisc_hash_add(struct Qdisc *q) +void qdisc_hash_add(struct Qdisc *q, bool invisible) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { struct Qdisc *root = qdisc_dev(q)->qdisc; @@ -282,6 +291,8 @@ void qdisc_hash_add(struct Qdisc *q) WARN_ON_ONCE(root == &noop_qdisc); ASSERT_RTNL(); hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); + if (invisible) + q->flags |= TCQ_F_INVISIBLE; } } EXPORT_SYMBOL(qdisc_hash_add); @@ -455,7 +466,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) u16 *tab = NULL; int err; - err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy); + err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL); if (err < 0) return ERR_PTR(err); if (!tb[TCA_STAB_BASE]) @@ -1003,7 +1014,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, goto err_out4; } - qdisc_hash_add(sch); + qdisc_hash_add(sch, false); return sch; } @@ -1114,7 +1125,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) * Delete/get qdisc. */ -static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n) +static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm = nlmsg_data(n); @@ -1129,7 +1141,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n) !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM; - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack); if (err < 0) return err; @@ -1183,7 +1195,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n) * Create/change qdisc. */ -static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n) +static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm; @@ -1198,7 +1211,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n) replay: /* Reinit, just in case something touches this. */ - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack); if (err < 0) return err; @@ -1401,9 +1414,14 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, return -1; } -static bool tc_qdisc_dump_ignore(struct Qdisc *q) +static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible) { - return (q->flags & TCQ_F_BUILTIN) ? true : false; + if (q->flags & TCQ_F_BUILTIN) + return true; + if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible) + return true; + + return false; } static int qdisc_notify(struct net *net, struct sk_buff *oskb, @@ -1417,12 +1435,12 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, if (!skb) return -ENOBUFS; - if (old && !tc_qdisc_dump_ignore(old)) { + if (old && !tc_qdisc_dump_ignore(old, false)) { if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) goto err_out; } - if (new && !tc_qdisc_dump_ignore(new)) { + if (new && !tc_qdisc_dump_ignore(new, false)) { if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, old ? 
NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) goto err_out; @@ -1439,7 +1457,8 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, struct netlink_callback *cb, - int *q_idx_p, int s_q_idx, bool recur) + int *q_idx_p, int s_q_idx, bool recur, + bool dump_invisible) { int ret = 0, q_idx = *q_idx_p; struct Qdisc *q; @@ -1452,7 +1471,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, if (q_idx < s_q_idx) { q_idx++; } else { - if (!tc_qdisc_dump_ignore(q) && + if (!tc_qdisc_dump_ignore(q, dump_invisible) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) @@ -1474,7 +1493,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, q_idx++; continue; } - if (!tc_qdisc_dump_ignore(q) && + if (!tc_qdisc_dump_ignore(q, dump_invisible) && tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) @@ -1496,12 +1515,21 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) int idx, q_idx; int s_idx, s_q_idx; struct net_device *dev; + const struct nlmsghdr *nlh = cb->nlh; + struct tcmsg *tcm = nlmsg_data(nlh); + struct nlattr *tca[TCA_MAX + 1]; + int err; s_idx = cb->args[0]; s_q_idx = q_idx = cb->args[1]; idx = 0; ASSERT_RTNL(); + + err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL); + if (err < 0) + return err; + for_each_netdev(net, dev) { struct netdev_queue *dev_queue; @@ -1512,13 +1540,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) q_idx = 0; if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, - true) < 0) + true, tca[TCA_DUMP_INVISIBLE]) < 0) goto done; dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, - &q_idx, s_q_idx, false) < 0) + &q_idx, s_q_idx, false, + tca[TCA_DUMP_INVISIBLE]) < 0) goto done; cont: @@ -1540,7 +1569,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) -static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n) +static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct tcmsg *tcm = nlmsg_data(n); @@ -1559,7 +1589,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n) !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM; - err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); + err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack); if (err < 0) return err; @@ -1762,7 +1792,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, { struct qdisc_dump_args arg; - if (tc_qdisc_dump_ignore(q) || + if (tc_qdisc_dump_ignore(q, false) || *t_p < s_t || !q->ops->cl_ops || (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)) { diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 2209c2ddacbfb0b1f222cb593477891d7124d9e3..40cbceed4de82aa58d70c2c67782fc65088b25c5 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -214,7 +214,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, if (opt == NULL) return -EINVAL; - error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy); + error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy, NULL); if (error < 0) return error; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 
d6ca18dc04c3e9e72efedd44088e95118a06b711..7415859fd4c3f65e6ff801b08d683b796d1eaa03 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1137,7 +1137,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) struct tc_ratespec *r; int err; - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); + err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL); if (err < 0) return err; @@ -1161,6 +1161,8 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) sch->handle); if (!q->link.q) q->link.q = &noop_qdisc; + else + qdisc_hash_add(q->link.q, true); q->link.priority = TC_CBQ_MAXPRIO - 1; q->link.priority2 = TC_CBQ_MAXPRIO - 1; @@ -1472,7 +1474,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); + err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL); if (err < 0) return err; @@ -1600,6 +1602,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (!cl->q) cl->q = &noop_qdisc; + else + qdisc_hash_add(cl->q, true); + cl->common.classid = classid; cl->tparent = parent; cl->qdisc = sch; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 3b86a97bc67c3e953cb181eddcb5c0c16bf3b27f..d00f4c7c2f3af980b82281485e6fb31006765e92 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -58,7 +58,6 @@ struct choke_sched_data { /* Variables */ struct red_vars vars; - struct tcf_proto __rcu *filter_list; struct { u32 prob_drop; /* Early probability drops */ u32 prob_mark; /* Early probability marks */ @@ -152,11 +151,6 @@ static inline void choke_set_classid(struct sk_buff *skb, u16 classid) choke_skb_cb(skb)->classid = classid; } -static u16 choke_get_classid(const struct sk_buff *skb) -{ - return choke_skb_cb(skb)->classid; -} - /* * Compare flow of two packets * Returns true only if source and destination address and port match. @@ -187,40 +181,6 @@ static bool choke_match_flow(struct sk_buff *skb1, sizeof(choke_skb_cb(skb1)->keys)); } -/* - * Classify flow using either: - * 1. pre-existing classification result in skb - * 2. fast internal classification - * 3. 
use TC filter based classification - */ -static bool choke_classify(struct sk_buff *skb, - struct Qdisc *sch, int *qerr) - -{ - struct choke_sched_data *q = qdisc_priv(sch); - struct tcf_result res; - struct tcf_proto *fl; - int result; - - fl = rcu_dereference_bh(q->filter_list); - result = tc_classify(skb, fl, &res, false); - if (result >= 0) { -#ifdef CONFIG_NET_CLS_ACT - switch (result) { - case TC_ACT_STOLEN: - case TC_ACT_QUEUED: - *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - case TC_ACT_SHOT: - return false; - } -#endif - choke_set_classid(skb, TC_H_MIN(res.classid)); - return true; - } - - return false; -} - /* * Select a packet at random from queue * HACK: since queue can have holes from previous deletion; retry several @@ -257,25 +217,15 @@ static bool choke_match_random(const struct choke_sched_data *q, return false; oskb = choke_peek_random(q, pidx); - if (rcu_access_pointer(q->filter_list)) - return choke_get_classid(nskb) == choke_get_classid(oskb); - return choke_match_flow(oskb, nskb); } static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { - int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; struct choke_sched_data *q = qdisc_priv(sch); const struct red_parms *p = &q->parms; - if (rcu_access_pointer(q->filter_list)) { - /* If using external classifiers, get result and record it. */ - if (!choke_classify(skb, sch, &ret)) - goto other_drop; /* Packet was eaten by filter */ - } - choke_skb_cb(skb)->keys_valid = 0; /* Compute average queue usage (see RED) */ q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen); @@ -339,12 +289,6 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, congestion_drop: qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; - -other_drop: - if (ret & __NET_XMIT_BYPASS) - qdisc_qstats_drop(sch); - __qdisc_drop(skb, to_free); - return ret; } static struct sk_buff *choke_dequeue(struct Qdisc *sch) @@ -413,7 +357,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy); + err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy, NULL); if (err < 0) return err; @@ -538,7 +482,6 @@ static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); - tcf_destroy_chain(&q->filter_list); choke_free(q->tab); } diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 5bfa79ee657cf3674d2b6204aee80f63207b84f9..c518a1efcb9d3bd98d1de0d921bd250ed46bc69c 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -140,7 +140,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy); + err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy, NULL); if (err < 0) return err; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index bb4cbdf7500482b170eef6e7923cf2f2259e52b5..58a8c32eab2394f90e5935ad661b6223afee106b 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -76,7 +76,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy); + err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL); if (err < 0) return err; @@ -117,6 +117,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, &pfifo_qdisc_ops, classid); if (cl->qdisc == NULL) cl->qdisc = &noop_qdisc; + else + qdisc_hash_add(cl->qdisc, true); if (tca[TCA_RATE]) { err 
= gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 5334e309f17f0ef4416dddcdc9278c14ecb5585d..1c0f877f673a73977d3191a78a2964e6c1d41259 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -129,7 +129,7 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, if (!opt) goto errout; - err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy); + err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL); if (err < 0) goto errout; @@ -342,7 +342,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) if (!opt) goto errout; - err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy); + err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL); if (err < 0) goto errout; @@ -374,6 +374,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt) p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle); if (p->q == NULL) p->q = &noop_qdisc; + else + qdisc_hash_add(p->q, true); pr_debug("%s: qdisc %p\n", __func__, p->q); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a4f738ac77283b19927aef0a0e6c9fc8dafcdd42..da4f67bda0ee148910cc79a7d64cdf85ce318beb 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -698,7 +698,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy); + err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL); if (err < 0) return err; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 9f3a884d15903fd9012c01b5eee802e02f9f709e..18bbb5476c836128dae1779c7b493cbb5c485e14 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -288,7 +288,6 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) struct fq_codel_flow *flow; struct list_head *head; u32 prev_drop_count, prev_ecn_mark; - unsigned int prev_backlog; begin: head = &q->new_flows; @@ -307,7 +306,6 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) prev_drop_count = q->cstats.drop_count; prev_ecn_mark = q->cstats.ecn_mark; - prev_backlog = sch->qstats.backlog; skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams, &flow->cvars, &q->cstats, qdisc_pkt_len, @@ -385,7 +383,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy); + err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy, + NULL); if (err < 0) return err; if (tb[TCA_FQ_CODEL_FLOWS]) { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 1a2f9e964330a5cd0c0b9a5cac91807221b0ffd9..52a2c55f6d9eb29ec1de3135596fe5a1529ebf23 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -795,7 +795,7 @@ static void attach_default_qdiscs(struct net_device *dev) } #ifdef CONFIG_NET_SCHED if (dev->qdisc != &noop_qdisc) - qdisc_hash_add(dev->qdisc); + qdisc_hash_add(dev->qdisc, false); #endif } diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index c78a093c551ae84a0ba5ba13a7af9cc89e2ca383..17c7130454bd90e8af1d17e95f477ea558fb481d 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -401,7 +401,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); + err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL); if (err < 0) return err; @@ -470,7 +470,7 @@ static int 
gred_init(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); + err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL); if (err < 0) return err; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 3ffaa6fb0990f0aa31487a2f1829b1f1accf8b21..5cb82f6c1b0605d7a4373062dc4232e14347bc22 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -957,7 +957,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy); + err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy, NULL); if (err < 0) return err; @@ -1066,6 +1066,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, &pfifo_qdisc_ops, classid); if (cl->qdisc == NULL) cl->qdisc = &noop_qdisc; + else + qdisc_hash_add(cl->qdisc, true); INIT_LIST_HEAD(&cl->children); cl->vt_tree = RB_ROOT; cl->cf_tree = RB_ROOT; @@ -1425,6 +1427,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) sch->handle); if (q->root.qdisc == NULL) q->root.qdisc = &noop_qdisc; + else + qdisc_hash_add(q->root.qdisc, true); INIT_LIST_HEAD(&q->root.children); q->root.vt_tree = RB_ROOT; q->root.cf_tree = RB_ROOT; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 2fae8b5f1b80c017c4ae60df54c9143f82de4e9d..c19d346e6c5a438c051abf8ce4eb785d075d36c2 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -529,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy); + err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy, NULL); if (err < 0) return err; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 4cd5fb134bc9e2dbcdd61b51fb951f94301ed54c..570ef3b0c09b7abe49da4787b668397d4413f0e8 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1017,7 +1017,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy); + err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL); if (err < 0) return err; @@ -1342,7 +1342,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, if (!opt) goto failure; - err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy); + err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL); if (err < 0) goto failure; @@ -1460,6 +1460,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, qdisc_class_hash_insert(&q->clhash, &cl->common); if (parent) parent->children++; + if (cl->un.leaf.q != &noop_qdisc) + qdisc_hash_add(cl->un.leaf.q, true); } else { if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, NULL, diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 20b7f1646f69270e08d8b7588759a0146f262e89..cadfdd4f1e521b3d68b8fa62d5797f3ff604651d 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -84,7 +84,7 @@ static void mq_attach(struct Qdisc *sch) qdisc_destroy(old); #ifdef CONFIG_NET_SCHED if (ntx < dev->real_num_tx_queues) - qdisc_hash_add(qdisc); + qdisc_hash_add(qdisc, false); #endif } diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 922683418e53853cb71747d8d30ab0e4a989254b..0a4cf27ea54bd78768d4fa084f7b082460f5f266 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -21,14 +21,13 @@ struct mqprio_sched { struct Qdisc **qdiscs; - int hw_owned; + int hw_offload; }; static void mqprio_destroy(struct Qdisc *sch) { struct 
net_device *dev = qdisc_dev(sch); struct mqprio_sched *priv = qdisc_priv(sch); - struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO}; unsigned int ntx; if (priv->qdiscs) { @@ -39,10 +38,15 @@ static void mqprio_destroy(struct Qdisc *sch) kfree(priv->qdiscs); } - if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc) + if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) { + struct tc_mqprio_qopt offload = { 0 }; + struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO, + { .mqprio = &offload } }; + dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); - else + } else { netdev_set_num_tc(dev, 0); + } } static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) @@ -59,15 +63,20 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) return -EINVAL; } - /* net_device does not support requested operation */ - if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) - return -EINVAL; + /* Limit qopt->hw to maximum supported offload value. Drivers have + * the option of overriding this later if they don't support a + * given offload type. + */ + if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX) + qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX; - /* if hw owned qcount and qoffset are taken from LLD so - * no reason to verify them here + /* If hardware offload is requested we will leave it to the device + * to either populate the queue counts itself or to validate the + * provided queue counts. If ndo_setup_tc is not present then + * hardware doesn't support offload and we should return an error. */ if (qopt->hw) - return 0; + return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL; for (i = 0; i < qopt->num_tc; i++) { unsigned int last = qopt->offset[i] + qopt->count[i]; @@ -139,13 +148,15 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) * supplied and verified mapping */ if (qopt->hw) { - struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO, - { .tc = qopt->num_tc }}; + struct tc_mqprio_qopt offload = *qopt; + struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO, + { .mqprio = &offload } }; - priv->hw_owned = 1; err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); if (err) return err; + + priv->hw_offload = offload.hw; } else { netdev_set_num_tc(dev, qopt->num_tc); for (i = 0; i < qopt->num_tc; i++) @@ -175,7 +186,7 @@ static void mqprio_attach(struct Qdisc *sch) if (old) qdisc_destroy(old); if (ntx < dev->real_num_tx_queues) - qdisc_hash_add(qdisc); + qdisc_hash_add(qdisc, false); } kfree(priv->qdiscs); priv->qdiscs = NULL; @@ -243,7 +254,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) opt.num_tc = netdev_get_num_tc(dev); memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); - opt.hw = priv->hw_owned; + opt.hw = priv->hw_offload; for (i = 0; i < netdev_get_num_tc(dev); i++) { opt.count[i] = dev->tc_to_txq[i].count; diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index e7839a0d0eaa52572f675fdb1dfc590c2a70ac76..43a3a10b3c8118fc2e0deff98be2635d2ad81330 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -217,6 +217,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) sch_tree_lock(sch); old = q->queues[i]; q->queues[i] = child; + if (child != &noop_qdisc) + qdisc_hash_add(child, true); if (old != &noop_qdisc) { qdisc_tree_reduce_backlog(old, diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index c8bb62a1e7449344a0fd81241fe0102ea2f9c0f9..f0ce4780f395c50683fbbd070c8fa3cb7eede7ec 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -462,7 +462,7 @@ static int
netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* If a delay is expected, orphan the skb. (orphaning usually takes * place at TX completion time, so _before_ the link transit delay) */ - if (q->latency || q->jitter) + if (q->latency || q->jitter || q->rate) skb_orphan_partial(skb); /* @@ -530,21 +530,31 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, now = psched_get_time(); if (q->rate) { - struct sk_buff *last; + struct netem_skb_cb *last = NULL; + + if (sch->q.tail) + last = netem_skb_cb(sch->q.tail); + if (q->t_root.rb_node) { + struct sk_buff *t_skb; + struct netem_skb_cb *t_last; + + t_skb = netem_rb_to_skb(rb_last(&q->t_root)); + t_last = netem_skb_cb(t_skb); + if (!last || + t_last->time_to_send > last->time_to_send) { + last = t_last; + } + } - if (sch->q.qlen) - last = sch->q.tail; - else - last = netem_rb_to_skb(rb_last(&q->t_root)); if (last) { /* * Last packet in queue is reference point (now), * calculate this time bonus and subtract * from delay. */ - delay -= netem_skb_cb(last)->time_to_send - now; + delay -= last->time_to_send - now; delay = max_t(psched_tdiff_t, 0, delay); - now = netem_skb_cb(last)->time_to_send; + now = last->time_to_send; } delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); @@ -833,7 +843,7 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, if (nested_len >= nla_attr_size(0)) return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), - nested_len, policy); + nested_len, policy, NULL); memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); return 0; diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 5c3a99d6aa82ae0dcfd71ebceaae8de5d943e352..6c2791d6102dae67fe006b17723d6f50edc07885 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -190,7 +190,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt) if (!opt) return -EINVAL; - err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy); + err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy, NULL); if (err < 0) return err; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index d4d7db267b6edfa56582ca4a588590e0ded9fe66..92c2e6d448d7984af35d6beb2cb3aea717b76511 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -192,8 +192,11 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) qdisc_destroy(child); } - for (i = oldbands; i < q->bands; i++) + for (i = oldbands; i < q->bands; i++) { q->queues[i] = queues[i]; + if (q->queues[i] != &noop_qdisc) + qdisc_hash_add(q->queues[i], true); + } sch_tree_unlock(sch); return 0; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index f9e712ce2d15ce9280c31d2f75d62b84034ae51d..041eba3006ccdd84e1ea1419443f1d169550ceb5 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -418,7 +418,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return -EINVAL; } - err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy); + err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy, + NULL); if (err < 0) return err; @@ -494,6 +495,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, goto destroy_class; } + if (cl->qdisc != &noop_qdisc) + qdisc_hash_add(cl->qdisc, true); sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->common); sch_tree_unlock(sch); diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 249b2a18acbd99288eb0a2579a0f29c2ab0b3ded..11292adce4121022a86aff0aed537be3d9319490 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -173,7 +173,7 
@@ static int red_change(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; - err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy); + err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL); if (err < 0) return err; @@ -191,6 +191,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) return PTR_ERR(child); } + if (child != &noop_qdisc) + qdisc_hash_add(child, true); sch_tree_lock(sch); q->flags = ctl->flags; q->limit = ctl->limit; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index fe6963d2151956c508b510edec680b89201173ce..0f777273ba293dcf2e62ae484dd0033bce942a2b 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -495,7 +495,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt) int err; if (opt) { - err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy); + err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy, NULL); if (err < 0) return -EINVAL; @@ -513,6 +513,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt) if (IS_ERR(child)) return PTR_ERR(child); + if (child != &noop_qdisc) + qdisc_hash_add(child, true); sch_tree_lock(sch); qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen, diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 42e8c8615e6563a2deabbb3c3437e3985d01ae14..b00e02c139de8d7c0b66ec6ee0d8b6c677529609 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -714,9 +714,8 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) struct sfq_sched_data *q = qdisc_priv(sch); int i; - q->perturb_timer.function = sfq_perturbation; - q->perturb_timer.data = (unsigned long)sch; - init_timer_deferrable(&q->perturb_timer); + setup_deferrable_timer(&q->perturb_timer, sfq_perturbation, + (unsigned long)sch); for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) { q->dep[i].next = i + SFQ_MAX_FLOWS; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 303355c449ab336227d9b115496e0882f2f2a079..b2e4b6ad241a8e538c654ef4e8417ab756740a45 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -315,7 +315,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) s64 buffer, mtu; u64 rate64 = 0, prate64 = 0; - err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); + err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL); if (err < 0) return err; @@ -396,6 +396,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) q->qdisc->qstats.backlog); qdisc_destroy(q->qdisc); q->qdisc = child; + if (child != &noop_qdisc) + qdisc_hash_add(child, true); } q->limit = qopt->limit; if (tb[TCA_TBF_PBURST]) diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index e3621cb4827fadb5f5cb41ebe8455dfa3300a765..697721a7a3f1761373aa66b847bd744ea1b42d10 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -306,14 +306,24 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk) if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) && time_after(jiffies, chunk->msg->expires_at)) { - if (chunk->sent_count) + struct sctp_stream_out *streamout = + &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream]; + + if (chunk->sent_count) { chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++; - else + streamout->abandoned_sent[SCTP_PR_INDEX(TTL)]++; + } else { chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++; + streamout->abandoned_unsent[SCTP_PR_INDEX(TTL)]++; + } return 1; } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) && chunk->sent_count > chunk->sinfo.sinfo_timetolive) { + struct sctp_stream_out *streamout = + &chunk->asoc->stream->out[chunk->sinfo.sinfo_stream]; + 
chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++; + streamout->abandoned_sent[SCTP_PR_INDEX(RTX)]++; return 1; } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) && chunk->msg->expires_at && diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 8081476ed313cca8ad8f9876ec5ef5dc8fd68207..fe4c3d462f6ebc48d11d2a587f2adb7a39fde2e5 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -353,6 +353,8 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc, struct sctp_chunk *chk, *temp; list_for_each_entry_safe(chk, temp, queue, transmitted_list) { + struct sctp_stream_out *streamout; + if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) continue; @@ -361,8 +363,10 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc, sctp_insert_list(&asoc->outqueue.abandoned, &chk->transmitted_list); + streamout = &asoc->stream->out[chk->sinfo.sinfo_stream]; asoc->sent_cnt_removable--; asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; + streamout->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; if (!chk->tsn_gap_acked) { if (chk->transport) @@ -396,6 +400,12 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, q->out_qlen -= chk->skb->len; asoc->sent_cnt_removable--; asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; + if (chk->sinfo.sinfo_stream < asoc->stream->outcnt) { + struct sctp_stream_out *streamout = + &asoc->stream->out[chk->sinfo.sinfo_stream]; + + streamout->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; + } msg_len -= SCTP_DATA_SNDSIZE(chk) + sizeof(struct sk_buff) + diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 24c6ccce7539097728f3733ae5ecc037562cefcf..4f5e6cfc7f601b4de8db8802668d553f1af8491d 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3872,9 +3872,18 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net, else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST) reply = sctp_process_strreset_inreq( (struct sctp_association *)asoc, param, &ev); - /* More handles for other types will be added here, by now it - * just ignores other types. 
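
A second pattern runs through the sch_* hunks above: child qdiscs that a parent creates implicitly (cbq, dsmark, drr, hfsc, htb, multiq, prio, qfq, red, sfb, tbf) are now registered with qdisc_hash_add(child, true), which sets TCQ_F_INVISIBLE, and tc_qdisc_dump_ignore() hides such qdiscs from dumps unless the request carries TCA_DUMP_INVISIBLE. A sketch of a raw rtnetlink dump request that opts in, assuming uapi headers new enough to define TCA_DUMP_INVISIBLE; error handling and reply parsing are omitted:

	#include <linux/rtnetlink.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct {
			struct nlmsghdr nlh;
			struct tcmsg tcm;
			char buf[64];		/* room for attributes */
		} req;
		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
		struct rtattr *rta;
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
		req.nlh.nlmsg_type = RTM_GETQDISC;
		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		req.tcm.tcm_family = AF_UNSPEC;

		/* Zero-length flag attribute: also dump invisible qdiscs. */
		rta = (struct rtattr *)((char *)&req +
					NLMSG_ALIGN(req.nlh.nlmsg_len));
		rta->rta_type = TCA_DUMP_INVISIBLE;
		rta->rta_len = RTA_LENGTH(0);
		req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
				    RTA_LENGTH(0);

		sendto(fd, &req, req.nlh.nlmsg_len, 0,
		       (struct sockaddr *)&sa, sizeof(sa));
		/* recv() RTM_NEWQDISC multipart replies here. */
		close(fd);
		return 0;
	}
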
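On the SCTP side, sctp_chunk_abandoned() and the outqueue pruning above start charging abandoned chunks to the owning outgoing stream in addition to the per-association counters, and the SCTP_PR_STREAM_STATUS getsockopt added further below exposes them. A hypothetical query helper, assuming headers new enough to carry struct sctp_prstatus and the new option name; the helper itself is illustrative:

	#include <netinet/in.h>
	#include <netinet/sctp.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Query PR-SCTP abandoned counters for one outgoing stream.
	 * SCTP_PR_SCTP_TTL selects the TTL policy bucket; passing
	 * SCTP_PR_SCTP_NONE instead returns the sum over all policies.
	 */
	static int print_stream_prstatus(int fd, sctp_assoc_t assoc_id,
					 uint16_t sid)
	{
		struct sctp_prstatus params;
		socklen_t len = sizeof(params);

		memset(&params, 0, sizeof(params));
		params.sprstat_assoc_id = assoc_id;
		params.sprstat_sid = sid;
		params.sprstat_policy = SCTP_PR_SCTP_TTL;

		if (getsockopt(fd, IPPROTO_SCTP, SCTP_PR_STREAM_STATUS,
			       &params, &len) < 0)
			return -1;

		printf("stream %u: abandoned sent %llu, unsent %llu\n", sid,
		       (unsigned long long)params.sprstat_abandoned_sent,
		       (unsigned long long)params.sprstat_abandoned_unsent);
		return 0;
	}
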
- */ + else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST) + reply = sctp_process_strreset_tsnreq( + (struct sctp_association *)asoc, param, &ev); + else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) + reply = sctp_process_strreset_addstrm_out( + (struct sctp_association *)asoc, param, &ev); + else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) + reply = sctp_process_strreset_addstrm_in( + (struct sctp_association *)asoc, param, &ev); + else if (param.p->type == SCTP_PARAM_RESET_RESPONSE) + reply = sctp_process_strreset_resp( + (struct sctp_association *)asoc, param, &ev); if (ev) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d9d4c92e06b312e6c300afd8f3d5db33161fd9f7..f16c8d97b7f313e9f671d8bb8620b0f9566e619f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3758,6 +3758,39 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk, return retval; } +static int sctp_setsockopt_reconfig_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(¶ms, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (asoc) { + asoc->reconf_enable = !!params.assoc_value; + } else if (!params.assoc_id) { + struct sctp_sock *sp = sctp_sk(sk); + + sp->ep->reconf_enable = !!params.assoc_value; + } else { + goto out; + } + + retval = 0; + +out: + return retval; +} + static int sctp_setsockopt_enable_strreset(struct sock *sk, char __user *optval, unsigned int optlen) @@ -4038,6 +4071,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_DEFAULT_PRINFO: retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); break; + case SCTP_RECONFIG_SUPPORTED: + retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen); + break; case SCTP_ENABLE_STREAM_RESET: retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); break; @@ -6540,6 +6576,102 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, return retval; } +static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_stream_out *streamout; + struct sctp_association *asoc; + struct sctp_prstatus params; + int retval = -EINVAL; + int policy; + + if (len < sizeof(params)) + goto out; + + len = sizeof(params); + if (copy_from_user(¶ms, optval, len)) { + retval = -EFAULT; + goto out; + } + + policy = params.sprstat_policy; + if (policy & ~SCTP_PR_SCTP_MASK) + goto out; + + asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); + if (!asoc || params.sprstat_sid >= asoc->stream->outcnt) + goto out; + + streamout = &asoc->stream->out[params.sprstat_sid]; + if (policy == SCTP_PR_SCTP_NONE) { + params.sprstat_abandoned_unsent = 0; + params.sprstat_abandoned_sent = 0; + for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { + params.sprstat_abandoned_unsent += + streamout->abandoned_unsent[policy]; + params.sprstat_abandoned_sent += + streamout->abandoned_sent[policy]; + } + } else { + params.sprstat_abandoned_unsent = + streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)]; + params.sprstat_abandoned_sent = + streamout->abandoned_sent[__SCTP_PR_INDEX(policy)]; + } + + if (put_user(len, optlen) || copy_to_user(optval, ¶ms, len)) { + retval = -EFAULT; + goto out; + } + + retval = 0; + +out: + return retval; +} + +static int 
sctp_getsockopt_reconfig_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(¶ms, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (asoc) { + params.assoc_value = asoc->reconf_enable; + } else if (!params.assoc_id) { + struct sctp_sock *sp = sctp_sk(sk); + + params.assoc_value = sp->ep->reconf_enable; + } else { + retval = -EINVAL; + goto out; + } + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, ¶ms, len)) + goto out; + + retval = 0; + +out: + return retval; +} + static int sctp_getsockopt_enable_strreset(struct sock *sk, int len, char __user *optval, int __user *optlen) @@ -6748,6 +6880,14 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, optlen); break; + case SCTP_PR_STREAM_STATUS: + retval = sctp_getsockopt_pr_streamstatus(sk, len, optval, + optlen); + break; + case SCTP_RECONFIG_SUPPORTED: + retval = sctp_getsockopt_reconfig_supported(sk, len, optval, + optlen); + break; case SCTP_ENABLE_STREAM_RESET: retval = sctp_getsockopt_enable_strreset(sk, len, optval, optlen); @@ -7440,9 +7580,12 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, if (sk->sk_shutdown & RCV_SHUTDOWN) break; - if (sk_can_busy_loop(sk) && - sk_busy_loop(sk, noblock)) - continue; + if (sk_can_busy_loop(sk)) { + sk_busy_loop(sk, noblock); + + if (!skb_queue_empty(&sk->sk_receive_queue)) + continue; + } /* User doesn't want to wait. */ error = -EAGAIN; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index bbed997e1c5f01d4401adcd4664be4b6d16a93fa..dda53a2939866d5bb7e3233a1ba0a90160ecf488 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -294,18 +294,6 @@ int sctp_send_add_streams(struct sctp_association *asoc, stream->out = streamout; } - if (in) { - struct sctp_stream_in *streamin; - - streamin = krealloc(stream->in, incnt * sizeof(*streamin), - GFP_KERNEL); - if (!streamin) - goto out; - - memset(streamin + stream->incnt, 0, in * sizeof(*streamin)); - stream->in = streamin; - } - chunk = sctp_make_strreset_addstrm(asoc, out, in); if (!chunk) goto out; @@ -330,13 +318,14 @@ int sctp_send_add_streams(struct sctp_association *asoc, } static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param( - struct sctp_association *asoc, __u32 resp_seq) + struct sctp_association *asoc, __u32 resp_seq, + __be16 type) { struct sctp_chunk *chunk = asoc->strreset_chunk; struct sctp_reconf_chunk *hdr; union sctp_params param; - if (ntohl(resp_seq) != asoc->strreset_outseq || !chunk) + if (!chunk) return NULL; hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; @@ -347,13 +336,21 @@ static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param( */ struct sctp_strreset_tsnreq *req = param.v; - if (req->request_seq == resp_seq) + if ((!resp_seq || req->request_seq == resp_seq) && + (!type || type == req->param_hdr.type)) return param.v; } return NULL; } +static void sctp_update_strreset_result(struct sctp_association *asoc, + __u32 result) +{ + asoc->strreset_result[1] = asoc->strreset_result[0]; + asoc->strreset_result[0] = result; +} + struct sctp_chunk *sctp_process_strreset_outreq( struct sctp_association *asoc, union sctp_params param, @@ -370,15 +367,19 @@ struct sctp_chunk *sctp_process_strreset_outreq( if (ntohl(outreq->send_reset_at_tsn) > 
sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) { result = SCTP_STRRESET_IN_PROGRESS; - goto out; + goto err; } - if (request_seq > asoc->strreset_inseq) { + if (TSN_lt(asoc->strreset_inseq, request_seq) || + TSN_lt(request_seq, asoc->strreset_inseq - 2)) { result = SCTP_STRRESET_ERR_BAD_SEQNO; - goto out; - } else if (request_seq == asoc->strreset_inseq) { - asoc->strreset_inseq++; + goto err; + } else if (TSN_lt(request_seq, asoc->strreset_inseq)) { + i = asoc->strreset_inseq - request_seq - 1; + result = asoc->strreset_result[i]; + goto err; } + asoc->strreset_inseq++; /* Check strreset_enable after inseq inc, as sender cannot tell * the peer doesn't enable strreset after receiving response with @@ -388,13 +389,9 @@ struct sctp_chunk *sctp_process_strreset_outreq( goto out; if (asoc->strreset_chunk) { - sctp_paramhdr_t *param_hdr; - struct sctp_transport *t; - - param_hdr = sctp_chunk_lookup_strreset_param( - asoc, outreq->response_seq); - if (!param_hdr || param_hdr->type != - SCTP_PARAM_RESET_IN_REQUEST) { + if (!sctp_chunk_lookup_strreset_param( + asoc, outreq->response_seq, + SCTP_PARAM_RESET_IN_REQUEST)) { /* same process with outstanding isn't 0 */ result = SCTP_STRRESET_ERR_IN_PROGRESS; goto out; @@ -404,6 +401,8 @@ struct sctp_chunk *sctp_process_strreset_outreq( asoc->strreset_outseq++; if (!asoc->strreset_outstanding) { + struct sctp_transport *t; + t = asoc->strreset_chunk->transport; if (del_timer(&t->reconf_timer)) sctp_transport_put(t); @@ -439,6 +438,8 @@ struct sctp_chunk *sctp_process_strreset_outreq( GFP_ATOMIC); out: + sctp_update_strreset_result(asoc, result); +err: return sctp_make_strreset_resp(asoc, result, request_seq); } @@ -455,12 +456,18 @@ struct sctp_chunk *sctp_process_strreset_inreq( __u32 request_seq; request_seq = ntohl(inreq->request_seq); - if (request_seq > asoc->strreset_inseq) { + if (TSN_lt(asoc->strreset_inseq, request_seq) || + TSN_lt(request_seq, asoc->strreset_inseq - 2)) { result = SCTP_STRRESET_ERR_BAD_SEQNO; - goto out; - } else if (request_seq == asoc->strreset_inseq) { - asoc->strreset_inseq++; + goto err; + } else if (TSN_lt(request_seq, asoc->strreset_inseq)) { + i = asoc->strreset_inseq - request_seq - 1; + result = asoc->strreset_result[i]; + if (result == SCTP_STRRESET_PERFORMED) + return NULL; + goto err; } + asoc->strreset_inseq++; if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) goto out; @@ -495,12 +502,407 @@ struct sctp_chunk *sctp_process_strreset_inreq( asoc->strreset_outstanding = 1; sctp_chunk_hold(asoc->strreset_chunk); + result = SCTP_STRRESET_PERFORMED; + *evp = sctp_ulpevent_make_stream_reset_event(asoc, SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); out: + sctp_update_strreset_result(asoc, result); +err: if (!chunk) chunk = sctp_make_strreset_resp(asoc, result, request_seq); return chunk; } + +struct sctp_chunk *sctp_process_strreset_tsnreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen; + struct sctp_strreset_tsnreq *tsnreq = param.v; + struct sctp_stream *stream = asoc->stream; + __u32 result = SCTP_STRRESET_DENIED; + __u32 request_seq; + __u16 i; + + request_seq = ntohl(tsnreq->request_seq); + if (TSN_lt(asoc->strreset_inseq, request_seq) || + TSN_lt(request_seq, asoc->strreset_inseq - 2)) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto err; + } else if (TSN_lt(request_seq, asoc->strreset_inseq)) { + i = asoc->strreset_inseq - request_seq - 1; + result = asoc->strreset_result[i]; + if (result == 
SCTP_STRRESET_PERFORMED) { + next_tsn = asoc->next_tsn; + init_tsn = + sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1; + } + goto err; + } + asoc->strreset_inseq++; + + if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ)) + goto out; + + if (asoc->strreset_outstanding) { + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + /* G3: The same processing as though a SACK chunk with no gap report + * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were + * received MUST be performed. + */ + max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); + sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen); + sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); + + /* G1: Compute an appropriate value for the Receiver's Next TSN -- the + * TSN that the peer should use to send the next DATA chunk. The + * value SHOULD be the smallest TSN not acknowledged by the + * receiver of the request plus 2^31. + */ + init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31); + sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, + init_tsn, GFP_ATOMIC); + + /* G4: The same processing as though a FWD-TSN chunk (as defined in + * [RFC3758]) with all streams affected and a new cumulative TSN + * ACK of the Receiver's Next TSN minus 1 were received MUST be + * performed. + */ + sctp_outq_free(&asoc->outqueue); + + /* G2: Compute an appropriate value for the local endpoint's next TSN, + * i.e., the next TSN assigned by the receiver of the SSN/TSN reset + * chunk. The value SHOULD be the highest TSN sent by the receiver + * of the request plus 1. + */ + next_tsn = asoc->next_tsn; + asoc->ctsn_ack_point = next_tsn - 1; + asoc->adv_peer_ack_point = asoc->ctsn_ack_point; + + /* G5: The next expected and outgoing SSNs MUST be reset to 0 for all + * incoming and outgoing streams. 
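
The sequence-number guard repeated by each request handler above accepts exactly strreset_inseq and the two values before it: anything newer or older fails with SCTP_STRRESET_ERR_BAD_SEQNO, and a retransmitted request replays its cached verdict from strreset_result[] rather than being applied twice. A standalone restatement of that window, with tsn_lt() standing in for the kernel's serial-number TSN_lt() and the function name purely illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	/* Serial-number "less than", equivalent to the kernel's TSN_lt(). */
	static bool tsn_lt(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;
	}

	/* Returns 0 for a fresh request (consumes a sequence number),
	 * 1 for a retransmission whose cached result should be replayed,
	 * -1 for a bad sequence number (SCTP_STRRESET_ERR_BAD_SEQNO).
	 */
	static int strreset_seq_check(uint32_t *inseq, uint32_t request_seq,
				      const uint32_t result_cache[2],
				      uint32_t *cached_result)
	{
		if (tsn_lt(*inseq, request_seq) ||	/* from the future */
		    tsn_lt(request_seq, *inseq - 2))	/* older than cache */
			return -1;

		if (tsn_lt(request_seq, *inseq)) {	/* inseq-1 or inseq-2 */
			*cached_result = result_cache[*inseq - request_seq - 1];
			return 1;
		}

		(*inseq)++;				/* fresh: accept it */
		return 0;
	}
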
+ */ + for (i = 0; i < stream->outcnt; i++) + stream->out[i].ssn = 0; + for (i = 0; i < stream->incnt; i++) + stream->in[i].ssn = 0; + + result = SCTP_STRRESET_PERFORMED; + + *evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn, + next_tsn, GFP_ATOMIC); + +out: + sctp_update_strreset_result(asoc, result); +err: + return sctp_make_strreset_tsnresp(asoc, result, request_seq, + next_tsn, init_tsn); +} + +struct sctp_chunk *sctp_process_strreset_addstrm_out( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_addstrm *addstrm = param.v; + struct sctp_stream *stream = asoc->stream; + __u32 result = SCTP_STRRESET_DENIED; + struct sctp_stream_in *streamin; + __u32 request_seq, incnt; + __u16 in, i; + + request_seq = ntohl(addstrm->request_seq); + if (TSN_lt(asoc->strreset_inseq, request_seq) || + TSN_lt(request_seq, asoc->strreset_inseq - 2)) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto err; + } else if (TSN_lt(request_seq, asoc->strreset_inseq)) { + i = asoc->strreset_inseq - request_seq - 1; + result = asoc->strreset_result[i]; + goto err; + } + asoc->strreset_inseq++; + + if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) + goto out; + + if (asoc->strreset_chunk) { + if (!sctp_chunk_lookup_strreset_param( + asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { + /* same process with outstanding isn't 0 */ + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + asoc->strreset_outstanding--; + asoc->strreset_outseq++; + + if (!asoc->strreset_outstanding) { + struct sctp_transport *t; + + t = asoc->strreset_chunk->transport; + if (del_timer(&t->reconf_timer)) + sctp_transport_put(t); + + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + } + } + + in = ntohs(addstrm->number_of_streams); + incnt = stream->incnt + in; + if (!in || incnt > SCTP_MAX_STREAM) + goto out; + + streamin = krealloc(stream->in, incnt * sizeof(*streamin), + GFP_ATOMIC); + if (!streamin) + goto out; + + memset(streamin + stream->incnt, 0, in * sizeof(*streamin)); + stream->in = streamin; + stream->incnt = incnt; + + result = SCTP_STRRESET_PERFORMED; + + *evp = sctp_ulpevent_make_stream_change_event(asoc, + 0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC); + +out: + sctp_update_strreset_result(asoc, result); +err: + return sctp_make_strreset_resp(asoc, result, request_seq); +} + +struct sctp_chunk *sctp_process_strreset_addstrm_in( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_addstrm *addstrm = param.v; + struct sctp_stream *stream = asoc->stream; + __u32 result = SCTP_STRRESET_DENIED; + struct sctp_stream_out *streamout; + struct sctp_chunk *chunk = NULL; + __u32 request_seq, outcnt; + __u16 out, i; + + request_seq = ntohl(addstrm->request_seq); + if (TSN_lt(asoc->strreset_inseq, request_seq) || + TSN_lt(request_seq, asoc->strreset_inseq - 2)) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto err; + } else if (TSN_lt(request_seq, asoc->strreset_inseq)) { + i = asoc->strreset_inseq - request_seq - 1; + result = asoc->strreset_result[i]; + if (result == SCTP_STRRESET_PERFORMED) + return NULL; + goto err; + } + asoc->strreset_inseq++; + + if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) + goto out; + + if (asoc->strreset_outstanding) { + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + out = ntohs(addstrm->number_of_streams); + outcnt = stream->outcnt + out; + if (!out || outcnt > SCTP_MAX_STREAM) + goto out; + + streamout = 
krealloc(stream->out, outcnt * sizeof(*streamout), + GFP_ATOMIC); + if (!streamout) + goto out; + + memset(streamout + stream->outcnt, 0, out * sizeof(*streamout)); + stream->out = streamout; + + chunk = sctp_make_strreset_addstrm(asoc, out, 0); + if (!chunk) + goto out; + + asoc->strreset_chunk = chunk; + asoc->strreset_outstanding = 1; + sctp_chunk_hold(asoc->strreset_chunk); + + stream->outcnt = outcnt; + + result = SCTP_STRRESET_PERFORMED; + + *evp = sctp_ulpevent_make_stream_change_event(asoc, + 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC); + +out: + sctp_update_strreset_result(asoc, result); +err: + if (!chunk) + chunk = sctp_make_strreset_resp(asoc, result, request_seq); + + return chunk; +} + +struct sctp_chunk *sctp_process_strreset_resp( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_resp *resp = param.v; + struct sctp_stream *stream = asoc->stream; + struct sctp_transport *t; + __u16 i, nums, flags = 0; + sctp_paramhdr_t *req; + __u32 result; + + req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0); + if (!req) + return NULL; + + result = ntohl(resp->result); + if (result != SCTP_STRRESET_PERFORMED) { + /* if in progress, do nothing but retransmit */ + if (result == SCTP_STRRESET_IN_PROGRESS) + return NULL; + else if (result == SCTP_STRRESET_DENIED) + flags = SCTP_STREAM_RESET_DENIED; + else + flags = SCTP_STREAM_RESET_FAILED; + } + + if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) { + struct sctp_strreset_outreq *outreq; + __u16 *str_p; + + outreq = (struct sctp_strreset_outreq *)req; + str_p = outreq->list_of_streams; + nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2; + + if (result == SCTP_STRRESET_PERFORMED) { + if (nums) { + for (i = 0; i < nums; i++) + stream->out[ntohs(str_p[i])].ssn = 0; + } else { + for (i = 0; i < stream->outcnt; i++) + stream->out[i].ssn = 0; + } + + flags = SCTP_STREAM_RESET_OUTGOING_SSN; + } + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, + nums, str_p, GFP_ATOMIC); + } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) { + struct sctp_strreset_inreq *inreq; + __u16 *str_p; + + /* if the result is performed, it's impossible for inreq */ + if (result == SCTP_STRRESET_PERFORMED) + return NULL; + + inreq = (struct sctp_strreset_inreq *)req; + str_p = inreq->list_of_streams; + nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2; + + *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, + nums, str_p, GFP_ATOMIC); + } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { + struct sctp_strreset_resptsn *resptsn; + __u32 stsn, rtsn; + + /* check for resptsn, as sctp_verify_reconf didn't do it*/ + if (ntohs(param.p->length) != sizeof(*resptsn)) + return NULL; + + resptsn = (struct sctp_strreset_resptsn *)resp; + stsn = ntohl(resptsn->senders_next_tsn); + rtsn = ntohl(resptsn->receivers_next_tsn); + + if (result == SCTP_STRRESET_PERFORMED) { + __u32 mtsn = sctp_tsnmap_get_max_tsn_seen( + &asoc->peer.tsn_map); + + sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn); + sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); + + sctp_tsnmap_init(&asoc->peer.tsn_map, + SCTP_TSN_MAP_INITIAL, + stsn, GFP_ATOMIC); + + sctp_outq_free(&asoc->outqueue); + + asoc->next_tsn = rtsn; + asoc->ctsn_ack_point = asoc->next_tsn - 1; + asoc->adv_peer_ack_point = asoc->ctsn_ack_point; + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].ssn = 0; + for (i = 0; i < stream->incnt; 
i++) + stream->in[i].ssn = 0; + } + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags, + stsn, rtsn, GFP_ATOMIC); + } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) { + struct sctp_strreset_addstrm *addstrm; + __u16 number; + + addstrm = (struct sctp_strreset_addstrm *)req; + nums = ntohs(addstrm->number_of_streams); + number = stream->outcnt - nums; + + if (result == SCTP_STRRESET_PERFORMED) + for (i = number; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + else + stream->outcnt = number; + + *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, + 0, nums, GFP_ATOMIC); + } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) { + struct sctp_strreset_addstrm *addstrm; + + /* if the result is performed, it's impossible for addstrm in + * request. + */ + if (result == SCTP_STRRESET_PERFORMED) + return NULL; + + addstrm = (struct sctp_strreset_addstrm *)req; + nums = ntohs(addstrm->number_of_streams); + + *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, + nums, 0, GFP_ATOMIC); + } + + asoc->strreset_outstanding--; + asoc->strreset_outseq++; + + /* remove everything for this reconf request */ + if (!asoc->strreset_outstanding) { + t = asoc->strreset_chunk->transport; + if (del_timer(&t->reconf_timer)) + sctp_transport_put(t); + + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + } + + return NULL; +} diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index daf8554fd42a5e537bb58572823b2028f74be930..0e732f68c2bfc3b791dade5a85c36628904ee490 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -274,6 +274,13 @@ static struct ctl_table sctp_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "reconf_enable", + .data = &init_net.sctp.reconf_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "auth_enable", .data = &init_net.sctp.auth_enable, diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index c8881bc542a066e6f7f234beea3c7208394242c5..ec2b3e013c2f4ba48eabfc8d8ed20921cabc08a1 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -883,6 +883,62 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( return event; } +struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event( + const struct sctp_association *asoc, __u16 flags, __u32 local_tsn, + __u32 remote_tsn, gfp_t gfp) +{ + struct sctp_assoc_reset_event *areset; + struct sctp_ulpevent *event; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_assoc_reset_event), + MSG_NOTIFICATION, gfp); + if (!event) + return NULL; + + skb = sctp_event2skb(event); + areset = (struct sctp_assoc_reset_event *) + skb_put(skb, sizeof(struct sctp_assoc_reset_event)); + + areset->assocreset_type = SCTP_ASSOC_RESET_EVENT; + areset->assocreset_flags = flags; + areset->assocreset_length = sizeof(struct sctp_assoc_reset_event); + sctp_ulpevent_set_owner(event, asoc); + areset->assocreset_assoc_id = sctp_assoc2id(asoc); + areset->assocreset_local_tsn = local_tsn; + areset->assocreset_remote_tsn = remote_tsn; + + return event; +} + +struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event( + const struct sctp_association *asoc, __u16 flags, + __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp) +{ + struct sctp_stream_change_event *schange; + struct sctp_ulpevent *event; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_stream_change_event), + 
MSG_NOTIFICATION, gfp); + if (!event) + return NULL; + + skb = sctp_event2skb(event); + schange = (struct sctp_stream_change_event *) + skb_put(skb, sizeof(struct sctp_stream_change_event)); + + schange->strchange_type = SCTP_STREAM_CHANGE_EVENT; + schange->strchange_flags = flags; + schange->strchange_length = sizeof(struct sctp_stream_change_event); + sctp_ulpevent_set_owner(event, asoc); + schange->strchange_assoc_id = sctp_assoc2id(asoc); + schange->strchange_instrms = strchange_instrms; + schange->strchange_outstrms = strchange_outstrms; + + return event; +} + /* Return the notification type, assuming this is a notification * event. */ diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 093803786eacf3388dc470a04c02293f60e4102e..5b6ee21368a68b4c02a5b8987a09a6d6ee613a0e 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -147,7 +147,6 @@ static int smc_release(struct socket *sock) schedule_delayed_work(&smc->sock_put_work, SMC_CLOSE_SOCK_PUT_DELAY); } - sk->sk_prot->unhash(sk); release_sock(sk); sock_put(sk); @@ -451,6 +450,9 @@ static int smc_connect_rdma(struct smc_sock *smc) goto decline_rdma_unlock; } + smc_close_init(smc); + smc_rx_init(smc); + if (local_contact == SMC_FIRST_CONTACT) { rc = smc_ib_ready_link(link); if (rc) { @@ -477,7 +479,6 @@ static int smc_connect_rdma(struct smc_sock *smc) mutex_unlock(&smc_create_lgr_pending); smc_tx_init(smc); - smc_rx_init(smc); out_connected: smc_copy_sock_settings_to_clc(smc); @@ -637,7 +638,8 @@ struct sock *smc_accept_dequeue(struct sock *parent, smc_accept_unlink(new_sk); if (new_sk->sk_state == SMC_CLOSED) { - /* tbd in follow-on patch: close this sock */ + new_sk->sk_prot->unhash(new_sk); + sock_put(new_sk); continue; } if (new_sock) @@ -657,8 +659,13 @@ void smc_close_non_accepted(struct sock *sk) if (!sk->sk_lingertime) /* wait for peer closing */ sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT; - if (!smc->use_fallback) + if (smc->use_fallback) { + sk->sk_state = SMC_CLOSED; + } else { smc_close_active(smc); + sock_set_flag(sk, SOCK_DEAD); + sk->sk_shutdown |= SHUTDOWN_MASK; + } if (smc->clcsock) { struct socket *tcp; @@ -666,11 +673,9 @@ void smc_close_non_accepted(struct sock *sk) smc->clcsock = NULL; sock_release(tcp); } - sock_set_flag(sk, SOCK_DEAD); - sk->sk_shutdown |= SHUTDOWN_MASK; if (smc->use_fallback) { schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN); - } else { + } else if (sk->sk_state == SMC_CLOSED) { smc_conn_free(&smc->conn); schedule_delayed_work(&smc->sock_put_work, SMC_CLOSE_SOCK_PUT_DELAY); @@ -800,6 +805,9 @@ static void smc_listen_work(struct work_struct *work) goto decline_rdma; } + smc_close_init(new_smc); + smc_rx_init(new_smc); + rc = smc_clc_send_accept(new_smc, local_contact); if (rc) goto out_err; @@ -839,7 +847,6 @@ static void smc_listen_work(struct work_struct *work) } smc_tx_init(new_smc); - smc_rx_init(new_smc); out_connected: sk_refcnt_debug_inc(newsmcsk); diff --git a/net/smc/smc.h b/net/smc/smc.h index ee5fbea24549d2df9f55bb0ad177d548c637bb0f..6e44313e4467d01fbdc77f07ee7d14c50ab92ce2 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -164,6 +164,7 @@ struct smc_connection { #ifndef KERNEL_HAS_ATOMIC64 spinlock_t acurs_lock; /* protect cursors */ #endif + struct work_struct close_work; /* peer sent some closing */ }; struct smc_sock { /* smc sock container */ diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 5a339493872efd123826c31a2771485b3322f67a..a7294edbc22177d2c5b5a21deffaacea1e0855e2 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -217,8 +217,13 @@ 
static void smc_cdc_msg_recv_action(struct smc_sock *smc, smc->sk.sk_err = ECONNRESET; conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; } - if (smc_cdc_rxed_any_close_or_senddone(conn)) - smc_close_passive_received(smc); + if (smc_cdc_rxed_any_close_or_senddone(conn)) { + smc->sk.sk_shutdown |= RCV_SHUTDOWN; + if (smc->clcsock && smc->clcsock->sk) + smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN; + sock_set_flag(&smc->sk, SOCK_DONE); + schedule_work(&conn->close_work); + } /* piggy backed tx info */ /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */ @@ -228,8 +233,6 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc, smc_close_wake_tx_prepared(smc); } - /* subsequent patch: trigger socket release if connection closed */ - /* socket connected but not accepted */ if (!smc->sk.sk_socket) return; diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 67a71d170bedb4be2658cfa5f7a654098da4962f..3c2e166b5d222f4c932179ae442cbd88969efc92 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -117,7 +117,6 @@ void smc_close_active_abort(struct smc_sock *smc) struct smc_cdc_conn_state_flags *txflags = &smc->conn.local_tx_ctrl.conn_state_flags; - bh_lock_sock(&smc->sk); smc->sk.sk_err = ECONNABORTED; if (smc->clcsock && smc->clcsock->sk) { smc->clcsock->sk->sk_err = ECONNABORTED; @@ -125,6 +124,7 @@ void smc_close_active_abort(struct smc_sock *smc) } switch (smc->sk.sk_state) { case SMC_INIT: + case SMC_ACTIVE: smc->sk.sk_state = SMC_PEERABORTWAIT; break; case SMC_APPCLOSEWAIT1: @@ -161,10 +161,15 @@ void smc_close_active_abort(struct smc_sock *smc) } sock_set_flag(&smc->sk, SOCK_DEAD); - bh_unlock_sock(&smc->sk); smc->sk.sk_state_change(&smc->sk); } +static inline bool smc_close_sent_any_close(struct smc_connection *conn) +{ + return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort || + conn->local_tx_ctrl.conn_state_flags.peer_conn_closed; +} + int smc_close_active(struct smc_sock *smc) { struct smc_cdc_conn_state_flags *txflags = @@ -185,8 +190,7 @@ int smc_close_active(struct smc_sock *smc) case SMC_INIT: sk->sk_state = SMC_CLOSED; if (smc->smc_listen_work.func) - flush_work(&smc->smc_listen_work); - sock_put(sk); + cancel_work_sync(&smc->smc_listen_work); break; case SMC_LISTEN: sk->sk_state = SMC_CLOSED; @@ -198,7 +202,7 @@ int smc_close_active(struct smc_sock *smc) } release_sock(sk); smc_close_cleanup_listen(sk); - flush_work(&smc->tcp_listen_work); + cancel_work_sync(&smc->smc_listen_work); lock_sock(sk); break; case SMC_ACTIVE: @@ -218,7 +222,7 @@ int smc_close_active(struct smc_sock *smc) case SMC_APPFINCLOSEWAIT: /* socket already shutdown wr or both (active close) */ if (txflags->peer_done_writing && - !txflags->peer_conn_closed) { + !smc_close_sent_any_close(conn)) { /* just shutdown wr done, send close request */ rc = smc_close_final(conn); } @@ -248,6 +252,13 @@ int smc_close_active(struct smc_sock *smc) break; case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: + if (txflags->peer_done_writing && + !smc_close_sent_any_close(conn)) { + /* just shutdown wr done, send close request */ + rc = smc_close_final(conn); + } + /* peer sending PeerConnectionClosed will cause transition */ + break; case SMC_PEERFINCLOSEWAIT: /* peer sending PeerConnectionClosed will cause transition */ break; @@ -285,7 +296,7 @@ static void smc_close_passive_abort_received(struct smc_sock *smc) case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: if (txflags->peer_done_writing && - !txflags->peer_conn_closed) { + !smc_close_sent_any_close(&smc->conn)) { /* just shutdown, 
but not yet closed locally */ smc_close_abort(&smc->conn); sk->sk_state = SMC_PROCESSABORT; @@ -306,22 +317,27 @@ static void smc_close_passive_abort_received(struct smc_sock *smc) /* Some kind of closing has been received: peer_conn_closed, peer_conn_abort, * or peer_done_writing. - * Called under tasklet context. */ -void smc_close_passive_received(struct smc_sock *smc) +static void smc_close_passive_work(struct work_struct *work) { - struct smc_cdc_conn_state_flags *rxflags = - &smc->conn.local_rx_ctrl.conn_state_flags; + struct smc_connection *conn = container_of(work, + struct smc_connection, + close_work); + struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + struct smc_cdc_conn_state_flags *rxflags; struct sock *sk = &smc->sk; int old_state; - sk->sk_shutdown |= RCV_SHUTDOWN; - if (smc->clcsock && smc->clcsock->sk) - smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN; - sock_set_flag(&smc->sk, SOCK_DONE); - + lock_sock(&smc->sk); old_state = sk->sk_state; + if (!conn->alert_token_local) { + /* abnormal termination */ + smc_close_active_abort(smc); + goto wakeup; + } + + rxflags = &smc->conn.local_rx_ctrl.conn_state_flags; if (rxflags->peer_conn_abort) { smc_close_passive_abort_received(smc); goto wakeup; @@ -331,7 +347,7 @@ void smc_close_passive_received(struct smc_sock *smc) case SMC_INIT: if (atomic_read(&smc->conn.bytes_to_rcv) || (rxflags->peer_done_writing && - !rxflags->peer_conn_closed)) + !smc_cdc_rxed_any_close(conn))) sk->sk_state = SMC_APPCLOSEWAIT1; else sk->sk_state = SMC_CLOSED; @@ -348,7 +364,7 @@ void smc_close_passive_received(struct smc_sock *smc) if (!smc_cdc_rxed_any_close(&smc->conn)) break; if (sock_flag(sk, SOCK_DEAD) && - (sk->sk_shutdown == SHUTDOWN_MASK)) { + smc_close_sent_any_close(conn)) { /* smc_release has already been called locally */ sk->sk_state = SMC_CLOSED; } else { @@ -367,17 +383,19 @@ void smc_close_passive_received(struct smc_sock *smc) } wakeup: - if (old_state != sk->sk_state) - sk->sk_state_change(sk); sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */ sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */ - if ((sk->sk_state == SMC_CLOSED) && - (sock_flag(sk, SOCK_DEAD) || (old_state == SMC_INIT))) { - smc_conn_free(&smc->conn); - schedule_delayed_work(&smc->sock_put_work, - SMC_CLOSE_SOCK_PUT_DELAY); + if (old_state != sk->sk_state) { + sk->sk_state_change(sk); + if ((sk->sk_state == SMC_CLOSED) && + (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { + smc_conn_free(&smc->conn); + schedule_delayed_work(&smc->sock_put_work, + SMC_CLOSE_SOCK_PUT_DELAY); + } } + release_sock(&smc->sk); } void smc_close_sock_put_work(struct work_struct *work) @@ -442,3 +460,9 @@ int smc_close_shutdown_write(struct smc_sock *smc) sk->sk_state_change(&smc->sk); return rc; } + +/* Initialize close properties on connection establishment. 
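+ * (Editor's note: conn->close_work is the piece wired up here. The CDC
+ * tasklet and smc_lgr_terminate() now schedule_work() it instead of
+ * handling the close inline, so smc_close_passive_work() above can
+ * safely take lock_sock().)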
*/ +void smc_close_init(struct smc_sock *smc) +{ + INIT_WORK(&smc->conn.close_work, smc_close_passive_work); +} diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h index bc9a2df3633cee576b15c0a9d715d1bdf6753957..4a3d99a8d7cbf18598ef74dae61e2be2a0a17b48 100644 --- a/net/smc/smc_close.h +++ b/net/smc/smc_close.h @@ -21,8 +21,8 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc); void smc_close_active_abort(struct smc_sock *smc); int smc_close_active(struct smc_sock *smc); -void smc_close_passive_received(struct smc_sock *smc); void smc_close_sock_put_work(struct work_struct *work); int smc_close_shutdown_write(struct smc_sock *smc); +void smc_close_init(struct smc_sock *smc); #endif /* SMC_CLOSE_H */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 0eac633fb3549f6e715f25145b16e22b2bc49241..65020e93ff210bb7f5db079399984c55e54c80f1 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -316,7 +316,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr) smc = container_of(conn, struct smc_sock, conn); sock_hold(&smc->sk); __smc_lgr_unregister_conn(conn); - smc_close_active_abort(smc); + schedule_work(&conn->close_work); sock_put(&smc->sk); node = rb_first(&lgr->conns_all); } diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index e6743c008ac548259447daa0e0c207aa7f249c63..16b7c801f8b662630f11e32f79c54f7e2774be11 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -179,8 +179,6 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler, u8 port_idx; smcibdev = container_of(handler, struct smc_ib_device, event_handler); - if (!smc_pnet_find_ib(smcibdev->ibdev->name)) - return; switch (ibevent->event) { case IB_EVENT_PORT_ERR: @@ -259,7 +257,6 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, .max_recv_sge = 1, - .max_inline_data = SMC_WR_TX_SIZE, }, .sq_sig_type = IB_SIGNAL_REQ_WR, .qp_type = IB_QPT_RC, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index a95f74bb556915f92d0fa1bdbd2e34eb9814c3b1..7e1f0e24d17790f526aa50d07ff5e5d6596b6f3c 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -11,6 +11,7 @@ #ifndef _SMC_IB_H #define _SMC_IB_H +#include #include #include diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 9d3e7fb8348d3efae50515446591642815697b45..78f7af28ae4f25d71469d54d44f98ef4e2df94eb 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -219,7 +219,7 @@ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) } /* Find an infiniband device by a given name. The device might not exist. 
*/ -struct smc_ib_device *smc_pnet_find_ib(char *ib_name) +static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) { struct smc_ib_device *ibdev; @@ -523,8 +523,11 @@ void smc_pnet_find_roce_resource(struct sock *sk, read_lock(&smc_pnettable.lock); list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) { if (dst->dev == pnetelem->ndev) { - *smcibdev = pnetelem->smcibdev; - *ibport = pnetelem->ib_port; + if (smc_ib_port_active(pnetelem->smcibdev, + pnetelem->ib_port)) { + *smcibdev = pnetelem->smcibdev; + *ibport = pnetelem->ib_port; + } break; } } diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 32ab3df928ca183643fab96ffd8c09667a54fbde..c4f1bccd43589c0d2d2591925dfa3053b7fa4aef 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -16,7 +16,6 @@ struct smc_ib_device; int smc_pnet_init(void) __init; void smc_pnet_exit(void); int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev); -struct smc_ib_device *smc_pnet_find_ib(char *ib_name); void smc_pnet_find_roce_resource(struct sock *sk, struct smc_ib_device **smcibdev, u8 *ibport); diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index c4ef9a4ec56971e685d419a4a89f51e8181709c1..f0c8b089f770a229b7c805fe5b2d59da40a5c1e3 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -36,11 +36,10 @@ static void smc_rx_data_ready(struct sock *sk) if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); if ((sk->sk_shutdown == SHUTDOWN_MASK) || (sk->sk_state == SMC_CLOSED)) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); - else - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 69a0013dd25cecbee0658168da577d15ed8913d8..21ec1832ab517d647ac8942ddbd5484492eb4c94 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -431,9 +431,13 @@ static void smc_tx_work(struct work_struct *work) struct smc_connection, tx_work); struct smc_sock *smc = container_of(conn, struct smc_sock, conn); + int rc; lock_sock(&smc->sk); - smc_tx_sndbuf_nonempty(conn); + rc = smc_tx_sndbuf_nonempty(conn); + if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked && + !atomic_read(&conn->bytes_to_rcv)) + conn->local_rx_ctrl.prod_flags.write_blocked = 0; release_sock(&smc->sk); } diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index eadf157418dcd42c036685e5858bea6e5c559506..874ee9f9d79674f9576c28f9d1dd669e03e62422 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -447,7 +447,7 @@ static void smc_wr_init_sge(struct smc_link *lnk) lnk->wr_tx_ibs[i].num_sge = 1; lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; lnk->wr_tx_ibs[i].send_flags = - IB_SEND_SIGNALED | IB_SEND_SOLICITED | IB_SEND_INLINE; + IB_SEND_SIGNALED | IB_SEND_SOLICITED; } for (i = 0; i < lnk->wr_rx_cnt; i++) { lnk->wr_rx_sges[i].addr = diff --git a/net/socket.c b/net/socket.c index 985ef06792d6e54c69d296f3e15baf89be972f9c..c2564eb25c6b8faf5c99504ef1d3c90f9bb57c73 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3356,3 +3356,49 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) return sock->ops->shutdown(sock, how); } EXPORT_SYMBOL(kernel_sock_shutdown); + +/* This routine returns the IP overhead imposed by a socket i.e. + * the length of the underlying IP header, depending on whether + * this is an IPv4 or IPv6 socket and the length from IP options turned + * on at the socket. Assumes that the caller has a lock on the socket. 
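+ *
+ * (Editor's sketch of a hypothetical caller; link_mtu and payload_mtu
+ * are assumed local variables, not part of this patch:
+ *
+ *	lock_sock(sk);
+ *	payload_mtu = link_mtu - kernel_sock_ip_overhead(sk);
+ *	release_sock(sk);
+ *
+ * i.e. the returned value is meant to be subtracted from the device or
+ * path MTU when sizing what can be carried above the IP layer.)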
+ */ +u32 kernel_sock_ip_overhead(struct sock *sk) +{ + struct inet_sock *inet; + struct ip_options_rcu *opt; + u32 overhead = 0; + bool owned_by_user; +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6_pinfo *np; + struct ipv6_txoptions *optv6 = NULL; +#endif /* IS_ENABLED(CONFIG_IPV6) */ + + if (!sk) + return overhead; + + owned_by_user = sock_owned_by_user(sk); + switch (sk->sk_family) { + case AF_INET: + inet = inet_sk(sk); + overhead += sizeof(struct iphdr); + opt = rcu_dereference_protected(inet->inet_opt, + owned_by_user); + if (opt) + overhead += opt->opt.optlen; + return overhead; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + np = inet6_sk(sk); + overhead += sizeof(struct ipv6hdr); + if (np) + optv6 = rcu_dereference_protected(np->opt, + owned_by_user); + if (optv6) + overhead += (optv6->opt_flen + optv6->opt_nflen); + return overhead; +#endif /* IS_ENABLED(CONFIG_IPV6) */ + default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */ + return overhead; + } +} +EXPORT_SYMBOL(kernel_sock_ip_overhead); diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 017801f9dbaae28ff3bd411af3fce3a84da6c90d..8d40a7d31c9908c22b757fc0ab92bce436211c90 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -826,7 +826,7 @@ static int switchdev_port_br_setlink_protinfo(struct net_device *dev, int err; err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX, - switchdev_port_bridge_policy); + switchdev_port_bridge_policy, NULL); if (err) return err; diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 33a5bdfbef76c00578921198bfbbe9c735f643d2..d174ee3254eecb523dbc86061d80a1e812dcc305 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -802,7 +802,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, info->attrs[TIPC_NLA_BEARER], - tipc_nl_bearer_policy); + tipc_nl_bearer_policy, info->extack); if (err) return err; @@ -851,7 +851,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, info->attrs[TIPC_NLA_BEARER], - tipc_nl_bearer_policy); + tipc_nl_bearer_policy, info->extack); if (err) return err; @@ -891,7 +891,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, info->attrs[TIPC_NLA_BEARER], - tipc_nl_bearer_policy); + tipc_nl_bearer_policy, info->extack); if (err) return err; @@ -939,7 +939,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, info->attrs[TIPC_NLA_BEARER], - tipc_nl_bearer_policy); + tipc_nl_bearer_policy, info->extack); if (err) return err; @@ -982,7 +982,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, info->attrs[TIPC_NLA_BEARER], - tipc_nl_bearer_policy); + tipc_nl_bearer_policy, info->extack); if (err) return err; @@ -1104,7 +1104,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX, info->attrs[TIPC_NLA_MEDIA], - tipc_nl_media_policy); + tipc_nl_media_policy, info->extack); if (err) return err; @@ -1152,7 +1152,7 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX, info->attrs[TIPC_NLA_MEDIA], - tipc_nl_media_policy); + tipc_nl_media_policy, info->extack); if (!attrs[TIPC_NLA_MEDIA_NAME]) return -EINVAL; diff --git a/net/tipc/link.c 
b/net/tipc/link.c index ddd2dd6f77aae1a5cd77c6bd86fac293db66e145..60820dc35a08ade9805080de4c15ff9819e150f5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1827,7 +1827,7 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) int err; err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, - tipc_nl_prop_policy); + tipc_nl_prop_policy, NULL); if (err) return err; diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 9be6592e4a6fa20c78995396ffa3dfcd1f19537a..bd0aac87b41ac627e5c897256a79150226926514 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -416,6 +416,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns); + tipc_subscrp_get(s); list_add(&s->nameseq_list, &nseq->subscriptions); if (!sseq) @@ -787,6 +788,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s) if (seq != NULL) { spin_lock_bh(&seq->lock); list_del_init(&s->nameseq_list); + tipc_subscrp_put(s); if (!seq->first_free && list_empty(&seq->subscriptions)) { hlist_del_init_rcu(&seq->ns_list); kfree(seq->sseqs); diff --git a/net/tipc/net.c b/net/tipc/net.c index ab8a2d5d1e3245d31f97375e5a27deda9a15ad58..719c5924b6383e6bf548baf57fdb713283012d87 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -211,8 +211,8 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) return -EINVAL; err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX, - info->attrs[TIPC_NLA_NET], - tipc_nl_net_policy); + info->attrs[TIPC_NLA_NET], tipc_nl_net_policy, + info->extack); if (err) return err; diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 26ca8dd64ded64407db8ef885f8dbb6edd30e4b8..b76f13f6fea10a53d00ed14a38cdf5cdf7afa44c 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -268,7 +268,8 @@ int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) if (!*attr) return -EOPNOTSUPP; - return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy); + return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy, + NULL); } int __init tipc_netlink_start(void) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index e1ae8a8a2b8eacf93224cc332fff4a537d8d1ab0..9bfe886ab33080f653ec1c39e0255b15709d0b76 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -296,7 +296,7 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, err = nla_parse(attrbuf, tipc_genl_family.maxattr, (const struct nlattr *)trans_buf->data, - trans_buf->len, NULL); + trans_buf->len, NULL, NULL); if (err) goto parse_out; @@ -352,7 +352,7 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, - attrs[TIPC_NLA_BEARER], NULL); + attrs[TIPC_NLA_BEARER], NULL, NULL); if (err) return err; @@ -472,7 +472,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], - NULL); + NULL, NULL); if (err) return err; @@ -480,7 +480,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX, - link[TIPC_NLA_LINK_PROP], NULL); + link[TIPC_NLA_LINK_PROP], NULL, NULL); if (err) return err; @@ -488,7 +488,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX, - link[TIPC_NLA_LINK_STATS], NULL); + link[TIPC_NLA_LINK_STATS], NULL, NULL); if 
(err) return err; @@ -598,7 +598,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], - NULL); + NULL, NULL); if (err) return err; @@ -795,7 +795,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, - attrs[TIPC_NLA_NAME_TABLE], NULL); + attrs[TIPC_NLA_NAME_TABLE], NULL, NULL); if (err) return err; @@ -803,7 +803,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, - nt[TIPC_NLA_NAME_TABLE_PUBL], NULL); + nt[TIPC_NLA_NAME_TABLE_PUBL], NULL, NULL); if (err) return err; @@ -863,7 +863,7 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], - NULL); + NULL, NULL); if (err) return err; @@ -929,7 +929,7 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], - NULL); + NULL, NULL); if (err) return err; @@ -940,8 +940,8 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, u32 node; struct nlattr *con[TIPC_NLA_CON_MAX + 1]; - nla_parse_nested(con, TIPC_NLA_CON_MAX, sock[TIPC_NLA_SOCK_CON], - NULL); + nla_parse_nested(con, TIPC_NLA_CON_MAX, + sock[TIPC_NLA_SOCK_CON], NULL, NULL); node = nla_get_u32(con[TIPC_NLA_CON_NODE]); tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>", @@ -977,8 +977,8 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, if (!attrs[TIPC_NLA_MEDIA]) return -EINVAL; - err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], - NULL); + err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, + attrs[TIPC_NLA_MEDIA], NULL, NULL); if (err) return err; @@ -998,7 +998,7 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], - NULL); + NULL, NULL); if (err) return err; @@ -1045,7 +1045,7 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, return -EINVAL; err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], - NULL); + NULL, NULL); if (err) return err; diff --git a/net/tipc/node.c b/net/tipc/node.c index 4512e83652b16da259db9327fe0958c3642e70f1..aeef8011ac7d82d828289f4085efe3acaa8a3945 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1607,8 +1607,8 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) return -EINVAL; err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX, - info->attrs[TIPC_NLA_NET], - tipc_nl_net_policy); + info->attrs[TIPC_NLA_NET], tipc_nl_net_policy, + info->extack); if (err) return err; @@ -1774,7 +1774,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, info->attrs[TIPC_NLA_LINK], - tipc_nl_link_policy); + tipc_nl_link_policy, info->extack); if (err) return err; @@ -1902,7 +1902,7 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, info->attrs[TIPC_NLA_LINK], - tipc_nl_link_policy); + tipc_nl_link_policy, info->extack); if (err) return err; @@ -2042,7 +2042,7 @@ int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX, info->attrs[TIPC_NLA_MON], - tipc_nl_monitor_policy); + 
tipc_nl_monitor_policy, info->extack); if (err) return err; @@ -2098,6 +2098,8 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info) int err; msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg.skb) + return -ENOMEM; msg.portid = info->snd_portid; msg.seq = info->snd_seq; @@ -2163,7 +2165,7 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb, err = nla_parse_nested(mon, TIPC_NLA_MON_MAX, attrs[TIPC_NLA_MON], - tipc_nl_monitor_policy); + tipc_nl_monitor_policy, NULL); if (err) return err; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index bdce99f9407affaae8ef524d3bd65bca5847d62b..0d4f2f455a7c91a1d7ea2b0fd9c8d121e6da98af 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -51,6 +51,7 @@ #define TIPC_FWD_MSG 1 #define TIPC_MAX_PORT 0xffffffff #define TIPC_MIN_PORT 1 +#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */ enum { TIPC_LISTEN = TCP_LISTEN, @@ -1305,7 +1306,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) /** * tipc_recvmsg - receive packet-oriented message * @m: descriptor for message info - * @buf_len: total size of user buffer area + * @buflen: length of user buffer area * @flags: receive flags * * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. @@ -1313,95 +1314,85 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) * * Returns size of returned message data, errno otherwise */ -static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, - int flags) +static int tipc_recvmsg(struct socket *sock, struct msghdr *m, + size_t buflen, int flags) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct sk_buff *buf; - struct tipc_msg *msg; - bool is_connectionless = tipc_sk_type_connectionless(sk); - long timeo; - unsigned int sz; - u32 err; - int res, hlen; + struct sk_buff *skb; + struct tipc_msg *hdr; + bool connected = !tipc_sk_type_connectionless(sk); + int rc, err, hlen, dlen, copy; + long timeout; /* Catch invalid receive requests */ - if (unlikely(!buf_len)) + if (unlikely(!buflen)) return -EINVAL; lock_sock(sk); - - if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) { - res = -ENOTCONN; + if (unlikely(connected && sk->sk_state == TIPC_OPEN)) { + rc = -ENOTCONN; goto exit; } + timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); - timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); -restart: - - /* Look for a message in receive queue; wait if necessary */ - res = tipc_wait_for_rcvmsg(sock, &timeo); - if (res) - goto exit; - - /* Look at first message in receive queue */ - buf = skb_peek(&sk->sk_receive_queue); - msg = buf_msg(buf); - sz = msg_data_sz(msg); - hlen = msg_hdr_sz(msg); - err = msg_errcode(msg); - - /* Discard an empty non-errored message & try again */ - if ((!sz) && (!err)) { + do { + /* Look at first msg in receive queue; wait if necessary */ + rc = tipc_wait_for_rcvmsg(sock, &timeout); + if (unlikely(rc)) + goto exit; + skb = skb_peek(&sk->sk_receive_queue); + hdr = buf_msg(skb); + dlen = msg_data_sz(hdr); + hlen = msg_hdr_sz(hdr); + err = msg_errcode(hdr); + if (likely(dlen || err)) + break; tsk_advance_rx_queue(sk); - goto restart; - } - - /* Capture sender's address (optional) */ - set_orig_addr(m, msg); + } while (1); - /* Capture ancillary data (optional) */ - res = tipc_sk_anc_data_recv(m, msg, tsk); - if (res) + /* Collect msg meta data, including error code and rejected data */ + set_orig_addr(m, hdr); + rc = tipc_sk_anc_data_recv(m, hdr, tsk); + if (unlikely(rc)) goto exit; - /* Capture message 
data (if valid) & compute return value (always) */ - if (!err) { - if (unlikely(buf_len < sz)) { - sz = buf_len; + /* Capture data if non-error msg, otherwise just set return value */ + if (likely(!err)) { + copy = min_t(int, dlen, buflen); + if (unlikely(copy != dlen)) m->msg_flags |= MSG_TRUNC; - } - res = skb_copy_datagram_msg(buf, hlen, m, sz); - if (res) - goto exit; - res = sz; + rc = skb_copy_datagram_msg(skb, hlen, m, copy); } else { - if (is_connectionless || err == TIPC_CONN_SHUTDOWN || - m->msg_control) - res = 0; - else - res = -ECONNRESET; + copy = 0; + rc = 0; + if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) + rc = -ECONNRESET; } + if (unlikely(rc)) + goto exit; + /* Capture of data or error code/rejected data was successful */ if (unlikely(flags & MSG_PEEK)) goto exit; - if (likely(!is_connectionless)) { - tsk->rcv_unacked += tsk_inc(tsk, hlen + sz); - if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) - tipc_sk_send_ack(tsk); - } tsk_advance_rx_queue(sk); + if (likely(!connected)) + goto exit; + + /* Send connection flow control ack when applicable */ + tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); + if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE) + tipc_sk_send_ack(tsk); exit: release_sock(sk); - return res; + return rc ? rc : copy; } /** - * tipc_recv_stream - receive stream-oriented data + * tipc_recvstream - receive stream-oriented data * @m: descriptor for message info - * @buf_len: total size of user buffer area + * @buflen: total size of user buffer area * @flags: receive flags * * Used for SOCK_STREAM messages only. If not enough data is available @@ -1409,111 +1400,98 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len, * * Returns size of returned message data, errno otherwise */ -static int tipc_recv_stream(struct socket *sock, struct msghdr *m, - size_t buf_len, int flags) +static int tipc_recvstream(struct socket *sock, struct msghdr *m, + size_t buflen, int flags) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct sk_buff *buf; - struct tipc_msg *msg; - long timeo; - unsigned int sz; - int target; - int sz_copied = 0; - u32 err; - int res = 0, hlen; + struct sk_buff *skb; + struct tipc_msg *hdr; + struct tipc_skb_cb *skb_cb; + bool peek = flags & MSG_PEEK; + int offset, required, copy, copied = 0; + int hlen, dlen, err, rc; + long timeout; /* Catch invalid receive attempts */ - if (unlikely(!buf_len)) + if (unlikely(!buflen)) return -EINVAL; lock_sock(sk); if (unlikely(sk->sk_state == TIPC_OPEN)) { - res = -ENOTCONN; - goto exit; - } - - target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); - timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); - -restart: - /* Look for a message in receive queue; wait if necessary */ - res = tipc_wait_for_rcvmsg(sock, &timeo); - if (res) + rc = -ENOTCONN; goto exit; - - /* Look at first message in receive queue */ - buf = skb_peek(&sk->sk_receive_queue); - msg = buf_msg(buf); - sz = msg_data_sz(msg); - hlen = msg_hdr_sz(msg); - err = msg_errcode(msg); - - /* Discard an empty non-errored message & try again */ - if ((!sz) && (!err)) { - tsk_advance_rx_queue(sk); - goto restart; } + required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen); + timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); - /* Optionally capture sender's address & ancillary data of first msg */ - if (sz_copied == 0) { - set_orig_addr(m, msg); - res = tipc_sk_anc_data_recv(m, msg, tsk); - if (res) - goto exit; - } - - /* Capture message data (if valid) & compute return value 
(always) */ - if (!err) { - u32 offset = TIPC_SKB_CB(buf)->bytes_read; - u32 needed; - int sz_to_copy; - - sz -= offset; - needed = (buf_len - sz_copied); - sz_to_copy = min(sz, needed); - - res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy); - if (res) - goto exit; + do { + /* Look at first msg in receive queue; wait if necessary */ + rc = tipc_wait_for_rcvmsg(sock, &timeout); + if (unlikely(rc)) + break; + skb = skb_peek(&sk->sk_receive_queue); + skb_cb = TIPC_SKB_CB(skb); + hdr = buf_msg(skb); + dlen = msg_data_sz(hdr); + hlen = msg_hdr_sz(hdr); + err = msg_errcode(hdr); + + /* Discard any empty non-errored (SYN-) message */ + if (unlikely(!dlen && !err)) { + tsk_advance_rx_queue(sk); + continue; + } - sz_copied += sz_to_copy; + /* Collect msg meta data, incl. error code and rejected data */ + if (!copied) { + set_orig_addr(m, hdr); + rc = tipc_sk_anc_data_recv(m, hdr, tsk); + if (rc) + break; + } - if (sz_to_copy < sz) { - if (!(flags & MSG_PEEK)) - TIPC_SKB_CB(buf)->bytes_read = - offset + sz_to_copy; - goto exit; + /* Copy data if msg ok, otherwise return error/partial data */ + if (likely(!err)) { + offset = skb_cb->bytes_read; + copy = min_t(int, dlen - offset, buflen - copied); + rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy); + if (unlikely(rc)) + break; + copied += copy; + offset += copy; + if (unlikely(offset < dlen)) { + if (!peek) + skb_cb->bytes_read = offset; + break; + } + } else { + rc = 0; + if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control) + rc = -ECONNRESET; + if (copied || rc) + break; } - } else { - if (sz_copied != 0) - goto exit; /* can't add error msg to valid data */ - if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control) - res = 0; - else - res = -ECONNRESET; - } + if (unlikely(peek)) + break; - if (unlikely(flags & MSG_PEEK)) - goto exit; + tsk_advance_rx_queue(sk); - tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg)); - if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4))) - tipc_sk_send_ack(tsk); - tsk_advance_rx_queue(sk); + /* Send connection flow control advertisement when applicable */ + tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen); + if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)) + tipc_sk_send_ack(tsk); - /* Loop around if more data is required */ - if ((sz_copied < buf_len) && /* didn't get all requested data */ - (!skb_queue_empty(&sk->sk_receive_queue) || - (sz_copied < target)) && /* and more is ready or required */ - (!err)) /* and haven't reached a FIN */ - goto restart; + /* Exit if all requested data or FIN/error received */ + if (copied == buflen || err) + break; + } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required); exit: release_sock(sk); - return sz_copied ? sz_copied : res; + return copied ? 
copied : rc; } /** @@ -2537,6 +2515,28 @@ static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } } +static int tipc_socketpair(struct socket *sock1, struct socket *sock2) +{ + struct tipc_sock *tsk2 = tipc_sk(sock2->sk); + struct tipc_sock *tsk1 = tipc_sk(sock1->sk); + u32 onode = tipc_own_addr(sock_net(sock1->sk)); + + tsk1->peer.family = AF_TIPC; + tsk1->peer.addrtype = TIPC_ADDR_ID; + tsk1->peer.scope = TIPC_NODE_SCOPE; + tsk1->peer.addr.id.ref = tsk2->portid; + tsk1->peer.addr.id.node = onode; + tsk2->peer.family = AF_TIPC; + tsk2->peer.addrtype = TIPC_ADDR_ID; + tsk2->peer.scope = TIPC_NODE_SCOPE; + tsk2->peer.addr.id.ref = tsk1->portid; + tsk2->peer.addr.id.node = onode; + + tipc_sk_finish_conn(tsk1, tsk2->portid, onode); + tipc_sk_finish_conn(tsk2, tsk1->portid, onode); + return 0; +} + /* Protocol switches for the various types of TIPC sockets */ static const struct proto_ops msg_ops = { @@ -2545,7 +2545,7 @@ static const struct proto_ops msg_ops = { .release = tipc_release, .bind = tipc_bind, .connect = tipc_connect, - .socketpair = sock_no_socketpair, + .socketpair = tipc_socketpair, .accept = sock_no_accept, .getname = tipc_getname, .poll = tipc_poll, @@ -2566,7 +2566,7 @@ static const struct proto_ops packet_ops = { .release = tipc_release, .bind = tipc_bind, .connect = tipc_connect, - .socketpair = sock_no_socketpair, + .socketpair = tipc_socketpair, .accept = tipc_accept, .getname = tipc_getname, .poll = tipc_poll, @@ -2587,7 +2587,7 @@ static const struct proto_ops stream_ops = { .release = tipc_release, .bind = tipc_bind, .connect = tipc_connect, - .socketpair = sock_no_socketpair, + .socketpair = tipc_socketpair, .accept = tipc_accept, .getname = tipc_getname, .poll = tipc_poll, @@ -2597,7 +2597,7 @@ static const struct proto_ops stream_ops = { .setsockopt = tipc_setsockopt, .getsockopt = tipc_getsockopt, .sendmsg = tipc_sendstream, - .recvmsg = tipc_recv_stream, + .recvmsg = tipc_recvstream, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage }; @@ -2870,7 +2870,7 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], - tipc_nl_sock_policy); + tipc_nl_sock_policy, NULL); if (err) return err; diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 271cd66e4b3b66534d8686bec94132bc9737314e..0bf91cd3733cb37ecc8ba4ccf7ae5a26cb6e966d 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -54,8 +54,6 @@ struct tipc_subscriber { static void tipc_subscrp_delete(struct tipc_subscription *sub); static void tipc_subscrb_put(struct tipc_subscriber *subscriber); -static void tipc_subscrp_put(struct tipc_subscription *subscription); -static void tipc_subscrp_get(struct tipc_subscription *subscription); /** * htohl - convert value to endianness used by destination @@ -125,7 +123,6 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, { struct tipc_name_seq seq; - tipc_subscrp_get(sub); tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) return; @@ -135,7 +132,6 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, node); - tipc_subscrp_put(sub); } static void tipc_subscrp_timeout(unsigned long data) @@ -145,6 +141,7 @@ static void tipc_subscrp_timeout(unsigned long data) spin_lock_bh(&subscriber->lock); tipc_nametbl_unsubscribe(sub); + list_del(&sub->subscrp_list); 
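+	/* Editor's note: the unlink now happens here, under
+	 * subscriber->lock, rather than in the kref release path, so a
+	 * concurrent tipc_subscrb_subscrp_delete() walking this list
+	 * cannot observe a half-removed subscription.
+	 */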
spin_unlock_bh(&subscriber->lock); /* Notify subscriber of timeout */ @@ -177,20 +174,17 @@ static void tipc_subscrp_kref_release(struct kref *kref) struct tipc_net *tn = net_generic(sub->net, tipc_net_id); struct tipc_subscriber *subscriber = sub->subscriber; - spin_lock_bh(&subscriber->lock); - list_del(&sub->subscrp_list); atomic_dec(&tn->subscription_count); - spin_unlock_bh(&subscriber->lock); kfree(sub); tipc_subscrb_put(subscriber); } -static void tipc_subscrp_put(struct tipc_subscription *subscription) +void tipc_subscrp_put(struct tipc_subscription *subscription) { kref_put(&subscription->kref, tipc_subscrp_kref_release); } -static void tipc_subscrp_get(struct tipc_subscription *subscription) +void tipc_subscrp_get(struct tipc_subscription *subscription) { kref_get(&subscription->kref); } @@ -210,11 +204,8 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, continue; tipc_nametbl_unsubscribe(sub); - tipc_subscrp_get(sub); - spin_unlock_bh(&subscriber->lock); + list_del(&sub->subscrp_list); tipc_subscrp_delete(sub); - tipc_subscrp_put(sub); - spin_lock_bh(&subscriber->lock); if (s) break; diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index ffdc214c117a924f34b416fde415fcd18201ebc0..ee52957dc9524a76ac371aa19d950dc90bfe4035 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -78,4 +78,7 @@ u32 tipc_subscrp_convert_seq_type(u32 type, int swap); int tipc_topsrv_start(struct net *net); void tipc_topsrv_stop(struct net *net); +void tipc_subscrp_put(struct tipc_subscription *subscription); +void tipc_subscrp_get(struct tipc_subscription *subscription); + #endif diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 46061cf48cd13506a12cb9b8fb53f64d9a56aa06..ecca64fc6a6f223bf8c0e09e3a299fe4fd62509d 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -457,7 +457,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) err = nla_parse_nested(battrs, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], - tipc_nl_bearer_policy); + tipc_nl_bearer_policy, NULL); if (err) return err; @@ -609,7 +609,8 @@ int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr) struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; struct udp_media_addr *dst; - if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attr, tipc_nl_udp_policy)) + if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attr, + tipc_nl_udp_policy, NULL)) return -EINVAL; if (!opts[TIPC_NLA_UDP_REMOTE]) @@ -662,7 +663,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attrs[TIPC_NLA_BEARER_UDP_OPTS], - tipc_nl_udp_policy)) + tipc_nl_udp_policy, NULL)) goto err; if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) { diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 928691c434087e8ff72b84fc6515539115c0509b..6a7fe7660551f45c065a7f472b805c0b8073f6bb 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -996,7 +996,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) unsigned int hash; struct unix_address *addr; struct hlist_head *list; - struct path path = { NULL, NULL }; + struct path path = { }; err = -EINVAL; if (sunaddr->sun_family != AF_UNIX) diff --git a/net/vmw_vsock/Makefile b/net/vmw_vsock/Makefile index bc27c70e0e59f76a487af92455ac6542a035f762..09fc2eb29dc88898f631bfdf3e747bf16fbe4462 100644 --- a/net/vmw_vsock/Makefile +++ b/net/vmw_vsock/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o obj-$(CONFIG_VIRTIO_VSOCKETS) 
+= vmw_vsock_virtio_transport.o obj-$(CONFIG_VIRTIO_VSOCKETS_COMMON) += vmw_vsock_virtio_transport_common.o -vsock-y += af_vsock.o vsock_addr.o +vsock-y += af_vsock.o af_vsock_tap.o vsock_addr.o vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \ vmci_transport_notify_qstate.o diff --git a/net/vmw_vsock/af_vsock_tap.c b/net/vmw_vsock/af_vsock_tap.c new file mode 100644 index 0000000000000000000000000000000000000000..98f09b5393668e1e8ac6bfd5b03e4cf5ffd903b7 --- /dev/null +++ b/net/vmw_vsock/af_vsock_tap.c @@ -0,0 +1,114 @@ +/* + * Tap functions for AF_VSOCK sockets. + * + * Code based on net/netlink/af_netlink.c tap functions. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include + +static DEFINE_SPINLOCK(vsock_tap_lock); +static struct list_head vsock_tap_all __read_mostly = + LIST_HEAD_INIT(vsock_tap_all); + +int vsock_add_tap(struct vsock_tap *vt) +{ + if (unlikely(vt->dev->type != ARPHRD_VSOCKMON)) + return -EINVAL; + + __module_get(vt->module); + + spin_lock(&vsock_tap_lock); + list_add_rcu(&vt->list, &vsock_tap_all); + spin_unlock(&vsock_tap_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(vsock_add_tap); + +int vsock_remove_tap(struct vsock_tap *vt) +{ + struct vsock_tap *tmp; + bool found = false; + + spin_lock(&vsock_tap_lock); + + list_for_each_entry(tmp, &vsock_tap_all, list) { + if (vt == tmp) { + list_del_rcu(&vt->list); + found = true; + goto out; + } + } + + pr_warn("vsock_remove_tap: %p not found\n", vt); +out: + spin_unlock(&vsock_tap_lock); + + synchronize_net(); + + if (found) + module_put(vt->module); + + return found ? 
0 : -ENODEV; +} +EXPORT_SYMBOL_GPL(vsock_remove_tap); + +static int __vsock_deliver_tap_skb(struct sk_buff *skb, + struct net_device *dev) +{ + int ret = 0; + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + + if (nskb) { + dev_hold(dev); + + nskb->dev = dev; + ret = dev_queue_xmit(nskb); + if (unlikely(ret > 0)) + ret = net_xmit_errno(ret); + + dev_put(dev); + } + + return ret; +} + +static void __vsock_deliver_tap(struct sk_buff *skb) +{ + int ret; + struct vsock_tap *tmp; + + list_for_each_entry_rcu(tmp, &vsock_tap_all, list) { + ret = __vsock_deliver_tap_skb(skb, tmp->dev); + if (unlikely(ret)) + break; + } +} + +void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque) +{ + struct sk_buff *skb; + + rcu_read_lock(); + + if (likely(list_empty(&vsock_tap_all))) + goto out; + + skb = build_skb(opaque); + if (skb) { + __vsock_deliver_tap(skb); + consume_skb(skb); + } + +out: + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(vsock_deliver_tap); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 68675a151f22b8b63c02b25a67b833d9a6046d84..9dffe0282ad4f08efe8df7ab48695183b5117739 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -144,6 +144,8 @@ virtio_transport_send_pkt_work(struct work_struct *work) list_del_init(&pkt->list); spin_unlock_bh(&vsock->send_pkt_list_lock); + virtio_transport_deliver_tap_pkt(pkt); + reply = pkt->reply; sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr)); @@ -370,6 +372,7 @@ static void virtio_transport_rx_work(struct work_struct *work) } pkt->len = len - sizeof(pkt->hdr); + virtio_transport_deliver_tap_pkt(pkt); virtio_transport_recv_pkt(pkt); } } while (!virtqueue_enable_cb(vq)); diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index af087b44ceea2311e53060e2442b4af2024bb037..18e24793659f928221297208ff4c82c8e385d14b 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -85,6 +86,69 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, return NULL; } +/* Packet capture */ +static struct sk_buff *virtio_transport_build_skb(void *opaque) +{ + struct virtio_vsock_pkt *pkt = opaque; + unsigned char *t_hdr, *payload; + struct af_vsockmon_hdr *hdr; + struct sk_buff *skb; + + skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len, + GFP_ATOMIC); + if (!skb) + return NULL; + + hdr = (struct af_vsockmon_hdr *)skb_put(skb, sizeof(*hdr)); + + /* pkt->hdr is little-endian so no need to byteswap here */ + hdr->src_cid = pkt->hdr.src_cid; + hdr->src_port = pkt->hdr.src_port; + hdr->dst_cid = pkt->hdr.dst_cid; + hdr->dst_port = pkt->hdr.dst_port; + + hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO); + hdr->len = cpu_to_le16(sizeof(pkt->hdr)); + memset(hdr->reserved, 0, sizeof(hdr->reserved)); + + switch (le16_to_cpu(pkt->hdr.op)) { + case VIRTIO_VSOCK_OP_REQUEST: + case VIRTIO_VSOCK_OP_RESPONSE: + hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT); + break; + case VIRTIO_VSOCK_OP_RST: + case VIRTIO_VSOCK_OP_SHUTDOWN: + hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT); + break; + case VIRTIO_VSOCK_OP_RW: + hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD); + break; + case VIRTIO_VSOCK_OP_CREDIT_UPDATE: + case VIRTIO_VSOCK_OP_CREDIT_REQUEST: + hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL); + break; + default: + hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN); + break; + } + + t_hdr = skb_put(skb, sizeof(pkt->hdr)); + memcpy(t_hdr, 
&pkt->hdr, sizeof(pkt->hdr)); + + if (pkt->len) { + payload = skb_put(skb, pkt->len); + memcpy(payload, pkt->buf, pkt->len); + } + + return skb; +} + +void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt) +{ + vsock_deliver_tap(virtio_transport_build_skb, pkt); +} +EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt); + static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, struct virtio_vsock_pkt_info *info) { diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 4be4fbbc0b5035662b1cd756bd4e99dd3351309e..10ae7823a19def7bde20d669e3913a40178e7da2 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -96,31 +96,23 @@ static int PROTOCOL_OVERRIDE = -1; static s32 vmci_transport_error_to_vsock_error(s32 vmci_error) { - int err; - switch (vmci_error) { case VMCI_ERROR_NO_MEM: - err = ENOMEM; - break; + return -ENOMEM; case VMCI_ERROR_DUPLICATE_ENTRY: case VMCI_ERROR_ALREADY_EXISTS: - err = EADDRINUSE; - break; + return -EADDRINUSE; case VMCI_ERROR_NO_ACCESS: - err = EPERM; - break; + return -EPERM; case VMCI_ERROR_NO_RESOURCES: - err = ENOBUFS; - break; + return -ENOBUFS; case VMCI_ERROR_INVALID_RESOURCE: - err = EHOSTUNREACH; - break; + return -EHOSTUNREACH; case VMCI_ERROR_INVALID_ARGS: default: - err = EINVAL; + break; } - - return err > 0 ? -err : err; + return -EINVAL; } static u32 vmci_transport_peer_rid(u32 peer_cid) diff --git a/net/wireless/ap.c b/net/wireless/ap.c index bdad1f951561b3d8b7c75d8992e1c3c1a623f146..25666d3009be8be07410f7b0b53c81f62566111a 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c @@ -32,6 +32,11 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, rdev_set_qos_map(rdev, dev, NULL); if (notify) nl80211_send_ap_stopped(wdev); + + /* Should we apply the grace period during beaconing interface + * shutdown also? + */ + cfg80211_sched_dfs_chan_update(rdev); } return err; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 5497d022fadabf17512c0f4c0b77ce50cce345dc..b8aa5a7d5c77ae8453062de794fee7329e429f75 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -456,6 +456,123 @@ bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy, return (r1 + r2 > 0); } +/* + * Checks if center frequency of chan falls within the bandwidth + * range of chandef.
+ */ +bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef, + struct ieee80211_channel *chan) +{ + int width; + u32 cf_offset, freq; + + if (chandef->chan->center_freq == chan->center_freq) + return true; + + width = cfg80211_chandef_get_width(chandef); + if (width <= 20) + return false; + + cf_offset = width / 2 - 10; + + for (freq = chandef->center_freq1 - width / 2 + 10; + freq <= chandef->center_freq1 + width / 2 - 10; freq += 20) { + if (chan->center_freq == freq) + return true; + } + + if (!chandef->center_freq2) + return false; + + for (freq = chandef->center_freq2 - width / 2 + 10; + freq <= chandef->center_freq2 + width / 2 - 10; freq += 20) { + if (chan->center_freq == freq) + return true; + } + + return false; +} + +bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev) +{ + bool active = false; + + ASSERT_WDEV_LOCK(wdev); + + if (!wdev->chandef.chan) + return false; + + switch (wdev->iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + active = wdev->beacon_interval != 0; + break; + case NL80211_IFTYPE_ADHOC: + active = wdev->ssid_len != 0; + break; + case NL80211_IFTYPE_MESH_POINT: + active = wdev->mesh_id_len != 0; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_OCB: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_P2P_DEVICE: + /* Can NAN type be considered as beaconing interface? */ + case NL80211_IFTYPE_NAN: + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NUM_NL80211_IFTYPES: + WARN_ON(1); + } + + return active; +} + +static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan) +{ + struct wireless_dev *wdev; + + list_for_each_entry(wdev, &wiphy->wdev_list, list) { + wdev_lock(wdev); + if (!cfg80211_beaconing_iface_active(wdev)) { + wdev_unlock(wdev); + continue; + } + + if (cfg80211_is_sub_chan(&wdev->chandef, chan)) { + wdev_unlock(wdev); + return true; + } + wdev_unlock(wdev); + } + + return false; +} + +bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan) +{ + struct cfg80211_registered_device *rdev; + + ASSERT_RTNL(); + + if (!(chan->flags & IEEE80211_CHAN_RADAR)) + return false; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) + continue; + + if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan)) + return true; + } + + return false; +} static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy, u32 center_freq, diff --git a/net/wireless/core.c b/net/wireless/core.c index e55e05bc48053f5542696a4c2d53170dfd02577c..83ea164f16b3e018546260d8e809ae6b244d85e3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -305,30 +305,14 @@ static void cfg80211_event_work(struct work_struct *work) void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev) { - struct cfg80211_iface_destroy *item; + struct wireless_dev *wdev, *tmp; ASSERT_RTNL(); - spin_lock_irq(&rdev->destroy_list_lock); - while ((item = list_first_entry_or_null(&rdev->destroy_list, - struct cfg80211_iface_destroy, - list))) { - struct wireless_dev *wdev, *tmp; - u32 nlportid = item->nlportid; - - list_del(&item->list); - kfree(item); - spin_unlock_irq(&rdev->destroy_list_lock); - - list_for_each_entry_safe(wdev, tmp, - &rdev->wiphy.wdev_list, list) { - if (nlportid == wdev->owner_nlportid) - rdev_del_virtual_intf(rdev, wdev); - } - - spin_lock_irq(&rdev->destroy_list_lock); + list_for_each_entry_safe(wdev, tmp, 
&rdev->wiphy.wdev_list, list) { + if (wdev->nl_owner_dead) + rdev_del_virtual_intf(rdev, wdev); } - spin_unlock_irq(&rdev->destroy_list_lock); } static void cfg80211_destroy_iface_wk(struct work_struct *work) @@ -346,13 +330,47 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work) static void cfg80211_sched_scan_stop_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; + struct cfg80211_sched_scan_request *req, *tmp; rdev = container_of(work, struct cfg80211_registered_device, sched_scan_stop_wk); rtnl_lock(); + list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { + if (req->nl_owner_dead) + cfg80211_stop_sched_scan_req(rdev, req, false); + } + rtnl_unlock(); +} - __cfg80211_stop_sched_scan(rdev, false); +static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(work, struct cfg80211_registered_device, + propagate_radar_detect_wk); + + rtnl_lock(); + + regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->radar_chandef, + NL80211_DFS_UNAVAILABLE, + NL80211_RADAR_DETECTED); + + rtnl_unlock(); +} + +static void cfg80211_propagate_cac_done_wk(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(work, struct cfg80211_registered_device, + propagate_cac_done_wk); + + rtnl_lock(); + + regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->cac_done_chandef, + NL80211_DFS_AVAILABLE, + NL80211_RADAR_CAC_FINISHED); rtnl_unlock(); } @@ -436,8 +454,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, spin_lock_init(&rdev->beacon_registrations_lock); spin_lock_init(&rdev->bss_lock); INIT_LIST_HEAD(&rdev->bss_list); + INIT_LIST_HEAD(&rdev->sched_scan_req_list); INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); - INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results); INIT_LIST_HEAD(&rdev->mlme_unreg); spin_lock_init(&rdev->mlme_unreg_lock); INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk); @@ -452,10 +470,12 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, rdev->wiphy.dev.platform_data = rdev; device_enable_async_suspend(&rdev->wiphy.dev); - INIT_LIST_HEAD(&rdev->destroy_list); - spin_lock_init(&rdev->destroy_list_lock); INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk); INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk); + INIT_WORK(&rdev->sched_scan_res_wk, cfg80211_sched_scan_results_wk); + INIT_WORK(&rdev->propagate_radar_detect_wk, + cfg80211_propagate_radar_detect_wk); + INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); #ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -915,6 +935,8 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->destroy_work); flush_work(&rdev->sched_scan_stop_wk); flush_work(&rdev->mlme_unreg_wk); + flush_work(&rdev->propagate_radar_detect_wk); + flush_work(&rdev->propagate_cac_done_wk); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) @@ -954,6 +976,12 @@ void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) } EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); +void cfg80211_cqm_config_free(struct wireless_dev *wdev) +{ + kfree(wdev->cqm_config); + wdev->cqm_config = NULL; +} + void cfg80211_unregister_wdev(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); @@ -980,6 +1008,8 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) WARN_ON_ONCE(1); 
break; } + + cfg80211_cqm_config_free(wdev); } EXPORT_SYMBOL(cfg80211_unregister_wdev); @@ -1001,7 +1031,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct net_device *dev = wdev->netdev; - struct cfg80211_sched_scan_request *sched_scan_req; + struct cfg80211_sched_scan_request *pos, *tmp; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); @@ -1012,9 +1042,11 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev, break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - sched_scan_req = rtnl_dereference(rdev->sched_scan_req); - if (sched_scan_req && dev == sched_scan_req->dev) - __cfg80211_stop_sched_scan(rdev, false); + list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, + list) { + if (dev == pos->dev) + cfg80211_stop_sched_scan_req(rdev, pos, false); + } #ifdef CONFIG_CFG80211_WEXT kfree(wdev->wext.ie); @@ -1089,7 +1121,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; - struct cfg80211_sched_scan_request *sched_scan_req; + struct cfg80211_sched_scan_request *pos, *tmp; if (!wdev) return NOTIFY_DONE; @@ -1114,7 +1146,15 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); - wdev->identifier = ++rdev->wdev_id; + /* + * We get here also when the interface changes network namespaces, + * as it's registered into the new one, but we don't want it to + * change ID in that case. Checking if the ID is already assigned + * works, because 0 isn't considered a valid ID and the memory is + * 0-initialized. + */ + if (!wdev->identifier) + wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); rdev->devlist_generation++; /* can only change netns with wiphy */ @@ -1158,10 +1198,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, ___cfg80211_scan_done(rdev, false); } - sched_scan_req = rtnl_dereference(rdev->sched_scan_req); - if (WARN_ON(sched_scan_req && - sched_scan_req->dev == wdev->netdev)) { - __cfg80211_stop_sched_scan(rdev, false); + list_for_each_entry_safe(pos, tmp, + &rdev->sched_scan_req_list, list) { + if (WARN_ON(pos && pos->dev == wdev->netdev)) + cfg80211_stop_sched_scan_req(rdev, pos, false); } rdev->opencount--; @@ -1208,12 +1248,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, */ if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && - rdev->ops->set_power_mgmt) - if (rdev_set_power_mgmt(rdev, dev, wdev->ps, - wdev->ps_timeout)) { - /* assume this means it's off */ - wdev->ps = false; - } + rdev->ops->set_power_mgmt && + rdev_set_power_mgmt(rdev, dev, wdev->ps, + wdev->ps_timeout)) { + /* assume this means it's off */ + wdev->ps = false; + } break; case NETDEV_UNREGISTER: /* @@ -1234,6 +1274,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, kzfree(wdev->wext.keys); #endif flush_work(&wdev->disconnect_wk); + cfg80211_cqm_config_free(wdev); } /* * synchronise (so that we won't find this netdev diff --git a/net/wireless/core.h b/net/wireless/core.h index 58ca206982feafa7ddfe2c90d4483791e5057cf0..6e809325af3bf090f09fb8fef4356bada3a1beed 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -74,10 +74,9 @@ struct cfg80211_registered_device { u32 bss_entries; struct cfg80211_scan_request 
*scan_req; /* protected by RTNL */ struct sk_buff *scan_msg; - struct cfg80211_sched_scan_request __rcu *sched_scan_req; + struct list_head sched_scan_req_list; unsigned long suspend_at; struct work_struct scan_done_wk; - struct work_struct sched_scan_results_wk; struct genl_info *cur_cmd_info; @@ -91,11 +90,15 @@ struct cfg80211_registered_device { struct cfg80211_coalesce *coalesce; - spinlock_t destroy_list_lock; - struct list_head destroy_list; struct work_struct destroy_work; - struct work_struct sched_scan_stop_wk; + struct work_struct sched_scan_res_wk; + + struct cfg80211_chan_def radar_chandef; + struct work_struct propagate_radar_detect_wk; + + struct cfg80211_chan_def cac_done_chandef; + struct work_struct propagate_cac_done_wk; /* must be last because of the way we do wiphy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ @@ -220,23 +223,8 @@ struct cfg80211_event { enum cfg80211_event_type type; union { - struct { - u8 bssid[ETH_ALEN]; - const u8 *req_ie; - const u8 *resp_ie; - size_t req_ie_len; - size_t resp_ie_len; - struct cfg80211_bss *bss; - int status; /* -1 = failed; 0..65535 = status code */ - enum nl80211_timeout_reason timeout_reason; - } cr; - struct { - const u8 *req_ie; - const u8 *resp_ie; - size_t req_ie_len; - size_t resp_ie_len; - struct cfg80211_bss *bss; - } rm; + struct cfg80211_connect_resp_params cr; + struct cfg80211_roam_info rm; struct { const u8 *ie; size_t ie_len; @@ -267,9 +255,11 @@ struct cfg80211_beacon_registration { u32 nlportid; }; -struct cfg80211_iface_destroy { - struct list_head list; - u32 nlportid; +struct cfg80211_cqm_config { + u32 rssi_hyst; + s32 last_rssi_event_value; + int n_rssi_thresholds; + s32 rssi_thresholds[0]; }; void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev); @@ -385,21 +375,16 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid); -void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - int status, bool wextev, - struct cfg80211_bss *bss, - enum nl80211_timeout_reason timeout_reason); +void __cfg80211_connect_result(struct net_device *dev, + struct cfg80211_connect_resp_params *params, + bool wextev); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap); int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); void __cfg80211_roamed(struct wireless_dev *wdev, - struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len); + struct cfg80211_roam_info *info); int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); void cfg80211_autodisconnect_wk(struct work_struct *work); @@ -423,13 +408,20 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, void __cfg80211_scan_done(struct work_struct *wk); void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message); -void __cfg80211_sched_scan_results(struct work_struct *wk); +void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req); +int cfg80211_sched_scan_req_possible(struct cfg80211_registered_device *rdev, + bool want_multi); +void cfg80211_sched_scan_results_wk(struct work_struct *work); +int cfg80211_stop_sched_scan_req(struct 
cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req, + bool driver_initiated); int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, - bool driver_initiated); + u64 reqid, bool driver_initiated); void cfg80211_upload_connect_keys(struct wireless_dev *wdev); int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, - u32 *flags, struct vif_params *params); + struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); void cfg80211_process_wdev_events(struct wireless_dev *wdev); @@ -459,6 +451,16 @@ unsigned int cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef); +void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev); + +bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan); + +bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev); + +bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef, + struct ieee80211_channel *chan); + static inline unsigned int elapsed_jiffies_msecs(unsigned long start) { unsigned long end = jiffies; @@ -512,4 +514,6 @@ void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, #define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; }) #endif +void cfg80211_cqm_config_free(struct wireless_dev *wdev); + #endif /* __NET_WIRELESS_CORE_H */ diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 364f900a3dc4d2810c90fa3f613d1b9a08cb5e71..10bf040a0982d2f7859f98399c24de26a7fd5383 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -190,6 +190,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) if (!nowext) wdev->wext.ibss.ssid_len = 0; #endif + cfg80211_sched_dfs_chan_update(rdev); } void cfg80211_clear_ibss(struct net_device *dev, bool nowext) diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 2d8518a37eabc4795e97f7f600f14cdf0f63d7ff..ec0b1c20ac9920106efbc8798b53d1077a646e36 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -262,6 +262,7 @@ int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, wdev->beacon_interval = 0; memset(&wdev->chandef, 0, sizeof(wdev->chandef)); rdev_set_qos_map(rdev, dev, NULL); + cfg80211_sched_dfs_chan_update(rdev); } return err; diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 22b3d999006559d7572287c2b8e6f4fad286766d..d8df7a5180a0473b56e74e8b4eb93b380965ea49 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -26,9 +26,16 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; - u8 *ie = mgmt->u.assoc_resp.variable; - int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); - u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); + struct cfg80211_connect_resp_params cr; + + memset(&cr, 0, sizeof(cr)); + cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code); + cr.bssid = mgmt->bssid; + cr.bss = bss; + cr.resp_ie = mgmt->u.assoc_resp.variable; + cr.resp_ie_len = + len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); + cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED; trace_cfg80211_send_rx_assoc(dev, bss); @@ -38,7 +45,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, * and got a reject -- we only try again with an assoc * frame instead of 
reassoc. */ - if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) { + if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) { cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); return; @@ -46,10 +53,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues); /* update current_bss etc., consumes the bss reference */ - __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, - status_code, - status_code == WLAN_STATUS_SUCCESS, bss, - NL80211_TIMEOUT_UNSPECIFIED); + __cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS); } EXPORT_SYMBOL(cfg80211_rx_assoc_resp); @@ -745,6 +749,12 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, } EXPORT_SYMBOL(cfg80211_rx_mgmt); +void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev) +{ + cancel_delayed_work(&rdev->dfs_update_channels_wk); + queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, 0); +} + void cfg80211_dfs_channels_update_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); @@ -755,6 +765,8 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) struct wiphy *wiphy; bool check_again = false; unsigned long timeout, next_time = 0; + unsigned long time_dfs_update; + enum nl80211_radar_event radar_event; int bandid, i; rdev = container_of(delayed_work, struct cfg80211_registered_device, @@ -770,11 +782,27 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) for (i = 0; i < sband->n_channels; i++) { c = &sband->channels[i]; - if (c->dfs_state != NL80211_DFS_UNAVAILABLE) + if (!(c->flags & IEEE80211_CHAN_RADAR)) + continue; + + if (c->dfs_state != NL80211_DFS_UNAVAILABLE && + c->dfs_state != NL80211_DFS_AVAILABLE) continue; - timeout = c->dfs_state_entered + msecs_to_jiffies( - IEEE80211_DFS_MIN_NOP_TIME_MS); + if (c->dfs_state == NL80211_DFS_UNAVAILABLE) { + time_dfs_update = IEEE80211_DFS_MIN_NOP_TIME_MS; + radar_event = NL80211_RADAR_NOP_FINISHED; + } else { + if (regulatory_pre_cac_allowed(wiphy) || + cfg80211_any_wiphy_oper_chan(wiphy, c)) + continue; + + time_dfs_update = REG_PRE_CAC_EXPIRY_GRACE_MS; + radar_event = NL80211_RADAR_PRE_CAC_EXPIRED; + } + + timeout = c->dfs_state_entered + + msecs_to_jiffies(time_dfs_update); if (time_after_eq(jiffies, timeout)) { c->dfs_state = NL80211_DFS_USABLE; @@ -784,8 +812,12 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) NL80211_CHAN_NO_HT); nl80211_radar_notify(rdev, &chandef, - NL80211_RADAR_NOP_FINISHED, - NULL, GFP_ATOMIC); + radar_event, NULL, + GFP_ATOMIC); + + regulatory_propagate_dfs_state(wiphy, &chandef, + c->dfs_state, + radar_event); continue; } @@ -810,7 +842,6 @@ void cfg80211_radar_event(struct wiphy *wiphy, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - unsigned long timeout; trace_cfg80211_radar_event(wiphy, chandef); @@ -820,11 +851,12 @@ void cfg80211_radar_event(struct wiphy *wiphy, */ cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE); - timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS); - queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, - timeout); + cfg80211_sched_dfs_chan_update(rdev); nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp); + + memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def)); + queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); } EXPORT_SYMBOL(cfg80211_radar_event); @@ -851,6 
+883,10 @@ void cfg80211_cac_event(struct net_device *netdev, msecs_to_jiffies(wdev->cac_time_ms); WARN_ON(!time_after_eq(jiffies, timeout)); cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE); + memcpy(&rdev->cac_done_chandef, chandef, + sizeof(struct cfg80211_chan_def)); + queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); + cfg80211_sched_dfs_chan_update(rdev); break; case NL80211_RADAR_CAC_ABORTED: break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2312dc2ffdb98b37b2909274c57eed68935267d7..570fc95dc507d8ee08a329e6b313b517fd78ce83 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -410,6 +410,16 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) }, [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 }, + [NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY, + .len = FILS_ERP_MAX_USERNAME_LEN }, + [NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY, + .len = FILS_ERP_MAX_REALM_LEN }, + [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 }, + [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY, + .len = FILS_ERP_MAX_RRK_LEN }, + [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 }, + [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, + [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -487,6 +497,7 @@ static const struct nla_policy nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, + [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { .len = ETH_ALEN }, [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 }, }; @@ -548,7 +559,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, genl_family_attrbuf(&nl80211_fam), - nl80211_fam.maxattr, nl80211_policy); + nl80211_fam.maxattr, nl80211_policy, NULL); if (err) return err; @@ -719,7 +730,7 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) { struct nlattr *tb[NL80211_KEY_MAX + 1]; int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, - nl80211_key_policy); + nl80211_key_policy, NULL); if (err) return err; @@ -760,7 +771,7 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], - nl80211_key_default_policy); + nl80211_key_default_policy, NULL); if (err) return err; @@ -807,10 +818,11 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; - int err = nla_parse_nested( - kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, - info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES], - nl80211_key_default_policy); + int err = nla_parse_nested(kdt, + NUM_NL80211_KEY_DEFAULT_TYPES - 1, + info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES], + nl80211_key_default_policy, + info->extack); if (err) return err; @@ -1366,7 +1378,7 @@ static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev, CMD(tdls_mgmt, TDLS_MGMT); CMD(tdls_oper, TDLS_OPER); } - if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) + if (rdev->wiphy.max_sched_scan_reqs) CMD(sched_scan_start, START_SCHED_SCAN); CMD(probe_client, PROBE_CLIENT); CMD(set_noack_map, SET_NOACK_MAP); @@ -1805,6 +1817,11 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device 
*rdev, nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; + if (rdev->wiphy.max_sched_scan_reqs && + nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_MAX_REQS, + rdev->wiphy.max_sched_scan_reqs)) + goto nla_put_failure; + if (nla_put(msg, NL80211_ATTR_EXT_FEATURES, sizeof(rdev->wiphy.ext_features), rdev->wiphy.ext_features)) @@ -1892,8 +1909,8 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb, struct nl80211_dump_wiphy_state *state) { struct nlattr **tb = genl_family_attrbuf(&nl80211_fam); - int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, - tb, nl80211_fam.maxattr, nl80211_policy); + int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, + nl80211_fam.maxattr, nl80211_policy, NULL); /* ignore parse errors for backward compatibility */ if (ret) return 0; @@ -2308,7 +2325,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) rem_txq_params) { result = nla_parse_nested(tb, NL80211_TXQ_ATTR_MAX, nl_txq_params, - txq_params_policy); + txq_params_policy, + info->extack); if (result) return result; result = parse_txq_params(tb, &txq_params); @@ -2695,17 +2713,82 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) if (!nla) return -EINVAL; - if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX, - nla, mntr_flags_policy)) + if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX, nla, + mntr_flags_policy, NULL)) return -EINVAL; for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++) if (flags[flag]) *mntrflags |= (1<<flag); + *mntrflags |= MONITOR_FLAG_CHANGED; + return 0; } +static int nl80211_parse_mon_options(struct cfg80211_registered_device *rdev, + enum nl80211_iftype type, + struct genl_info *info, + struct vif_params *params) +{ + bool change = false; + int err; + + if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { + if (type != NL80211_IFTYPE_MONITOR) + return -EINVAL; + + err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], + &params->flags); + if (err) + return err; + + change = true; + } + + if (params->flags & MONITOR_FLAG_ACTIVE && + !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + + if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) { + const u8 *mumimo_groups; + u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; + + if (type != NL80211_IFTYPE_MONITOR) + return -EINVAL; + + if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) + return -EOPNOTSUPP; + + mumimo_groups = + nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]); + + /* bits 0 and 63 are reserved and must be zero */ + if ((mumimo_groups[0] & BIT(7)) || + (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0))) + return -EINVAL; + + params->vht_mumimo_groups = mumimo_groups; + change = true; + } + + if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) { + u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; + + if (type != NL80211_IFTYPE_MONITOR) + return -EINVAL; + + if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) + return -EOPNOTSUPP; + + params->vht_mumimo_follow_addr = + nla_data(info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]); + change = true; + } + + return change ?
1 : 0; +} + static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 use_4addr, enum nl80211_iftype iftype) @@ -2739,7 +2822,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) int err; enum nl80211_iftype otype, ntype; struct net_device *dev = info->user_ptr[1]; - u32 _flags, *flags = NULL; bool change = false; memset(&params, 0, sizeof(params)); @@ -2782,56 +2864,14 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) params.use_4addr = -1; } - if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { - if (ntype != NL80211_IFTYPE_MONITOR) - return -EINVAL; - err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], - &_flags); - if (err) - return err; - - flags = &_flags; - change = true; - } - - if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) { - const u8 *mumimo_groups; - u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; - - if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) - return -EOPNOTSUPP; - - mumimo_groups = - nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]); - - /* bits 0 and 63 are reserved and must be zero */ - if ((mumimo_groups[0] & BIT(7)) || - (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0))) - return -EINVAL; - - memcpy(params.vht_mumimo_groups, mumimo_groups, - VHT_MUMIMO_GROUPS_DATA_LEN); - change = true; - } - - if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) { - u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; - - if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) - return -EOPNOTSUPP; - - nla_memcpy(params.macaddr, - info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR], - ETH_ALEN); + err = nl80211_parse_mon_options(rdev, ntype, info, &params); + if (err < 0) + return err; + if (err > 0) change = true; - } - - if (flags && (*flags & MONITOR_FLAG_ACTIVE) && - !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) - return -EOPNOTSUPP; if (change) - err = cfg80211_change_iface(rdev, dev, ntype, flags, &params); + err = cfg80211_change_iface(rdev, dev, ntype, &params); else err = 0; @@ -2849,7 +2889,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; int err; enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; - u32 flags; /* to avoid failing a new interface creation due to pending removal */ cfg80211_destroy_ifaces(rdev); @@ -2885,13 +2924,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return err; } - err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? - info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, - &flags); - - if (!err && (flags & MONITOR_FLAG_ACTIVE) && - !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) - return -EOPNOTSUPP; + err = nl80211_parse_mon_options(rdev, type, info, &params); + if (err < 0) + return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) @@ -2899,8 +2934,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), - NET_NAME_USER, type, err ?
NULL : &flags, - &params); + NET_NAME_USER, type, &params); if (WARN_ON(!wdev)) { nlmsg_free(msg); return -EPROTO; @@ -3561,7 +3595,7 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info, if (sband == NULL) return -EINVAL; err = nla_parse_nested(tb, NL80211_TXRATE_MAX, tx_rates, - nl80211_txattr_policy); + nl80211_txattr_policy, info->extack); if (err) return err; if (tb[NL80211_TXRATE_LEGACY]) { @@ -3818,6 +3852,19 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev, return false; return true; case NL80211_CMD_CONNECT: + /* SAE not supported yet */ + if (auth_type == NL80211_AUTHTYPE_SAE) + return false; + /* FILS with SK PFS or PK not supported yet */ + if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || + auth_type == NL80211_AUTHTYPE_FILS_PK) + return false; + if (!wiphy_ext_feature_isset( + &rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + auth_type == NL80211_AUTHTYPE_FILS_SK) + return false; + return true; case NL80211_CMD_START_AP: /* SAE not supported yet */ if (auth_type == NL80211_AUTHTYPE_SAE) @@ -4100,8 +4147,8 @@ static int parse_station_flags(struct genl_info *info, if (!nla) return 0; - if (nla_parse_nested(flags, NL80211_STA_FLAG_MAX, - nla, sta_flags_policy)) + if (nla_parse_nested(flags, NL80211_STA_FLAG_MAX, nla, + sta_flags_policy, info->extack)) return -EINVAL; /* @@ -4151,7 +4198,7 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, struct nlattr *rate; u32 bitrate; u16 bitrate_compat; - enum nl80211_attrs rate_flg; + enum nl80211_rate_info rate_flg; rate = nla_nest_start(msg, attr); if (!rate) @@ -4728,7 +4775,7 @@ static int nl80211_parse_sta_wme(struct genl_info *info, nla = info->attrs[NL80211_ATTR_STA_WME]; err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla, - nl80211_sta_wme_policy); + nl80211_sta_wme_policy, info->extack); if (err) return err; @@ -5703,7 +5750,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, cur_params.dot11MeshGateAnnouncementProtocol) || nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, cur_params.dot11MeshForwarding) || - nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, + nla_put_s32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, cur_params.rssi_threshold) || nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, cur_params.ht_opmode) || @@ -5853,7 +5900,7 @@ do { \ return -EINVAL; if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_CONFIG], - nl80211_meshconf_params_policy)) + nl80211_meshconf_params_policy, info->extack)) return -EINVAL; /* This makes sure that there aren't more than 32 mesh config @@ -6002,7 +6049,7 @@ static int nl80211_parse_mesh_setup(struct genl_info *info, return -EINVAL; if (nla_parse_nested(tb, NL80211_MESH_SETUP_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_SETUP], - nl80211_mesh_setup_params_policy)) + nl80211_mesh_setup_params_policy, info->extack)) return -EINVAL; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC]) @@ -6393,7 +6440,8 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { r = nla_parse_nested(tb, NL80211_REG_RULE_ATTR_MAX, - nl_reg_rule, reg_rule_policy); + nl_reg_rule, reg_rule_policy, + info->extack); if (r) goto bad_reg; r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]); @@ -6461,7 +6509,7 @@ static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy, return -EINVAL; err = nla_parse_nested(attr, NL80211_BSS_SELECT_ATTR_MAX, nest, - nl80211_bss_select_policy); + nl80211_bss_select_policy, NULL); if (err)
return err; @@ -6545,6 +6593,19 @@ static int nl80211_parse_random_mac(struct nlattr **attrs, return 0; } +static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev) +{ + ASSERT_WDEV_LOCK(wdev); + + if (!cfg80211_beaconing_iface_active(wdev)) + return true; + + if (!(wdev->chandef.chan->flags & IEEE80211_CHAN_RADAR)) + return true; + + return regulatory_pre_cac_allowed(wdev->wiphy); +} + static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -6670,6 +6731,25 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) request->n_channels = i; + wdev_lock(wdev); + if (!cfg80211_off_channel_oper_allowed(wdev)) { + struct ieee80211_channel *chan; + + if (request->n_channels != 1) { + wdev_unlock(wdev); + err = -EBUSY; + goto out_free; + } + + chan = request->channels[0]; + if (chan->center_freq != wdev->chandef.chan->center_freq) { + wdev_unlock(wdev); + err = -EBUSY; + goto out_free; + } + } + wdev_unlock(wdev); + i = 0; if (n_ssids) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { @@ -6862,7 +6942,7 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans, return -EINVAL; err = nla_parse_nested(plan, NL80211_SCHED_SCAN_PLAN_MAX, - attr, nl80211_plan_policy); + attr, nl80211_plan_policy, NULL); if (err) return err; @@ -6953,11 +7033,19 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, err = nla_parse_nested(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, - attr, nl80211_match_policy); + attr, nl80211_match_policy, + NULL); if (err) return ERR_PTR(err); + + /* SSID and BSSID are mutually exclusive */ + if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] && + tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) + return ERR_PTR(-EINVAL); + /* add other standalone attributes here */ - if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) { + if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] || + tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) { n_match_sets++; continue; } @@ -7128,15 +7216,17 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { - struct nlattr *ssid, *rssi; + struct nlattr *ssid, *bssid, *rssi; err = nla_parse_nested(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, - attr, nl80211_match_policy); + attr, nl80211_match_policy, + NULL); if (err) goto out_free; ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]; - if (ssid) { + bssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]; + if (ssid || bssid) { if (WARN_ON(i >= n_match_sets)) { /* this indicates a programming error, * the loop above should have verified @@ -7146,14 +7236,25 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, goto out_free; } - if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { - err = -EINVAL; - goto out_free; + if (ssid) { + if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) { + err = -EINVAL; + goto out_free; + } + memcpy(request->match_sets[i].ssid.ssid, + nla_data(ssid), nla_len(ssid)); + request->match_sets[i].ssid.ssid_len = + nla_len(ssid); } - memcpy(request->match_sets[i].ssid.ssid, - nla_data(ssid), nla_len(ssid)); - request->match_sets[i].ssid.ssid_len = - nla_len(ssid); + if (bssid) { + if (nla_len(bssid) != ETH_ALEN) { + err = -EINVAL; + goto out_free; + } + memcpy(request->match_sets[i].bssid, + nla_data(bssid), ETH_ALEN); + } + /* special attribute - old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; @@ -7261,14 +7362,16 @@ static int 
nl80211_start_sched_scan(struct sk_buff *skb, struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_sched_scan_request *sched_scan_req; + bool want_multi; int err; - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || - !rdev->ops->sched_scan_start) + if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_start) return -EOPNOTSUPP; - if (rdev->sched_scan_req) - return -EINPROGRESS; + want_multi = info->attrs[NL80211_ATTR_SCHED_SCAN_MULTI]; + err = cfg80211_sched_scan_req_possible(rdev, want_multi); + if (err) + return err; sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev, info->attrs, @@ -7278,6 +7381,14 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (err) goto out_err; + /* leave request id zero for legacy request + * or if driver does not support multi-scheduled scan + */ + if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) { + while (!sched_scan_req->reqid) + sched_scan_req->reqid = rdev->wiphy.cookie_counter++; + } + err = rdev_sched_scan_start(rdev, dev, sched_scan_req); if (err) goto out_free; @@ -7288,10 +7399,9 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) sched_scan_req->owner_nlportid = info->snd_portid; - rcu_assign_pointer(rdev->sched_scan_req, sched_scan_req); + cfg80211_add_sched_scan_req(rdev, sched_scan_req); - nl80211_send_sched_scan(rdev, dev, - NL80211_CMD_START_SCHED_SCAN); + nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_START_SCHED_SCAN); return 0; out_free: @@ -7303,13 +7413,27 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, static int nl80211_stop_sched_scan(struct sk_buff *skb, struct genl_info *info) { + struct cfg80211_sched_scan_request *req; struct cfg80211_registered_device *rdev = info->user_ptr[0]; + u64 cookie; - if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || - !rdev->ops->sched_scan_stop) + if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP; - return __cfg80211_stop_sched_scan(rdev, false); + if (info->attrs[NL80211_ATTR_COOKIE]) { + cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); + return __cfg80211_stop_sched_scan(rdev, cookie, false); + } + + req = list_first_or_null_rcu(&rdev->sched_scan_req_list, + struct cfg80211_sched_scan_request, + list); + if (!req || req->reqid || + (req->owner_nlportid && + req->owner_nlportid != info->snd_portid)) + return -ENOENT; + + return cfg80211_stop_sched_scan_req(rdev, req, false); } static int nl80211_start_radar_detection(struct sk_buff *skb, @@ -7433,7 +7557,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX, info->attrs[NL80211_ATTR_CSA_IES], - nl80211_policy); + nl80211_policy, info->extack); if (err) return err; @@ -8639,7 +8763,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb, struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam); err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, - attrbuf, nl80211_fam.maxattr, nl80211_policy); + attrbuf, nl80211_fam.maxattr, + nl80211_policy, NULL); if (err) goto out_err; @@ -8867,6 +8992,35 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) } } + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && + info->attrs[NL80211_ATTR_FILS_ERP_REALM] && + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && + 
info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { + connect.fils_erp_username = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); + connect.fils_erp_username_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); + connect.fils_erp_realm = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); + connect.fils_erp_realm_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); + connect.fils_erp_next_seq_num = + nla_get_u16( + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); + connect.fils_erp_rrk = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); + connect.fils_erp_rrk_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); + } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || + info->attrs[NL80211_ATTR_FILS_ERP_REALM] || + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || + info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { + kzfree(connkeys); + return -EINVAL; + } + wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, @@ -8986,14 +9140,28 @@ static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info) memset(&pmksa, 0, sizeof(struct cfg80211_pmksa)); - if (!info->attrs[NL80211_ATTR_MAC]) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_PMKID]) return -EINVAL; pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); - pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (info->attrs[NL80211_ATTR_MAC]) { + pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + } else if (info->attrs[NL80211_ATTR_SSID] && + info->attrs[NL80211_ATTR_FILS_CACHE_ID] && + (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA || + info->attrs[NL80211_ATTR_PMK])) { + pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + pmksa.cache_id = + nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]); + } else { + return -EINVAL; + } + if (info->attrs[NL80211_ATTR_PMK]) { + pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]); + pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]); + } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) @@ -9096,6 +9264,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_chan_def chandef; + const struct cfg80211_chan_def *compat_chandef; struct sk_buff *msg; void *hdr; u64 cookie; @@ -9124,6 +9293,18 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, if (err) return err; + wdev_lock(wdev); + if (!cfg80211_off_channel_oper_allowed(wdev) && + !cfg80211_chandef_identical(&wdev->chandef, &chandef)) { + compat_chandef = cfg80211_chandef_compatible(&wdev->chandef, + &chandef); + if (compat_chandef != &chandef) { + wdev_unlock(wdev); + return -EBUSY; + } + } + wdev_unlock(wdev); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@ -9299,6 +9480,13 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) if (!chandef.chan && params.offchan) return -EINVAL; + wdev_lock(wdev); + if (params.offchan && !cfg80211_off_channel_oper_allowed(wdev)) { + wdev_unlock(wdev); + return -EBUSY; + } + wdev_unlock(wdev); + params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]); params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]); @@ -9466,7 +9654,7 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info) static const struct nla_policy nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = { - [NL80211_ATTR_CQM_RSSI_THOLD] = 
{ .type = NLA_U32 }, + [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_BINARY }, [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 }, @@ -9495,28 +9683,123 @@ static int nl80211_set_cqm_txe(struct genl_info *info, return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl); } +static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + s32 last, low, high; + u32 hyst; + int i, n; + int err; + + /* RSSI reporting disabled? */ + if (!wdev->cqm_config) + return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0); + + /* + * Obtain current RSSI value if possible, if not and no RSSI threshold + * event has been received yet, we should receive an event after a + * connection is established and enough beacons received to calculate + * the average. + */ + if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss && + rdev->ops->get_station) { + struct station_info sinfo; + u8 *mac_addr; + + mac_addr = wdev->current_bss->pub.bssid; + + err = rdev_get_station(rdev, dev, mac_addr, &sinfo); + if (err) + return err; + + if (sinfo.filled & BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) + wdev->cqm_config->last_rssi_event_value = + (s8) sinfo.rx_beacon_signal_avg; + } + + last = wdev->cqm_config->last_rssi_event_value; + hyst = wdev->cqm_config->rssi_hyst; + n = wdev->cqm_config->n_rssi_thresholds; + + for (i = 0; i < n; i++) + if (last < wdev->cqm_config->rssi_thresholds[i]) + break; + + low = i > 0 ? + (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN; + high = i < n ? + (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX; + + return rdev_set_cqm_rssi_range_config(rdev, dev, low, high); +} + static int nl80211_set_cqm_rssi(struct genl_info *info, - s32 threshold, u32 hysteresis) + const s32 *thresholds, int n_thresholds, + u32 hysteresis) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; + int i, err; + s32 prev = S32_MIN; - if (threshold > 0) - return -EINVAL; - - /* disabling - hysteresis should also be zero then */ - if (threshold == 0) - hysteresis = 0; + /* Check all values negative and sorted */ + for (i = 0; i < n_thresholds; i++) { + if (thresholds[i] > 0 || thresholds[i] <= prev) + return -EINVAL; - if (!rdev->ops->set_cqm_rssi_config) - return -EOPNOTSUPP; + prev = thresholds[i]; + } if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; - return rdev_set_cqm_rssi_config(rdev, dev, threshold, hysteresis); + wdev_lock(wdev); + cfg80211_cqm_config_free(wdev); + wdev_unlock(wdev); + + if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) { + if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */ + return rdev_set_cqm_rssi_config(rdev, dev, 0, 0); + + return rdev_set_cqm_rssi_config(rdev, dev, + thresholds[0], hysteresis); + } + + if (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_CQM_RSSI_LIST)) + return -EOPNOTSUPP; + + if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */ + n_thresholds = 0; + + wdev_lock(wdev); + if (n_thresholds) { + struct cfg80211_cqm_config *cqm_config; + + cqm_config = kzalloc(sizeof(struct cfg80211_cqm_config) + + n_thresholds * sizeof(s32), GFP_KERNEL); + if (!cqm_config) { + err = -ENOMEM; + goto unlock; + } + + cqm_config->rssi_hyst = hysteresis; + 
cqm_config->n_rssi_thresholds = n_thresholds; + memcpy(cqm_config->rssi_thresholds, thresholds, + n_thresholds * sizeof(s32)); + + wdev->cqm_config = cqm_config; + } + + err = cfg80211_cqm_rssi_update(rdev, dev); + +unlock: + wdev_unlock(wdev); + + return err; } static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) @@ -9530,16 +9813,22 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) return -EINVAL; err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm, - nl80211_attr_cqm_policy); + nl80211_attr_cqm_policy, info->extack); if (err) return err; if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] && attrs[NL80211_ATTR_CQM_RSSI_HYST]) { - s32 threshold = nla_get_s32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); + const s32 *thresholds = + nla_data(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); + int len = nla_len(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); - return nl80211_set_cqm_rssi(info, threshold, hysteresis); + if (len % 4) + return -EINVAL; + + return nl80211_set_cqm_rssi(info, thresholds, len / 4, + hysteresis); } if (attrs[NL80211_ATTR_CQM_TXE_RATE] && @@ -9940,7 +10229,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, return -EINVAL; err = nla_parse_nested(tb, MAX_NL80211_WOWLAN_TCP, attr, - nl80211_wowlan_tcp_policy); + nl80211_wowlan_tcp_policy, NULL); if (err) return err; @@ -10085,7 +10374,8 @@ static int nl80211_parse_wowlan_nd(struct cfg80211_registered_device *rdev, goto out; } - err = nla_parse_nested(tb, NL80211_ATTR_MAX, attr, nl80211_policy); + err = nla_parse_nested(tb, NL80211_ATTR_MAX, attr, nl80211_policy, + NULL); if (err) goto out; @@ -10122,7 +10412,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(tb, MAX_NL80211_WOWLAN_TRIG, info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS], - nl80211_wowlan_policy); + nl80211_wowlan_policy, info->extack); if (err) return err; @@ -10205,7 +10495,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) u8 *mask_pat; nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - NULL); + NULL, info->extack); err = -EINVAL; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) @@ -10416,7 +10706,7 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; err = nla_parse_nested(tb, NL80211_ATTR_COALESCE_RULE_MAX, rule, - nl80211_coalesce_policy); + nl80211_coalesce_policy, NULL); if (err) return err; @@ -10454,7 +10744,7 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL); + nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL); if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) return -EINVAL; @@ -10575,7 +10865,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested(tb, MAX_NL80211_REKEY_DATA, info->attrs[NL80211_ATTR_REKEY_DATA], - nl80211_rekey_policy); + nl80211_rekey_policy, info->extack); if (err) return err; @@ -10892,7 +11182,7 @@ static int nl80211_nan_add_func(struct sk_buff *skb, err = nla_parse_nested(tb, NL80211_NAN_FUNC_ATTR_MAX, info->attrs[NL80211_ATTR_NAN_FUNC], - nl80211_nan_func_policy); + nl80211_nan_func_policy, info->extack); if (err) return err; @@ -10989,7 +11279,7 @@ static int nl80211_nan_add_func(struct sk_buff *skb, err = nla_parse_nested(srf_tb, NL80211_NAN_SRF_ATTR_MAX, 
tb[NL80211_NAN_FUNC_SRF], - nl80211_nan_srf_policy); + nl80211_nan_srf_policy, info->extack); if (err) goto out; @@ -11524,8 +11814,8 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, return 0; } - err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, - attrbuf, nl80211_fam.maxattr, nl80211_policy); + err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, + nl80211_fam.maxattr, nl80211_policy, NULL); if (err) return err; @@ -12970,18 +13260,19 @@ static int nl80211_prep_scan_msg(struct sk_buff *msg, static int nl80211_prep_sched_scan_msg(struct sk_buff *msg, - struct cfg80211_registered_device *rdev, - struct net_device *netdev, - u32 portid, u32 seq, int flags, u32 cmd) + struct cfg80211_sched_scan_request *req, u32 cmd) { void *hdr; - hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); + hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) return -1; - if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || - nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, + wiphy_to_rdev(req->wiphy)->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, req->dev->ifindex) || + nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->reqid, + NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -13041,8 +13332,7 @@ void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, NL80211_MCGRP_SCAN, GFP_KERNEL); } -void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u32 cmd) +void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd) { struct sk_buff *msg; @@ -13050,12 +13340,12 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, if (!msg) return; - if (nl80211_prep_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) { + if (nl80211_prep_sched_scan_msg(msg, req, cmd) < 0) { nlmsg_free(msg); return; } - genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(req->wiphy), msg, 0, NL80211_MCGRP_SCAN, GFP_KERNEL); } @@ -13296,17 +13586,16 @@ void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, } void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, - struct net_device *netdev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - int status, - enum nl80211_timeout_reason timeout_reason, + struct net_device *netdev, + struct cfg80211_connect_resp_params *cr, gfp_t gfp) { struct sk_buff *msg; void *hdr; - msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp); + msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len + + cr->fils_kek_len + cr->pmk_len + + (cr->pmkid ? WLAN_PMKID_LEN : 0), gfp); if (!msg) return; @@ -13318,17 +13607,31 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || - (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || + (cr->bssid && + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) || nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, - status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE : - status) || - (status < 0 && + cr->status < 0 ? 
WLAN_STATUS_UNSPECIFIED_FAILURE : + cr->status) || + (cr->status < 0 && (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || - nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) || - (req_ie && - nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || - (resp_ie && - nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, + cr->timeout_reason))) || + (cr->req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) || + (cr->resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len, + cr->resp_ie)) || + (cr->update_erp_next_seq_num && + nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, + cr->fils_erp_next_seq_num)) || + (cr->status == WLAN_STATUS_SUCCESS && + ((cr->fils_kek && + nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len, + cr->fils_kek)) || + (cr->pmk && + nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) || + (cr->pmkid && + nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid))))) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -13343,14 +13646,14 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, } void nl80211_send_roamed(struct cfg80211_registered_device *rdev, - struct net_device *netdev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) + struct net_device *netdev, + struct cfg80211_roam_info *info, gfp_t gfp) { struct sk_buff *msg; void *hdr; + const u8 *bssid = info->bss ? info->bss->bssid : info->bssid; - msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp); + msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len, gfp); if (!msg) return; @@ -13363,10 +13666,12 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) || - (req_ie && - nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || - (resp_ie && - nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie))) + (info->req_ie && + nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len, + info->req_ie)) || + (info->resp_ie && + nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len, + info->resp_ie))) goto nla_put_failure; genlmsg_end(msg, hdr); @@ -13968,6 +14273,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, s32 rssi_level, gfp_t gfp) { struct sk_buff *msg; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level); @@ -13975,6 +14282,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH)) return; + if (wdev->cqm_config) { + wdev->cqm_config->last_rssi_event_value = rssi_level; + + cfg80211_cqm_rssi_update(rdev, dev); + + if (rssi_level == 0) + rssi_level = wdev->cqm_config->last_rssi_event_value; + } + msg = cfg80211_prepare_cqm(dev, NULL, gfp); if (!msg) return; @@ -14619,26 +14935,26 @@ static int nl80211_netlink_notify(struct notifier_block * nb, rcu_read_lock(); list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { - bool schedule_destroy_work = false; - struct cfg80211_sched_scan_request *sched_scan_req = - rcu_dereference(rdev->sched_scan_req); + struct cfg80211_sched_scan_request *sched_scan_req; - if (sched_scan_req && notify->portid && - sched_scan_req->owner_nlportid == notify->portid) { - sched_scan_req->owner_nlportid = 0; - - if (rdev->ops->sched_scan_stop 
&& - rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) + list_for_each_entry_rcu(sched_scan_req, + &rdev->sched_scan_req_list, + list) { + if (sched_scan_req->owner_nlportid == notify->portid) { + sched_scan_req->nl_owner_dead = true; schedule_work(&rdev->sched_scan_stop_wk); + } } list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) { cfg80211_mlme_unregister_socket(wdev, notify->portid); - if (wdev->owner_nlportid == notify->portid) - schedule_destroy_work = true; - else if (wdev->conn_owner_nlportid == notify->portid) + if (wdev->owner_nlportid == notify->portid) { + wdev->nl_owner_dead = true; + schedule_work(&rdev->destroy_work); + } else if (wdev->conn_owner_nlportid == notify->portid) { schedule_work(&wdev->disconnect_wk); + } } spin_lock_bh(&rdev->beacon_registrations_lock); @@ -14651,19 +14967,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb, } } spin_unlock_bh(&rdev->beacon_registrations_lock); - - if (schedule_destroy_work) { - struct cfg80211_iface_destroy *destroy; - - destroy = kzalloc(sizeof(*destroy), GFP_ATOMIC); - if (destroy) { - destroy->nlportid = notify->portid; - spin_lock(&rdev->destroy_list_lock); - list_add(&destroy->list, &rdev->destroy_list); - spin_unlock(&rdev->destroy_list_lock); - schedule_work(&rdev->destroy_work); - } - } } rcu_read_unlock(); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index e488dca87423eb7c85fc80d408c3d45f91e9a97a..b96933322077c6238dd39c4d990941e85154e757 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -16,8 +16,7 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, bool aborted); void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev, struct sk_buff *msg); -void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u32 cmd); +void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd); void nl80211_common_reg_change_event(enum nl80211_commands cmd_id, struct regulatory_request *request); @@ -53,16 +52,12 @@ void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp); void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, - struct net_device *netdev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - int status, - enum nl80211_timeout_reason timeout_reason, + struct net_device *netdev, + struct cfg80211_connect_resp_params *params, gfp_t gfp); void nl80211_send_roamed(struct cfg80211_registered_device *rdev, - struct net_device *netdev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); + struct net_device *netdev, + struct cfg80211_roam_info *info, gfp_t gfp); void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap); diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 2f425075ada8eae81c8ecb04d975d0c9f0d36578..0598c1e5d0adbba6375beec2dcfb667fb4883192 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -36,13 +36,13 @@ static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev, static inline struct wireless_dev *rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name, unsigned char name_assign_type, - enum nl80211_iftype type, u32 *flags, + enum nl80211_iftype type, struct vif_params *params) { struct 
wireless_dev *ret; trace_rdev_add_virtual_intf(&rdev->wiphy, name, type); ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type, - type, flags, params); + type, params); trace_rdev_return_wdev(&rdev->wiphy, ret); return ret; } @@ -61,12 +61,11 @@ rdev_del_virtual_intf(struct cfg80211_registered_device *rdev, static inline int rdev_change_virtual_intf(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype type, - u32 *flags, struct vif_params *params) + struct vif_params *params) { int ret; trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type); - ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, flags, - params); + ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } @@ -749,6 +748,18 @@ rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_set_cqm_rssi_range_config(struct cfg80211_registered_device *rdev, + struct net_device *dev, s32 low, s32 high) +{ + int ret; + trace_rdev_set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); + ret = rdev->ops->set_cqm_rssi_range_config(&rdev->wiphy, dev, + low, high); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + static inline int rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 rate, u32 pkts, u32 intvl) @@ -802,18 +813,18 @@ rdev_sched_scan_start(struct cfg80211_registered_device *rdev, struct cfg80211_sched_scan_request *request) { int ret; - trace_rdev_sched_scan_start(&rdev->wiphy, dev, request); + trace_rdev_sched_scan_start(&rdev->wiphy, dev, request->reqid); ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev, - struct net_device *dev) + struct net_device *dev, u64 reqid) { int ret; - trace_rdev_sched_scan_stop(&rdev->wiphy, dev); - ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); + trace_rdev_sched_scan_stop(&rdev->wiphy, dev, reqid); + ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev, reqid); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 753efcd51fa3495c66e7063d3ce6c1aca7fd9d14..5fae296a6a583256f9874319dfaa9ee87e977fff 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2067,6 +2067,88 @@ reg_process_hint_country_ie(struct wiphy *wiphy, return REG_REQ_IGNORE; } +bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2) +{ + const struct ieee80211_regdomain *wiphy1_regd = NULL; + const struct ieee80211_regdomain *wiphy2_regd = NULL; + const struct ieee80211_regdomain *cfg80211_regd = NULL; + bool dfs_domain_same; + + rcu_read_lock(); + + cfg80211_regd = rcu_dereference(cfg80211_regdomain); + wiphy1_regd = rcu_dereference(wiphy1->regd); + if (!wiphy1_regd) + wiphy1_regd = cfg80211_regd; + + wiphy2_regd = rcu_dereference(wiphy2->regd); + if (!wiphy2_regd) + wiphy2_regd = cfg80211_regd; + + dfs_domain_same = wiphy1_regd->dfs_region == wiphy2_regd->dfs_region; + + rcu_read_unlock(); + + return dfs_domain_same; +} + +static void reg_copy_dfs_chan_state(struct ieee80211_channel *dst_chan, + struct ieee80211_channel *src_chan) +{ + if (!(dst_chan->flags & IEEE80211_CHAN_RADAR) || + !(src_chan->flags & IEEE80211_CHAN_RADAR)) + return; + + if (dst_chan->flags & IEEE80211_CHAN_DISABLED || + src_chan->flags & IEEE80211_CHAN_DISABLED) + return; + + if (src_chan->center_freq 
== dst_chan->center_freq && + dst_chan->dfs_state == NL80211_DFS_USABLE) { + dst_chan->dfs_state = src_chan->dfs_state; + dst_chan->dfs_state_entered = src_chan->dfs_state_entered; + } +} + +static void wiphy_share_dfs_chan_state(struct wiphy *dst_wiphy, + struct wiphy *src_wiphy) +{ + struct ieee80211_supported_band *src_sband, *dst_sband; + struct ieee80211_channel *src_chan, *dst_chan; + int i, j, band; + + if (!reg_dfs_domain_same(dst_wiphy, src_wiphy)) + return; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + dst_sband = dst_wiphy->bands[band]; + src_sband = src_wiphy->bands[band]; + if (!dst_sband || !src_sband) + continue; + + for (i = 0; i < dst_sband->n_channels; i++) { + dst_chan = &dst_sband->channels[i]; + for (j = 0; j < src_sband->n_channels; j++) { + src_chan = &src_sband->channels[j]; + reg_copy_dfs_chan_state(dst_chan, src_chan); + } + } + } +} + +static void wiphy_all_share_dfs_chan_state(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev; + + ASSERT_RTNL(); + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + if (wiphy == &rdev->wiphy) + continue; + wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy); + } +} + /* This processes *all* regulatory hints */ static void reg_process_hint(struct regulatory_request *reg_request) { @@ -2110,6 +2192,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) if (treatment == REG_REQ_ALREADY_SET && wiphy && wiphy->regulatory_flags & REGULATORY_STRICT_REG) { wiphy_update_regulatory(wiphy, reg_request->initiator); + wiphy_all_share_dfs_chan_state(wiphy); reg_check_channels(); } @@ -3061,6 +3144,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy) lr = get_last_request(); wiphy_update_regulatory(wiphy, lr->initiator); + wiphy_all_share_dfs_chan_state(wiphy); } void wiphy_regulatory_deregister(struct wiphy *wiphy) @@ -3120,6 +3204,67 @@ bool regulatory_indoor_allowed(void) return reg_is_indoor; } +bool regulatory_pre_cac_allowed(struct wiphy *wiphy) +{ + const struct ieee80211_regdomain *regd = NULL; + const struct ieee80211_regdomain *wiphy_regd = NULL; + bool pre_cac_allowed = false; + + rcu_read_lock(); + + regd = rcu_dereference(cfg80211_regdomain); + wiphy_regd = rcu_dereference(wiphy->regd); + if (!wiphy_regd) { + if (regd->dfs_region == NL80211_DFS_ETSI) + pre_cac_allowed = true; + + rcu_read_unlock(); + + return pre_cac_allowed; + } + + if (regd->dfs_region == wiphy_regd->dfs_region && + wiphy_regd->dfs_region == NL80211_DFS_ETSI) + pre_cac_allowed = true; + + rcu_read_unlock(); + + return pre_cac_allowed; +} + +void regulatory_propagate_dfs_state(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_dfs_state dfs_state, + enum nl80211_radar_event event) +{ + struct cfg80211_registered_device *rdev; + + ASSERT_RTNL(); + + if (WARN_ON(!cfg80211_chandef_valid(chandef))) + return; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + if (wiphy == &rdev->wiphy) + continue; + + if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) + continue; + + if (!ieee80211_get_channel(&rdev->wiphy, + chandef->chan->center_freq)) + continue; + + cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state); + + if (event == NL80211_RADAR_DETECTED || + event == NL80211_RADAR_CAC_FINISHED) + cfg80211_sched_dfs_chan_update(rdev); + + nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); + } +} + int __init regulatory_init(void) { int err = 0; diff --git a/net/wireless/reg.h b/net/wireless/reg.h index f6ced316b5a49e204632ef42675309614237cc54..ca7fedf2e7a16e2a39ec7b0d3c988154d0e76d79 
100644 --- a/net/wireless/reg.h +++ b/net/wireless/reg.h @@ -143,4 +143,40 @@ int cfg80211_get_unii(int freq); */ bool regulatory_indoor_allowed(void); +/* + * Grace period to timeout pre-CAC results on the dfs channels. This timeout + * value is used for non-ETSI domains. + * TODO: Maybe make this timeout available through regdb? + */ +#define REG_PRE_CAC_EXPIRY_GRACE_MS 2000 + +/** + * regulatory_pre_cac_allowed - check if pre-CAC is allowed in the current DFS domain + * @wiphy: wiphy for which pre-CAC capability is checked. + * + * Pre-CAC is allowed only in ETSI domain. + */ +bool regulatory_pre_cac_allowed(struct wiphy *wiphy); + +/** + * regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys + * @wiphy: wiphy on which radar is detected; the event will be propagated + * to other available wiphys having the same DFS domain + * @chandef: channel definition of the channel on which radar was detected + * @dfs_state: DFS channel state to be set + * @event: type of radar event which triggered this DFS state change + * + * This function should be called with rtnl lock held. + */ +void regulatory_propagate_dfs_state(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_dfs_state dfs_state, + enum nl80211_radar_event event); + +/** + * reg_dfs_domain_same - check if both wiphys have the same DFS domain configured + * @wiphy1: wiphy whose dfs_region is to be checked against that of wiphy2 + * @wiphy2: wiphy whose dfs_region is to be checked against that of wiphy1 + */ +bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2); #endif /* __NET_WIRELESS_REG_H */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 21be56b3128ee74c4119ee67d5f0b85fecfe0b2e..14d5f0c8c45ff07224f2dc8e6310c2edc440874e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -300,93 +300,168 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, } EXPORT_SYMBOL(cfg80211_scan_done); -void __cfg80211_sched_scan_results(struct work_struct *wk) +void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req) { - struct cfg80211_registered_device *rdev; - struct cfg80211_sched_scan_request *request; + ASSERT_RTNL(); - rdev = container_of(wk, struct cfg80211_registered_device, - sched_scan_results_wk); + list_add_rcu(&req->list, &rdev->sched_scan_req_list); +} - rtnl_lock(); +static void cfg80211_del_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req) +{ + ASSERT_RTNL(); - request = rtnl_dereference(rdev->sched_scan_req); + list_del_rcu(&req->list); + kfree_rcu(req, rcu_head); +} - /* we don't have sched_scan_req anymore if the scan is stopping */ - if (request) { - if (request->flags & NL80211_SCAN_FLAG_FLUSH) { - /* flush entries from previous scans */ - spin_lock_bh(&rdev->bss_lock); - __cfg80211_bss_expire(rdev, request->scan_start); - spin_unlock_bh(&rdev->bss_lock); - request->scan_start = jiffies; - } - nl80211_send_sched_scan(rdev, request->dev, - NL80211_CMD_SCHED_SCAN_RESULTS); +static struct cfg80211_sched_scan_request * +cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid) +{ + struct cfg80211_sched_scan_request *pos; + + ASSERT_RTNL(); + + list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { + if (pos->reqid == reqid) + return pos; } + return NULL; +} + +/* + * Determines if a scheduled scan request can be handled.
When a legacy + * scheduled scan is running, no other scheduled scan is allowed, regardless of + * whether the request is for a legacy or multi-support scan. When a multi-support + * scheduled scan is running, a request for a legacy scan is not allowed. In this + * case a request for a multi-support scan can be handled if resources are + * available, i.e. the struct wiphy::max_sched_scan_reqs limit is not yet reached. + */ +int cfg80211_sched_scan_req_possible(struct cfg80211_registered_device *rdev, + bool want_multi) +{ + struct cfg80211_sched_scan_request *pos; + int i = 0; + + list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { + /* request id zero means legacy in progress */ + if (!i && !pos->reqid) + return -EINPROGRESS; + i++; + } + + if (i) { + /* no legacy allowed when multi request(s) are active */ + if (!want_multi) + return -EINPROGRESS; + + /* resource limit reached */ + if (i == rdev->wiphy.max_sched_scan_reqs) + return -ENOSPC; + } + return 0; +} + +void cfg80211_sched_scan_results_wk(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + struct cfg80211_sched_scan_request *req, *tmp; + rdev = container_of(work, struct cfg80211_registered_device, + sched_scan_res_wk); + + rtnl_lock(); + list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { + if (req->report_results) { + req->report_results = false; + if (req->flags & NL80211_SCAN_FLAG_FLUSH) { + /* flush entries from previous scans */ + spin_lock_bh(&rdev->bss_lock); + __cfg80211_bss_expire(rdev, req->scan_start); + spin_unlock_bh(&rdev->bss_lock); + req->scan_start = jiffies; + } + nl80211_send_sched_scan(req, + NL80211_CMD_SCHED_SCAN_RESULTS); + } + } rtnl_unlock(); } -void cfg80211_sched_scan_results(struct wiphy *wiphy) +void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid) { - trace_cfg80211_sched_scan_results(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct cfg80211_sched_scan_request *request; + + trace_cfg80211_sched_scan_results(wiphy, reqid); /* ignore if we're not scanning */ - if (rcu_access_pointer(wiphy_to_rdev(wiphy)->sched_scan_req)) - queue_work(cfg80211_wq, - &wiphy_to_rdev(wiphy)->sched_scan_results_wk); + rtnl_lock(); + request = cfg80211_find_sched_scan_req(rdev, reqid); + if (request) { + request->report_results = true; + queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); + } + rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_results); -void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy) +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_RTNL(); - trace_cfg80211_sched_scan_stopped(wiphy); + trace_cfg80211_sched_scan_stopped(wiphy, reqid); - __cfg80211_stop_sched_scan(rdev, true); + __cfg80211_stop_sched_scan(rdev, reqid, true); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl); -void cfg80211_sched_scan_stopped(struct wiphy *wiphy) +void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid) { rtnl_lock(); - cfg80211_sched_scan_stopped_rtnl(wiphy); + cfg80211_sched_scan_stopped_rtnl(wiphy, reqid); rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped); -int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, - bool driver_initiated) +int cfg80211_stop_sched_scan_req(struct cfg80211_registered_device *rdev, + struct cfg80211_sched_scan_request *req, + bool driver_initiated) { - struct cfg80211_sched_scan_request *sched_scan_req; - struct net_device *dev; - ASSERT_RTNL(); - if (!rdev->sched_scan_req) -
return -ENOENT; - - sched_scan_req = rtnl_dereference(rdev->sched_scan_req); - dev = sched_scan_req->dev; - if (!driver_initiated) { - int err = rdev_sched_scan_stop(rdev, dev); + int err = rdev_sched_scan_stop(rdev, req->dev, req->reqid); if (err) return err; } - nl80211_send_sched_scan(rdev, dev, NL80211_CMD_SCHED_SCAN_STOPPED); + nl80211_send_sched_scan(req, NL80211_CMD_SCHED_SCAN_STOPPED); - RCU_INIT_POINTER(rdev->sched_scan_req, NULL); - kfree_rcu(sched_scan_req, rcu_head); + cfg80211_del_sched_scan_req(rdev, req); return 0; } +int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, + u64 reqid, bool driver_initiated) +{ + struct cfg80211_sched_scan_request *sched_scan_req; + + ASSERT_RTNL(); + + sched_scan_req = cfg80211_find_sched_scan_req(rdev, reqid); + if (!sched_scan_req) + return -ENOENT; + + return cfg80211_stop_sched_scan_req(rdev, sched_scan_req, + driver_initiated); +} + void cfg80211_bss_age(struct cfg80211_registered_device *rdev, unsigned long age_secs) { diff --git a/net/wireless/sme.c b/net/wireless/sme.c index b347e63d7aaa6814f524d3b080a005a88966d65d..532a0007ce82a7380536e32d5be3749e58bb4fc7 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -5,6 +5,7 @@ * * Copyright 2009 Johannes Berg * Copyright (C) 2009 Intel Corporation. All rights reserved. + * Copyright 2017 Intel Deutschland GmbH */ #include @@ -253,10 +254,13 @@ void cfg80211_conn_work(struct work_struct *work) } treason = NL80211_TIMEOUT_UNSPECIFIED; if (cfg80211_conn_do_work(wdev, &treason)) { - __cfg80211_connect_result( - wdev->netdev, bssid, - NULL, 0, NULL, 0, -1, false, NULL, - treason); + struct cfg80211_connect_resp_params cr; + + memset(&cr, 0, sizeof(cr)); + cr.status = -1; + cr.bssid = bssid; + cr.timeout_reason = treason; + __cfg80211_connect_result(wdev->netdev, &cr, false); } wdev_unlock(wdev); } @@ -359,10 +363,13 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { - __cfg80211_connect_result(wdev->netdev, mgmt->bssid, - NULL, 0, NULL, 0, - status_code, false, NULL, - NL80211_TIMEOUT_UNSPECIFIED); + struct cfg80211_connect_resp_params cr; + + memset(&cr, 0, sizeof(cr)); + cr.status = status_code; + cr.bssid = mgmt->bssid; + cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED; + __cfg80211_connect_result(wdev->netdev, &cr, false); } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); @@ -669,12 +676,9 @@ static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); */ /* This method must consume bss one way or another */ -void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - int status, bool wextev, - struct cfg80211_bss *bss, - enum nl80211_timeout_reason timeout_reason) +void __cfg80211_connect_result(struct net_device *dev, + struct cfg80211_connect_resp_params *cr, + bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *country_ie; @@ -686,48 +690,48 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) { - cfg80211_put_bss(wdev->wiphy, bss); + cfg80211_put_bss(wdev->wiphy, cr->bss); return; } - nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, - bssid, req_ie, 
req_ie_len, - resp_ie, resp_ie_len, - status, timeout_reason, GFP_KERNEL); + nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr, + GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (wextev) { - if (req_ie && status == WLAN_STATUS_SUCCESS) { + if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = req_ie_len; - wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie); + wrqu.data.length = cr->req_ie_len; + wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, + cr->req_ie); } - if (resp_ie && status == WLAN_STATUS_SUCCESS) { + if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = resp_ie_len; - wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie); + wrqu.data.length = cr->resp_ie_len; + wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, + cr->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; - if (bssid && status == WLAN_STATUS_SUCCESS) { - memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); - memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); + if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) { + memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN); + memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; } wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } #endif - if (!bss && (status == WLAN_STATUS_SUCCESS)) { + if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) { WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect); - bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, - wdev->ssid, wdev->ssid_len, - wdev->conn_bss_type, - IEEE80211_PRIVACY_ANY); - if (bss) - cfg80211_hold_bss(bss_from_pub(bss)); + cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid, + wdev->ssid, wdev->ssid_len, + wdev->conn_bss_type, + IEEE80211_PRIVACY_ANY); + if (cr->bss) + cfg80211_hold_bss(bss_from_pub(cr->bss)); } if (wdev->current_bss) { @@ -736,29 +740,29 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, wdev->current_bss = NULL; } - if (status != WLAN_STATUS_SUCCESS) { + if (cr->status != WLAN_STATUS_SUCCESS) { kzfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; wdev->conn_owner_nlportid = 0; - if (bss) { - cfg80211_unhold_bss(bss_from_pub(bss)); - cfg80211_put_bss(wdev->wiphy, bss); + if (cr->bss) { + cfg80211_unhold_bss(bss_from_pub(cr->bss)); + cfg80211_put_bss(wdev->wiphy, cr->bss); } cfg80211_sme_free(wdev); return; } - if (WARN_ON(!bss)) + if (WARN_ON(!cr->bss)) return; - wdev->current_bss = bss_from_pub(bss); + wdev->current_bss = bss_from_pub(cr->bss); if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP)) cfg80211_upload_connect_keys(wdev); rcu_read_lock(); - country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); + country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY); if (!country_ie) { rcu_read_unlock(); return; @@ -775,70 +779,99 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, * - country_ie + 2, the start of the country ie data, and * - and country_ie[1] which is the IE length */ - regulatory_hint_country_ie(wdev->wiphy, bss->channel->band, + regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band, country_ie + 2, country_ie[1]); kfree(country_ie); } /* Consumes bss object one way or another */ -void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, - struct cfg80211_bss *bss, const u8 *req_ie, - size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, int status, gfp_t gfp, - enum nl80211_timeout_reason timeout_reason) 
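The connect-result plumbing above replaces a long argument list with struct cfg80211_connect_resp_params. A sketch of what a driver call site might look like after this change — the driver function and its arguments are hypothetical; only the cfg80211 names come from this patch:

	/* Hypothetical driver call site for the new API (sketch). */
	static void drv_connect_complete(struct net_device *dev, const u8 *bssid,
					 const u8 *resp_ie, size_t resp_ie_len,
					 int status)
	{
		struct cfg80211_connect_resp_params params;

		memset(&params, 0, sizeof(params));
		params.status = status;	/* WLAN_STATUS_*, or -1 for a timeout */
		params.bssid = bssid;
		params.resp_ie = resp_ie;
		params.resp_ie_len = resp_ie_len;
		params.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;

		cfg80211_connect_done(dev, &params, GFP_KERNEL);
	}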
+void cfg80211_connect_done(struct net_device *dev, + struct cfg80211_connect_resp_params *params, + gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; + u8 *next; - if (bss) { + if (params->bss) { /* Make sure the bss entry provided by the driver is valid. */ - struct cfg80211_internal_bss *ibss = bss_from_pub(bss); + struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss); if (WARN_ON(list_empty(&ibss->list))) { - cfg80211_put_bss(wdev->wiphy, bss); + cfg80211_put_bss(wdev->wiphy, params->bss); return; } } - ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); + ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) + + params->req_ie_len + params->resp_ie_len + + params->fils_kek_len + params->pmk_len + + (params->pmkid ? WLAN_PMKID_LEN : 0), gfp); if (!ev) { - cfg80211_put_bss(wdev->wiphy, bss); + cfg80211_put_bss(wdev->wiphy, params->bss); return; } ev->type = EVENT_CONNECT_RESULT; - if (bssid) - memcpy(ev->cr.bssid, bssid, ETH_ALEN); - if (req_ie_len) { - ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); - ev->cr.req_ie_len = req_ie_len; - memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); + next = ((u8 *)ev) + sizeof(*ev); + if (params->bssid) { + ev->cr.bssid = next; + memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN); + next += ETH_ALEN; } - if (resp_ie_len) { - ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; - ev->cr.resp_ie_len = resp_ie_len; - memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); + if (params->req_ie_len) { + ev->cr.req_ie = next; + ev->cr.req_ie_len = params->req_ie_len; + memcpy((void *)ev->cr.req_ie, params->req_ie, + params->req_ie_len); + next += params->req_ie_len; } - if (bss) - cfg80211_hold_bss(bss_from_pub(bss)); - ev->cr.bss = bss; - ev->cr.status = status; - ev->cr.timeout_reason = timeout_reason; + if (params->resp_ie_len) { + ev->cr.resp_ie = next; + ev->cr.resp_ie_len = params->resp_ie_len; + memcpy((void *)ev->cr.resp_ie, params->resp_ie, + params->resp_ie_len); + next += params->resp_ie_len; + } + if (params->fils_kek_len) { + ev->cr.fils_kek = next; + ev->cr.fils_kek_len = params->fils_kek_len; + memcpy((void *)ev->cr.fils_kek, params->fils_kek, + params->fils_kek_len); + next += params->fils_kek_len; + } + if (params->pmk_len) { + ev->cr.pmk = next; + ev->cr.pmk_len = params->pmk_len; + memcpy((void *)ev->cr.pmk, params->pmk, params->pmk_len); + next += params->pmk_len; + } + if (params->pmkid) { + ev->cr.pmkid = next; + memcpy((void *)ev->cr.pmkid, params->pmkid, WLAN_PMKID_LEN); + next += WLAN_PMKID_LEN; + } + ev->cr.update_erp_next_seq_num = params->update_erp_next_seq_num; + if (params->update_erp_next_seq_num) + ev->cr.fils_erp_next_seq_num = params->fils_erp_next_seq_num; + if (params->bss) + cfg80211_hold_bss(bss_from_pub(params->bss)); + ev->cr.bss = params->bss; + ev->cr.status = params->status; + ev->cr.timeout_reason = params->timeout_reason; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } -EXPORT_SYMBOL(cfg80211_connect_bss); +EXPORT_SYMBOL(cfg80211_connect_done); /* Consumes bss object one way or another */ void __cfg80211_roamed(struct wireless_dev *wdev, - struct cfg80211_bss *bss, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len) + struct cfg80211_roam_info *info) { #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; 
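cfg80211_connect_done() above stages the result as a single allocation: the fixed-size event struct followed by every variable-length blob, laid out back to back behind a moving next cursor. Reduced to two fields, the pattern looks like this (illustrative structure and names, not part of the patch; kernel context assumed for kzalloc/memcpy):

	struct two_blob_ev {
		const u8 *a, *b;
		size_t a_len, b_len;
		/* a_len + b_len payload bytes follow the struct */
	};

	static struct two_blob_ev *pack_two_blobs(const u8 *a, size_t a_len,
						  const u8 *b, size_t b_len,
						  gfp_t gfp)
	{
		struct two_blob_ev *ev = kzalloc(sizeof(*ev) + a_len + b_len, gfp);
		u8 *next;

		if (!ev)
			return NULL;

		next = (u8 *)ev + sizeof(*ev);	/* cursor into the tail buffer */
		ev->a = next;
		ev->a_len = a_len;
		memcpy(next, a, a_len);
		next += a_len;

		ev->b = next;
		ev->b_len = b_len;
		memcpy(next, b, b_len);
		return ev;
	}

One allocation means one kfree on the consumer side and no partial-failure cleanup, which is why the event path here uses the same layout.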
@@ -856,97 +889,84 @@ void __cfg80211_roamed(struct wireless_dev *wdev, cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; - cfg80211_hold_bss(bss_from_pub(bss)); - wdev->current_bss = bss_from_pub(bss); + if (WARN_ON(!info->bss)) + return; + + cfg80211_hold_bss(bss_from_pub(info->bss)); + wdev->current_bss = bss_from_pub(info->bss); nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy), - wdev->netdev, bss->bssid, - req_ie, req_ie_len, resp_ie, resp_ie_len, - GFP_KERNEL); + wdev->netdev, info, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT - if (req_ie) { + if (info->req_ie) { memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = req_ie_len; + wrqu.data.length = info->req_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCREQIE, - &wrqu, req_ie); + &wrqu, info->req_ie); } - if (resp_ie) { + if (info->resp_ie) { memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = resp_ie_len; + wrqu.data.length = info->resp_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, - &wrqu, resp_ie); + &wrqu, info->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; - memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); - memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN); + memcpy(wrqu.ap_addr.sa_data, info->bss->bssid, ETH_ALEN); + memcpy(wdev->wext.prev_bssid, info->bss->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); #endif return; out: - cfg80211_put_bss(wdev->wiphy, bss); -} - -void cfg80211_roamed(struct net_device *dev, - struct ieee80211_channel *channel, - const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_bss *bss; - - bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, - wdev->ssid_len, - wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); - if (WARN_ON(!bss)) - return; - - cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie, - resp_ie_len, gfp); + cfg80211_put_bss(wdev->wiphy, info->bss); } -EXPORT_SYMBOL(cfg80211_roamed); -/* Consumes bss object one way or another */ -void cfg80211_roamed_bss(struct net_device *dev, - struct cfg80211_bss *bss, const u8 *req_ie, - size_t req_ie_len, const u8 *resp_ie, - size_t resp_ie_len, gfp_t gfp) +/* Consumes info->bss object one way or another */ +void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, + gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; - if (WARN_ON(!bss)) + if (!info->bss) { + info->bss = cfg80211_get_bss(wdev->wiphy, info->channel, + info->bssid, wdev->ssid, + wdev->ssid_len, + wdev->conn_bss_type, + IEEE80211_PRIVACY_ANY); + } + + if (WARN_ON(!info->bss)) return; - ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); + ev = kzalloc(sizeof(*ev) + info->req_ie_len + info->resp_ie_len, gfp); if (!ev) { - cfg80211_put_bss(wdev->wiphy, bss); + cfg80211_put_bss(wdev->wiphy, info->bss); return; } ev->type = EVENT_ROAMED; ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); - ev->rm.req_ie_len = req_ie_len; - memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); - ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; - ev->rm.resp_ie_len = resp_ie_len; - memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); - ev->rm.bss = bss; + ev->rm.req_ie_len = info->req_ie_len; + memcpy((void *)ev->rm.req_ie, info->req_ie, info->req_ie_len); + ev->rm.resp_ie = 
((u8 *)ev) + sizeof(*ev) + info->req_ie_len; + ev->rm.resp_ie_len = info->resp_ie_len; + memcpy((void *)ev->rm.resp_ie, info->resp_ie, info->resp_ie_len); + ev->rm.bss = info->bss; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } -EXPORT_SYMBOL(cfg80211_roamed_bss); +EXPORT_SYMBOL(cfg80211_roamed); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 776e80cef9b4ee2761681f3884427a8343e6050a..ca8b2059f92c43c5d4d3bac24f39fd141cd31141 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -576,11 +576,6 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap, TP_ARGS(wiphy, netdev) ); -DEFINE_EVENT(wiphy_netdev_evt, rdev_sched_scan_stop, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), - TP_ARGS(wiphy, netdev) -); - DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) @@ -1322,6 +1317,28 @@ TRACE_EVENT(rdev_set_cqm_rssi_config, __entry->rssi_thold, __entry->rssi_hyst) ); +TRACE_EVENT(rdev_set_cqm_rssi_range_config, + TP_PROTO(struct wiphy *wiphy, + struct net_device *netdev, s32 low, s32 high), + TP_ARGS(wiphy, netdev, low, high), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __field(s32, rssi_low) + __field(s32, rssi_high) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + __entry->rssi_low = low; + __entry->rssi_high = high; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT + ", range: %d - %d ", + WIPHY_PR_ARG, NETDEV_PR_ARG, + __entry->rssi_low, __entry->rssi_high) +); + TRACE_EVENT(rdev_set_cqm_txe_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate, u32 pkts, u32 intvl), @@ -1588,20 +1605,31 @@ DEFINE_EVENT(tx_rx_evt, rdev_set_antenna, TP_ARGS(wiphy, rx, tx) ); -TRACE_EVENT(rdev_sched_scan_start, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct cfg80211_sched_scan_request *request), - TP_ARGS(wiphy, netdev, request), +DECLARE_EVENT_CLASS(wiphy_netdev_id_evt, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), + TP_ARGS(wiphy, netdev, id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY + __field(u64, id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; + __entry->id = id; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, - WIPHY_PR_ARG, NETDEV_PR_ARG) + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", id: %llu", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->id) +); + +DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_start, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), + TP_ARGS(wiphy, netdev, id) +); + +DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_stop, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), + TP_ARGS(wiphy, netdev, id) ); TRACE_EVENT(rdev_tdls_mgmt, @@ -2792,14 +2820,28 @@ TRACE_EVENT(cfg80211_scan_done, MAC_PR_ARG(tsf_bssid)) ); -DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_results, - TP_PROTO(struct wiphy *wiphy), - TP_ARGS(wiphy) +DECLARE_EVENT_CLASS(wiphy_id_evt, + TP_PROTO(struct wiphy *wiphy, u64 id), + TP_ARGS(wiphy, id), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(u64, id) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->id = id; + ), + TP_printk(WIPHY_PR_FMT ", id: %llu", WIPHY_PR_ARG, __entry->id) ); -DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped, - TP_PROTO(struct wiphy *wiphy), - TP_ARGS(wiphy) 
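cfg80211_roamed() now takes struct cfg80211_roam_info and, when no BSS entry is passed in, resolves it itself from channel and BSSID (the cfg80211_get_bss() fallback above). A sketch of a driver-side caller under the new API — the wrapper function is hypothetical; the info fields are the ones this patch defines:

	/* Hypothetical driver helper reporting a roam (sketch). */
	static void drv_report_roam(struct net_device *dev,
				    struct ieee80211_channel *chan,
				    const u8 *bssid,
				    const u8 *req_ie, size_t req_ie_len,
				    const u8 *resp_ie, size_t resp_ie_len)
	{
		struct cfg80211_roam_info info = {};

		/* info.bss left NULL: cfg80211 looks the BSS up from chan + bssid */
		info.channel = chan;
		info.bssid = bssid;
		info.req_ie = req_ie;
		info.req_ie_len = req_ie_len;
		info.resp_ie = resp_ie;
		info.resp_ie_len = resp_ie_len;

		cfg80211_roamed(dev, &info, GFP_KERNEL);
	}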
+DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_stopped, + TP_PROTO(struct wiphy *wiphy, u64 id), + TP_ARGS(wiphy, id) +); + +DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_results, + TP_PROTO(struct wiphy *wiphy, u64 id), + TP_ARGS(wiphy, id) ); TRACE_EVENT(cfg80211_get_bss, diff --git a/net/wireless/util.c b/net/wireless/util.c index 68e5f2ecee1aa22f17ab9a55eb566124e585740b..7198373e29206a0f19f006578596fea78e82f66e 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -659,7 +659,7 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame, int offset, int len) { struct skb_shared_info *sh = skb_shinfo(skb); - const skb_frag_t *frag = &sh->frags[-1]; + const skb_frag_t *frag = &sh->frags[0]; struct page *frag_page; void *frag_ptr; int frag_len, frag_size; @@ -672,10 +672,10 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame, while (offset >= frag_size) { offset -= frag_size; - frag++; frag_page = skb_frag_page(frag); frag_ptr = skb_frag_address(frag); frag_size = skb_frag_size(frag); + frag++; } frag_ptr += offset; @@ -687,12 +687,12 @@ __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame, len -= cur_len; while (len > 0) { - frag++; frag_len = skb_frag_size(frag); cur_len = min(len, frag_len); __frame_add_frag(frame, skb_frag_page(frag), skb_frag_address(frag), cur_len, frag_len); len -= cur_len; + frag++; } } @@ -914,11 +914,11 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) netdev_err(dev, "failed to set key %d\n", i); continue; } - if (wdev->connect_keys->def == i) - if (rdev_set_default_key(rdev, dev, i, true, true)) { - netdev_err(dev, "failed to set defkey %d\n", i); - continue; - } + if (wdev->connect_keys->def == i && + rdev_set_default_key(rdev, dev, i, true, true)) { + netdev_err(dev, "failed to set defkey %d\n", i); + continue; + } } kzfree(wdev->connect_keys); @@ -929,7 +929,6 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; - const u8 *bssid = NULL; spin_lock_irqsave(&wdev->event_lock, flags); while (!list_empty(&wdev->event_list)) { @@ -941,20 +940,13 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) wdev_lock(wdev); switch (ev->type) { case EVENT_CONNECT_RESULT: - if (!is_zero_ether_addr(ev->cr.bssid)) - bssid = ev->cr.bssid; __cfg80211_connect_result( - wdev->netdev, bssid, - ev->cr.req_ie, ev->cr.req_ie_len, - ev->cr.resp_ie, ev->cr.resp_ie_len, - ev->cr.status, - ev->cr.status == WLAN_STATUS_SUCCESS, - ev->cr.bss, ev->cr.timeout_reason); + wdev->netdev, + &ev->cr, + ev->cr.status == WLAN_STATUS_SUCCESS); break; case EVENT_ROAMED: - __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie, - ev->rm.req_ie_len, ev->rm.resp_ie, - ev->rm.resp_ie_len); + __cfg80211_roamed(wdev, &ev->rm); break; case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, @@ -991,7 +983,7 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, - u32 *flags, struct vif_params *params) + struct vif_params *params) { int err; enum nl80211_iftype otype = dev->ieee80211_ptr->iftype; @@ -1049,7 +1041,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, cfg80211_process_rdev_events(rdev); } - err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params); + err = rdev_change_virtual_intf(rdev, dev, ntype, params); WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); @@ -1097,6 +1089,35 @@ 
int cfg80211_change_iface(struct cfg80211_registered_device *rdev, return err; } +static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate) +{ + int modulation, streams, bitrate; + + /* the formula below does only work for MCS values smaller than 32 */ + if (WARN_ON_ONCE(rate->mcs >= 32)) + return 0; + + modulation = rate->mcs & 7; + streams = (rate->mcs >> 3) + 1; + + bitrate = (rate->bw == RATE_INFO_BW_40) ? 13500000 : 6500000; + + if (modulation < 4) + bitrate *= (modulation + 1); + else if (modulation == 4) + bitrate *= (modulation + 2); + else + bitrate *= (modulation + 3); + + bitrate *= streams; + + if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) + bitrate = (bitrate / 9) * 10; + + /* do NOT round down here */ + return (bitrate + 50000) / 100000; +} + static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate) { static const u32 __mcs2bitrate[] = { @@ -1230,39 +1251,14 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) u32 cfg80211_calculate_bitrate(struct rate_info *rate) { - int modulation, streams, bitrate; - - if (!(rate->flags & RATE_INFO_FLAGS_MCS) && - !(rate->flags & RATE_INFO_FLAGS_VHT_MCS)) - return rate->legacy; + if (rate->flags & RATE_INFO_FLAGS_MCS) + return cfg80211_calculate_bitrate_ht(rate); if (rate->flags & RATE_INFO_FLAGS_60G) return cfg80211_calculate_bitrate_60g(rate); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); - /* the formula below does only work for MCS values smaller than 32 */ - if (WARN_ON_ONCE(rate->mcs >= 32)) - return 0; - - modulation = rate->mcs & 7; - streams = (rate->mcs >> 3) + 1; - - bitrate = (rate->bw == RATE_INFO_BW_40) ? 13500000 : 6500000; - - if (modulation < 4) - bitrate *= (modulation + 1); - else if (modulation == 4) - bitrate *= (modulation + 2); - else - bitrate *= (modulation + 3); - - bitrate *= streams; - - if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) - bitrate = (bitrate / 9) * 10; - - /* do NOT round down here */ - return (bitrate + 50000) / 100000; + return rate->legacy; } EXPORT_SYMBOL(cfg80211_calculate_bitrate); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index a220156cf2175a459727fcf67e1291036dca21ae..5d4a02c7979b0d7478ff2b39f82e24bed97a6e78 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -62,7 +62,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, memset(&vifparams, 0, sizeof(vifparams)); - return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); + return cfg80211_change_iface(rdev, dev, type, &vifparams); } EXPORT_WEXT_HANDLER(cfg80211_wext_siwmode); diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile index c0e961983f17a077bde9bab18d946ecef0cba0ae..abf81b329dc1f3276e1cf5631ebae4ec31a229de 100644 --- a/net/xfrm/Makefile +++ b/net/xfrm/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ xfrm_input.o xfrm_output.o \ xfrm_sysctl.o xfrm_replay.o +obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o obj-$(CONFIG_XFRM_USER) += xfrm_user.o diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c new file mode 100644 index 0000000000000000000000000000000000000000..8ec8a3fcf8d4740670b16c2c1af099ef0beaa5c5 --- /dev/null +++ b/net/xfrm/xfrm_device.c @@ -0,0 +1,208 @@ +/* + * xfrm_device.c - IPsec device offloading code. 
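A quick worked example for the cfg80211_calculate_bitrate_ht() helper factored out earlier in this hunk set; the arithmetic follows the code directly, and the return value is in units of 100 kbit/s:

	/*
	 * HT MCS 15, 40 MHz, short GI:
	 *   modulation = 15 & 7 = 7, streams = (15 >> 3) + 1 = 2
	 *   base        = 13500000 (40 MHz)
	 *   modulation 7: * (7 + 3)   -> 135000000
	 *   2 streams:    * 2         -> 270000000
	 *   short GI:     / 9 * 10    -> 300000000
	 *   return (300000000 + 50000) / 100000 = 3000, i.e. 300.0 Mbit/s
	 */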
+ * + * Copyright (c) 2015 secunet Security Networks AG + * + * Author: + * Steffen Klassert <steffen.klassert@secunet.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <net/dst.h> +#include <net/xfrm.h> +#include <linux/notifier.h> + +int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) +{ + int err; + struct xfrm_state *x; + struct xfrm_offload *xo = xfrm_offload(skb); + + if (skb_is_gso(skb)) + return 0; + + if (xo) { + x = skb->sp->xvec[skb->sp->len - 1]; + if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) + return 0; + + x->outer_mode->xmit(x, skb); + + err = x->type_offload->xmit(x, skb, features); + if (err) { + XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); + return err; + } + + skb_push(skb, skb->data - skb_mac_header(skb)); + } + + return 0; +} +EXPORT_SYMBOL_GPL(validate_xmit_xfrm); + +int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, + struct xfrm_user_offload *xuo) +{ + int err; + struct dst_entry *dst; + struct net_device *dev; + struct xfrm_state_offload *xso = &x->xso; + xfrm_address_t *saddr; + xfrm_address_t *daddr; + + if (!x->type_offload) + return 0; + + /* We don't yet support UDP encapsulation, TFC padding and ESN. */ + if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN)) + return 0; + + dev = dev_get_by_index(net, xuo->ifindex); + if (!dev) { + if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) { + saddr = &x->props.saddr; + daddr = &x->id.daddr; + } else { + saddr = &x->id.daddr; + daddr = &x->props.saddr; + } + + dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, x->props.family); + if (IS_ERR(dst)) + return 0; + + dev = dst->dev; + + dev_hold(dev); + dst_release(dst); + } + + if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { + dev_put(dev); + return 0; + } + + xso->dev = dev; + xso->num_exthdrs = 1; + xso->flags = xuo->flags; + + err = dev->xfrmdev_ops->xdo_dev_state_add(x); + if (err) { + dev_put(dev); + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xfrm_dev_state_add); + +bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + int mtu; + struct dst_entry *dst = skb_dst(skb); + struct xfrm_dst *xdst = (struct xfrm_dst *)dst; + struct net_device *dev = x->xso.dev; + + if (!x->type_offload || x->encap) + return false; + + if ((x->xso.offload_handle && (dev == dst->path->dev)) && + !dst->child->xfrm && x->type->get_mtu) { + mtu = x->type->get_mtu(x, xdst->child_mtu_cached); + + if (skb->len <= mtu) + goto ok; + + if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) + goto ok; + } + + return false; + +ok: + if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok) + return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x); + + return true; +} +EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok); + +static int xfrm_dev_register(struct net_device *dev) +{ + if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops) + return NOTIFY_BAD; + if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) && + !(dev->features & NETIF_F_HW_ESP)) + return NOTIFY_BAD; + + return NOTIFY_DONE; +} + +static int xfrm_dev_unregister(struct net_device *dev) +{ + return NOTIFY_DONE; +} + +static int xfrm_dev_feat_change(struct net_device *dev) +{ + if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops) + return NOTIFY_BAD; + else if (!(dev->features & NETIF_F_HW_ESP)) + dev->xfrmdev_ops =
NULL; + + if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) && + !(dev->features & NETIF_F_HW_ESP)) + return NOTIFY_BAD; + + return NOTIFY_DONE; +} + +static int xfrm_dev_down(struct net_device *dev) +{ + if (dev->hw_features & NETIF_F_HW_ESP) + xfrm_dev_state_flush(dev_net(dev), dev, true); + + xfrm_garbage_collect(dev_net(dev)); + + return NOTIFY_DONE; +} + +static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_REGISTER: + return xfrm_dev_register(dev); + + case NETDEV_UNREGISTER: + return xfrm_dev_unregister(dev); + + case NETDEV_FEAT_CHANGE: + return xfrm_dev_feat_change(dev); + + case NETDEV_DOWN: + return xfrm_dev_down(dev); + } + return NOTIFY_DONE; +} + +static struct notifier_block xfrm_dev_notifier = { + .notifier_call = xfrm_dev_event, +}; + +void __net_init xfrm_dev_init(void) +{ + register_netdevice_notifier(&xfrm_dev_notifier); +} diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index 666c5ffe929dca388b218d99c2d71946a231872f..eaea9c4fb3b0e6ec8903cfc5456519e8d4e7dcd1 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h @@ -54,8 +54,8 @@ static inline unsigned int __xfrm4_dpref_spref_hash(const xfrm_address_t *daddr, static inline unsigned int __xfrm6_pref_hash(const xfrm_address_t *addr, __u8 prefixlen) { - int pdw; - int pbi; + unsigned int pdw; + unsigned int pbi; u32 initval = 0; pdw = prefixlen >> 5; /* num of whole u32 in prefix */ diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index e23570b647ae721516997130d7abebeaf3f8bb03..9de4b1dbc0aea3f768a8d98eeab456c0c8e0fa61 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -107,6 +107,8 @@ struct sec_path *secpath_dup(struct sec_path *src) sp->len = 0; sp->olen = 0; + memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH])); + if (src) { int i; @@ -207,8 +209,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) unsigned int family; int decaps = 0; int async = 0; - struct xfrm_offload *xo; bool xfrm_gro = false; + bool crypto_done = false; + struct xfrm_offload *xo = xfrm_offload(skb); if (encap_type < 0) { x = xfrm_input_state(skb); @@ -220,9 +223,40 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) seq = XFRM_SKB_CB(skb)->seq.input.low; goto resume; } + /* encap_type < -1 indicates a GRO call. 
*/ encap_type = 0; seq = XFRM_SPI_SKB_CB(skb)->seq; + + if (xo && (xo->flags & CRYPTO_DONE)) { + crypto_done = true; + x = xfrm_input_state(skb); + family = XFRM_SPI_SKB_CB(skb)->family; + + if (!(xo->status & CRYPTO_SUCCESS)) { + if (xo->status & + (CRYPTO_TRANSPORT_AH_AUTH_FAILED | + CRYPTO_TRANSPORT_ESP_AUTH_FAILED | + CRYPTO_TUNNEL_AH_AUTH_FAILED | + CRYPTO_TUNNEL_ESP_AUTH_FAILED)) { + + xfrm_audit_state_icvfail(x, skb, + x->type->proto); + x->stats.integrity_failed++; + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR); + goto drop; + } + + XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); + goto drop; + } + + if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); + goto drop; + } + } + goto lock; } @@ -311,7 +345,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) skb_dst_force(skb); dev_hold(skb->dev); - nexthdr = x->type->input(x, skb); + if (crypto_done) + nexthdr = x->type_offload->input_tail(x, skb); + else + nexthdr = x->type->input(x, skb); if (nexthdr == -EINPROGRESS) return 0; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 8ba29fe58352abcc887235d36d42a06797d94b28..8c0b6722aaa87c6d346d0ba57c2e541a6703c79d 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -99,12 +99,13 @@ static int xfrm_output_one(struct sk_buff *skb, int err) skb_dst_force(skb); - /* Inner headers are invalid now. */ - skb->encapsulation = 0; - - err = x->type->output(x, skb); - if (err == -EINPROGRESS) - goto out; + if (xfrm_offload(skb)) { + x->type_offload->encap(x, skb); + } else { + err = x->type->output(x, skb); + if (err == -EINPROGRESS) + goto out; + } resume: if (err) { @@ -200,8 +201,40 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb int xfrm_output(struct sock *sk, struct sk_buff *skb) { struct net *net = dev_net(skb_dst(skb)->dev); + struct xfrm_state *x = skb_dst(skb)->xfrm; int err; + secpath_reset(skb); + skb->encapsulation = 0; + + if (xfrm_dev_offload_ok(skb, x)) { + struct sec_path *sp; + + sp = secpath_dup(skb->sp); + if (!sp) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); + kfree_skb(skb); + return -ENOMEM; + } + if (skb->sp) + secpath_put(skb->sp); + skb->sp = sp; + skb->encapsulation = 1; + + sp->olen++; + sp->xvec[skb->sp->len++] = x; + xfrm_state_hold(x); + + if (skb_is_gso(skb)) { + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; + + return xfrm_output2(net, sk, skb); + } + + if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) + goto out; + } + if (skb_is_gso(skb)) return xfrm_output_gso(net, sk, skb); @@ -214,6 +247,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) } } +out: return xfrm_output2(net, sk, skb); } EXPORT_SYMBOL_GPL(xfrm_output); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index dfc77b9c5e5a8dd2b31440be47fb4480513680e1..b00a1d5a7f52e54f2ac454da3eeb14447f84d8b2 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -116,11 +116,10 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short fa return afinfo; } -static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, - int tos, int oif, - const xfrm_address_t *saddr, - const xfrm_address_t *daddr, - int family) +struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, + const xfrm_address_t *saddr, + const xfrm_address_t *daddr, + int family) { const struct xfrm_policy_afinfo *afinfo; struct dst_entry *dst; @@ -135,6 +134,7 @@ static inline struct dst_entry 
*__xfrm_dst_lookup(struct net *net, return dst; } +EXPORT_SYMBOL(__xfrm_dst_lookup); static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, int oif, @@ -2933,21 +2933,6 @@ void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); -static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - switch (event) { - case NETDEV_DOWN: - xfrm_garbage_collect(dev_net(dev)); - } - return NOTIFY_DONE; -} - -static struct notifier_block xfrm_dev_notifier = { - .notifier_call = xfrm_dev_event, -}; - #ifdef CONFIG_XFRM_STATISTICS static int __net_init xfrm_statistics_init(struct net *net) { @@ -3024,7 +3009,7 @@ static int __net_init xfrm_policy_init(struct net *net) INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); if (net_eq(net, &init_net)) - register_netdevice_notifier(&xfrm_dev_notifier); + xfrm_dev_init(); return 0; out_bydst: diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index cdc2e2e71bffa2d871d051870f0daf27067815fb..8b23c5bcf8e88bcf2141dd75fac6eeec54a1b143 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -45,7 +45,7 @@ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq) return seq_hi; } - +EXPORT_SYMBOL(xfrm_replay_seqhi); static void xfrm_replay_notify(struct xfrm_state *x, int event) { struct km_event c; @@ -558,6 +559,158 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) x->repl->notify(x, XFRM_REPLAY_UPDATE); } +#ifdef CONFIG_XFRM_OFFLOAD +static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *skb) +{ + int err = 0; + struct net *net = xs_net(x); + struct xfrm_offload *xo = xfrm_offload(skb); + __u32 oseq = x->replay.oseq; + + if (!xo) + return xfrm_replay_overflow(x, skb); + + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { + if (!skb_is_gso(skb)) { + XFRM_SKB_CB(skb)->seq.output.low = ++oseq; + xo->seq.low = oseq; + } else { + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; + xo->seq.low = oseq + 1; + oseq += skb_shinfo(skb)->gso_segs; + } + + XFRM_SKB_CB(skb)->seq.output.hi = 0; + xo->seq.hi = 0; + if (unlikely(oseq < x->replay.oseq)) { + xfrm_audit_state_replay_overflow(x, skb); + err = -EOVERFLOW; + + return err; + } + + x->replay.oseq = oseq; + + if (xfrm_aevent_is_on(net)) + x->repl->notify(x, XFRM_REPLAY_UPDATE); + } + + return err; +} + +static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff *skb) +{ + int err = 0; + struct xfrm_offload *xo = xfrm_offload(skb); + struct xfrm_replay_state_esn *replay_esn = x->replay_esn; + struct net *net = xs_net(x); + __u32 oseq = replay_esn->oseq; + + if (!xo) + return xfrm_replay_overflow_bmp(x, skb); + + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { + if (!skb_is_gso(skb)) { + XFRM_SKB_CB(skb)->seq.output.low = ++oseq; + xo->seq.low = oseq; + } else { + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; + xo->seq.low = oseq + 1; + oseq += skb_shinfo(skb)->gso_segs; + } + + XFRM_SKB_CB(skb)->seq.output.hi = 0; + xo->seq.hi = 0; + if (unlikely(oseq < replay_esn->oseq)) { + xfrm_audit_state_replay_overflow(x, skb); + err = -EOVERFLOW; + + return err; + } else { + replay_esn->oseq = oseq; + } + + if (xfrm_aevent_is_on(net)) + x->repl->notify(x, XFRM_REPLAY_UPDATE); + } + + return err; +} + +static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb) +{ + int err =
0; + struct xfrm_offload *xo = xfrm_offload(skb); + struct xfrm_replay_state_esn *replay_esn = x->replay_esn; + struct net *net = xs_net(x); + __u32 oseq = replay_esn->oseq; + __u32 oseq_hi = replay_esn->oseq_hi; + + if (!xo) + return xfrm_replay_overflow_esn(x, skb); + + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { + if (!skb_is_gso(skb)) { + XFRM_SKB_CB(skb)->seq.output.low = ++oseq; + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; + xo->seq.low = oseq; + xo->seq.hi = oseq_hi; + } else { + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; + xo->seq.low = oseq = oseq + 1; + xo->seq.hi = oseq_hi; + oseq += skb_shinfo(skb)->gso_segs; + } + + if (unlikely(oseq < replay_esn->oseq)) { + XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi; + xo->seq.hi = oseq_hi; + + if (replay_esn->oseq_hi == 0) { + replay_esn->oseq--; + replay_esn->oseq_hi--; + xfrm_audit_state_replay_overflow(x, skb); + err = -EOVERFLOW; + + return err; + } + } + + replay_esn->oseq = oseq; + replay_esn->oseq_hi = oseq_hi; + + if (xfrm_aevent_is_on(net)) + x->repl->notify(x, XFRM_REPLAY_UPDATE); + } + + return err; +} + +static const struct xfrm_replay xfrm_replay_legacy = { + .advance = xfrm_replay_advance, + .check = xfrm_replay_check, + .recheck = xfrm_replay_check, + .notify = xfrm_replay_notify, + .overflow = xfrm_replay_overflow_offload, +}; + +static const struct xfrm_replay xfrm_replay_bmp = { + .advance = xfrm_replay_advance_bmp, + .check = xfrm_replay_check_bmp, + .recheck = xfrm_replay_check_bmp, + .notify = xfrm_replay_notify_bmp, + .overflow = xfrm_replay_overflow_offload_bmp, +}; + +static const struct xfrm_replay xfrm_replay_esn = { + .advance = xfrm_replay_advance_esn, + .check = xfrm_replay_check_esn, + .recheck = xfrm_replay_recheck_esn, + .notify = xfrm_replay_notify_esn, + .overflow = xfrm_replay_overflow_offload_esn, +}; +#else static const struct xfrm_replay xfrm_replay_legacy = { .advance = xfrm_replay_advance, .check = xfrm_replay_check, @@ -581,6 +734,7 @@ static const struct xfrm_replay xfrm_replay_esn = { .notify = xfrm_replay_notify_esn, .overflow = xfrm_replay_overflow_esn, }; +#endif int xfrm_init_replay(struct xfrm_state *x) { @@ -595,10 +749,12 @@ int xfrm_init_replay(struct xfrm_state *x) if (replay_esn->replay_window == 0) return -EINVAL; x->repl = &xfrm_replay_esn; - } else + } else { x->repl = &xfrm_replay_bmp; - } else + } + } else { x->repl = &xfrm_replay_legacy; + } return 0; } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 5a597dbbe564f09ad6b77e1f529127a0410c7fd4..fc3c5aa387543a63bf18955e60309207bf06e961 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -251,6 +251,75 @@ static void xfrm_put_type(const struct xfrm_type *type) module_put(type->owner); } +static DEFINE_SPINLOCK(xfrm_type_offload_lock); +int xfrm_register_type_offload(const struct xfrm_type_offload *type, + unsigned short family) +{ + struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); + const struct xfrm_type_offload **typemap; + int err = 0; + + if (unlikely(afinfo == NULL)) + return -EAFNOSUPPORT; + typemap = afinfo->type_offload_map; + spin_lock_bh(&xfrm_type_offload_lock); + + if (likely(typemap[type->proto] == NULL)) + typemap[type->proto] = type; + else + err = -EEXIST; + spin_unlock_bh(&xfrm_type_offload_lock); + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL(xfrm_register_type_offload); + +int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, + unsigned short family) +{ + struct xfrm_state_afinfo *afinfo = 
xfrm_state_get_afinfo(family); + const struct xfrm_type_offload **typemap; + int err = 0; + + if (unlikely(afinfo == NULL)) + return -EAFNOSUPPORT; + typemap = afinfo->type_offload_map; + spin_lock_bh(&xfrm_type_offload_lock); + + if (unlikely(typemap[type->proto] != type)) + err = -ENOENT; + else + typemap[type->proto] = NULL; + spin_unlock_bh(&xfrm_type_offload_lock); + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL(xfrm_unregister_type_offload); + +static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned short family) +{ + struct xfrm_state_afinfo *afinfo; + const struct xfrm_type_offload **typemap; + const struct xfrm_type_offload *type; + + afinfo = xfrm_state_get_afinfo(family); + if (unlikely(afinfo == NULL)) + return NULL; + typemap = afinfo->type_offload_map; + + type = typemap[proto]; + if ((type && !try_module_get(type->owner))) + type = NULL; + + rcu_read_unlock(); + return type; +} + +static void xfrm_put_type_offload(const struct xfrm_type_offload *type) +{ + module_put(type->owner); +} + static DEFINE_SPINLOCK(xfrm_mode_lock); int xfrm_register_mode(struct xfrm_mode *mode, int family) { @@ -365,10 +434,13 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) xfrm_put_mode(x->inner_mode_iaf); if (x->outer_mode) xfrm_put_mode(x->outer_mode); + if (x->type_offload) + xfrm_put_type_offload(x->type_offload); if (x->type) { x->type->destructor(x); xfrm_put_type(x->type); } + xfrm_dev_state_free(x); security_xfrm_state_free(x); kfree(x); } @@ -538,6 +610,8 @@ int __xfrm_state_delete(struct xfrm_state *x) net->xfrm.state_num--; spin_unlock(&net->xfrm.xfrm_state_lock); + xfrm_dev_state_delete(x); + /* All xfrm_state objects are created by xfrm_state_alloc. * The xfrm_state_alloc call gives a reference, and that * is what we are dropping here. @@ -582,12 +656,41 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid) return err; } + +static inline int +xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid) +{ + int i, err = 0; + + for (i = 0; i <= net->xfrm.state_hmask; i++) { + struct xfrm_state *x; + struct xfrm_state_offload *xso; + + hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { + xso = &x->xso; + + if (xso->dev == dev && + (err = security_xfrm_state_delete(x)) != 0) { + xfrm_audit_state_delete(x, 0, task_valid); + return err; + } + } + } + + return err; +} #else static inline int xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid) { return 0; } + +static inline int +xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid) +{ + return 0; +} #endif int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) @@ -630,6 +733,48 @@ int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) } EXPORT_SYMBOL(xfrm_state_flush); +int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid) +{ + int i, err = 0, cnt = 0; + + spin_lock_bh(&net->xfrm.xfrm_state_lock); + err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid); + if (err) + goto out; + + err = -ESRCH; + for (i = 0; i <= net->xfrm.state_hmask; i++) { + struct xfrm_state *x; + struct xfrm_state_offload *xso; +restart: + hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { + xso = &x->xso; + + if (!xfrm_state_kern(x) && xso->dev == dev) { + xfrm_state_hold(x); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); + + err = xfrm_state_delete(x); + xfrm_audit_state_delete(x, err ? 
0 : 1, + task_valid); + xfrm_state_put(x); + if (!err) + cnt++; + + spin_lock_bh(&net->xfrm.xfrm_state_lock); + goto restart; + } + } + } + if (cnt) + err = 0; + +out: + spin_unlock_bh(&net->xfrm.xfrm_state_lock); + return err; +} +EXPORT_SYMBOL(xfrm_dev_state_flush); + void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) { spin_lock_bh(&net->xfrm.xfrm_state_lock); @@ -2077,6 +2222,8 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) if (x->type == NULL) goto error; + x->type_offload = xfrm_get_type_offload(x->id.proto, family); + err = x->type->init_state(x); if (err) goto error; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7916af0aadd2fb453fb61b46485506dc0a3cb4ef..38614df33ec8d5751bd88a48d0068926b9032556 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -595,6 +595,13 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, goto error; } + if (attrs[XFRMA_OFFLOAD_DEV]) { + err = xfrm_dev_state_add(net, x, + nla_data(attrs[XFRMA_OFFLOAD_DEV])); + if (err) + goto error; + } + if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, attrs[XFRMA_REPLAY_ESN_VAL]))) goto error; @@ -779,6 +786,23 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) return 0; } +static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb) +{ + struct xfrm_user_offload *xuo; + struct nlattr *attr; + + attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo)); + if (attr == NULL) + return -EMSGSIZE; + + xuo = nla_data(attr); + + xuo->ifindex = xso->dev->ifindex; + xuo->flags = xso->flags; + + return 0; +} + static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) { struct xfrm_algo *algo; @@ -869,6 +893,10 @@ static int copy_to_user_state_extra(struct xfrm_state *x, &x->replay); if (ret) goto out; + if(x->xso.dev) + ret = copy_user_offload(&x->xso, skb); + if (ret) + goto out; if (x->security) ret = copy_sec_ctx(x->security, skb); out: @@ -932,8 +960,8 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) u8 proto = 0; int err; - err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, - xfrma_policy); + err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy, + NULL); if (err < 0) return err; @@ -2406,6 +2434,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 }, [XFRMA_PROTO] = { .type = NLA_U8 }, [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, + [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, }; static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { @@ -2448,7 +2477,8 @@ static const struct xfrm_link { [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, }; -static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *attrs[XFRMA_MAX+1]; @@ -2488,7 +2518,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, link->nla_max ? : XFRMA_MAX, - link->nla_pol ? : xfrma_policy); + link->nla_pol ? 
: xfrma_policy, extack); if (err < 0) return err; @@ -2622,6 +2652,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x) l += nla_total_size(sizeof(*x->coaddr)); if (x->props.extra_flags) l += nla_total_size(sizeof(x->props.extra_flags)); + if (x->xso.dev) + l += nla_total_size(sizeof(x->xso)); /* Must count x->lastused as it may become non-zero behind our back. */ l += nla_total_size_64bit(sizeof(u64)); @@ -3108,7 +3140,6 @@ static bool xfrm_is_alive(const struct km_event *c) } static struct xfrm_mgr netlink_mgr = { - .id = "netlink", .notify = xfrm_send_state_notify, .acquire = xfrm_send_acquire, .compile_policy = xfrm_compile_policy, diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 09e9d535bd7487d81574cf8572a41b6e697566fd..6c7468eb3684494b46a248fcef39a1e451fd6856 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -34,6 +34,8 @@ hostprogs-y += sampleip hostprogs-y += tc_l2_redirect hostprogs-y += lwt_len_hist hostprogs-y += xdp_tx_iptunnel +hostprogs-y += test_map_in_map +hostprogs-y += per_socket_stats_example # Libbpf dependencies LIBBPF := ../../tools/lib/bpf/bpf.o @@ -72,6 +74,8 @@ sampleip-objs := bpf_load.o $(LIBBPF) sampleip_user.o tc_l2_redirect-objs := bpf_load.o $(LIBBPF) tc_l2_redirect_user.o lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o +test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o +per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -105,6 +109,8 @@ always += trace_event_kern.o always += sampleip_kern.o always += lwt_len_hist_kern.o always += xdp_tx_iptunnel_kern.o +always += test_map_in_map_kern.o +always += cookie_uid_helper_example.o HOSTCFLAGS += -I$(objtree)/usr/include HOSTCFLAGS += -I$(srctree)/tools/lib/ @@ -139,6 +145,7 @@ HOSTLOADLIBES_sampleip += -lelf HOSTLOADLIBES_tc_l2_redirect += -l elf HOSTLOADLIBES_lwt_len_hist += -l elf HOSTLOADLIBES_xdp_tx_iptunnel += -lelf +HOSTLOADLIBES_test_map_in_map += -lelf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang @@ -182,4 +189,5 @@ $(obj)/%.o: $(src)/%.c -Wno-compare-distinct-pointer-types \ -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ + -Wno-unknown-warning-option \ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index faaffe2e139a989de6f90835121a725091d4a289..9a9c95f2c9fb3a7de27eb70659429418ada04301 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h @@ -80,6 +80,7 @@ struct bpf_map_def { unsigned int value_size; unsigned int max_entries; unsigned int map_flags; + unsigned int inner_map_idx; }; static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = @@ -145,11 +146,30 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) = #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->nip) +#elif defined(__sparc__) + +#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) +#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) +#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2]) +#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3]) +#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4]) +#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) +#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) +#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) +#if 
defined(__arch64__) +#define PT_REGS_IP(x) ((x)->tpc) +#else +#define PT_REGS_IP(x) ((x)->pc) +#endif + #endif #ifdef __powerpc__ #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP +#elif defined(__sparc__) +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP #else #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index b86ee54da2d14d6ba0de18481d2c55ed1a70a67b..4221dc359453f059b1a765db3279063cf83ff3ab 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include "libbpf.h" #include "bpf_load.h" #include "perf-sys.h" @@ -37,14 +39,6 @@ int event_fd[MAX_PROGS]; int prog_cnt; int prog_array_fd = -1; -struct bpf_map_def { - unsigned int type; - unsigned int key_size; - unsigned int value_size; - unsigned int max_entries; - unsigned int map_flags; -}; - static int populate_prog_array(const char *event, int prog_fd) { int ind = atoi(event), err; @@ -192,17 +186,35 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) return 0; } -static int load_maps(struct bpf_map_def *maps, int len) +static int load_maps(struct bpf_map_def *maps, int nr_maps, + const char **map_names, fixup_map_cb fixup_map) { int i; - - for (i = 0; i < len / sizeof(struct bpf_map_def); i++) { - - map_fd[i] = bpf_create_map(maps[i].type, - maps[i].key_size, - maps[i].value_size, - maps[i].max_entries, - maps[i].map_flags); + /* + * Warning: Using "maps" pointing to ELF data_maps->d_buf as + * an array of struct bpf_map_def is a wrong assumption about + * the ELF maps section format. 
+ */ + for (i = 0; i < nr_maps; i++) { + if (fixup_map) + fixup_map(&maps[i], map_names[i], i); + + if (maps[i].type == BPF_MAP_TYPE_ARRAY_OF_MAPS || + maps[i].type == BPF_MAP_TYPE_HASH_OF_MAPS) { + int inner_map_fd = map_fd[maps[i].inner_map_idx]; + + map_fd[i] = bpf_create_map_in_map(maps[i].type, + maps[i].key_size, + inner_map_fd, + maps[i].max_entries, + maps[i].map_flags); + } else { + map_fd[i] = bpf_create_map(maps[i].type, + maps[i].key_size, + maps[i].value_size, + maps[i].max_entries, + maps[i].map_flags); + } if (map_fd[i] < 0) { printf("failed to create a map: %d %s\n", errno, strerror(errno)); @@ -262,20 +274,74 @@ static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols, return 1; } insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; + /* + * Warning: Using sizeof(struct bpf_map_def) here is a + * wrong assumption about ELF maps section format + */ insn[insn_idx].imm = map_fd[sym.st_value / sizeof(struct bpf_map_def)]; } return 0; } -int load_bpf_file(char *path) +static int cmp_symbols(const void *l, const void *r) { - int fd, i; + const GElf_Sym *lsym = (const GElf_Sym *)l; + const GElf_Sym *rsym = (const GElf_Sym *)r; + + if (lsym->st_value < rsym->st_value) + return -1; + else if (lsym->st_value > rsym->st_value) + return 1; + else + return 0; +} + +static int get_sorted_map_names(Elf *elf, Elf_Data *symbols, int maps_shndx, + int strtabidx, char **map_names) +{ + GElf_Sym map_symbols[MAX_MAPS]; + int i, nr_maps = 0; + + for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) { + assert(nr_maps < MAX_MAPS); + if (!gelf_getsym(symbols, i, &map_symbols[nr_maps])) + continue; + if (map_symbols[nr_maps].st_shndx != maps_shndx) + continue; + nr_maps++; + } + + qsort(map_symbols, nr_maps, sizeof(GElf_Sym), cmp_symbols); + + for (i = 0; i < nr_maps; i++) { + char *map_name; + + map_name = elf_strptr(elf, strtabidx, map_symbols[i].st_name); + if (!map_name) { + printf("cannot get map symbol\n"); + return -1; + } + + map_names[i] = strdup(map_name); + if (!map_names[i]) { + printf("strdup(%s): %s(%d)\n", map_name, + strerror(errno), errno); + return -1; + } + } + + return nr_maps; +} + +static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map) +{ + int fd, i, ret, maps_shndx = -1, strtabidx = -1; Elf *elf; GElf_Ehdr ehdr; GElf_Shdr shdr, shdr_prog; - Elf_Data *data, *data_prog, *symbols = NULL; - char *shname, *shname_prog; + Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL; + char *shname, *shname_prog, *map_names[MAX_MAPS] = { NULL }; /* reset global variables */ kern_version = 0; @@ -323,14 +389,47 @@ int load_bpf_file(char *path) } memcpy(&kern_version, data->d_buf, sizeof(int)); } else if (strcmp(shname, "maps") == 0) { - processed_sec[i] = true; - if (load_maps(data->d_buf, data->d_size)) - return 1; + maps_shndx = i; + data_maps = data; } else if (shdr.sh_type == SHT_SYMTAB) { + strtabidx = shdr.sh_link; symbols = data; } } + ret = 1; + + if (!symbols) { + printf("missing SHT_SYMTAB section\n"); + goto done; + } + + if (data_maps) { + int nr_maps; + int prog_elf_map_sz; + + nr_maps = get_sorted_map_names(elf, symbols, maps_shndx, + strtabidx, map_names); + if (nr_maps < 0) + goto done; + + /* Deduce map struct size stored in ELF maps section */ + prog_elf_map_sz = data_maps->d_size / nr_maps; + if (prog_elf_map_sz != sizeof(struct bpf_map_def)) { + printf("Error: ELF maps sec wrong size (%d/%lu)," + " old kern.o file?\n", + prog_elf_map_sz, sizeof(struct bpf_map_def)); + ret = 1; + goto done; + } + + if (load_maps(data_maps->d_buf, nr_maps, + 
(const char **)map_names, fixup_map))
+			goto done;
+
+		processed_sec[maps_shndx] = true;
+	}
+
 	/* load programs that need map fixup (relocations) */
 	for (i = 1; i < ehdr.e_shnum; i++) {
 		if (processed_sec[i])
@@ -387,8 +486,22 @@
 			load_and_attach(shname, data->d_buf, data->d_size);
 	}
 
+	ret = 0;
+done:
+	for (i = 0; i < MAX_MAPS; i++)
+		free(map_names[i]);
 	close(fd);
-	return 0;
+	return ret;
+}
+
+int load_bpf_file(char *path)
+{
+	return do_load_bpf_file(path, NULL);
+}
+
+int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
+{
+	return do_load_bpf_file(path, fixup_map);
 }
 
 void read_trace_pipe(void)
@@ -473,7 +586,7 @@ struct ksym *ksym_search(long key)
 	return &syms[0];
 }
 
-int set_link_xdp_fd(int ifindex, int fd)
+int set_link_xdp_fd(int ifindex, int fd, __u32 flags)
 {
 	struct sockaddr_nl sa;
 	int sock, seq = 0, len, ret = -1;
@@ -509,15 +622,28 @@ int set_link_xdp_fd(int ifindex, int fd)
 	req.nh.nlmsg_seq = ++seq;
 	req.ifinfo.ifi_family = AF_UNSPEC;
 	req.ifinfo.ifi_index = ifindex;
+
+	/* start nested attribute for XDP */
 	nla = (struct nlattr *)(((char *)&req)
 				+ NLMSG_ALIGN(req.nh.nlmsg_len));
 	nla->nla_type = NLA_F_NESTED | 43/*IFLA_XDP*/;
+	nla->nla_len = NLA_HDRLEN;
 
-	nla_xdp = (struct nlattr *)((char *)nla + NLA_HDRLEN);
+	/* add XDP fd */
+	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
 	nla_xdp->nla_type = 1/*IFLA_XDP_FD*/;
 	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
 	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
-	nla->nla_len = NLA_HDRLEN + nla_xdp->nla_len;
+	nla->nla_len += nla_xdp->nla_len;
+
+	/* if user passed in any flags, add those too */
+	if (flags) {
+		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+		nla_xdp->nla_type = 3/*IFLA_XDP_FLAGS*/;
+		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
+		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
+		nla->nla_len += nla_xdp->nla_len;
+	}
 
 	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h
index c827827299b3b77831650e762376b061b6c93021..05822f83173af928fb13bfb046cb5b2b728c68d1 100644
--- a/samples/bpf/bpf_load.h
+++ b/samples/bpf/bpf_load.h
@@ -6,6 +6,18 @@
 #define MAX_MAPS 32
 #define MAX_PROGS 32
 
+struct bpf_map_def {
+	unsigned int type;
+	unsigned int key_size;
+	unsigned int value_size;
+	unsigned int max_entries;
+	unsigned int map_flags;
+	unsigned int inner_map_idx;
+};
+
+typedef void (*fixup_map_cb)(struct bpf_map_def *map, const char *map_name,
+			     int idx);
+
 extern int map_fd[MAX_MAPS];
 extern int prog_fd[MAX_PROGS];
 extern int event_fd[MAX_PROGS];
@@ -25,6 +37,7 @@ extern int prog_cnt;
  * returns zero on success
  */
 int load_bpf_file(char *path);
+int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map);
 
 void read_trace_pipe(void);
 
@@ -34,5 +47,5 @@ struct ksym {
 int load_kallsyms(void);
 struct ksym *ksym_search(long key);
 
-int set_link_xdp_fd(int ifindex, int fd);
+int set_link_xdp_fd(int ifindex, int fd, __u32 flags);
 #endif
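A caller just passes the new flags argument through to set_link_xdp_fd(). A
minimal sketch follows, mirroring the xdp1_user.c changes further down; the
helper name xdp_attach is invented here, and XDP_FLAGS_SKB_MODE comes from
the uapi <linux/if_link.h>:

#include <stdio.h>
#include <linux/if_link.h>	/* XDP_FLAGS_SKB_MODE */
#include "bpf_load.h"		/* set_link_xdp_fd() */

/* Attach fd to ifindex, in generic (skb-mode) XDP when skb_mode is set;
 * pass fd == -1 to detach again with the same flags.
 */
static int xdp_attach(int ifindex, int fd, int skb_mode)
{
	__u32 flags = skb_mode ? XDP_FLAGS_SKB_MODE : 0;

	if (set_link_xdp_fd(ifindex, fd, flags) < 0) {
		printf("link set xdp fd failed\n");
		return -1;
	}
	return 0;
}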
diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c
new file mode 100644
index 0000000000000000000000000000000000000000..b08ab4e889293c30f64b717065e6860603063f7c
--- /dev/null
+++ b/samples/bpf/cookie_uid_helper_example.c
@@ -0,0 +1,321 @@
+/* This test is a demo of using the get_socket_uid and get_socket_cookie
+ * helper functions to do per-socket based network traffic monitoring.
+ * It requires iptables version higher than 1.6.1 to load a pinned eBPF
+ * program into the xt_bpf match.
+ *
+ * TEST:
+ * ./run_cookie_uid_helper_example.sh -option
+ * option:
+ *	-t: do traffic monitoring test; the program will continuously
+ * print out the network traffic that happens after the program has
+ * started. A sample output is shown below:
+ *
+ * cookie: 877, uid: 0x3e8, Packet Count: 20, Bytes Count: 11058
+ * cookie: 132, uid: 0x0, Packet Count: 2, Bytes Count: 286
+ * cookie: 812, uid: 0x3e8, Packet Count: 3, Bytes Count: 1726
+ * cookie: 802, uid: 0x3e8, Packet Count: 2, Bytes Count: 104
+ * cookie: 877, uid: 0x3e8, Packet Count: 20, Bytes Count: 11058
+ * cookie: 831, uid: 0x3e8, Packet Count: 2, Bytes Count: 104
+ * cookie: 0, uid: 0x0, Packet Count: 6, Bytes Count: 712
+ * cookie: 880, uid: 0xfffe, Packet Count: 1, Bytes Count: 70
+ *
+ *	-s: do getsockopt SO_COOKIE test; the program will set up a pair of
+ * UDP sockets, send packets between them, and read the traffic data
+ * directly out of the eBPF map based on the socket cookie.
+ *
+ * Clean up: if the shell script is used, it will delete the iptables
+ * rule and unmount the bpf program on exit. Otherwise the iptables rule
+ * needs to be deleted by hand; see run_cookie_uid_helper_example.sh for
+ * details.
+ */
+
+#define _GNU_SOURCE
+
+#define offsetof(type, member)	__builtin_offsetof(type, member)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "libbpf.h"
+
+#define PORT 8888
+
+struct stats {
+	uint32_t uid;
+	uint64_t packets;
+	uint64_t bytes;
+};
+
+static int map_fd, prog_fd;
+
+static bool test_finish;
+
+static void maps_create(void)
+{
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(uint32_t),
+			sizeof(struct stats), 100, 0);
+	if (map_fd < 0)
+		error(1, errno, "map create failed!\n");
+}
+
+static void prog_load(void)
+{
+	static char log_buf[1 << 16];
+
+	struct bpf_insn prog[] = {
+		/*
+		 * Save sk_buff for future usage. Values stored in R6 to R10
+		 * will not be reset after a bpf helper function call.
+		 */
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+		/*
+		 * pc1: BPF_FUNC_get_socket_cookie takes one parameter,
+		 * R1: sk_buff
+		 */
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				BPF_FUNC_get_socket_cookie),
+		/* pc2-4: save &socketCookie to r7 for future usage */
+		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+		BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+		/*
+		 * pc5-8: set up the registers for BPF_FUNC_map_lookup_elem,
+		 * it takes two parameters (R1: map_fd, R2: &socket_cookie)
+		 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				BPF_FUNC_map_lookup_elem),
+		/*
+		 * pc9. if r0 != 0x0, go to pc+14, since we have the cookie
+		 * stored already. Otherwise do pc10-22 to set up a new data
+		 * entry.
+		 */
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 14),
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				BPF_FUNC_get_socket_uid),
+		/*
+		 * Place a struct stats on the R10 stack and sequentially
+		 * place the member values into the memory. The packets value
+		 * is set by directly placing an IMM value 1 onto the stack.
+		 */
+		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0,
+				-32 + (__s16)offsetof(struct stats, uid)),
+		BPF_ST_MEM(BPF_DW, BPF_REG_10,
+				-32 + (__s16)offsetof(struct stats, packets), 1),
+		/*
+		 * __sk_buff is a special struct used for eBPF programs to
+		 * directly access some sk_buff fields.
+		 */
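+		/*
+		 * At this point R0 still holds the uid returned by
+		 * BPF_FUNC_get_socket_uid (already stored into stats.uid
+		 * above); the skb->len load below fills in stats.bytes
+		 * before the completed entry is written to the map.
+		 */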
+		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+				offsetof(struct __sk_buff, len)),
+		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1,
+				-32 + (__s16)offsetof(struct stats, bytes)),
+		/*
+		 * add a new map entry using BPF_FUNC_map_update_elem, it takes
+		 * 4 parameters (R1: map_fd, R2: &socket_cookie, R3: &stats,
+		 * R4: flags)
+		 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+		BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32),
+		BPF_MOV64_IMM(BPF_REG_4, 0),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				BPF_FUNC_map_update_elem),
+		BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+		/*
+		 * pc24-30 update the packet info of an existing data entry;
+		 * this can be done by writing to the value pointer directly
+		 * instead of using the BPF_FUNC_map_update_elem helper
+		 * function
+		 */
+		BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+		BPF_MOV64_IMM(BPF_REG_1, 1),
+		BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
+				offsetof(struct stats, packets)),
+		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+				offsetof(struct __sk_buff, len)),
+		BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
+				offsetof(struct stats, bytes)),
+		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+				offsetof(struct __sk_buff, len)),
+		BPF_EXIT_INSN(),
+	};
+	prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
+					ARRAY_SIZE(prog), "GPL", 0,
+					log_buf, sizeof(log_buf));
+	if (prog_fd < 0)
+		error(1, errno, "failed to load prog\n%s\n", log_buf);
+}
+
+static void prog_attach_iptables(char *file)
+{
+	int ret;
+	char rules[100];
+
+	if (bpf_obj_pin(prog_fd, file))
+		error(1, errno, "bpf_obj_pin");
+	if (strlen(file) > 50) {
+		printf("file path too long: %s\n", file);
+		exit(1);
+	}
+	sprintf(rules, "iptables -A OUTPUT -m bpf --object-pinned %s -j ACCEPT",
+		file);
+	ret = system(rules);
+	if (ret < 0) {
+		printf("iptables rule update failed: %d\n", WEXITSTATUS(ret));
+		exit(1);
+	}
+}
+
+static void print_table(void)
+{
+	struct stats curEntry;
+	uint32_t curN = UINT32_MAX;
+	uint32_t nextN;
+	int res;
+
+	while (bpf_map_get_next_key(map_fd, &curN, &nextN) > -1) {
+		curN = nextN;
+		res = bpf_map_lookup_elem(map_fd, &curN, &curEntry);
+		if (res < 0) {
+			error(1, errno, "fail to get entry value of Key: %u\n",
+				curN);
+		} else {
+			printf("cookie: %u, uid: 0x%x, Packet Count: %lu,"
+				" Bytes Count: %lu\n", curN, curEntry.uid,
+				curEntry.packets, curEntry.bytes);
+		}
+	}
+}
+
+static void udp_client(void)
+{
+	struct sockaddr_in si_other = {0};
+	struct sockaddr_in si_me = {0};
+	struct stats dataEntry;
+	int s_rcv, s_send, i, recv_len;
+	char message = 'a';
+	char buf;
+	uint64_t cookie;
+	int res;
+	socklen_t cookie_len = sizeof(cookie);
+	socklen_t slen = sizeof(si_other);
+
+	s_rcv = socket(PF_INET, SOCK_DGRAM, 0);
+	if (s_rcv < 0)
+		error(1, errno, "rcv socket create failed!\n");
+	si_other.sin_family = AF_INET;
+	si_other.sin_port = htons(PORT);
+	if (inet_aton("127.0.0.1", &si_other.sin_addr) == 0)
+		error(1, errno, "inet_aton\n");
+	if (bind(s_rcv, (struct sockaddr *)&si_other, sizeof(si_other)) == -1)
+		error(1, errno, "bind\n");
+	s_send = socket(PF_INET, SOCK_DGRAM, 0);
+	if (s_send < 0)
+		error(1, errno, "send socket create failed!\n");
+	res = getsockopt(s_send, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len);
+	if (res < 0)
+		printf("get cookie failed: %s\n", strerror(errno));
+	res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry);
+	if (res != -1)
+		error(1, errno, "socket stat found while flow not active\n");
+	for (i = 0; i < 10; i++) {
+		res = sendto(s_send, &message, sizeof(message), 0,
+				(struct sockaddr *)&si_other, slen);
+		if (res == -1)
+			error(1, errno, "send\n");
+		if (res != sizeof(message))
+			error(1, 0, "%uB != %luB\n", res, sizeof(message));
+		recv_len = recvfrom(s_rcv, &buf, sizeof(buf), 0,
+				(struct sockaddr *)&si_me, &slen);
+		if (recv_len < 0)
+			error(1, errno, "receive\n");
+		res = memcmp(&(si_other.sin_addr), &(si_me.sin_addr),
+				sizeof(si_me.sin_addr));
+		if (res != 0)
+			error(1, EFAULT, "sender addr error: %d\n", res);
+		printf("Message received: %c\n", buf);
+		res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry);
+		if (res < 0)
+			error(1, errno, "lookup sk stat failed, cookie: %lu\n",
+				cookie);
+		printf("cookie: %lu, uid: 0x%x, Packet Count: %lu,"
+			" Bytes Count: %lu\n\n", cookie, dataEntry.uid,
+			dataEntry.packets, dataEntry.bytes);
+	}
+	close(s_send);
+	close(s_rcv);
+}
+
+static int usage(void)
+{
+	printf("Usage: ./run_cookie_uid_helper_example.sh"
+		" bpfObjName -option\n"
+		"	-t	traffic monitor test\n"
+		"	-s	getsockopt cookie test\n");
+	return 1;
+}
+
+static void finish(int ret)
+{
+	test_finish = true;
+}
+
+int main(int argc, char *argv[])
+{
+	int opt;
+	bool cfg_test_traffic = false;
+	bool cfg_test_cookie = false;
+
+	if (argc != 3)
+		return usage();
+	while ((opt = getopt(argc, argv, "ts")) != -1) {
+		switch (opt) {
+		case 't':
+			cfg_test_traffic = true;
+			break;
+		case 's':
+			cfg_test_cookie = true;
+			break;
+
+		default:
+			printf("unknown option %c\n", opt);
+			usage();
+			return -1;
+		}
+	}
+	maps_create();
+	prog_load();
+	prog_attach_iptables(argv[2]);
+	if (cfg_test_traffic) {
+		if (signal(SIGINT, finish) == SIG_ERR)
+			error(1, errno, "register handler failed");
+		while (!test_finish) {
+			print_table();
+			printf("\n");
+			sleep(1);
+		}
+	} else if (cfg_test_cookie) {
+		udp_client();
+	}
+	close(prog_fd);
+	close(map_fd);
+	return 0;
+}
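For readers more used to restricted C, the hand-assembled program above is
roughly what the following sketch compiles to. The sketch is illustrative
only: the map name stats_map and function name are invented, the helper
declarations are written inline because bpf_helpers.h does not yet carry
them, and the sample deliberately builds raw instructions so it does not
depend on LLVM:

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

static __u64 (*bpf_get_socket_cookie)(void *ctx) =
	(void *) BPF_FUNC_get_socket_cookie;
static __u32 (*bpf_get_socket_uid)(void *ctx) =
	(void *) BPF_FUNC_get_socket_uid;

struct stats {
	__u32 uid;
	__u64 packets;
	__u64 bytes;
};

struct bpf_map_def SEC("maps") stats_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),	/* matches maps_create() above */
	.value_size = sizeof(struct stats),
	.max_entries = 100,
};

SEC("socket")
int count_per_cookie(struct __sk_buff *skb)
{
	__u64 cookie = bpf_get_socket_cookie(skb);
	/* the sample's map uses 4-byte keys, so the cookie is truncated */
	__u32 key = cookie;
	struct stats *st, init = {};

	st = bpf_map_lookup_elem(&stats_map, &key);
	if (st) {
		/* clang compiles these into BPF_STX_XADD, as in pc24-30 */
		__sync_fetch_and_add(&st->packets, 1);
		__sync_fetch_and_add(&st->bytes, skb->len);
	} else {
		init.uid = bpf_get_socket_uid(skb);
		init.packets = 1;
		init.bytes = skb->len;
		bpf_map_update_elem(&stats_map, &key, &init, BPF_ANY);
	}
	return skb->len;	/* non-zero counts as a match for xt_bpf */
}

char _license[] SEC("license") = "GPL";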
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 3705fba453a005fb32f5dfb51dd5763f5f364ccf..8ab36a04c174a9c154594b7ef4fe2ef2c5c5dc5e 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -135,6 +135,16 @@ struct bpf_insn;
 		.off   = OFF,					\
 		.imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+	((struct bpf_insn) {					\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index a91872a97742a6413c316548c66ab8349ba1aff0..245165817fbe63bd53ea9cf7f3a3ab6cf5e7a2fa 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -11,6 +11,7 @@
 #include "bpf_helpers.h"
 
 #define MAX_ENTRIES 1000
+#define MAX_NR_CPUS 1024
 
 struct bpf_map_def SEC("maps") hash_map = {
 	.type = BPF_MAP_TYPE_HASH,
@@ -26,7 +27,7 @@ struct bpf_map_def SEC("maps") lru_hash_map = {
 	.max_entries = 10000,
 };
 
-struct bpf_map_def SEC("maps") percpu_lru_hash_map = {
+struct bpf_map_def SEC("maps") nocommon_lru_hash_map = {
 	.type = BPF_MAP_TYPE_LRU_HASH,
 	.key_size = sizeof(u32),
 	.value_size = sizeof(long),
@@ -34,6 +35,19 @@ struct bpf_map_def SEC("maps") percpu_lru_hash_map = {
 	.map_flags = BPF_F_NO_COMMON_LRU,
 };
 
+struct bpf_map_def SEC("maps") inner_lru_hash_map = {
+	.type = BPF_MAP_TYPE_LRU_HASH,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(long),
+	.max_entries = MAX_ENTRIES,
+};
+
+struct bpf_map_def SEC("maps") array_of_lru_hashs = {
+	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
+	.key_size = sizeof(u32),
+	.max_entries = MAX_NR_CPUS,
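+	/* value_size is intentionally left out: for an ARRAY_OF_MAPS the
+	 * value is an inner map fd, which the sample loader supplies via
+	 * the inner_map_idx fixup (see fixup_map() in map_perf_test_user.c).
+	 */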
+}; + struct bpf_map_def SEC("maps") percpu_hash_map = { .type = BPF_MAP_TYPE_PERCPU_HASH, .key_size = sizeof(u32), @@ -65,6 +79,13 @@ struct bpf_map_def SEC("maps") lpm_trie_map_alloc = { .map_flags = BPF_F_NO_PREALLOC, }; +struct bpf_map_def SEC("maps") array_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(long), + .max_entries = MAX_ENTRIES, +}; + SEC("kprobe/sys_getuid") int stress_hmap(struct pt_regs *ctx) { @@ -93,6 +114,7 @@ int stress_percpu_hmap(struct pt_regs *ctx) bpf_map_delete_elem(&percpu_hash_map, &key); return 0; } + SEC("kprobe/sys_getgid") int stress_hmap_alloc(struct pt_regs *ctx) { @@ -121,24 +143,56 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getpid") +SEC("kprobe/sys_connect") int stress_lru_hmap_alloc(struct pt_regs *ctx) { - u32 key = bpf_get_prandom_u32(); + struct sockaddr_in6 *in6; + u16 test_case, dst6[8]; + int addrlen, ret; + char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n"; long val = 1; + u32 key = bpf_get_prandom_u32(); - bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY); + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); + addrlen = (int)PT_REGS_PARM3(ctx); - return 0; -} + if (addrlen != sizeof(*in6)) + return 0; -SEC("kprobe/sys_getppid") -int stress_percpu_lru_hmap_alloc(struct pt_regs *ctx) -{ - u32 key = bpf_get_prandom_u32(); - long val = 1; + ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr); + if (ret) + goto done; - bpf_map_update_elem(&percpu_lru_hash_map, &key, &val, BPF_ANY); + if (dst6[0] != 0xdead || dst6[1] != 0xbeef) + return 0; + + test_case = dst6[7]; + + if (test_case == 0) { + ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY); + } else if (test_case == 1) { + ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val, + BPF_ANY); + } else if (test_case == 2) { + void *nolocal_lru_map; + int cpu = bpf_get_smp_processor_id(); + + nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs, + &cpu); + if (!nolocal_lru_map) { + ret = -ENOENT; + goto done; + } + + ret = bpf_map_update_elem(nolocal_lru_map, &key, &val, + BPF_ANY); + } else { + ret = -EINVAL; + } + +done: + if (ret) + bpf_trace_printk(fmt, sizeof(fmt), ret); return 0; } @@ -165,5 +219,31 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx) return 0; } +SEC("kprobe/sys_getpgid") +int stress_hash_map_lookup(struct pt_regs *ctx) +{ + u32 key = 1, i; + long *value; + +#pragma clang loop unroll(full) + for (i = 0; i < 64; ++i) + value = bpf_map_lookup_elem(&hash_map, &key); + + return 0; +} + +SEC("kprobe/sys_getpgrp") +int stress_array_map_lookup(struct pt_regs *ctx) +{ + u32 key = 1, i; + long *value; + +#pragma clang loop unroll(full) + for (i = 0; i < 64; ++i) + value = bpf_map_lookup_elem(&array_map, &key); + + return 0; +} + char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c index 680260a91f50c893dd26a1b968cc220a299c530f..6ac778153315edcbde80780bd68fbf5e288b677f 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -18,10 +18,14 @@ #include #include #include +#include +#include + #include "libbpf.h" #include "bpf_load.h" -#define MAX_CNT 1000000 +#define TEST_BIT(t) (1U << (t)) +#define MAX_NR_CPUS 1024 static __u64 time_get_ns(void) { @@ -31,15 +35,44 @@ static __u64 time_get_ns(void) return ts.tv_sec * 1000000000ull + ts.tv_nsec; } -#define HASH_PREALLOC (1 << 0) -#define PERCPU_HASH_PREALLOC (1 << 1) -#define 
HASH_KMALLOC (1 << 2) -#define PERCPU_HASH_KMALLOC (1 << 3) -#define LRU_HASH_PREALLOC (1 << 4) -#define PERCPU_LRU_HASH_PREALLOC (1 << 5) -#define LPM_KMALLOC (1 << 6) +enum test_type { + HASH_PREALLOC, + PERCPU_HASH_PREALLOC, + HASH_KMALLOC, + PERCPU_HASH_KMALLOC, + LRU_HASH_PREALLOC, + NOCOMMON_LRU_HASH_PREALLOC, + LPM_KMALLOC, + HASH_LOOKUP, + ARRAY_LOOKUP, + INNER_LRU_HASH_PREALLOC, + NR_TESTS, +}; + +const char *test_map_names[NR_TESTS] = { + [HASH_PREALLOC] = "hash_map", + [PERCPU_HASH_PREALLOC] = "percpu_hash_map", + [HASH_KMALLOC] = "hash_map_alloc", + [PERCPU_HASH_KMALLOC] = "percpu_hash_map_alloc", + [LRU_HASH_PREALLOC] = "lru_hash_map", + [NOCOMMON_LRU_HASH_PREALLOC] = "nocommon_lru_hash_map", + [LPM_KMALLOC] = "lpm_trie_map_alloc", + [HASH_LOOKUP] = "hash_map", + [ARRAY_LOOKUP] = "array_map", + [INNER_LRU_HASH_PREALLOC] = "inner_lru_hash_map", +}; static int test_flags = ~0; +static uint32_t num_map_entries; +static uint32_t inner_lru_hash_size; +static int inner_lru_hash_idx = -1; +static int array_of_lru_hashs_idx = -1; +static uint32_t max_cnt = 1000000; + +static int check_test_flags(enum test_type t) +{ + return test_flags & TEST_BIT(t); +} static void test_hash_prealloc(int cpu) { @@ -47,34 +80,89 @@ static void test_hash_prealloc(int cpu) int i; start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) + for (i = 0; i < max_cnt; i++) syscall(__NR_getuid); printf("%d:hash_map_perf pre-alloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } -static void test_lru_hash_prealloc(int cpu) +static void do_test_lru(enum test_type test, int cpu) { + static int inner_lru_map_fds[MAX_NR_CPUS]; + + struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 }; + const char *test_name; __u64 start_time; - int i; + int i, ret; + + if (test == INNER_LRU_HASH_PREALLOC) { + int outer_fd = map_fd[array_of_lru_hashs_idx]; + + assert(cpu < MAX_NR_CPUS); + + if (cpu) { + inner_lru_map_fds[cpu] = + bpf_create_map(BPF_MAP_TYPE_LRU_HASH, + sizeof(uint32_t), sizeof(long), + inner_lru_hash_size, 0); + if (inner_lru_map_fds[cpu] == -1) { + printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", + strerror(errno), errno); + exit(1); + } + } else { + inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx]; + } + + ret = bpf_map_update_elem(outer_fd, &cpu, + &inner_lru_map_fds[cpu], + BPF_ANY); + if (ret) { + printf("cannot update ARRAY_OF_LRU_HASHS with key:%u. 
%s(%d)\n", + cpu, strerror(errno), errno); + exit(1); + } + } + + in6.sin6_addr.s6_addr16[0] = 0xdead; + in6.sin6_addr.s6_addr16[1] = 0xbeef; + + if (test == LRU_HASH_PREALLOC) { + test_name = "lru_hash_map_perf"; + in6.sin6_addr.s6_addr16[7] = 0; + } else if (test == NOCOMMON_LRU_HASH_PREALLOC) { + test_name = "nocommon_lru_hash_map_perf"; + in6.sin6_addr.s6_addr16[7] = 1; + } else if (test == INNER_LRU_HASH_PREALLOC) { + test_name = "inner_lru_hash_map_perf"; + in6.sin6_addr.s6_addr16[7] = 2; + } else { + assert(0); + } start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) - syscall(__NR_getpid); - printf("%d:lru_hash_map_perf pre-alloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + for (i = 0; i < max_cnt; i++) { + ret = connect(-1, (const struct sockaddr *)&in6, sizeof(in6)); + assert(ret == -1 && errno == EBADF); + } + printf("%d:%s pre-alloc %lld events per sec\n", + cpu, test_name, + max_cnt * 1000000000ll / (time_get_ns() - start_time)); +} + +static void test_lru_hash_prealloc(int cpu) +{ + do_test_lru(LRU_HASH_PREALLOC, cpu); } -static void test_percpu_lru_hash_prealloc(int cpu) +static void test_nocommon_lru_hash_prealloc(int cpu) { - __u64 start_time; - int i; + do_test_lru(NOCOMMON_LRU_HASH_PREALLOC, cpu); +} - start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) - syscall(__NR_getppid); - printf("%d:lru_hash_map_perf pre-alloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); +static void test_inner_lru_hash_prealloc(int cpu) +{ + do_test_lru(INNER_LRU_HASH_PREALLOC, cpu); } static void test_percpu_hash_prealloc(int cpu) @@ -83,10 +171,10 @@ static void test_percpu_hash_prealloc(int cpu) int i; start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) + for (i = 0; i < max_cnt; i++) syscall(__NR_geteuid); printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_hash_kmalloc(int cpu) @@ -95,10 +183,10 @@ static void test_hash_kmalloc(int cpu) int i; start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) + for (i = 0; i < max_cnt; i++) syscall(__NR_getgid); printf("%d:hash_map_perf kmalloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_percpu_hash_kmalloc(int cpu) @@ -107,10 +195,10 @@ static void test_percpu_hash_kmalloc(int cpu) int i; start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) + for (i = 0; i < max_cnt; i++) syscall(__NR_getegid); printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_lpm_kmalloc(int cpu) @@ -119,40 +207,63 @@ static void test_lpm_kmalloc(int cpu) int i; start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) + for (i = 0; i < max_cnt; i++) syscall(__NR_gettid); printf("%d:lpm_perf kmalloc %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); + cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } -static void loop(int cpu) +static void test_hash_lookup(int cpu) { - cpu_set_t cpuset; - - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); - sched_setaffinity(0, sizeof(cpuset), &cpuset); + __u64 start_time; + int i; - if (test_flags & HASH_PREALLOC) - test_hash_prealloc(cpu); + 
start_time = time_get_ns(); + for (i = 0; i < max_cnt; i++) + syscall(__NR_getpgid, 0); + printf("%d:hash_lookup %lld lookups per sec\n", + cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time)); +} - if (test_flags & PERCPU_HASH_PREALLOC) - test_percpu_hash_prealloc(cpu); +static void test_array_lookup(int cpu) +{ + __u64 start_time; + int i; - if (test_flags & HASH_KMALLOC) - test_hash_kmalloc(cpu); + start_time = time_get_ns(); + for (i = 0; i < max_cnt; i++) + syscall(__NR_getpgrp, 0); + printf("%d:array_lookup %lld lookups per sec\n", + cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time)); +} - if (test_flags & PERCPU_HASH_KMALLOC) - test_percpu_hash_kmalloc(cpu); +typedef void (*test_func)(int cpu); +const test_func test_funcs[] = { + [HASH_PREALLOC] = test_hash_prealloc, + [PERCPU_HASH_PREALLOC] = test_percpu_hash_prealloc, + [HASH_KMALLOC] = test_hash_kmalloc, + [PERCPU_HASH_KMALLOC] = test_percpu_hash_kmalloc, + [LRU_HASH_PREALLOC] = test_lru_hash_prealloc, + [NOCOMMON_LRU_HASH_PREALLOC] = test_nocommon_lru_hash_prealloc, + [LPM_KMALLOC] = test_lpm_kmalloc, + [HASH_LOOKUP] = test_hash_lookup, + [ARRAY_LOOKUP] = test_array_lookup, + [INNER_LRU_HASH_PREALLOC] = test_inner_lru_hash_prealloc, +}; - if (test_flags & LRU_HASH_PREALLOC) - test_lru_hash_prealloc(cpu); +static void loop(int cpu) +{ + cpu_set_t cpuset; + int i; - if (test_flags & PERCPU_LRU_HASH_PREALLOC) - test_percpu_lru_hash_prealloc(cpu); + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + sched_setaffinity(0, sizeof(cpuset), &cpuset); - if (test_flags & LPM_KMALLOC) - test_lpm_kmalloc(cpu); + for (i = 0; i < NR_TESTS; i++) { + if (check_test_flags(i)) + test_funcs[i](cpu); + } } static void run_perf_test(int tasks) @@ -209,6 +320,38 @@ static void fill_lpm_trie(void) assert(!r); } +static void fixup_map(struct bpf_map_def *map, const char *name, int idx) +{ + int i; + + if (!strcmp("inner_lru_hash_map", name)) { + inner_lru_hash_idx = idx; + inner_lru_hash_size = map->max_entries; + } + + if (!strcmp("array_of_lru_hashs", name)) { + if (inner_lru_hash_idx == -1) { + printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n"); + exit(1); + } + map->inner_map_idx = inner_lru_hash_idx; + array_of_lru_hashs_idx = idx; + } + + if (num_map_entries <= 0) + return; + + inner_lru_hash_size = num_map_entries; + + /* Only change the max_entries for the enabled test(s) */ + for (i = 0; i < NR_TESTS; i++) { + if (!strcmp(test_map_names[i], name) && + (check_test_flags(i))) { + map->max_entries = num_map_entries; + } + } +} + int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; @@ -224,7 +367,13 @@ int main(int argc, char **argv) if (argc > 2) num_cpu = atoi(argv[2]) ? : num_cpu; - if (load_bpf_file(filename)) { + if (argc > 3) + num_map_entries = atoi(argv[3]); + + if (argc > 4) + max_cnt = atoi(argv[4]); + + if (load_bpf_file_fixup_map(filename, fixup_map)) { printf("%s", bpf_log_buf); return 1; } diff --git a/samples/bpf/run_cookie_uid_helper_example.sh b/samples/bpf/run_cookie_uid_helper_example.sh new file mode 100755 index 0000000000000000000000000000000000000000..f898cfa2b1aa52f71a65db469450d4d001b08b2c --- /dev/null +++ b/samples/bpf/run_cookie_uid_helper_example.sh @@ -0,0 +1,14 @@ +#!/bin/bash +local_dir="$(pwd)" +root_dir=$local_dir/../.. 
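+# Pin the eBPF filter in a private bpffs mount so the iptables xt_bpf
+# match can load it by path; the EXIT trap below removes the iptables
+# rule and the mount again once the example finishes.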
+mnt_dir=$(mktemp -d --tmp) + +on_exit() { + iptables -D OUTPUT -m bpf --object-pinned ${mnt_dir}/bpf_prog -j ACCEPT + umount ${mnt_dir} + rm -r ${mnt_dir} +} + +trap on_exit EXIT +mount -t bpf bpf ${mnt_dir} +./per_socket_stats_example ${mnt_dir}/bpf_prog $1 diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c index d96dc88d3b04067135fe80c95628fca37a29c022..73c35714226842b248027b214989e56570e4378f 100644 --- a/samples/bpf/test_lru_dist.c +++ b/samples/bpf/test_lru_dist.c @@ -25,7 +25,9 @@ #include "bpf_util.h" #define min(a, b) ((a) < (b) ? (a) : (b)) -#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) +#ifndef offsetof +# define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) +#endif #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c new file mode 100644 index 0000000000000000000000000000000000000000..42c44d091dd176b87bb92d1aeb849b1f8ea8cdd6 --- /dev/null +++ b/samples/bpf/test_map_in_map_kern.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#define KBUILD_MODNAME "foo" +#include +#include +#include +#include +#include "bpf_helpers.h" + +#define MAX_NR_PORTS 65536 + +/* map #0 */ +struct bpf_map_def SEC("maps") port_a = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(int), + .max_entries = MAX_NR_PORTS, +}; + +/* map #1 */ +struct bpf_map_def SEC("maps") port_h = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(u32), + .value_size = sizeof(int), + .max_entries = 1, +}; + +/* map #2 */ +struct bpf_map_def SEC("maps") reg_result_h = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(u32), + .value_size = sizeof(int), + .max_entries = 1, +}; + +/* map #3 */ +struct bpf_map_def SEC("maps") inline_result_h = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(u32), + .value_size = sizeof(int), + .max_entries = 1, +}; + +/* map #4 */ /* Test case #0 */ +struct bpf_map_def SEC("maps") a_of_port_a = { + .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, + .key_size = sizeof(u32), + .inner_map_idx = 0, /* map_fd[0] is port_a */ + .max_entries = MAX_NR_PORTS, +}; + +/* map #5 */ /* Test case #1 */ +struct bpf_map_def SEC("maps") h_of_port_a = { + .type = BPF_MAP_TYPE_HASH_OF_MAPS, + .key_size = sizeof(u32), + .inner_map_idx = 0, /* map_fd[0] is port_a */ + .max_entries = 1, +}; + +/* map #6 */ /* Test case #2 */ +struct bpf_map_def SEC("maps") h_of_port_h = { + .type = BPF_MAP_TYPE_HASH_OF_MAPS, + .key_size = sizeof(u32), + .inner_map_idx = 1, /* map_fd[1] is port_h */ + .max_entries = 1, +}; + +static __always_inline int do_reg_lookup(void *inner_map, u32 port) +{ + int *result; + + result = bpf_map_lookup_elem(inner_map, &port); + return result ? *result : -ENOENT; +} + +static __always_inline int do_inline_array_lookup(void *inner_map, u32 port) +{ + int *result; + + if (inner_map != &port_a) + return -EINVAL; + + result = bpf_map_lookup_elem(&port_a, &port); + return result ? *result : -ENOENT; +} + +static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port) +{ + int *result; + + if (inner_map != &port_h) + return -EINVAL; + + result = bpf_map_lookup_elem(&port_h, &port); + return result ? 
*result : -ENOENT; +} + +SEC("kprobe/sys_connect") +int trace_sys_connect(struct pt_regs *ctx) +{ + struct sockaddr_in6 *in6; + u16 test_case, port, dst6[8]; + int addrlen, ret, inline_ret, ret_key = 0; + u32 port_key; + void *outer_map, *inner_map; + bool inline_hash = false; + + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); + addrlen = (int)PT_REGS_PARM3(ctx); + + if (addrlen != sizeof(*in6)) + return 0; + + ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr); + if (ret) { + inline_ret = ret; + goto done; + } + + if (dst6[0] != 0xdead || dst6[1] != 0xbeef) + return 0; + + test_case = dst6[7]; + + ret = bpf_probe_read(&port, sizeof(port), &in6->sin6_port); + if (ret) { + inline_ret = ret; + goto done; + } + + port_key = port; + + ret = -ENOENT; + if (test_case == 0) { + outer_map = &a_of_port_a; + } else if (test_case == 1) { + outer_map = &h_of_port_a; + } else if (test_case == 2) { + outer_map = &h_of_port_h; + } else { + ret = __LINE__; + inline_ret = ret; + goto done; + } + + inner_map = bpf_map_lookup_elem(outer_map, &port_key); + if (!inner_map) { + ret = __LINE__; + inline_ret = ret; + goto done; + } + + ret = do_reg_lookup(inner_map, port_key); + + if (test_case == 0 || test_case == 1) + inline_ret = do_inline_array_lookup(inner_map, port_key); + else + inline_ret = do_inline_hash_lookup(inner_map, port_key); + +done: + bpf_map_update_elem(®_result_h, &ret_key, &ret, BPF_ANY); + bpf_map_update_elem(&inline_result_h, &ret_key, &inline_ret, BPF_ANY); + + return 0; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c new file mode 100644 index 0000000000000000000000000000000000000000..f62fdc2bd4281005f959cdb85cf1aafe2dc08193 --- /dev/null +++ b/samples/bpf/test_map_in_map_user.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "libbpf.h" +#include "bpf_load.h" + +#define PORT_A (map_fd[0]) +#define PORT_H (map_fd[1]) +#define REG_RESULT_H (map_fd[2]) +#define INLINE_RESULT_H (map_fd[3]) +#define A_OF_PORT_A (map_fd[4]) /* Test case #0 */ +#define H_OF_PORT_A (map_fd[5]) /* Test case #1 */ +#define H_OF_PORT_H (map_fd[6]) /* Test case #2 */ + +static const char * const test_names[] = { + "Array of Array", + "Hash of Array", + "Hash of Hash", +}; + +#define NR_TESTS (sizeof(test_names) / sizeof(*test_names)) + +static void populate_map(uint32_t port_key, int magic_result) +{ + int ret; + + ret = bpf_map_update_elem(PORT_A, &port_key, &magic_result, BPF_ANY); + assert(!ret); + + ret = bpf_map_update_elem(PORT_H, &port_key, &magic_result, + BPF_NOEXIST); + assert(!ret); + + ret = bpf_map_update_elem(A_OF_PORT_A, &port_key, &PORT_A, BPF_ANY); + assert(!ret); + + ret = bpf_map_update_elem(H_OF_PORT_A, &port_key, &PORT_A, BPF_NOEXIST); + assert(!ret); + + ret = bpf_map_update_elem(H_OF_PORT_H, &port_key, &PORT_H, BPF_NOEXIST); + assert(!ret); +} + +static void test_map_in_map(void) +{ + struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 }; + uint32_t result_key = 0, port_key; + int result, inline_result; + int magic_result = 0xfaceb00c; + int ret; + int i; + + port_key = rand() & 0x00FF; + populate_map(port_key, magic_result); + + in6.sin6_addr.s6_addr16[0] = 0xdead; + in6.sin6_addr.s6_addr16[1] = 0xbeef; + in6.sin6_port = port_key; + + for (i = 0; i < NR_TESTS; i++) { + printf("%s: ", test_names[i]); + + in6.sin6_addr.s6_addr16[7] = i; + ret = connect(-1, (struct sockaddr *)&in6, sizeof(in6)); + assert(ret == -1 && errno == EBADF); + + ret = bpf_map_lookup_elem(REG_RESULT_H, &result_key, &result); + assert(!ret); + + ret = bpf_map_lookup_elem(INLINE_RESULT_H, &result_key, + &inline_result); + assert(!ret); + + if (result != magic_result || inline_result != magic_result) { + printf("Error. result:%d inline_result:%d\n", + result, inline_result); + exit(1); + } + + bpf_map_delete_elem(REG_RESULT_H, &result_key); + bpf_map_delete_elem(INLINE_RESULT_H, &result_key); + + printf("Pass\n"); + } +} + +int main(int argc, char **argv) +{ + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + char filename[256]; + + assert(!setrlimit(RLIMIT_MEMLOCK, &r)); + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + + test_map_in_map(); + + return 0; +} diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index d2be65d1fd86ed199909bc6c68327f7d31e6b35a..378850c70eb81afdbc5283611573f89ea4d8923e 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -5,6 +5,7 @@ * License as published by the Free Software Foundation. 
*/ #include +#include #include #include #include @@ -12,16 +13,18 @@ #include #include #include +#include #include "bpf_load.h" #include "bpf_util.h" #include "libbpf.h" static int ifindex; +static __u32 xdp_flags; static void int_exit(int sig) { - set_link_xdp_fd(ifindex, -1); + set_link_xdp_fd(ifindex, -1, xdp_flags); exit(0); } @@ -54,18 +57,39 @@ static void poll_stats(int interval) } } -int main(int ac, char **argv) +static void usage(const char *prog) { - char filename[256]; + fprintf(stderr, + "usage: %s [OPTS] IFINDEX\n\n" + "OPTS:\n" + " -S use skb-mode\n", + prog); +} - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); +int main(int argc, char **argv) +{ + const char *optstr = "S"; + char filename[256]; + int opt; + + while ((opt = getopt(argc, argv, optstr)) != -1) { + switch (opt) { + case 'S': + xdp_flags |= XDP_FLAGS_SKB_MODE; + break; + default: + usage(basename(argv[0])); + return 1; + } + } - if (ac != 2) { - printf("usage: %s IFINDEX\n", argv[0]); + if (optind == argc) { + usage(basename(argv[0])); return 1; } + ifindex = strtoul(argv[optind], NULL, 0); - ifindex = strtoul(argv[1], NULL, 0); + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); @@ -79,7 +103,7 @@ int main(int ac, char **argv) signal(SIGINT, int_exit); - if (set_link_xdp_fd(ifindex, prog_fd[0]) < 0) { + if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { printf("link set xdp fd failed\n"); return 1; } diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c index 70e192fc61aa4b9a317a1e8b11df0d36034cc877..92b8bde9337c8cec6128e73d23d307b628ba5a38 100644 --- a/samples/bpf/xdp_tx_iptunnel_user.c +++ b/samples/bpf/xdp_tx_iptunnel_user.c @@ -5,6 +5,7 @@ * License as published by the Free Software Foundation. 
*/ #include +#include #include #include #include @@ -24,11 +25,12 @@ #define STATS_INTERVAL_S 2U static int ifindex = -1; +static __u32 xdp_flags = 0; static void int_exit(int sig) { if (ifindex > -1) - set_link_xdp_fd(ifindex, -1); + set_link_xdp_fd(ifindex, -1, xdp_flags); exit(0); } @@ -136,7 +138,7 @@ int main(int argc, char **argv) { unsigned char opt_flags[256] = {}; unsigned int kill_after_s = 0; - const char *optstr = "i:a:p:s:d:m:T:P:h"; + const char *optstr = "i:a:p:s:d:m:T:P:Sh"; int min_port = 0, max_port = 0; struct iptnl_info tnl = {}; struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; @@ -201,6 +203,9 @@ int main(int argc, char **argv) case 'T': kill_after_s = atoi(optarg); break; + case 'S': + xdp_flags |= XDP_FLAGS_SKB_MODE; + break; default: usage(argv[0]); return 1; @@ -243,14 +248,14 @@ int main(int argc, char **argv) } } - if (set_link_xdp_fd(ifindex, prog_fd[0]) < 0) { + if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { printf("link set xdp fd failed\n"); return 1; } poll_stats(kill_after_s); - set_link_xdp_fd(ifindex, -1); + set_link_xdp_fd(ifindex, -1, xdp_flags); return 0; } diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 2ca9cde939d44976365aa67fe72f51be9b92897d..8e67bb4c9caba658b18f9d1dd0593907c8556c27 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -69,6 +69,7 @@ static struct nlmsg_perm nlmsg_route_perms[] = { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ }, { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_DELNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ }, { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index e04ab89a1013bdee0b4d0e1f887c00fa0f521aec..ebc6dceddb58c00e4253e4fc3b37eda7a17b62be 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c @@ -9,6 +9,9 @@ # define __NR_bpf 321 # elif defined(__aarch64__) # define __NR_bpf 280 +# elif defined(__sparc__) +# define __NR_bpf 349 +# else # error __NR_bpf not defined. libbpf does not support your arch. 
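+/* Each fallback value mirrors the architecture's unistd.h; sparc uses
+ * 349 for bpf(2) on both the 32-bit and 64-bit ABIs, which share one
+ * syscall table.
+ */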
# endif #endif diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh index 4aa5369ffa4ef295a8841edd380c7f678de80893..d85968cb1bf235feb1352809ac9d7229c152d74a 100755 --- a/tools/hv/bondvf.sh +++ b/tools/hv/bondvf.sh @@ -101,9 +101,25 @@ function create_bond_cfg_redhat { echo BONDING_OPTS=\"mode=active-backup miimon=100 primary=$2\" >>$fn } +function del_eth_cfg_ubuntu { + local fn=$cfgdir/interfaces + local tmpfl=$(mktemp) + + local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1 + local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)' + + awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl + + cp $tmpfl $fn + + rm $tmpfl +} + function create_eth_cfg_ubuntu { local fn=$cfgdir/interfaces + del_eth_cfg_ubuntu $1 + echo $'\n'auto $1 >>$fn echo iface $1 inet manual >>$fn echo bond-master $2 >>$fn @@ -119,6 +135,8 @@ function create_eth_cfg_pri_ubuntu { function create_bond_cfg_ubuntu { local fn=$cfgdir/interfaces + del_eth_cfg_ubuntu $1 + echo $'\n'auto $1 >>$fn echo iface $1 inet dhcp >>$fn echo bond-mode active-backup >>$fn diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0539a0ceef38155835552360667070552ebce641..e553529929f683c4c39ae336d10c6f670e589b54 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -81,6 +81,7 @@ enum bpf_cmd { BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, + BPF_PROG_TEST_RUN, }; enum bpf_map_type { @@ -96,6 +97,8 @@ enum bpf_map_type { BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, + BPF_MAP_TYPE_ARRAY_OF_MAPS, + BPF_MAP_TYPE_HASH_OF_MAPS, }; enum bpf_prog_type { @@ -152,6 +155,7 @@ union bpf_attr { __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* prealloc or not */ + __u32 inner_map_fd; /* fd pointing to the inner map */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -186,6 +190,17 @@ union bpf_attr { __u32 attach_type; __u32 attach_flags; }; + + struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __aligned_u64 data_in; + __aligned_u64 data_out; + __u32 repeat; + __u32 duration; + } test; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -456,6 +471,18 @@ union bpf_attr { * Return: * > 0 length of the string including the trailing NUL on success * < 0 error + * + * u64 bpf_get_socket_cookie(skb) + * Get the cookie for the socket stored inside sk_buff. + * @skb: pointer to skb + * Return: 8 Bytes non-decreasing number on success or 0 if the socket + * field is missing inside sk_buff + * + * u32 bpf_get_socket_uid(skb) + * Get the owner uid of the socket stored inside sk_buff. 
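The new inner_map_fd attribute is how map-in-map creation learns the geometry of its inner maps: the outer map's value_size is fixed at 4 because, from user space, each slot is written as an inner map *fd*, while a program looking the slot up receives a map pointer it must NULL-check (see the verifier tests later in this series). A hedged userspace sketch, using the library wrappers added below and illustrative single-slot sizes, error handling elided:

	int inner, outer, slot = 0;

	inner = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
			       sizeof(int), 1, 0);
	/* outer value_size is implicitly 4: an fd-sized slot */
	outer = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
				      sizeof(int), inner, 1, 0);
	/* plug the inner map into slot 0 of the outer map, by fd */
	bpf_map_update_elem(outer, &slot, &inner, 0);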
+ * @skb: pointer to skb + * Return: uid of the socket owner on success or 0 if the socket pointer + * inside sk_buff is NULL */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -503,7 +530,9 @@ union bpf_attr { FN(get_numa_node_id), \ FN(skb_change_head), \ FN(xdp_adjust_head), \ - FN(probe_read_str), + FN(probe_read_str), \ + FN(get_socket_cookie), \ + FN(get_socket_uid), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -574,6 +603,7 @@ struct __sk_buff { __u32 tc_classid; __u32 data; __u32 data_end; + __u32 napi_id; }; struct bpf_tunnel_key { diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 207c2eeddab064d7c304efed09653ba6e227d6e9..4fe444b8092e2d6dc8aea6b7ce8349a64e439c44 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -37,6 +37,8 @@ # define __NR_bpf 321 # elif defined(__aarch64__) # define __NR_bpf 280 +# elif defined(__sparc__) +# define __NR_bpf 349 # else # error __NR_bpf not defined. libbpf does not support your arch. # endif @@ -69,6 +71,23 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } +int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, __u32 map_flags) +{ + union bpf_attr attr; + + memset(&attr, '\0', sizeof(attr)); + + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = 4; + attr.inner_map_fd = inner_map_fd; + attr.max_entries = max_entries; + attr.map_flags = map_flags; + + return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); +} + int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz) @@ -192,3 +211,27 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type) return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } + +int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, + void *data_out, __u32 *size_out, __u32 *retval, + __u32 *duration) +{ + union bpf_attr attr; + int ret; + + bzero(&attr, sizeof(attr)); + attr.test.prog_fd = prog_fd; + attr.test.data_in = ptr_to_u64(data); + attr.test.data_out = ptr_to_u64(data_out); + attr.test.data_size_in = size; + attr.test.repeat = repeat; + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + if (size_out) + *size_out = attr.test.data_size_out; + if (retval) + *retval = attr.test.retval; + if (duration) + *duration = attr.test.duration; + return ret; +} diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 09c3dcac04963e6973d7a002e54b6a139af6212a..edb4daeff7a52c44f6bc366c265a7f5a3aadfc10 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -26,6 +26,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); +int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, __u32 map_flags); /* Recommend log buffer size */ #define BPF_LOG_BUF_SIZE 65536 @@ -45,6 +47,8 @@ int bpf_obj_get(const char *pathname); int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); - +int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, + void *data_out, __u32 *size_out, __u32 *retval, + __u32 *duration); #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 
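bpf_prog_test_run() above drives the new BPF_PROG_TEST_RUN command: the kernel builds a context from data_in, runs the program repeat times, and reports the program's return code plus, per this series' kernel side, the mean runtime per run in nanoseconds. A minimal caller sketch; pkt and the buffer sizes are illustrative:

	__u32 retval, duration, size_out = 0;
	char out[128];	/* receives the possibly rewritten packet */
	int err;

	err = bpf_prog_test_run(prog_fd, 100000 /* repeat */,
				&pkt, sizeof(pkt),
				out, &size_out, &retval, &duration);
	if (!err)
		printf("retval %u, avg %u ns, %u bytes out\n",
		       retval, duration, size_out);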
ac6eb863b2a40df00c4ead9f48c872ab67949c19..1a2c07eb7795bb4fb43e4a97bdc721d7cbc7f3b8 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1618,8 +1618,7 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n) return fd; } -static void bpf_program__set_type(struct bpf_program *prog, - enum bpf_prog_type type) +void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { prog->type = type; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index b30394f9947a35356af870223664a4409f8e5fd6..32c7252f734e42f9895c4686c4236f4b259d99a3 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -25,6 +25,7 @@ #include #include #include // for size_t +#include enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, @@ -185,6 +186,7 @@ int bpf_program__set_sched_cls(struct bpf_program *prog); int bpf_program__set_sched_act(struct bpf_program *prog); int bpf_program__set_xdp(struct bpf_program *prog); int bpf_program__set_perf_event(struct bpf_program *prog); +void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type); bool bpf_program__is_socket_filter(struct bpf_program *prog); bool bpf_program__is_tracepoint(struct bpf_program *prog); diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c index 544b05a53b7057bbe9092e31f600065d47e3b24e..ad572e6cdbd0f47cfa947f18451a253977ff6c64 100644 --- a/tools/net/bpf_jit_disasm.c +++ b/tools/net/bpf_jit_disasm.c @@ -229,6 +229,7 @@ static void usage(void) { printf("Usage: bpf_jit_disasm [...]\n"); printf(" -o Also display related opcodes (default: off).\n"); + printf(" -O Write binary image of code to file, don't disassemble to stdout.\n"); printf(" -f Read last image dump from file or stdin (default: klog).\n"); printf(" -h Display this help.\n"); } @@ -238,12 +239,19 @@ int main(int argc, char **argv) unsigned int len, klen, opt, opcodes = 0; static uint8_t image[32768]; char *kbuff, *file = NULL; + char *ofile = NULL; + int ofd; + ssize_t nr; + uint8_t *pos; - while ((opt = getopt(argc, argv, "of:")) != -1) { + while ((opt = getopt(argc, argv, "of:O:")) != -1) { switch (opt) { case 'o': opcodes = 1; break; + case 'O': + ofile = optarg; + break; case 'f': file = optarg; break; @@ -263,11 +271,35 @@ int main(int argc, char **argv) } len = get_last_jit_image(kbuff, klen, image, sizeof(image)); - if (len > 0) - get_asm_insns(image, len, opcodes); - else + if (len <= 0) { fprintf(stderr, "No JIT image found!\n"); + goto done; + } + if (!ofile) { + get_asm_insns(image, len, opcodes); + goto done; + } + + ofd = open(ofile, O_WRONLY | O_CREAT | O_TRUNC, DEFFILEMODE); + if (ofd < 0) { + fprintf(stderr, "Could not open file %s for writing: ", ofile); + perror(NULL); + goto done; + } + pos = image; + do { + nr = write(ofd, pos, len); + if (nr < 0) { + fprintf(stderr, "Could not write data to %s: ", ofile); + perror(NULL); + goto done; + } + len -= nr; + pos += nr; + } while (len); + close(ofd); +done: put_log_buff(kbuff); return 0; } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 9af09e8099c0aae9fd6acd5d42cc5afb949871e9..d8d94b9bd76c7c4fb62c8ebf50e0033de97012da 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -8,16 +8,18 @@ ifneq ($(wildcard $(GENHDR)),) GENFLAGS := -DHAVE_GENHDR endif -CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -LDLIBS += -lcap +CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include +LDLIBS += -lcap -lelf -TEST_GEN_PROGS = 
test_verifier test_tag test_maps test_lru_map test_lpm_map +TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs + +TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o TEST_PROGS := test_kmod.sh include ../lib.mk -BPFOBJ := $(OUTPUT)/bpf.o +BPFOBJ := $(OUTPUT)/libbpf.a $(TEST_GEN_PROGS): $(BPFOBJ) @@ -28,3 +30,10 @@ force: $(BPFOBJ): force $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ + +CLANG ?= clang + +%.o: %.c + $(CLANG) -I../../../include/uapi -I../../../../samples/bpf/ \ + -D__x86_64__ -Wno-compare-distinct-pointer-types \ + -O2 -target bpf -c $< -o $@ diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h new file mode 100644 index 0000000000000000000000000000000000000000..19d0604f86946824b04138e8bcaf29f3628ab808 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_endian.h @@ -0,0 +1,23 @@ +#ifndef __BPF_ENDIAN__ +#define __BPF_ENDIAN__ + +#include + +#if __BYTE_ORDER == __LITTLE_ENDIAN +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +#elif __BYTE_ORDER == __BIG_ENDIAN +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +#else +# error "Fix your __BYTE_ORDER?!" +#endif + +#define bpf_htons(x) \ + (__builtin_constant_p(x) ? \ + __constant_htons(x) : __bpf_htons(x)) +#define bpf_ntohs(x) \ + (__builtin_constant_p(x) ? \ + __constant_ntohs(x) : __bpf_ntohs(x)) + +#endif diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 84a5d1823f028344e7a4339e8aa5eb45506d797d..20ecbaa0d85d72b860678caf49c3e421105802b8 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -35,4 +35,11 @@ static inline unsigned int bpf_num_possible_cpus(void) return possible_cpus; } +#define __bpf_percpu_val_align __attribute__((__aligned__(8))) + +#define BPF_DECLARE_PERCPU(type, name) \ + struct { type v; /* padding */ } __bpf_percpu_val_align \ + name[bpf_num_possible_cpus()] +#define bpf_percpu(name, cpu) name[(cpu)].v + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/test_iptunnel_common.h b/tools/testing/selftests/bpf/test_iptunnel_common.h new file mode 100644 index 0000000000000000000000000000000000000000..e4cd252a1b205d53c80bde3058a273219bed54a4 --- /dev/null +++ b/tools/testing/selftests/bpf/test_iptunnel_common.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _TEST_IPTNL_COMMON_H +#define _TEST_IPTNL_COMMON_H + +#include + +#define MAX_IPTNL_ENTRIES 256U + +struct vip { + union { + __u32 v6[4]; + __u32 v4; + } daddr; + __u16 dport; + __u16 family; + __u8 protocol; +}; + +struct iptnl_info { + union { + __u32 v6[4]; + __u32 v4; + } saddr; + union { + __u32 v6[4]; + __u32 v4; + } daddr; + __u16 family; + __u8 dmac[6]; +}; + +#endif diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/test_l4lb.c new file mode 100644 index 0000000000000000000000000000000000000000..1e10c9590991bfde9b35753d955591d2f66bb1c1 --- /dev/null +++ b/tools/testing/selftests/bpf/test_l4lb.c @@ -0,0 +1,473 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
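The BPF_DECLARE_PERCPU()/bpf_percpu() pair added to bpf_util.h exists because the kernel copies per-CPU map values to and from user space in 8-byte-aligned slots per CPU; a bare array of a 4-byte type (e.g. long on 32-bit userland) would stride incorrectly. A hedged usage sketch, with fd and key assumed to exist:

	BPF_DECLARE_PERCPU(long, values); /* one 8-byte slot per possible CPU */
	unsigned int i, nr_cpus = bpf_num_possible_cpus();
	long total = 0;

	assert(bpf_map_lookup_elem(fd, &key, values) == 0);
	for (i = 0; i < nr_cpus; i++)
		total += bpf_percpu(values, i); /* per-CPU view of one element */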
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" +#include "test_iptunnel_common.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; + +static inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << shift) | (word >> ((-shift) & 31)); +} + +/* copy paste of jhash from kernel sources to make sure llvm + * can compile it into valid sequence of bpf instructions + */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +#define JHASH_INITVAL 0xdeadbeef + +typedef unsigned int u32; + +static inline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const unsigned char *k = key; + + a = b = c = JHASH_INITVAL + length + initval; + + while (length > 12) { + a += *(u32 *)(k); + b += *(u32 *)(k + 4); + c += *(u32 *)(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + __jhash_final(a, b, c); + return c; +} + +static inline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); +} + +#define PCKT_FRAGMENTED 65343 +#define IPV4_HDR_LEN_NO_OPT 20 +#define IPV4_PLUS_ICMP_HDR 28 +#define IPV6_PLUS_ICMP_HDR 48 +#define RING_SIZE 2 +#define MAX_VIPS 12 +#define MAX_REALS 5 +#define CTL_MAP_SIZE 16 +#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE) +#define F_IPV6 (1 << 0) +#define F_HASH_NO_SRC_PORT (1 << 0) +#define F_ICMP (1 << 0) +#define F_SYN_SET (1 << 1) + +struct packet_description { + union { + __be32 src; + __be32 srcv6[4]; + }; + union { + __be32 dst; + __be32 dstv6[4]; + }; + union { + __u32 ports; + __u16 port16[2]; + }; + __u8 proto; + __u8 flags; +}; + +struct ctl_value { + union { + __u64 value; + __u32 ifindex; + __u8 mac[6]; + }; +}; + +struct vip_meta { + __u32 flags; + __u32 vip_num; +}; + +struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; +}; + +struct vip_stats { + __u64 bytes; + __u64 pkts; +}; + +struct eth_hdr { + unsigned char eth_dest[ETH_ALEN]; + unsigned char eth_source[ETH_ALEN]; + unsigned short eth_proto; +}; + +struct bpf_map_def SEC("maps") vip_map = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(struct vip), + .value_size = sizeof(struct vip_meta), + .max_entries = MAX_VIPS, +}; + +struct bpf_map_def SEC("maps") ch_rings = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = CH_RINGS_SIZE, +}; + +struct bpf_map_def 
SEC("maps") reals = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct real_definition), + .max_entries = MAX_REALS, +}; + +struct bpf_map_def SEC("maps") stats = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct vip_stats), + .max_entries = MAX_VIPS, +}; + +struct bpf_map_def SEC("maps") ctl_array = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct ctl_value), + .max_entries = CTL_MAP_SIZE, +}; + +static __always_inline __u32 get_packet_hash(struct packet_description *pckt, + bool ipv6) +{ + if (ipv6) + return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS), + pckt->ports, CH_RINGS_SIZE); + else + return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE); +} + +static __always_inline bool get_packet_dst(struct real_definition **real, + struct packet_description *pckt, + struct vip_meta *vip_info, + bool is_ipv6) +{ + __u32 hash = get_packet_hash(pckt, is_ipv6) % RING_SIZE; + __u32 key = RING_SIZE * vip_info->vip_num + hash; + __u32 *real_pos; + + real_pos = bpf_map_lookup_elem(&ch_rings, &key); + if (!real_pos) + return false; + key = *real_pos; + *real = bpf_map_lookup_elem(&reals, &key); + if (!(*real)) + return false; + return true; +} + +static __always_inline int parse_icmpv6(void *data, void *data_end, __u64 off, + struct packet_description *pckt) +{ + struct icmp6hdr *icmp_hdr; + struct ipv6hdr *ip6h; + + icmp_hdr = data + off; + if (icmp_hdr + 1 > data_end) + return TC_ACT_SHOT; + if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG) + return TC_ACT_OK; + off += sizeof(struct icmp6hdr); + ip6h = data + off; + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + pckt->proto = ip6h->nexthdr; + pckt->flags |= F_ICMP; + memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16); + memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16); + return TC_ACT_UNSPEC; +} + +static __always_inline int parse_icmp(void *data, void *data_end, __u64 off, + struct packet_description *pckt) +{ + struct icmphdr *icmp_hdr; + struct iphdr *iph; + + icmp_hdr = data + off; + if (icmp_hdr + 1 > data_end) + return TC_ACT_SHOT; + if (icmp_hdr->type != ICMP_DEST_UNREACH || + icmp_hdr->code != ICMP_FRAG_NEEDED) + return TC_ACT_OK; + off += sizeof(struct icmphdr); + iph = data + off; + if (iph + 1 > data_end) + return TC_ACT_SHOT; + if (iph->ihl != 5) + return TC_ACT_SHOT; + pckt->proto = iph->protocol; + pckt->flags |= F_ICMP; + pckt->src = iph->daddr; + pckt->dst = iph->saddr; + return TC_ACT_UNSPEC; +} + +static __always_inline bool parse_udp(void *data, __u64 off, void *data_end, + struct packet_description *pckt) +{ + struct udphdr *udp; + udp = data + off; + + if (udp + 1 > data_end) + return false; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = udp->source; + pckt->port16[1] = udp->dest; + } else { + pckt->port16[0] = udp->dest; + pckt->port16[1] = udp->source; + } + return true; +} + +static __always_inline bool parse_tcp(void *data, __u64 off, void *data_end, + struct packet_description *pckt) +{ + struct tcphdr *tcp; + + tcp = data + off; + if (tcp + 1 > data_end) + return false; + + if (tcp->syn) + pckt->flags |= F_SYN_SET; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = tcp->source; + pckt->port16[1] = tcp->dest; + } else { + pckt->port16[0] = tcp->dest; + pckt->port16[1] = tcp->source; + } + return true; +} + +static __always_inline int process_packet(void *data, __u64 off, void *data_end, + bool is_ipv6, struct __sk_buff *skb) +{ + void *pkt_start = (void *)(long)skb->data; + struct 
packet_description pckt = {}; + struct eth_hdr *eth = pkt_start; + struct bpf_tunnel_key tkey = {}; + struct vip_stats *data_stats; + struct real_definition *dst; + struct vip_meta *vip_info; + struct ctl_value *cval; + __u32 v4_intf_pos = 1; + __u32 v6_intf_pos = 2; + struct ipv6hdr *ip6h; + struct vip vip = {}; + struct iphdr *iph; + int tun_flag = 0; + __u16 pkt_bytes; + __u64 iph_len; + __u32 ifindex; + __u8 protocol; + __u32 vip_num; + int action; + + tkey.tunnel_ttl = 64; + if (is_ipv6) { + ip6h = data + off; + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + + iph_len = sizeof(struct ipv6hdr); + protocol = ip6h->nexthdr; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(ip6h->payload_len); + off += iph_len; + if (protocol == IPPROTO_FRAGMENT) { + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_ICMPV6) { + action = parse_icmpv6(data, data_end, off, &pckt); + if (action >= 0) + return action; + off += IPV6_PLUS_ICMP_HDR; + } else { + memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16); + memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16); + } + } else { + iph = data + off; + if (iph + 1 > data_end) + return TC_ACT_SHOT; + if (iph->ihl != 5) + return TC_ACT_SHOT; + + protocol = iph->protocol; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(iph->tot_len); + off += IPV4_HDR_LEN_NO_OPT; + + if (iph->frag_off & PCKT_FRAGMENTED) + return TC_ACT_SHOT; + if (protocol == IPPROTO_ICMP) { + action = parse_icmp(data, data_end, off, &pckt); + if (action >= 0) + return action; + off += IPV4_PLUS_ICMP_HDR; + } else { + pckt.src = iph->saddr; + pckt.dst = iph->daddr; + } + } + protocol = pckt.proto; + + if (protocol == IPPROTO_TCP) { + if (!parse_tcp(data, off, data_end, &pckt)) + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_UDP) { + if (!parse_udp(data, off, data_end, &pckt)) + return TC_ACT_SHOT; + } else { + return TC_ACT_SHOT; + } + + if (is_ipv6) + memcpy(vip.daddr.v6, pckt.dstv6, 16); + else + vip.daddr.v4 = pckt.dst; + + vip.dport = pckt.port16[1]; + vip.protocol = pckt.proto; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) { + vip.dport = 0; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) + return TC_ACT_SHOT; + pckt.port16[1] = 0; + } + + if (vip_info->flags & F_HASH_NO_SRC_PORT) + pckt.port16[0] = 0; + + if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6)) + return TC_ACT_SHOT; + + if (dst->flags & F_IPV6) { + cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + memcpy(tkey.remote_ipv6, dst->dstv6, 16); + tun_flag = BPF_F_TUNINFO_IPV6; + } else { + cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + tkey.remote_ipv4 = dst->dst; + } + vip_num = vip_info->vip_num; + data_stats = bpf_map_lookup_elem(&stats, &vip_num); + if (!data_stats) + return TC_ACT_SHOT; + data_stats->pkts++; + data_stats->bytes += pkt_bytes; + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag); + *(u32 *)eth->eth_dest = tkey.remote_ipv4; + return bpf_redirect(ifindex, 0); +} + +SEC("l4lb-demo") +int balancer_ingress(struct __sk_buff *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct eth_hdr *eth = data; + __u32 eth_proto; + __u32 nh_off; + + nh_off = sizeof(struct eth_hdr); + if (data + nh_off > data_end) + return TC_ACT_SHOT; + eth_proto = eth->eth_proto; + if (eth_proto == bpf_htons(ETH_P_IP)) + return process_packet(data, nh_off, data_end, false, ctx); + else if (eth_proto == 
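To make the consistent-hashing lookup in get_packet_dst() concrete, a worked example with this file's constants (the concrete numbers are invented): RING_SIZE is 2, so a VIP assigned vip_num 3 owns ch_rings slots 6 and 7; the flow hash picks one of the two, and the slot's value indexes the reals array:

	/* with RING_SIZE == 2 and vip_info->vip_num == 3: */
	__u32 hash = get_packet_hash(&pckt, is_ipv6) % RING_SIZE; /* 0 or 1 */
	__u32 key  = RING_SIZE * 3 + hash;			  /* slot 6 or 7 */
	/* ch_rings[key] holds the index of the chosen backend in 'reals' */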
bpf_htons(ETH_P_IPV6)) + return process_packet(data, nh_off, data_end, true, ctx); + else + return TC_ACT_SHOT; +} +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 00b0aff56e2e7256de150815e0da2eb0fb20d7b6..8c10c9180c1a6535f18caafcdf7195f69d04ecd2 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -22,7 +22,7 @@ #include "bpf_util.h" #define LOCAL_FREE_TARGET (128) -#define PERCPU_FREE_TARGET (16) +#define PERCPU_FREE_TARGET (4) static int nr_cpus; @@ -191,12 +191,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) int next_cpu = 0; if (map_flags & BPF_F_NO_COMMON_LRU) - /* Ther percpu lru list (i.e each cpu has its own LRU - * list) does not have a local free list. Hence, - * it will only free old nodes till there is no free - * from the LRU list. Hence, this test does not apply - * to BPF_F_NO_COMMON_LRU - */ + /* This test is only applicable to common LRU list */ return; printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, @@ -227,7 +222,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } /* Insert 1+tgt_free to 2*tgt_free @@ -273,12 +268,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) int next_cpu = 0; if (map_flags & BPF_F_NO_COMMON_LRU) - /* Ther percpu lru list (i.e each cpu has its own LRU - * list) does not have a local free list. Hence, - * it will only free old nodes till there is no free - * from the LRU list. 
Hence, this test does not apply - * to BPF_F_NO_COMMON_LRU - */ + /* This test is only applicable to common LRU list */ return; printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, @@ -290,11 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) assert(batch_size * 2 == tgt_free); map_size = tgt_free + batch_size; - if (map_flags & BPF_F_NO_COMMON_LRU) - lru_map_fd = create_map(map_type, map_flags, - map_size * nr_cpus); - else - lru_map_fd = create_map(map_type, map_flags, map_size); + lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size); @@ -341,7 +327,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(value[0] == 4321); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } value[0] = 1234; @@ -361,7 +347,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -387,6 +373,10 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) unsigned int map_size; int next_cpu = 0; + if (map_flags & BPF_F_NO_COMMON_LRU) + /* This test is only applicable to common LRU list */ + return; + printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, map_flags); @@ -396,11 +386,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) assert(batch_size * 2 == tgt_free); map_size = tgt_free * 2; - if (map_flags & BPF_F_NO_COMMON_LRU) - lru_map_fd = create_map(map_type, map_flags, - map_size * nr_cpus); - else - lru_map_fd = create_map(map_type, map_flags, map_size); + lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size); @@ -419,7 +405,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } /* Add 1+2*tgt_free to tgt_free*5/2 @@ -431,7 +417,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -491,7 +477,7 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -566,6 +552,65 @@ static void test_lru_sanity5(int map_type, int map_flags) printf("Pass\n"); } +/* Test list rotation for BPF_F_NO_COMMON_LRU map */ +static void test_lru_sanity6(int map_type, int map_flags, int tgt_free) +{ + int lru_map_fd, expected_map_fd; + unsigned long long key, value[nr_cpus]; + unsigned int map_size = tgt_free * 2; + int next_cpu = 0; + + if (!(map_flags & BPF_F_NO_COMMON_LRU)) + return; + + printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, + map_flags); + + 
assert(sched_next_online(0, &next_cpu) != -1); + + expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size); + assert(expected_map_fd != -1); + + lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus); + assert(lru_map_fd != -1); + + value[0] = 1234; + + for (key = 1; key <= tgt_free; key++) { + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); + } + + for (; key <= tgt_free * 2; key++) { + unsigned long long stable_key; + + /* Make ref bit sticky for key: [1, tgt_free] */ + for (stable_key = 1; stable_key <= tgt_free; stable_key++) { + /* Mark the ref bit */ + assert(!bpf_map_lookup_elem(lru_map_fd, &stable_key, + value)); + } + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + } + + for (; key <= tgt_free * 3; key++) { + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); + } + + assert(map_equal(lru_map_fd, expected_map_fd)); + + close(expected_map_fd); + close(lru_map_fd); + + printf("Pass\n"); +} + int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; @@ -593,6 +638,7 @@ int main(int argc, char **argv) test_lru_sanity3(map_types[t], map_flags[f], tgt_free); test_lru_sanity4(map_types[t], map_flags[f], tgt_free); test_lru_sanity5(map_types[t], map_flags[f]); + test_lru_sanity6(map_types[t], map_flags[f], tgt_free); printf("\n"); } diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 20f1871874df54a7d567797c753e379a760e492c..93314524de0d033ded2a00d71bbf500a0afbd60e 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -28,7 +28,7 @@ static int map_flags; static void test_hashmap(int task, void *data) { - long long key, next_key, value; + long long key, next_key, first_key, value; int fd; fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), @@ -89,10 +89,13 @@ static void test_hashmap(int task, void *data) assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 && + (first_key == 1 || first_key == 2)); assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && - (next_key == 1 || next_key == 2)); + (next_key == first_key)); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && - (next_key == 1 || next_key == 2)); + (next_key == 1 || next_key == 2) && + (next_key != first_key)); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && errno == ENOENT); @@ -105,6 +108,8 @@ static void test_hashmap(int task, void *data) key = 0; /* Check that map is empty. 
*/ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == -1 && + errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && errno == ENOENT); @@ -132,20 +137,20 @@ static void test_hashmap_sizes(int task, void *data) static void test_hashmap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); - long long value[nr_cpus]; - long long key, next_key; + BPF_DECLARE_PERCPU(long, value); + long long key, next_key, first_key; int expected_key_mask = 0; int fd, i; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), - sizeof(value[0]), 2, map_flags); + sizeof(bpf_percpu(value, 0)), 2, map_flags); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); } for (i = 0; i < nr_cpus; i++) - value[i] = i + 100; + bpf_percpu(value, i) = i + 100; key = 1; /* Insert key=1 element. */ @@ -165,8 +170,9 @@ static void test_hashmap_percpu(int task, void *data) /* Check that key=1 can be found. Value could be 0 if the lookup * was run from a different CPU. */ - value[0] = 1; - assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100); + bpf_percpu(value, 0) = 1; + assert(bpf_map_lookup_elem(fd, &key, value) == 0 && + bpf_percpu(value, 0) == 100); key = 2; /* Check that key=2 is not found. */ @@ -193,14 +199,20 @@ static void test_hashmap_percpu(int task, void *data) assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 && + ((expected_key_mask & first_key) == first_key)); while (!bpf_map_get_next_key(fd, &key, &next_key)) { + if (first_key) { + assert(next_key == first_key); + first_key = 0; + } assert((expected_key_mask & next_key) == next_key); expected_key_mask &= ~next_key; assert(bpf_map_lookup_elem(fd, &next_key, value) == 0); for (i = 0; i < nr_cpus; i++) - assert(value[i] == i + 100); + assert(bpf_percpu(value, i) == i + 100); key = next_key; } @@ -219,6 +231,8 @@ static void test_hashmap_percpu(int task, void *data) key = 0; /* Check that map is empty. */ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == -1 && + errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && errno == ENOENT); @@ -264,6 +278,8 @@ static void test_arraymap(int task, void *data) assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 && + next_key == 0); assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && next_key == 0); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && @@ -281,34 +297,36 @@ static void test_arraymap(int task, void *data) static void test_arraymap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); + BPF_DECLARE_PERCPU(long, values); int key, next_key, fd, i; - long long values[nr_cpus]; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(values[0]), 2, 0); + sizeof(bpf_percpu(values, 0)), 2, 0); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); } for (i = 0; i < nr_cpus; i++) - values[i] = i + 100; + bpf_percpu(values, i) = i + 100; key = 1; /* Insert key=1 element. */ assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); - values[0] = 0; + bpf_percpu(values, 0) = 0; assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 && errno == EEXIST); /* Check that key=1 can be found. 
*/ - assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100); + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && + bpf_percpu(values, 0) == 100); key = 0; /* Check that key=0 is also found and zero initialized. */ assert(bpf_map_lookup_elem(fd, &key, values) == 0 && - values[0] == 0 && values[nr_cpus - 1] == 0); + bpf_percpu(values, 0) == 0 && + bpf_percpu(values, nr_cpus - 1) == 0); /* Check that key=2 cannot be inserted due to max_entries limit. */ key = 2; @@ -319,6 +337,8 @@ static void test_arraymap_percpu(int task, void *data) assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 && + next_key == 0); assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && next_key == 0); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && @@ -336,15 +356,15 @@ static void test_arraymap_percpu(int task, void *data) static void test_arraymap_percpu_many_keys(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); + BPF_DECLARE_PERCPU(long, values); /* nr_keys is not too large otherwise the test stresses percpu * allocator more than anything else */ unsigned int nr_keys = 2000; - long long values[nr_cpus]; int key, fd, i; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(values[0]), nr_keys, 0); + sizeof(bpf_percpu(values, 0)), nr_keys, 0); if (fd < 0) { printf("Failed to create per-cpu arraymap '%s'!\n", strerror(errno)); @@ -352,19 +372,19 @@ static void test_arraymap_percpu_many_keys(void) } for (i = 0; i < nr_cpus; i++) - values[i] = i + 10; + bpf_percpu(values, i) = i + 10; for (key = 0; key < nr_keys; key++) assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); for (key = 0; key < nr_keys; key++) { for (i = 0; i < nr_cpus; i++) - values[i] = 0; + bpf_percpu(values, i) = 0; assert(bpf_map_lookup_elem(fd, &key, values) == 0); for (i = 0; i < nr_cpus; i++) - assert(values[i] == i + 10); + assert(bpf_percpu(values, i) == i + 10); } close(fd); @@ -400,6 +420,8 @@ static void test_map_large(void) errno == E2BIG); /* Iterate through all elements. */ + assert(bpf_map_get_next_key(fd, NULL, &key) == 0); + key.c = -1; for (i = 0; i < MAP_SIZE; i++) assert(bpf_map_get_next_key(fd, &key, &key) == 0); assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); @@ -499,6 +521,7 @@ static void test_map_parallel(void) errno == EEXIST); /* Check that all elements were inserted. */ + assert(bpf_map_get_next_key(fd, NULL, &key) == 0); key = -1; for (i = 0; i < MAP_SIZE; i++) assert(bpf_map_get_next_key(fd, &key, &key) == 0); @@ -518,6 +541,7 @@ static void test_map_parallel(void) /* Nothing should be left. */ key = -1; + assert(bpf_map_get_next_key(fd, NULL, &key) == -1 && errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); } diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c new file mode 100644 index 0000000000000000000000000000000000000000..39387bb7e08ca79be1ebd94452c0c126abd2316c --- /dev/null +++ b/tools/testing/selftests/bpf/test_pkt_access.c @@ -0,0 +1,64 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
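The assertions added above exercise a behavior change these tests rely on: bpf_map_get_next_key() with a NULL key now returns the map's first key (ENOENT if the map is empty), so a full walk no longer needs a sentinel key known to be absent. The resulting iteration idiom, sketched for a long long-keyed map:

	long long key;

	if (bpf_map_get_next_key(fd, NULL, &key) == 0) {	/* first key */
		do {
			/* ... visit 'key' ... */
		} while (bpf_map_get_next_key(fd, &key, &key) == 0);
	}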
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" +#include "bpf_endian.h" + +#define barrier() __asm__ __volatile__("": : :"memory") +int _version SEC("version") = 1; + +SEC("test1") +int process(struct __sk_buff *skb) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + struct ethhdr *eth = (struct ethhdr *)(data); + struct tcphdr *tcp = NULL; + __u8 proto = 255; + __u64 ihl_len; + + if (eth + 1 > data_end) + return TC_ACT_SHOT; + + if (eth->h_proto == bpf_htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)(eth + 1); + + if (iph + 1 > data_end) + return TC_ACT_SHOT; + ihl_len = iph->ihl * 4; + proto = iph->protocol; + tcp = (struct tcphdr *)((void *)(iph) + ihl_len); + } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1); + + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + ihl_len = sizeof(*ip6h); + proto = ip6h->nexthdr; + tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len); + } + + if (tcp) { + if (((void *)(tcp) + 20) > data_end || proto != 6) + return TC_ACT_SHOT; + barrier(); /* to force ordering of checks */ + if (((void *)(tcp) + 18) > data_end) + return TC_ACT_SHOT; + if (tcp->urg_ptr == 123) + return TC_ACT_OK; + } + + return TC_ACT_UNSPEC; +} diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c new file mode 100644 index 0000000000000000000000000000000000000000..4ed049a0b14bcc3054b8da0132adbc06e820f590 --- /dev/null +++ b/tools/testing/selftests/bpf/test_progs.c @@ -0,0 +1,283 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include + +#include +typedef __u16 __sum16; +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include "test_iptunnel_common.h" +#include "bpf_util.h" +#include "bpf_endian.h" + +static int error_cnt, pass_cnt; + +#define MAGIC_BYTES 123 + +/* ipv4 test vector */ +static struct { + struct ethhdr eth; + struct iphdr iph; + struct tcphdr tcp; +} __packed pkt_v4 = { + .eth.h_proto = bpf_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = 6, + .iph.tot_len = bpf_htons(MAGIC_BYTES), + .tcp.urg_ptr = 123, +}; + +/* ipv6 test vector */ +static struct { + struct ethhdr eth; + struct ipv6hdr iph; + struct tcphdr tcp; +} __packed pkt_v6 = { + .eth.h_proto = bpf_htons(ETH_P_IPV6), + .iph.nexthdr = 6, + .iph.payload_len = bpf_htons(MAGIC_BYTES), + .tcp.urg_ptr = 123, +}; + +#define CHECK(condition, tag, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + error_cnt++; \ + printf("%s:FAIL:%s ", __func__, tag); \ + printf(format); \ + } else { \ + pass_cnt++; \ + printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\ + } \ +}) + +static int bpf_prog_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_program *prog; + struct bpf_object *obj; + int err; + + obj = bpf_object__open(file); + if (IS_ERR(obj)) { + error_cnt++; + return -ENOENT; + } + + prog = bpf_program__next(NULL, obj); + if (!prog) { + bpf_object__close(obj); + error_cnt++; + return -ENOENT; + } + + bpf_program__set_type(prog, type); + err = bpf_object__load(obj); + if (err) { + bpf_object__close(obj); + error_cnt++; + return -EINVAL; + } + + *pobj = obj; + *prog_fd = bpf_program__fd(prog); + return 0; +} + +static int bpf_find_map(const char *test, struct bpf_object *obj, + const char *name) +{ + struct bpf_map *map; + + map = bpf_object__find_map_by_name(obj, name); + if (!map) { + printf("%s:FAIL:map '%s' not found\n", test, name); + error_cnt++; + return -1; + } + return bpf_map__fd(map); +} + +static void test_pkt_access(void) +{ + const char *file = "./test_pkt_access.o"; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) + return; + + err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || errno || retval, "ipv4", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || errno || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + bpf_object__close(obj); +} + +static void test_xdp(void) +{ + struct vip key4 = {.protocol = 6, .family = AF_INET}; + struct vip key6 = {.protocol = 6, .family = AF_INET6}; + struct iptnl_info value4 = {.family = AF_INET}; + struct iptnl_info value6 = {.family = AF_INET6}; + const char *file = "./test_xdp.o"; + struct bpf_object *obj; + char buf[128]; + struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); + struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + __u32 duration, retval, size; + int err, prog_fd, map_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (err) + return; + + map_fd = bpf_find_map(__func__, obj, "vip2tnl"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key4, &value4, 0); + bpf_map_update_elem(map_fd, &key6, &value6, 0); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + + CHECK(err || errno || retval != XDP_TX || size != 74 || + iph->protocol != IPPROTO_IPIP, "ipv4", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || errno || retval != XDP_TX || size != 114 || + iph6->nexthdr != IPPROTO_IPV6, "ipv6", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); +out: + bpf_object__close(obj); +} + +#define MAGIC_VAL 0x1234 +#define NUM_ITER 100000 +#define VIP_NUM 5 + +static void test_l4lb(void) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + const char *file = "./test_l4lb.o"; + struct vip key = {.protocol = 6}; + struct vip_meta { + __u32 flags; + __u32 vip_num; + } value = {.vip_num = VIP_NUM}; + __u32 stats_key = 
VIP_NUM; + struct vip_stats { + __u64 bytes; + __u64 pkts; + } stats[nr_cpus]; + struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; + } real_def = {.dst = MAGIC_VAL}; + __u32 ch_key = 11, real_num = 3; + __u32 duration, retval, size; + int err, i, prog_fd, map_fd; + __u64 bytes = 0, pkts = 0; + struct bpf_object *obj; + char buf[128]; + u32 *magic = (u32 *)buf; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) + return; + + map_fd = bpf_find_map(__func__, obj, "vip_map"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key, &value, 0); + + map_fd = bpf_find_map(__func__, obj, "ch_rings"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); + + map_fd = bpf_find_map(__func__, obj, "reals"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &real_num, &real_def, 0); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || + *magic != MAGIC_VAL, "ipv4", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || + *magic != MAGIC_VAL, "ipv6", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + map_fd = bpf_find_map(__func__, obj, "stats"); + if (map_fd < 0) + goto out; + bpf_map_lookup_elem(map_fd, &stats_key, stats); + for (i = 0; i < nr_cpus; i++) { + bytes += stats[i].bytes; + pkts += stats[i].pkts; + } + if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { + error_cnt++; + printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); + } +out: + bpf_object__close(obj); +} + +int main(void) +{ + struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; + + setrlimit(RLIMIT_MEMLOCK, &rinf); + + test_pkt_access(); + test_xdp(); + test_l4lb(); + + printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c848e90b64213128a248c9669a2a2796692c5776..3773562056da267aee91878ed8088b3c577a997e 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -46,6 +46,7 @@ #define MAX_INSNS 512 #define MAX_FIXUPS 8 +#define MAX_NR_MAPS 4 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) @@ -55,6 +56,7 @@ struct bpf_test { int fixup_map1[MAX_FIXUPS]; int fixup_map2[MAX_FIXUPS]; int fixup_prog[MAX_FIXUPS]; + int fixup_map_in_map[MAX_FIXUPS]; const char *errstr; const char *errstr_unpriv; enum { @@ -188,6 +190,86 @@ static struct bpf_test tests[] = { .errstr = "invalid bpf_ld_imm64 insn", .result = REJECT, }, + { + "test6 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_RAW_INSN(0, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "test7 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "test8 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "uses reserved fields", + .result = REJECT, + }, + { + "test9 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + 
BPF_RAW_INSN(0, 0, 0, 1, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test10 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test11 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test12 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "not pointing to valid bpf_map", + .result = REJECT, + }, + { + "test13 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, { "no bpf_exit", .insns = { @@ -328,6 +410,30 @@ static struct bpf_test tests[] = { .errstr = "invalid read from stack", .result = REJECT, }, + { + "invalid fp arithmetic", + /* If this gets ever changed, make sure JITs can deal with it. */ + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer arithmetic", + .result_unpriv = REJECT, + .errstr = "R1 invalid mem access", + .result = REJECT, + }, + { + "non-invalid fp arithmetic", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, { "invalid argument register", .insns = { @@ -770,6 +876,9 @@ static struct bpf_test tests[] = { BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, vlan_tci)), BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, napi_id)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), BPF_EXIT_INSN(), }, .result = ACCEPT, @@ -1795,6 +1904,20 @@ static struct bpf_test tests[] = { .result_unpriv = REJECT, .result = ACCEPT, }, + { + "unpriv: adding of fp", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "pointer arithmetic prohibited", + .result_unpriv = REJECT, + .errstr = "R1 invalid mem access", + .result = REJECT, + }, { "unpriv: cmp of stack pointer", .insns = { @@ -1809,16 +1932,22 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { - "unpriv: obfuscate stack pointer", + "stack pointer arithmetic", .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), + BPF_ST_MEM(0, BPF_REG_2, 4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_ST_MEM(0, BPF_REG_2, 4, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R2 pointer arithmetic", - .result_unpriv = REJECT, .result 
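For orientation on tests 6 through 13: BPF_LD | BPF_IMM | BPF_DW is the only 16-byte eBPF instruction. It spans two struct bpf_insn slots, and the second slot is a pure continuation whose code, registers, and offset are reserved (must be zero), with only imm carrying the upper 32 bits. In the raw form these tests use, a well-formed load of a 64-bit immediate IMM (placeholder) into R0 is:

	BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_0, 0, 0, (__u32)(IMM)),
	BPF_RAW_INSN(0, 0, 0, 0, ((__u64)(IMM)) >> 32), /* continuation slot */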
= ACCEPT, }, { @@ -2466,6 +2595,25 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, + { + "direct packet access: test16 (arith on data_end)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid access to packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "helper access to packet: test1, valid packet_ptr range", .insns = { @@ -4720,6 +4868,75 @@ static struct bpf_test tests[] = { .result = REJECT, .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { + "map in map access", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .result = ACCEPT, + }, + { + "invalid inner map pointer", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .errstr = "R1 type=inv expected=map_ptr", + .errstr_unpriv = "R1 pointer arithmetic prohibited", + .result = REJECT, + }, + { + "forgot null checking on the inner map pointer", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .errstr = "R1 type=map_value_or_null expected=map_ptr", + .result = REJECT, } }; @@ -4757,42 +4974,73 @@ static int create_prog_array(void) return fd; } +static int create_map_in_map(void) +{ + int inner_map_fd, outer_map_fd; + + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), + sizeof(int), 1, 0); + if (inner_map_fd < 0) { + printf("Failed to create array '%s'!\n", strerror(errno)); + return inner_map_fd; + } + + outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, + sizeof(int), inner_map_fd, 1, 0); + if (outer_map_fd < 0) 
+ printf("Failed to create array of maps '%s'!\n", + strerror(errno)); + + close(inner_map_fd); + + return outer_map_fd; +} + static char bpf_vlog[32768]; static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, - int *fd_f1, int *fd_f2, int *fd_f3) + int *map_fds) { int *fixup_map1 = test->fixup_map1; int *fixup_map2 = test->fixup_map2; int *fixup_prog = test->fixup_prog; + int *fixup_map_in_map = test->fixup_map_in_map; /* Allocating HTs with 1 elem is fine here, since we only test * for verifier and not do a runtime lookup, so the only thing * that really matters is value size in this case. */ if (*fixup_map1) { - *fd_f1 = create_map(sizeof(long long), 1); + map_fds[0] = create_map(sizeof(long long), 1); do { - prog[*fixup_map1].imm = *fd_f1; + prog[*fixup_map1].imm = map_fds[0]; fixup_map1++; } while (*fixup_map1); } if (*fixup_map2) { - *fd_f2 = create_map(sizeof(struct test_val), 1); + map_fds[1] = create_map(sizeof(struct test_val), 1); do { - prog[*fixup_map2].imm = *fd_f2; + prog[*fixup_map2].imm = map_fds[1]; fixup_map2++; } while (*fixup_map2); } if (*fixup_prog) { - *fd_f3 = create_prog_array(); + map_fds[2] = create_prog_array(); do { - prog[*fixup_prog].imm = *fd_f3; + prog[*fixup_prog].imm = map_fds[2]; fixup_prog++; } while (*fixup_prog); } + + if (*fixup_map_in_map) { + map_fds[3] = create_map_in_map(); + do { + prog[*fixup_map_in_map].imm = map_fds[3]; + fixup_map_in_map++; + } while (*fixup_map_in_map); + } } static void do_test_single(struct bpf_test *test, bool unpriv, @@ -4802,10 +5050,14 @@ static void do_test_single(struct bpf_test *test, bool unpriv, struct bpf_insn *prog = test->insns; int prog_len = probe_filter_length(prog); int prog_type = test->prog_type; - int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; + int map_fds[MAX_NR_MAPS]; const char *expected_err; + int i; + + for (i = 0; i < MAX_NR_MAPS; i++) + map_fds[i] = -1; - do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); + do_test_fixup(test, prog, map_fds); fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_len, "GPL", 0, bpf_vlog, @@ -4848,9 +5100,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv, " (NOTE: reject due to unknown alignment)" : ""); close_fds: close(fd_prog); - close(fd_f1); - close(fd_f2); - close(fd_f3); + for (i = 0; i < MAX_NR_MAPS; i++) + close(map_fds[i]); sched_yield(); return; fail_log: diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/test_xdp.c new file mode 100644 index 0000000000000000000000000000000000000000..5e7df8bb5b5db914e098a73ab1275921cf68b094 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp.c @@ -0,0 +1,235 @@ +/* Copyright (c) 2016,2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/pkt_cls.h>
+#include <sys/socket.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_iptunnel_common.h"
+
+int _version SEC("version") = 1;
+
+struct bpf_map_def SEC("maps") rxcnt = {
+	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u64),
+	.max_entries = 256,
+};
+
+struct bpf_map_def SEC("maps") vip2tnl = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct vip),
+	.value_size = sizeof(struct iptnl_info),
+	.max_entries = MAX_IPTNL_ENTRIES,
+};
+
+static __always_inline void count_tx(__u32 protocol)
+{
+	__u64 *rxcnt_count;
+
+	rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
+	if (rxcnt_count)
+		*rxcnt_count += 1;
+}
+
+static __always_inline int get_dport(void *trans_data, void *data_end,
+				     __u8 protocol)
+{
+	struct tcphdr *th;
+	struct udphdr *uh;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		th = (struct tcphdr *)trans_data;
+		if (th + 1 > data_end)
+			return -1;
+		return th->dest;
+	case IPPROTO_UDP:
+		uh = (struct udphdr *)trans_data;
+		if (uh + 1 > data_end)
+			return -1;
+		return uh->dest;
+	default:
+		return 0;
+	}
+}
+
+static __always_inline void set_ethhdr(struct ethhdr *new_eth,
+				       const struct ethhdr *old_eth,
+				       const struct iptnl_info *tnl,
+				       __be16 h_proto)
+{
+	memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
+	memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
+	new_eth->h_proto = h_proto;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp)
+{
+	void *data_end = (void *)(long)xdp->data_end;
+	void *data = (void *)(long)xdp->data;
+	struct iptnl_info *tnl;
+	struct ethhdr *new_eth;
+	struct ethhdr *old_eth;
+	struct iphdr *iph = data + sizeof(struct ethhdr);
+	__u16 *next_iph;
+	__u16 payload_len;
+	struct vip vip = {};
+	int dport;
+	__u32 csum = 0;
+	int i;
+
+	if (iph + 1 > data_end)
+		return XDP_DROP;
+
+	dport = get_dport(iph + 1, data_end, iph->protocol);
+	if (dport == -1)
+		return XDP_DROP;
+
+	vip.protocol = iph->protocol;
+	vip.family = AF_INET;
+	vip.daddr.v4 = iph->daddr;
+	vip.dport = dport;
+	payload_len = bpf_ntohs(iph->tot_len);
+
+	tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+	/* It only does v4-in-v4 */
+	if (!tnl || tnl->family != AF_INET)
+		return XDP_PASS;
+
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+		return XDP_DROP;
+
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+
+	new_eth = data;
+	iph = data + sizeof(*new_eth);
+	old_eth = data + sizeof(*iph);
+
+	if (new_eth + 1 > data_end ||
+	    old_eth + 1 > data_end ||
+	    iph + 1 > data_end)
+		return XDP_DROP;
+
+	set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
+
+	iph->version = 4;
+	iph->ihl = sizeof(*iph) >> 2;
+	iph->frag_off = 0;
+	iph->protocol = IPPROTO_IPIP;
+	iph->check = 0;
+	iph->tos = 0;
+	iph->tot_len = bpf_htons(payload_len + sizeof(*iph));
+	iph->daddr = tnl->daddr.v4;
+	iph->saddr = tnl->saddr.v4;
+	iph->ttl = 8;
+
+	/* compute the IPv4 header checksum: sum 16-bit words, fold the carry */
+	next_iph = (__u16 *)iph;
+#pragma clang loop unroll(full)
+	for (i = 0; i < sizeof(*iph) >> 1; i++)
+		csum += *next_iph++;
+
+	iph->check = ~((csum & 0xffff) + (csum >> 16));
+
+	count_tx(vip.protocol);
+
+	return XDP_TX;
+}
+
+static __always_inline int handle_ipv6(struct xdp_md *xdp)
+{
+	void *data_end = (void *)(long)xdp->data_end;
+	void *data = (void *)(long)xdp->data;
+	struct iptnl_info *tnl;
+	struct ethhdr *new_eth;
+	struct ethhdr *old_eth;
+	struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+	__u16 payload_len;
+	struct vip vip = {};
+	int dport;
+
+	if (ip6h + 1 > data_end)
+		return XDP_DROP;
+
+	dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
+	if (dport == -1)
+		return XDP_DROP;
+
+	vip.protocol = ip6h->nexthdr;
+	vip.family = AF_INET6;
+	memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
+	vip.dport = dport;
+	payload_len = ip6h->payload_len;
+
+	tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+	/* It only does v6-in-v6 */
+	if (!tnl || tnl->family != AF_INET6)
+		return XDP_PASS;
+
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+		return XDP_DROP;
+
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+
+	new_eth = data;
+	ip6h = data + sizeof(*new_eth);
+	old_eth = data + sizeof(*ip6h);
+
+	if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
+	    ip6h + 1 > data_end)
+		return XDP_DROP;
+
+	set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
+
+	ip6h->version = 6;
+	ip6h->priority = 0;
+	memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+	ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h));
+	ip6h->nexthdr = IPPROTO_IPV6;
+	ip6h->hop_limit = 8;
+	memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
+	memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
+
+	count_tx(vip.protocol);
+
+	return XDP_TX;
+}
+
+SEC("xdp_tx_iptunnel")
+int _xdp_tx_iptunnel(struct xdp_md *xdp)
+{
+	void *data_end = (void *)(long)xdp->data_end;
+	void *data = (void *)(long)xdp->data;
+	struct ethhdr *eth = data;
+	__u16 h_proto;
+
+	if (eth + 1 > data_end)
+		return XDP_DROP;
+
+	h_proto = eth->h_proto;
+
+	if (h_proto == bpf_htons(ETH_P_IP))
+		return handle_ipv4(xdp);
+	else if (h_proto == bpf_htons(ETH_P_IPV6))
+		return handle_ipv6(xdp);
+	else
+		return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index fbfe5d0d5c2e05028e6af86d8db76d95e41c4fc5..35cbb4cba4109551cc14beabcba39dff5d22e85c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,7 +5,7 @@ CFLAGS += -I../../../../usr/include/
 
 reuseport_bpf_numa: LDFLAGS += -lnuma
 
-TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh
+TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh
 TEST_GEN_FILES = socket
 TEST_GEN_FILES += psock_fanout psock_tpacket
 TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/netdevice.sh b/tools/testing/selftests/net/netdevice.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4e00568d70c2c398651675ad763f646453b45b3b
--- /dev/null
+++ b/tools/testing/selftests/net/netdevice.sh
@@ -0,0 +1,200 @@
+#!/bin/sh
+#
+# This test checks network interfaces. For the moment it tests
+# only Ethernet interfaces (but wifi could easily be added).
+#
+# We assume that all network drivers are loaded; if not, they probably
+# failed earlier in the boot process and their logged errors will be caught by another test.
+#
+
+# this function tries to bring up the interface;
+# if it is already up, nothing is done
+# arg1: network interface name
+kci_net_start()
+{
+	netdev=$1
+
+	ip link show "$netdev" |grep -q UP
+	if [ $? -eq 0 ];then
+		echo "SKIP: $netdev: interface already up"
+		return 0
+	fi
+
+	ip link set "$netdev" up
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: failed to bring interface up"
+		return 1
+	else
+		echo "PASS: $netdev: set interface up"
+		NETDEV_STARTED=1
+	fi
+	return 0
+}
+
+# this function tries to set up an IP and a MAC address on a network interface;
+# it does nothing if the interface was already up
+# arg1: network interface name
+kci_net_setup()
+{
+	netdev=$1
+
+	# do nothing if the interface was already up
+	if [ $NETDEV_STARTED -eq 0 ];then
+		return 0
+	fi
+
+	MACADDR='02:03:04:05:06:07'
+	ip link set dev $netdev address "$MACADDR"
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: Cannot set MAC address"
+	else
+		ip link show $netdev |grep -q "$MACADDR"
+		if [ $? -eq 0 ];then
+			echo "PASS: $netdev: set MAC address"
+		else
+			echo "FAIL: $netdev: Cannot set MAC address"
+		fi
+	fi
+
+	#check that the interface does not already have an IP
+	ip address show "$netdev" |grep -q '^[[:space:]]*inet'
+	if [ $? -eq 0 ];then
+		echo "SKIP: $netdev: already has an IP"
+		return 0
+	fi
+
+	# TODO: which IP address to set? DHCP?
+	echo "SKIP: $netdev: set IP address"
+	return 0
+}
+
+# test an ethtool command
+# arg1: return code for not supported (see the ethtool source code)
+# arg2: summary of the command
+# arg3: command to execute
+kci_netdev_ethtool_test()
+{
+	if [ $# -le 2 ];then
+		echo "SKIP: $netdev: ethtool: invalid number of arguments"
+		return 1
+	fi
+	$3 >/dev/null
+	ret=$?
+	if [ $ret -ne 0 ];then
+		if [ $ret -eq "$1" ];then
+			echo "SKIP: $netdev: ethtool $2 not supported"
+		else
+			echo "FAIL: $netdev: ethtool $2"
+			return 1
+		fi
+	else
+		echo "PASS: $netdev: ethtool $2"
+	fi
+	return 0
+}
+
+# test ethtool commands
+# arg1: network interface name
+kci_netdev_ethtool()
+{
+	netdev=$1
+
+	#check presence of ethtool
+	ethtool --version 2>/dev/null >/dev/null
+	if [ $? -ne 0 ];then
+		echo "SKIP: ethtool not present"
+		return 1
+	fi
+
+	TMP_ETHTOOL_FEATURES="$(mktemp)"
+	if [ ! -e "$TMP_ETHTOOL_FEATURES" ];then
+		echo "SKIP: Cannot create a tmp file"
+		return 1
+	fi
+
+	ethtool -k "$netdev" > "$TMP_ETHTOOL_FEATURES"
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: ethtool list features"
+		rm "$TMP_ETHTOOL_FEATURES"
+		return 1
+	fi
+	echo "PASS: $netdev: ethtool list features"
+	#TODO: for each non-fixed feature, try to turn it on/off
+	rm "$TMP_ETHTOOL_FEATURES"
+
+	kci_netdev_ethtool_test 74 'dump' "ethtool -d $netdev"
+	kci_netdev_ethtool_test 94 'stats' "ethtool -S $netdev"
+	return 0
+}
+
+# stop a netdev
+# arg1: network interface name
+kci_netdev_stop()
+{
+	netdev=$1
+
+	if [ $NETDEV_STARTED -eq 0 ];then
+		echo "SKIP: $netdev: interface kept up"
+		return 0
+	fi
+
+	ip link set "$netdev" down
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: stop interface"
+		return 1
+	fi
+	echo "PASS: $netdev: stop interface"
+	return 0
+}
+
+# run all tests on a netdev
+# arg1: network interface name
+kci_test_netdev()
+{
+	NETDEV_STARTED=0
+	IFACE_TO_UPDOWN="$1"
+	IFACE_TO_TEST="$1"
+	#check for VLAN interface
+	#(for "vlan@master", test the vlan device but bring up/down the master)
+	MASTER_IFACE="$(echo $1 | cut -d@ -f2)"
+	if [ ! -z "$MASTER_IFACE" ];then
+		IFACE_TO_UPDOWN="$MASTER_IFACE"
+		IFACE_TO_TEST="$(echo $1 | cut -d@ -f1)"
+	fi
+
+	kci_net_start "$IFACE_TO_UPDOWN"
+
+	kci_net_setup "$IFACE_TO_TEST"
+
+	kci_netdev_ethtool "$IFACE_TO_TEST"
+
+	kci_netdev_stop "$IFACE_TO_UPDOWN"
+	return 0
+}
+
+#check for needed privileges
+if [ "$(id -u)" -ne 0 ];then
+	echo "SKIP: Need root privileges"
+	exit 0
+fi
+
+ip -Version 2>/dev/null >/dev/null
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without the ip tool"
+	exit 0
+fi
+
+TMP_LIST_NETDEV="$(mktemp)"
+if [ ! -e "$TMP_LIST_NETDEV" ];then
-e "$TMP_LIST_NETDEV" ];then + echo "FAIL: Cannot create a tmp file" + exit 1 +fi + +ip link show |grep '^[0-9]' | grep -oE '[[:space:]].*eth[0-9]*:|[[:space:]].*enp[0-9]s[0-9]:' | cut -d\ -f2 | cut -d: -f1> "$TMP_LIST_NETDEV" +while read netdev +do + kci_test_netdev "$netdev" +done < "$TMP_LIST_NETDEV" + +rm "$TMP_LIST_NETDEV" +exit 0 diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index e62bb354820cacdc653a19db385e3bd497931d0b..989f917068d1ccfa1dd67ed8d2353b4b13904d2b 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -71,7 +71,7 @@ /* Open a socket in a given fanout mode. * @return -1 if mode is bad, a valid socket otherwise */ -static int sock_fanout_open(uint16_t typeflags, int num_packets) +static int sock_fanout_open(uint16_t typeflags, uint16_t group_id) { int fd, val; @@ -81,8 +81,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets) exit(1); } - /* fanout group ID is always 0: tests whether old groups are deleted */ - val = ((int) typeflags) << 16; + val = (((int) typeflags) << 16) | group_id; if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) { if (close(fd)) { perror("close packet"); @@ -113,6 +112,20 @@ static void sock_fanout_set_cbpf(int fd) } } +static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id) +{ + int sockopt; + socklen_t sockopt_len = sizeof(sockopt); + + if (getsockopt(fd, SOL_PACKET, PACKET_FANOUT, + &sockopt, &sockopt_len)) { + perror("failed to getsockopt"); + exit(1); + } + *typeflags = sockopt >> 16; + *group_id = sockopt & 0xfffff; +} + static void sock_fanout_set_ebpf(int fd) { const int len_off = __builtin_offsetof(struct __sk_buff, len); @@ -241,26 +254,26 @@ static void test_control_group(void) fprintf(stderr, "test: control multiple sockets\n"); - fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 20); + fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 0); if (fds[0] == -1) { fprintf(stderr, "ERROR: failed to open HASH socket\n"); exit(1); } if (sock_fanout_open(PACKET_FANOUT_HASH | - PACKET_FANOUT_FLAG_DEFRAG, 10) != -1) { + PACKET_FANOUT_FLAG_DEFRAG, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong flag defrag\n"); exit(1); } if (sock_fanout_open(PACKET_FANOUT_HASH | - PACKET_FANOUT_FLAG_ROLLOVER, 10) != -1) { + PACKET_FANOUT_FLAG_ROLLOVER, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong flag ro\n"); exit(1); } - if (sock_fanout_open(PACKET_FANOUT_CPU, 10) != -1) { + if (sock_fanout_open(PACKET_FANOUT_CPU, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong mode\n"); exit(1); } - fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 20); + fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 0); if (fds[1] == -1) { fprintf(stderr, "ERROR: failed to join group\n"); exit(1); @@ -271,6 +284,61 @@ static void test_control_group(void) } } +/* Test creating a unique fanout group ids */ +static void test_unique_fanout_group_ids(void) +{ + int fds[3]; + uint16_t typeflags, first_group_id, second_group_id; + + fprintf(stderr, "test: unique ids\n"); + + fds[0] = sock_fanout_open(PACKET_FANOUT_HASH | + PACKET_FANOUT_FLAG_UNIQUEID, 0); + if (fds[0] == -1) { + fprintf(stderr, "ERROR: failed to create a unique id group.\n"); + exit(1); + } + + sock_fanout_getopts(fds[0], &typeflags, &first_group_id); + if (typeflags != PACKET_FANOUT_HASH) { + fprintf(stderr, "ERROR: unexpected typeflags %x\n", typeflags); + exit(1); + } + + if (sock_fanout_open(PACKET_FANOUT_CPU, first_group_id) != -1) { + 
+		fprintf(stderr, "ERROR: joined group with wrong type.\n");
+		exit(1);
+	}
+
+	fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, first_group_id);
+	if (fds[1] == -1) {
+		fprintf(stderr,
+			"ERROR: failed to join previously created group.\n");
+		exit(1);
+	}
+
+	fds[2] = sock_fanout_open(PACKET_FANOUT_HASH |
+				  PACKET_FANOUT_FLAG_UNIQUEID, 0);
+	if (fds[2] == -1) {
+		fprintf(stderr,
+			"ERROR: failed to create a second unique id group.\n");
+		exit(1);
+	}
+
+	sock_fanout_getopts(fds[2], &typeflags, &second_group_id);
+	if (sock_fanout_open(PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID,
+			     second_group_id) != -1) {
+		fprintf(stderr,
+			"ERROR: specified a group id when requesting unique id\n");
+		exit(1);
+	}
+
+	if (close(fds[0]) || close(fds[1]) || close(fds[2])) {
+		fprintf(stderr, "ERROR: closing sockets\n");
+		exit(1);
+	}
+}
+
 static int test_datapath(uint16_t typeflags, int port_off,
 			 const int expect1[], const int expect2[])
 {
@@ -281,8 +349,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
 
 	fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
 
-	fds[0] = sock_fanout_open(typeflags, 20);
-	fds[1] = sock_fanout_open(typeflags, 20);
+	fds[0] = sock_fanout_open(typeflags, 0);
+	fds[1] = sock_fanout_open(typeflags, 0);
 	if (fds[0] == -1 || fds[1] == -1) {
 		fprintf(stderr, "ERROR: failed open\n");
 		exit(1);
@@ -349,10 +417,12 @@ int main(int argc, char **argv)
 	const int expect_cpu0[2][2] = { { 20, 0 }, { 20, 0 } };
 	const int expect_cpu1[2][2] = { { 0, 20 }, { 0, 20 } };
 	const int expect_bpf[2][2] = { { 15, 5 }, { 15, 20 } };
+	const int expect_uniqueid[2][2] = { { 20, 20 }, { 20, 20 } };
 	int port_off = 2, tries = 5, ret;
 
 	test_control_single();
 	test_control_group();
+	test_unique_fanout_group_ids();
 
 	/* find a set of ports that do not collide onto the same socket */
 	ret = test_datapath(PACKET_FANOUT_HASH, port_off,
@@ -383,6 +453,9 @@ int main(int argc, char **argv)
 	ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
 			     expect_cpu1[0], expect_cpu1[1]);
 
+	ret |= test_datapath(PACKET_FANOUT_FLAG_UNIQUEID, port_off,
+			     expect_uniqueid[0], expect_uniqueid[1]);
+
 	if (ret)
 		return 1;
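
---

For reference, the UNIQUEID flow exercised by test_unique_fanout_group_ids() above can also be driven by a small standalone program. The following is a minimal sketch, not part of the patch, assuming a kernel that supports PACKET_FANOUT_FLAG_UNIQUEID: with the flag set and a group id of 0, the kernel allocates an unused fanout group id and reports it back through getsockopt(PACKET_FANOUT), where the lower 16 bits carry the id and the upper 16 bits the mode and flags, mirroring sock_fanout_open()/sock_fanout_getopts() in the selftest.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd1, fd2, val;
	socklen_t len = sizeof(val);

	/* needs CAP_NET_RAW, like the selftest */
	fd1 = socket(PF_PACKET, SOCK_RAW, 0);
	if (fd1 == -1) {
		perror("socket");
		return 1;
	}

	/* group id 0 plus the UNIQUEID flag: ask the kernel to pick an
	 * unused group id instead of joining/creating group 0
	 */
	val = (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID) << 16;
	if (setsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) {
		perror("setsockopt PACKET_FANOUT");
		return 1;
	}

	/* read back: upper 16 bits hold the mode, lower 16 bits the id */
	if (getsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &val, &len)) {
		perror("getsockopt PACKET_FANOUT");
		return 1;
	}
	printf("kernel allocated fanout group id %d\n", val & 0xffff);

	/* a second socket joins the same group by passing the id back,
	 * this time without the UNIQUEID flag
	 */
	fd2 = socket(PF_PACKET, SOCK_RAW, 0);
	if (fd2 == -1) {
		perror("socket");
		return 1;
	}
	val = (PACKET_FANOUT_HASH << 16) | (val & 0xffff);
	if (setsockopt(fd2, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) {
		perror("setsockopt PACKET_FANOUT join");
		return 1;
	}

	close(fd2);
	close(fd1);
	return 0;
}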