diff --git a/Documentation/cpu-freq/index.txt b/Documentation/cpu-freq/index.txt index 03a7cee6ac73a4f2dd48248196994538d81f6a4e..c15e75386a0523d5398290aa9a63cc1d9f40f45e 100644 --- a/Documentation/cpu-freq/index.txt +++ b/Documentation/cpu-freq/index.txt @@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats. index.txt - File index, Mailing list and Links (this document) -intel-pstate.txt - Intel pstate cpufreq driver specific file. - pcc-cpufreq.txt - PCC cpufreq driver specific file. diff --git a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt index a135504c7d57148b938285919130d8dfc8fa18d3..cac24ee10b72ebc3765d29708fa27c5ae6848950 100644 --- a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt +++ b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt @@ -32,7 +32,7 @@ Example: compatible = "st,stm32h743-rcc", "st,stm32-rcc"; reg = <0x58024400 0x400>; #reset-cells = <1>; - #clock-cells = <2>; + #clock-cells = <1>; clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>; st,syscfg = <&pwrcfg>; diff --git a/Documentation/devicetree/bindings/i2c/i2c-altera.txt b/Documentation/devicetree/bindings/i2c/i2c-altera.txt new file mode 100644 index 0000000000000000000000000000000000000000..767664f448ece3315dc376f33ad0fdaef5ecb23d --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-altera.txt @@ -0,0 +1,39 @@ +* Altera I2C Controller +* This is Altera's synthesizable logic block I2C Controller for use +* in Altera's FPGAs. + +Required properties : + - compatible : should be "altr,softip-i2c-v1.0" + - reg : Offset and length of the register set for the device + - interrupts : where IRQ is the interrupt number. + - clocks : phandle to input clock. + - #address-cells = <1>; + - #size-cells = <0>; + +Recommended properties : + - clock-frequency : desired I2C bus clock frequency in Hz. + +Optional properties : + - fifo-size : Size of the RX and TX FIFOs in bytes. + - Child nodes conforming to i2c bus binding + +Example : + + i2c@100080000 { + compatible = "altr,softip-i2c-v1.0"; + reg = <0x00000001 0x00080000 0x00000040>; + interrupt-parent = <&intc>; + interrupts = <0 43 4>; + clocks = <&clk_0>; + clock-frequency = <100000>; + #address-cells = <1>; + #size-cells = <0>; + fifo-size = <4>; + + eeprom@51 { + compatible = "atmel,24c32"; + reg = <0x51>; + pagesize = <32>; + }; + }; + diff --git a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt index 78eaf7b718eddc764385357449cffa835c3c0d97..3b54899666342b8e5a114bc0842e193871139d5e 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt @@ -1,7 +1,9 @@ * I2C controller embedded in STMicroelectronics STM32 I2C platform Required properties : -- compatible : Must be "st,stm32f4-i2c" +- compatible : Must be one of the following + - "st,stm32f4-i2c" + - "st,stm32f7-i2c" - reg : Offset and length of the register set for the device - interrupts : Must contain the interrupt id for I2C event and then the interrupt id for I2C error. @@ -14,8 +16,16 @@ Required properties : Optional properties : - clock-frequency : Desired I2C bus clock frequency in Hz. If not specified, - the default 100 kHz frequency will be used. As only Normal and Fast modes - are supported, possible values are 100000 and 400000. + the default 100 kHz frequency will be used. 
+ For STM32F4 SoC Standard-mode and Fast-mode are supported, possible values are + 100000 and 400000. + For STM32F7 SoC, Standard-mode, Fast-mode and Fast-mode Plus are supported, + possible values are 100000, 400000 and 1000000. +- i2c-scl-rising-time-ns : Only for STM32F7, I2C SCL Rising time for the board + (default: 25) +- i2c-scl-falling-time-ns : Only for STM32F7, I2C SCL Falling time for the board + (default: 10) + I2C Timings are derived from these 2 values Example : @@ -31,3 +41,16 @@ Example : pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>; pinctrl-names = "default"; }; + + i2c@40005400 { + compatible = "st,stm32f7-i2c"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x40005400 0x400>; + interrupts = <31>, + <32>; + resets = <&rcc STM32F7_APB1_RESET(I2C1)>; + clocks = <&rcc 1 CLK_I2C1>; + pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>; + pinctrl-names = "default"; + }; diff --git a/Documentation/devicetree/bindings/input/pwm-vibrator.txt b/Documentation/devicetree/bindings/input/pwm-vibrator.txt new file mode 100644 index 0000000000000000000000000000000000000000..09145d18491d866f2642fed6e1ce981961a41ac9 --- /dev/null +++ b/Documentation/devicetree/bindings/input/pwm-vibrator.txt @@ -0,0 +1,66 @@ +* PWM vibrator device tree bindings + +Registers a PWM device as vibrator. It is expected, that the vibrator's +strength increases based on the duty cycle of the enable PWM channel +(100% duty cycle meaning strongest vibration, 0% meaning no vibration). + +The binding supports an optional direction PWM channel, that can be +driven at fixed duty cycle. If available this is can be used to increase +the vibration effect of some devices. + +Required properties: +- compatible: should contain "pwm-vibrator" +- pwm-names: Should contain "enable" and optionally "direction" +- pwms: Should contain a PWM handle for each entry in pwm-names + +Optional properties: +- vcc-supply: Phandle for the regulator supplying power +- direction-duty-cycle-ns: Duty cycle of the direction PWM channel in + nanoseconds, defaults to 50% of the channel's + period. + +Example from Motorola Droid 4: + +&omap4_pmx_core { + vibrator_direction_pin: pinmux_vibrator_direction_pin { + pinctrl-single,pins = < + OMAP4_IOPAD(0x1ce, PIN_OUTPUT | MUX_MODE1) /* dmtimer8_pwm_evt (gpio_27) */ + >; + }; + + vibrator_enable_pin: pinmux_vibrator_enable_pin { + pinctrl-single,pins = < + OMAP4_IOPAD(0X1d0, PIN_OUTPUT | MUX_MODE1) /* dmtimer9_pwm_evt (gpio_28) */ + >; + }; +}; + +/ { + pwm8: dmtimer-pwm { + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_direction_pin>; + + compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; + ti,timers = <&timer8>; + ti,clock-source = <0x01>; + }; + + pwm9: dmtimer-pwm { + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_enable_pin>; + + compatible = "ti,omap-dmtimer-pwm"; + #pwm-cells = <3>; + ti,timers = <&timer9>; + ti,clock-source = <0x01>; + }; + + vibrator { + compatible = "pwm-vibrator"; + pwms = <&pwm8 0 1000000000 0>, + <&pwm9 0 1000000000 0>; + pwm-names = "enable", "direction"; + direction-duty-cycle-ns = <1000000000>; + }; +}; diff --git a/Documentation/devicetree/bindings/leds/ams,as3645a.txt b/Documentation/devicetree/bindings/leds/ams,as3645a.txt index 12c5ef26ec73924566361aa2ad96710d1e12b210..fdc40e354a64dd582d86841afac520a73d6fa5f0 100644 --- a/Documentation/devicetree/bindings/leds/ams,as3645a.txt +++ b/Documentation/devicetree/bindings/leds/ams,as3645a.txt @@ -15,11 +15,14 @@ Required properties compatible : Must be "ams,as3645a". reg : The I2C address of the device. 
Typically 0x30. +#address-cells : 1 +#size-cells : 0 -Required properties of the "flash" child node -============================================= +Required properties of the flash child node (0) +=============================================== +reg: 0 flash-timeout-us: Flash timeout in microseconds. The value must be in the range [100000, 850000] and divisible by 50000. flash-max-microamp: Maximum flash current in microamperes. Has to be @@ -33,20 +36,21 @@ ams,input-max-microamp: Maximum flash controller input current. The and divisible by 50000. -Optional properties of the "flash" child node -============================================= +Optional properties of the flash child node +=========================================== label : The label of the flash LED. -Required properties of the "indicator" child node -================================================= +Required properties of the indicator child node (1) +=================================================== +reg: 1 led-max-microamp: Maximum indicator current. The allowed values are 2500, 5000, 7500 and 10000. -Optional properties of the "indicator" child node -================================================= +Optional properties of the indicator child node +=============================================== label : The label of the indicator LED. @@ -55,16 +59,20 @@ Example ======= as3645a@30 { + #address-cells = <1>; + #size-cells = <0>; reg = <0x30>; compatible = "ams,as3645a"; - flash { + flash@0 { + reg = <0x0>; flash-timeout-us = <150000>; flash-max-microamp = <320000>; led-max-microamp = <60000>; ams,input-max-microamp = <1750000>; label = "as3645a:flash"; }; - indicator { + indicator@1 { + reg = <0x1>; led-max-microamp = <10000>; label = "as3645a:indicator"; }; diff --git a/Documentation/devicetree/bindings/mips/lantiq/fpi-bus.txt b/Documentation/devicetree/bindings/mips/lantiq/fpi-bus.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a2df433833202fd80903720d9ca9e45dad306ba --- /dev/null +++ b/Documentation/devicetree/bindings/mips/lantiq/fpi-bus.txt @@ -0,0 +1,31 @@ +Lantiq XWAY SoC FPI BUS binding +============================ + + +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,xrx200-fpi" +- reg : The address and length of the XBAR + configuration register. + Address and length of the FPI bus itself. +- lantiq,rcu : A phandle to the RCU syscon +- lantiq,offset-endianness : Offset of the endianness configuration + register + +------------------------------------------------------------------------------- +Example for the FPI on the xrx200 SoCs: + fpi@10000000 { + compatible = "lantiq,xrx200-fpi"; + ranges = <0x0 0x10000000 0xf000000>; + reg = <0x1f400000 0x1000>, + <0x10000000 0xf000000>; + lantiq,rcu = <&rcu0>; + lantiq,offset-endianness = <0x4c>; + #address-cells = <1>; + #size-cells = <1>; + + gptu@e100a00 { + ...... + }; + }; diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt new file mode 100644 index 0000000000000000000000000000000000000000..a0c19bd1ce6625e603a658aba8420e033cf60910 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt @@ -0,0 +1,36 @@ +Lantiq XWAY SoC GPHY binding +============================ + +This binding describes a software-defined ethernet PHY, provided by the RCU +module on newer Lantiq XWAY SoCs (xRX200 and newer).
+ +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,xrx200a1x-gphy" + "lantiq,xrx200a2x-gphy" + "lantiq,xrx300-gphy" + "lantiq,xrx330-gphy" +- reg : Address of the GPHY FW load address register +- resets : Must reference the RCU GPHY reset bit +- reset-names : One entry, value must be "gphy" or optional "gphy2" +- clocks : A reference to the (PMU) GPHY clock gate + +Optional properties: +- lantiq,gphy-mode : GPHY_MODE_GE (default) or GPHY_MODE_FE as defined in + + + +------------------------------------------------------------------------------- +Example for the GPHYs on the xRX200 SoCs: + +#include + gphy0: gphy@20 { + compatible = "lantiq,xrx200a2x-gphy"; + reg = <0x20 0x4>; + + resets = <&reset0 31 30>, <&reset1 7 7>; + reset-names = "gphy", "gphy2"; + clocks = <&pmu0 XRX200_PMU_GATE_GPHY>; + lantiq,gphy-mode = ; + }; diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a086f1e1cdd78809f87f145dba13adaf68849602 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt @@ -0,0 +1,89 @@ +Lantiq XWAY SoC RCU binding +=========================== + +This binding describes the RCU (reset controller unit) multifunction device, +where each sub-device has its own set of registers. + +The RCU register range is used for multiple purposes. Mostly one device +uses one or multiple registers exclusively, but for some registers some +bits are for one driver and some other bits are for a different driver. +All accesses to the RCU registers will go through +syscon. + + +------------------------------------------------------------------------------- +Required properties: +- compatible : The first and second values must be: + "lantiq,xrx200-rcu", "simple-mfd", "syscon" +- reg : The address and length of the system control registers + + +------------------------------------------------------------------------------- +Example of the RCU bindings on an xRX200 SoC: + rcu0: rcu@203000 { + compatible = "lantiq,xrx200-rcu", "simple-mfd", "syscon"; + reg = <0x203000 0x100>; + ranges = <0x0 0x203000 0x100>; + big-endian; + + gphy0: gphy@20 { + compatible = "lantiq,xrx200a2x-gphy"; + reg = <0x20 0x4>; + + resets = <&reset0 31 30>, <&reset1 7 7>; + reset-names = "gphy", "gphy2"; + lantiq,gphy-mode = ; + }; + + gphy1: gphy@68 { + compatible = "lantiq,xrx200a2x-gphy"; + reg = <0x68 0x4>; + + resets = <&reset0 29 28>, <&reset1 6 6>; + reset-names = "gphy", "gphy2"; + lantiq,gphy-mode = ; + }; + + reset0: reset-controller@10 { + compatible = "lantiq,xrx200-reset"; + reg = <0x10 4>, <0x14 4>; + + #reset-cells = <2>; + }; + + reset1: reset-controller@48 { + compatible = "lantiq,xrx200-reset"; + reg = <0x48 4>, <0x24 4>; + + #reset-cells = <2>; + }; + + usb_phy0: usb2-phy@18 { + compatible = "lantiq,xrx200-usb2-phy"; + reg = <0x18 4>, <0x38 4>; + status = "disabled"; + + resets = <&reset1 4 4>, <&reset0 4 4>; + reset-names = "phy", "ctrl"; + #phy-cells = <0>; + }; + + usb_phy1: usb2-phy@34 { + compatible = "lantiq,xrx200-usb2-phy"; + reg = <0x34 4>, <0x3C 4>; + status = "disabled"; + + resets = <&reset1 5 4>, <&reset0 4 4>; + reset-names = "phy", "ctrl"; + #phy-cells = <0>; + }; + + reboot@10 { + compatible = "syscon-reboot"; + reg = <0x10 4>; + + regmap = <&rcu0>; + offset = <0x10>; + mask = <0x40000000>; + }; + }; diff --git
a/Documentation/devicetree/bindings/mips/ni.txt b/Documentation/devicetree/bindings/mips/ni.txt new file mode 100644 index 0000000000000000000000000000000000000000..722bf2d62da9644f3595e8125d53a0c80c184d31 --- /dev/null +++ b/Documentation/devicetree/bindings/mips/ni.txt @@ -0,0 +1,7 @@ +National Instruments MIPS platforms + +required root node properties: + - compatible: must be "ni,169445" + +CPU Nodes + - compatible: must be "mti,mips14KEc" diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt index b35a8d04f8b6e3c9fe62b7c460c2aea5217386ef..a16e8d7fe56c55eb8c9486f1e8825a6e18fc16ee 100644 --- a/Documentation/devicetree/bindings/mips/ralink.txt +++ b/Documentation/devicetree/bindings/mips/ralink.txt @@ -15,3 +15,4 @@ value must be one of the following values: ralink,rt5350-soc ralink,mt7620a-soc ralink,mt7620n-soc + ralink,mt7628a-soc diff --git a/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt b/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt new file mode 100644 index 0000000000000000000000000000000000000000..643948b6b576515ac83d4c39e0ff24087947e61b --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-lantiq-rcu-usb2.txt @@ -0,0 +1,40 @@ +Lantiq XWAY SoC RCU USB 1.1/2.0 PHY binding +=========================================== + +This binding describes the USB PHY hardware provided by the RCU module on the +Lantiq XWAY SoCs. + +This node has to be a sub node of the Lantiq RCU block. + +------------------------------------------------------------------------------- +Required properties (controller (parent) node): +- compatible : Should be one of + "lantiq,ase-usb2-phy" + "lantiq,danube-usb2-phy" + "lantiq,xrx100-usb2-phy" + "lantiq,xrx200-usb2-phy" + "lantiq,xrx300-usb2-phy" +- reg : Defines the following sets of registers in the parent + syscon device + - Offset of the USB PHY configuration register + - Offset of the USB Analog configuration + register (only for xrx200 and xrx200) +- clocks : References to the (PMU) "phy" clk gate. +- clock-names : Must be "phy" +- resets : References to the RCU USB configuration reset bits. +- reset-names : Must be one of the following: + "phy" (optional) + "ctrl" (shared) + +------------------------------------------------------------------------------- +Example for the USB PHYs on an xRX200 SoC: + usb_phy0: usb2-phy@18 { + compatible = "lantiq,xrx200-usb2-phy"; + reg = <0x18 4>, <0x38 4>; + + clocks = <&pmu PMU_GATE_USB0_PHY>; + clock-names = "phy"; + resets = <&reset1 4 4>, <&reset0 4 4>; + reset-names = "phy", "ctrl"; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/reset/lantiq,reset.txt b/Documentation/devicetree/bindings/reset/lantiq,reset.txt new file mode 100644 index 0000000000000000000000000000000000000000..c6aef36b7d150e4adf7ad2c2eae4041b2eef4e9f --- /dev/null +++ b/Documentation/devicetree/bindings/reset/lantiq,reset.txt @@ -0,0 +1,30 @@ +Lantiq XWAY SoC RCU reset controller binding +============================================ + +This binding describes a reset-controller found on the RCU module on Lantiq +XWAY SoCs. + +This node has to be a sub node of the Lantiq RCU block. 
+ +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,danube-reset" + "lantiq,xrx200-reset" +- reg : Defines the following sets of registers in the parent + syscon device + - Offset of the reset set register + - Offset of the reset status register +- #reset-cells : Specifies the number of cells needed to encode the + reset line, should be 2. + The first cell takes the reset set bit and the + second cell takes the status bit. + +------------------------------------------------------------------------------- +Example for the reset-controllers on the xRX200 SoCs: + reset0: reset-controller@10 { + compatible = "lantiq,xrx200-reset"; + reg = <0x10 0x04>, <0x14 0x04>; + + #reset-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt index 3eca6de6369d773ef70bba048345aaed1d2737df..a65d7b71e81a2a22bfc23ab79ede461a25d2de37 100644 --- a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt +++ b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt @@ -8,6 +8,12 @@ Required properties: the firmware event log - linux,sml-size : size of the memory allocated for the firmware event log +Optional properties: + +- powered-while-suspended: present when the TPM is left powered on between + suspend and resume (makes the suspend/resume + callbacks do nothing). + Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C) ---------------------------------------------------------- diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 69183f0fbc7873f13e9b4516a34e3dde41686ab7..1afd298eddd73147ebf6a1dcbc56bf404559cff1 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -3,8 +3,8 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order. This isn't an exhaustive list, but you should add new prefixes to it before using them to avoid name-space collisions. -abcn Abracon Corporation abilis Abilis Systems +abracon Abracon Corporation actions Actions Semiconductor Co., Ltd. active-semi Active-Semi International Inc ad Avionic Design GmbH @@ -361,6 +361,7 @@ variscite Variscite Ltd. via VIA Technologies, Inc. virtio Virtual I/O Device Specification, developed by the OASIS consortium vivante Vivante Corporation +vocore VoCore Studio voipac Voipac Technologies s.r.o. wd Western Digital Corp. wetek WeTek Electronics, limited. diff --git a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt index c5e74d7b4406127735d42e9221a8fb3e417e8717..c5077a1f5cb3e4753fbf74bb3c905e8e8fce2c81 100644 --- a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt @@ -8,9 +8,49 @@ Required properties: - reg: physical base address of the controller and length of memory mapped region +Optional properties: + + - aspeed,reset-type = "cpu|soc|system|none" + + Reset behavior - Whenever a timeout occurs the watchdog can be programmed + to generate one of three different, mutually exclusive, types of resets. + + Type "none" can be specified to indicate that no resets are to be done. + This is useful in situations where another watchdog engine on chip is + to perform the reset.
+ + If 'aspeed,reset-type=' is not specified the default is to enable system + reset. + + Reset types: + + - cpu: Reset CPU on watchdog timeout + + - soc: Reset 'System on Chip' on watchdog timeout + + - system: Reset system on watchdog timeout + + - none: No reset is performed on timeout. Assumes another watchdog + engine is responsible for this. + + - aspeed,alt-boot: If property is present then boot from alternate block. + - aspeed,external-signal: If property is present then signal is sent to + external reset counter (only WDT1 and WDT2). If not + specified no external signal is sent. + - aspeed,ext-pulse-duration: External signal pulse duration in microseconds + +Optional properties for AST2500-compatible watchdogs: + - aspeed,ext-push-pull: If aspeed,external-signal is present, set the pin's + drive type to push-pull. The default is open-drain. + - aspeed,ext-active-high: If aspeed,external-signal is present and the pin + is configured as push-pull, then set the pulse + polarity to active-high. The default is active-low. + Example: wdt1: watchdog@1e785000 { compatible = "aspeed,ast2400-wdt"; reg = <0x1e785000 0x1c>; + aspeed,reset-type = "system"; + aspeed,external-signal; }; diff --git a/Documentation/devicetree/bindings/watchdog/lantiq-wdt.txt b/Documentation/devicetree/bindings/watchdog/lantiq-wdt.txt new file mode 100644 index 0000000000000000000000000000000000000000..18d4d83027026dd166b762bc87188fa475a186ac --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/lantiq-wdt.txt @@ -0,0 +1,24 @@ +Lantiq WDT watchdog binding +============================ + +This describes the binding of the Lantiq watchdog driver. + +------------------------------------------------------------------------------- +Required properties: +- compatible : Should be one of + "lantiq,wdt" + "lantiq,xrx100-wdt" + "lantiq,xrx200-wdt", "lantiq,xrx100-wdt" + "lantiq,falcon-wdt" +- reg : Address of the watchdog block +- lantiq,rcu : A phandle to the RCU syscon (required for + "lantiq,falcon-wdt" and "lantiq,xrx100-wdt") + +------------------------------------------------------------------------------- +Example for the watchdog on the xRX200 SoCs: + watchdog@803f0 { + compatible = "lantiq,xrx200-wdt", "lantiq,xrx100-wdt"; + reg = <0x803f0 0x10>; + + lantiq,rcu = <&rcu0>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/meson-wdt.txt b/Documentation/devicetree/bindings/watchdog/meson-wdt.txt index ae70185d96e645df9ad6a97b9d5e2faa45bd5743..8a6d84cb36c96521a74d3d8b12aae1f4fd1a97f6 100644 --- a/Documentation/devicetree/bindings/watchdog/meson-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/meson-wdt.txt @@ -2,7 +2,11 @@ Meson SoCs Watchdog timer Required properties: -- compatible : should be "amlogic,meson6-wdt" or "amlogic,meson8b-wdt" +- compatible : depending on the SoC this should be one of: + "amlogic,meson6-wdt" on Meson6 SoCs + "amlogic,meson8-wdt" and "amlogic,meson6-wdt" on Meson8 SoCs + "amlogic,meson8b-wdt" on Meson8b SoCs + "amlogic,meson8m2-wdt" and "amlogic,meson8b-wdt" on Meson8m2 SoCs - reg : Specifies base physical address and size of the registers.
Example: diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt index 20ada673ab0c17a1647be7eeb80353aedea64ceb..235de0683bb6f7e3ae557175d57d40cdfa17218d 100644 --- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt @@ -6,6 +6,8 @@ Required properties: "mediatek,mt2701-wdt", "mediatek,mt6589-wdt": for MT2701 "mediatek,mt6589-wdt": for MT6589 "mediatek,mt6797-wdt", "mediatek,mt6589-wdt": for MT6797 + "mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622 + "mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623 - reg : Specifies base physical address and size of the registers. diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index 9e306afbbd49e91b689f6e7fbf63a47d6167dfaf..bf6d1ca58af7d1989deda93d037b57a23ffd426d 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt @@ -6,6 +6,7 @@ Required properties: Examples with soctypes are: - "renesas,r8a7795-wdt" (R-Car H3) - "renesas,r8a7796-wdt" (R-Car M3-W) + - "renesas,r8a77995-wdt" (R-Car D3) - "renesas,r7s72100-wdt" (RZ/A1) When compatible with the generic version, nodes must list the SoC-specific diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst index bedd32388dac51d8a603dd1389c3b4de24807d7a..a0dc2879a152c89dbb9d5f715ec07fbfe6a4cd8e 100644 --- a/Documentation/driver-api/pm/devices.rst +++ b/Documentation/driver-api/pm/devices.rst @@ -675,7 +675,7 @@ sub-domain of the parent domain. Support for power domains is provided through the :c:member:`pm_domain` field of |struct device|. This field is a pointer to an object of type -|struct dev_pm_domain|, defined in :file:`include/linux/pm.h``, providing a set +|struct dev_pm_domain|, defined in :file:`include/linux/pm.h`, providing a set of power management callbacks analogous to the subsystem-level and device driver callbacks that are executed for the given device during all power transitions, instead of the respective subsystem-level callbacks. Specifically, if a diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt index 4a9739abc860651825f62e6ffb71f9b592d57015..a38d3aa4d189960121ed9740fa0827773b574b91 100644 --- a/Documentation/fb/fbcon.txt +++ b/Documentation/fb/fbcon.txt @@ -148,6 +148,13 @@ C. Boot options Actually, the underlying fb driver is totally ignorant of console rotation. +5. fbcon=margin: + + This option specifies the color of the margins. The margins are the + leftover area at the right and the bottom of the screen that are not + used by text. By default, this area will be black. The 'color' value + is an integer number that depends on the framebuffer driver being used. + C. 
Attaching, Detaching and Unloading Before going on how to attach, detach and unload the framebuffer console, an diff --git a/Documentation/filesystems/cifs/AUTHORS b/Documentation/filesystems/cifs/AUTHORS index c98800df677fef0e18103415af6a0333e8c6a10a..9f4f87e1624036349533adf9534bfd3c4b08535d 100644 --- a/Documentation/filesystems/cifs/AUTHORS +++ b/Documentation/filesystems/cifs/AUTHORS @@ -41,6 +41,11 @@ Igor Mammedov (DFS support) Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) Scott Lovenberg Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features) +Aurelien Aptel (for DFS SMB3 work and some key bug fixes) +Ronnie Sahlberg (for SMB3 xattr work and bug fixes) +Shirish Pargaonkar (for many ACL patches over the years) +Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security) + Test case and Bug Report contributors ------------------------------------- diff --git a/Documentation/filesystems/cifs/README b/Documentation/filesystems/cifs/README index a54788405429dce03af2dd950f6dd817df0d9a56..a9da51553ba3e15049c17aecbad840eb607ef056 100644 --- a/Documentation/filesystems/cifs/README +++ b/Documentation/filesystems/cifs/README @@ -1,10 +1,14 @@ -The CIFS VFS support for Linux supports many advanced network filesystem -features such as hierarchical dfs like namespace, hardlinks, locking and more. +This module supports the SMB3 family of advanced network protocols (as well +as older dialects, originally called "CIFS" or SMB1). + +The CIFS VFS module for Linux supports many advanced network filesystem +features such as hierarchical DFS like namespace, hardlinks, locking and more. It was designed to comply with the SNIA CIFS Technical Reference (which supersedes the 1992 X/Open SMB Standard) as well as to perform best practice practical interoperability with Windows 2000, Windows XP, Samba and equivalent servers. This code was developed in participation with the Protocol Freedom -Information Foundation. +Information Foundation. CIFS and now SMB3 has now become a defacto +standard for interoperating between Macs and Windows and major NAS appliances. Please see http://protocolfreedom.org/ and @@ -15,30 +19,11 @@ for more details. For questions or bug reports please contact: sfrench@samba.org (sfrench@us.ibm.com) +See the project page at: https://wiki.samba.org/index.php/LinuxCIFS_utils + Build instructions: ================== -For Linux 2.4: -1) Get the kernel source (e.g.from http://www.kernel.org) -and download the cifs vfs source (see the project page -at http://us1.samba.org/samba/Linux_CIFS_client.html) -and change directory into the top of the kernel directory -then patch the kernel (e.g. "patch -p1 < cifs_24.patch") -to add the cifs vfs to your kernel configure options if -it has not already been added (e.g. current SuSE and UL -users do not need to apply the cifs_24.patch since the cifs vfs is -already in the kernel configure menu) and then -mkdir linux/fs/cifs and then copy the current cifs vfs files from -the cifs download to your kernel build directory e.g. - - cp /fs/cifs/* to /fs/cifs - -2) make menuconfig (or make xconfig) -3) select cifs from within the network filesystem choices -4) save and exit -5) make dep -6) make modules (or "make" if CIFS VFS not to be built as a module) - -For Linux 2.6: +For Linux: 1) Download the kernel (e.g. from http://www.kernel.org) and change directory into the top of the kernel directory tree (e.g. 
/usr/src/linux-2.5.73) @@ -61,16 +46,13 @@ would simply type "make install"). If you do not have the utility mount.cifs (in the Samba 3.0 source tree and on the CIFS VFS web site) copy it to the same directory in which mount.smbfs and similar files reside (usually /sbin). Although the helper software is not -required, mount.cifs is recommended. Eventually the Samba 3.0 utility program -"net" may also be helpful since it may someday provide easier mount syntax for -users who are used to Windows e.g. - net use +required, mount.cifs is recommended. Most distros include a "cifs-utils" +package that includes this utility so it is recommended to install this. + Note that running the Winbind pam/nss module (logon service) on all of your Linux clients is useful in mapping Uids and Gids consistently across the domain to the proper network user. The mount.cifs mount helper can be -trivially built from Samba 3.0 or later source e.g. by executing: - - gcc samba/source/client/mount.cifs.c -o mount.cifs +found at cifs-utils.git on git.samba.org If cifs is built as a module, then the size and number of network buffers and maximum number of simultaneous requests to one server can be configured. @@ -79,6 +61,18 @@ Changing these from their defaults is not recommended. By executing modinfo on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made at module initialization time (by running insmod cifs.ko) can be seen. +Recommendations +=============== +To improve security the SMB2.1 dialect or later (usually will get SMB3) is now +the new default. To use old dialects (e.g. to mount Windows XP) use "vers=1.0" +on mount (or vers=2.0 for Windows Vista). Note that the CIFS (vers=1.0) is +much older and less secure than the default dialect SMB3 which includes +many advanced security features such as downgrade attack detection +and encrypted shares and stronger signing and authentication algorithms. +There are additional mount options that may be helpful for SMB3 to get +improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1): + "mfsymlinks" and "cifsacl" and "idsfromsid" + Allowing User Mounts ==================== To permit users to mount and unmount over directories they own is possible @@ -98,9 +92,7 @@ and execution of suid programs on the remote target would be enabled by default. This can be changed, as with nfs and other filesystems, by simply specifying "nosuid" among the mount options. For user mounts though to be able to pass the suid flag to mount requires rebuilding -mount.cifs with the following flag: - - gcc samba/source/client/mount.cifs.c -DCIFS_ALLOW_USR_SUID -o mount.cifs +mount.cifs with the following flag: CIFS_ALLOW_USR_SUID There is a corresponding manual page for cifs mounting in the Samba 3.0 and later source tree in docs/manpages/mount.cifs.8 @@ -189,18 +181,18 @@ applications running on the same server as Samba. Use instructions: ================ Once the CIFS VFS support is built into the kernel or installed as a module -(cifs.o), you can use mount syntax like the following to access Samba or Windows -servers: +(cifs.ko), you can use mount syntax like the following to access Samba or +Mac or Windows servers: - mount -t cifs //9.53.216.11/e$ /mnt -o user=myname,pass=mypassword + mount -t cifs //9.53.216.11/e$ /mnt -o username=myname,password=mypassword Before -o the option -v may be specified to make the mount.cifs mount helper display the mount steps more verbosely. 
After -o the following commonly used cifs vfs specific options are supported: - user= - pass= + username= + password= domain= Other cifs mount options are described below. Use of TCP names (in addition to @@ -246,13 +238,16 @@ the Server's registry. Samba starting with version 3.10 will allow such filenames (ie those which contain valid Linux characters, which normally would be forbidden for Windows/CIFS semantics) as long as the server is configured for Unix Extensions (and the client has not disabled -/proc/fs/cifs/LinuxExtensionsEnabled). - +/proc/fs/cifs/LinuxExtensionsEnabled). In addition the mount option +"mapposix" can be used on CIFS (vers=1.0) to force the mapping of +illegal Windows/NTFS/SMB characters to a remap range (this mount parm +is the default for SMB3). This remap ("mapposix") range is also +compatible with Mac (and "Services for Mac" on some older Windows). CIFS VFS Mount Options ====================== A partial list of the supported mount options follows: - user The user name to use when trying to establish + username The user name to use when trying to establish the CIFS session. password The user password. If the mount helper is installed, the user will be prompted for password diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO index 066ffddc3964917d75287198ffd178daacc1cac6..396ecfd6ff4a0da40f099dfef57ef6111ae8da71 100644 --- a/Documentation/filesystems/cifs/TODO +++ b/Documentation/filesystems/cifs/TODO @@ -1,4 +1,4 @@ -Version 2.03 August 1, 2014 +Version 2.04 September 13, 2017 A Partial List of Missing Features ================================== @@ -8,73 +8,69 @@ for visible, important contributions to this module. Here is a partial list of the known problems and missing features: a) SMB3 (and SMB3.02) missing optional features: - - RDMA + - RDMA (started) - multichannel (started) - directory leases (improved metadata caching) - T10 copy offload (copy chunk is only mechanism supported) - - encrypted shares b) improved sparse file support c) Directory entry caching relies on a 1 second timer, rather than -using FindNotify or equivalent. - (started) +using Directory Leases d) quota support (needs minor kernel change since quota calls to make it to network filesystems or deviceless filesystems) -e) improve support for very old servers (OS/2 and Win9x for example) -Including support for changing the time remotely (utimes command). +e) Better optimize open to reduce redundant opens (using reference +counts more) and to improve use of compounding in SMB3 to reduce +number of roundtrips. -f) hook lower into the sockets api (as NFS/SunRPC does) to avoid the -extra copy in/out of the socket buffers in some cases. - -g) Better optimize open (and pathbased setfilesize) to reduce the -oplock breaks coming from windows srv. Piggyback identical file -opens on top of each other by incrementing reference count rather -than resending (helps reduce server resource utilization and avoid -spurious oplock breaks). - -h) Add support for storing symlink info to Windows servers -in the Extended Attribute format their SFU clients would recognize. - -i) Finish inotify support so kde and gnome file list windows +f) Finish inotify support so kde and gnome file list windows will autorefresh (partially complete by Asser). Needs minor kernel vfs change to support removing D_NOTIFY on a file. 
-j) Add GUI tool to configure /proc/fs/cifs settings and for display of +g) Add GUI tool to configure /proc/fs/cifs settings and for display of the CIFS statistics (started) -k) implement support for security and trusted categories of xattrs +h) implement support for security and trusted categories of xattrs (requires minor protocol extension) to enable better support for SELINUX -l) Implement O_DIRECT flag on open (already supported on mount) +i) Implement O_DIRECT flag on open (already supported on mount) -m) Create UID mapping facility so server UIDs can be mapped on a per +j) Create UID mapping facility so server UIDs can be mapped on a per mount or a per server basis to client UIDs or nobody if no mapping -exists. This is helpful when Unix extensions are negotiated to -allow better permission checking when UIDs differ on the server -and client. Add new protocol request to the CIFS protocol -standard for asking the server for the corresponding name of a -particular uid. +exists. Also better integration with winbind for resolving SID owners + +k) Add tools to take advantage of more smb3 specific ioctls and features + +l) encrypted file support + +m) improved stats gathering, tools (perhaps integration with nfsometer?) -n) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for this too) +n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed +file attribute via chflags) and improve user space tools for managing and +viewing them. -o) mount check for unmatched uids +o) mount helper GUI (to simplify the various configuration options on mount) -p) Add support for new vfs entry point for fallocate +p) autonegotiation of dialects (offering more than one dialect ie SMB3.02, +SMB3, SMB2.1 not just SMB3). -q) Add tools to take advantage of cifs/smb3 specific ioctls and features -such as "CopyChunk" (fast server side file copy) +q) Allow mount.cifs to be more verbose in reporting errors with dialect +or unsupported feature errors. -r) encrypted file support +r) updating cifs documentation, and user guide. -s) improved stats gathering, tools (perhaps integration with nfsometer?) +s) Addressing bugs found by running a broader set of xfstests in standard +file system xfstest suite. -t) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed -file attribute via chflags) +t) split cifs and smb3 support into separate modules so legacy (and less +secure) CIFS dialect can be disabled in environments that don't need it +and simplify the code. -u) mount helper GUI (to simplify the various configuration options on mount) +u) Finish up SMB3.1.1 dialect support +v) POSIX Extensions for SMB3.1.1 KNOWN BUGS ==================================== diff --git a/Documentation/filesystems/cifs/cifs.txt b/Documentation/filesystems/cifs/cifs.txt index 2fac91ac96cf7484aae9e148e5841755feba5f70..67756607246e767a9105bc06c00a7b97730abbb1 100644 --- a/Documentation/filesystems/cifs/cifs.txt +++ b/Documentation/filesystems/cifs/cifs.txt @@ -1,24 +1,28 @@ - This is the client VFS module for the Common Internet File System - (CIFS) protocol which is the successor to the Server Message Block + This is the client VFS module for the SMB3 NAS protocol as well as + older dialects such as the Common Internet File System (CIFS) + protocol which was the successor to the Server Message Block (SMB) protocol, the native file sharing mechanism for most early PC operating systems. New and improved versions of CIFS are now called SMB2 and SMB3.
These dialects are also supported by the CIFS VFS module. CIFS is fully supported by network - file servers such as Windows 2000, 2003, 2008 and 2012 + file servers such as Windows 2000, 2003, 2008, 2012 and 2016 as well by Samba (which provides excellent CIFS - server support for Linux and many other operating systems), so + server support for Linux and many other operating systems), Apple + systems, as well as most Network Attached Storage vendors, so this network filesystem client can mount to a wide variety of servers. The intent of this module is to provide the most advanced network - file system function for CIFS compliant servers, including better - POSIX compliance, secure per-user session establishment, high - performance safe distributed caching (oplock), optional packet + file system function for SMB3 compliant servers, including advanced + security features, excellent parallelized high performance i/o, better + POSIX compliance, secure per-user session establishment, encryption, + high performance safe distributed caching (leases/oplocks), optional packet signing, large files, Unicode support and other internationalization improvements. Since both Samba server and this filesystem client support - the CIFS Unix extensions, the combination can provide a reasonable - alternative to NFSv4 for fileserving in some Linux to Linux environments, - not just in Linux to Windows environments. + the CIFS Unix extensions (and in the future SMB3 POSIX extensions), + the combination can provide a reasonable alternative to other network and + cluster file systems for fileserving in some Linux to Linux environments, + not just in Linux to Windows (or Linux to Mac) environments. This filesystem has an mount utility (mount.cifs) that can be obtained from diff --git a/Documentation/filesystems/orangefs.txt b/Documentation/filesystems/orangefs.txt index 1dfdec7909464650acd7786db4368e03223c0765..e2818b60a5c21739d65588cdcac350af0809e794 100644 --- a/Documentation/filesystems/orangefs.txt +++ b/Documentation/filesystems/orangefs.txt @@ -45,14 +45,11 @@ upstream version of the kernel client. BUILDING THE USERSPACE FILESYSTEM ON A SINGLE SERVER ==================================================== -When Orangefs is upstream, "--with-kernel" shouldn't be needed, but -until then the path to where the kernel with the Orangefs kernel client -patch was built is needed to ensure that pvfs2-client-core (the bridge -between kernel space and user space) will build properly. You can omit ---prefix if you don't care that things are sprinkled around in -/usr/local. +You can omit --prefix if you don't care that things are sprinkled around in +/usr/local. As of version 2.9.6, Orangefs uses Berkeley DB by default, we +will probably be changing the default to lmdb soon. -./configure --prefix=/opt/ofs --with-kernel=/path/to/orangefs/kernel +./configure --prefix=/opt/ofs --with-db-backend=lmdb make @@ -82,9 +79,6 @@ prove things are working with: /opt/osf/bin/pvfs2-ls /mymountpoint -You might not want to enforce selinux, it doesn't seem to matter by -linux 3.11... - If stuff seems to be working, turn on the client core: /opt/osf/sbin/pvfs2-client -p /opt/osf/sbin/pvfs2-client-core diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 5fb17f49f7a21907a2c446267cc952c749e42043..93e0a24045322b3e2417e024821520c9996eb6fc 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -228,7 +228,7 @@ anything from oops to silent memory corruption. 
--- [mandatory] - FS_NOMOUNT is gone. If you use it - just set MS_NOUSER in flags + FS_NOMOUNT is gone. If you use it - just set SB_NOUSER in flags (see rootfs for one kind of solution and bdev/socket/pipe for another). --- diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index e5e33bac20689fba3852ad91be990bddb33f3ff0..87814859cfc21c5c6a64c1dfb83a1d6cff596731 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -45,7 +45,7 @@ in many more places. There's xt_bpf for netfilter, cls_bpf in the kernel qdisc layer, SECCOMP-BPF (SECure COMPuting [1]), and lots of other places such as team driver, PTP code, etc where BPF is being used. - [1] Documentation/prctl/seccomp_filter.txt + [1] Documentation/userspace-api/seccomp_filter.rst Original BPF paper: @@ -337,7 +337,7 @@ Examples for low-level BPF: jeq #14, good /* __NR_rt_sigprocmask */ jeq #13, good /* __NR_rt_sigaction */ jeq #35, good /* __NR_nanosleep */ - bad: ret #0 /* SECCOMP_RET_KILL */ + bad: ret #0 /* SECCOMP_RET_KILL_THREAD */ good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */ The above example code can be placed into a file (here called "foo"), and diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index b3345d0fe0a67e477a6754848e7fc7be144322d5..77f4de59dc9ceb3cdb36692d1ea41e1d861468b0 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1680,6 +1680,9 @@ accept_dad - INTEGER 2: Enable DAD, and disable IPv6 operation if MAC-based duplicate link-local address has been found. + DAD operation and mode on a given interface will be selected according + to the maximum value of conf/{all,interface}/accept_dad. + force_tllao - BOOLEAN Enable sending the target link-layer address option even when responding to a unicast neighbor solicitation. @@ -1727,16 +1730,23 @@ suppress_frag_ndisc - INTEGER optimistic_dad - BOOLEAN Whether to perform Optimistic Duplicate Address Detection (RFC 4429). - 0: disabled (default) - 1: enabled + 0: disabled (default) + 1: enabled + + Optimistic Duplicate Address Detection for the interface will be enabled + if at least one of conf/{all,interface}/optimistic_dad is set to 1, + it will be disabled otherwise. use_optimistic - BOOLEAN If enabled, do not classify optimistic addresses as deprecated during source address selection. Preferred addresses will still be chosen before optimistic addresses, subject to other ranking in the source address selection algorithm. - 0: disabled (default) - 1: enabled + 0: disabled (default) + 1: enabled + + This will be enabled if at least one of + conf/{all,interface}/use_optimistic is set to 1, disabled otherwise. stable_secret - IPv6 address This IPv6 address will be used as a secret to generate IPv6 diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 5e40e1f68873b0f2b594c22a6e69ddb218ce56ec..82236a17b5e65198be004d2cdd6a7c5bd8a9b7d4 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip. Other setups with SR-IOV or soft switches, such as OVS, are possible. 
-                             User-space tools - -       user space                   | -      +-------------------------------------------------------------------+ -       kernel                       | Netlink -                                    | -                     +--------------+-------------------------------+ -                     |         Network stack                        | -                     |           (Linux)                            | -                     |                                              | -                     +----------------------------------------------+ + User-space tools + + user space | + +-------------------------------------------------------------------+ + kernel | Netlink + | + +--------------+-------------------------------+ + | Network stack | + | (Linux) | + | | + +----------------------------------------------+ sw1p2 sw1p4 sw1p6 -                      sw1p1  + sw1p3 +  sw1p5 +         eth1 -                        +    |    +    |    +    |            + -                        |    |    |    |    |    |            | -                     +--+----+----+----+-+--+----+---+  +-----+-----+ -                     |         Switch driver         |  |    mgmt   | -                     |        (this document)        |  |   driver  | -                     |                               |  |           | -                     +--------------+----------------+  +-----------+ -                                    | -       kernel                       | HW bus (eg PCI) -      +-------------------------------------------------------------------+ -       hardware                     | -                     +--------------+---+------------+ -                     |         Switch device (sw1)   | -                     |  +----+                       +--------+ -                     |  |    v offloaded data path   | mgmt port -                     |  |    |                       | -                     +--|----|----+----+----+----+---+ -                        |    |    |    |    |    | -                        +    +    +    +    +    + -                       p1   p2   p3   p4   p5   p6 - -                             front-panel ports + sw1p1 + sw1p3 + sw1p5 + eth1 + + | + | + | + + | | | | | | | + +--+----+----+----+----+----+---+ +-----+-----+ + | Switch driver | | mgmt | + | (this document) | | driver | + | | | | + +--------------+----------------+ +-----------+ + | + kernel | HW bus (eg PCI) + +-------------------------------------------------------------------+ + hardware | + +--------------+----------------+ + | Switch device (sw1) | + | +----+ +--------+ + | | v offloaded data path | mgmt port + | | | | + +--|----|----+----+----+----+---+ + | | | | | | + + + + + + + + p1 p2 p3 p4 p5 p6 + + front-panel ports Fig 1. 
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index ce61d1fe08cacb5af99260241b058d139838029a..694968c7523cc28620c8ac51a28a33dc1b14336e 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -75,6 +75,7 @@ show up in /proc/sys/kernel: - reboot-cmd [ SPARC only ] - rtsig-max - rtsig-nr +- seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst - sem - sem_next_id [ sysv ipc ] - sg-big-buff [ generic SCSI device (sg) ] diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst index f71eb5ef1f2df4154a0a31dd0307da00c5fde42f..099c412951d6b9dea515b08384e023e2c2d69025 100644 --- a/Documentation/userspace-api/seccomp_filter.rst +++ b/Documentation/userspace-api/seccomp_filter.rst @@ -87,11 +87,16 @@ Return values A seccomp filter may return any of the following values. If multiple filters exist, the return value for the evaluation of a given system call will always use the highest precedent value. (For example, -``SECCOMP_RET_KILL`` will always take precedence.) +``SECCOMP_RET_KILL_PROCESS`` will always take precedence.) In precedence order, they are: -``SECCOMP_RET_KILL``: +``SECCOMP_RET_KILL_PROCESS``: + Results in the entire process exiting immediately without executing + the system call. The exit status of the task (``status & 0x7f``) + will be ``SIGSYS``, not ``SIGKILL``. + +``SECCOMP_RET_KILL_THREAD``: Results in the task exiting immediately without executing the system call. The exit status of the task (``status & 0x7f``) will be ``SIGSYS``, not ``SIGKILL``. @@ -141,6 +146,15 @@ In precedence order, they are: allow use of ptrace, even of other sandboxed processes, without extreme care; ptracers can use this mechanism to escape.) +``SECCOMP_RET_LOG``: + Results in the system call being executed after it is logged. This + should be used by application developers to learn which syscalls their + application needs without having to iterate through multiple test and + development cycles to build the list. + + This action will only be logged if "log" is present in the + actions_logged sysctl string. + ``SECCOMP_RET_ALLOW``: Results in the system call being executed. @@ -169,7 +183,41 @@ The ``samples/seccomp/`` directory contains both an x86-specific example and a more generic example of a higher level macro interface for BPF program generation. +Sysctls +======= +Seccomp's sysctl files can be found in the ``/proc/sys/kernel/seccomp/`` +directory. Here's a description of each file in that directory: + +``actions_avail``: + A read-only ordered list of seccomp return values (refer to the + ``SECCOMP_RET_*`` macros above) in string form. The ordering, from + left-to-right, is the least permissive return value to the most + permissive return value. + + The list represents the set of seccomp return values supported + by the kernel. A userspace program may use this list to + determine if the actions found in the ``seccomp.h``, when the + program was built, differs from the set of actions actually + supported in the current running kernel. + +``actions_logged``: + A read-write ordered list of seccomp return values (refer to the + ``SECCOMP_RET_*`` macros above) that are allowed to be logged. Writes + to the file do not need to be in ordered form but reads from the file + will be ordered in the same way as the actions_avail sysctl. 
+ + It is important to note that the value of ``actions_logged`` does not + prevent certain actions from being logged when the audit subsystem is + configured to audit a task. If the action is not found in + ``actions_logged`` list, the final decision on whether to audit the + action for that task is ultimately left up to the audit subsystem to + decide for all seccomp return values other than ``SECCOMP_RET_ALLOW``. + + The ``allow`` string is not accepted in the ``actions_logged`` sysctl + as it is not possible to log ``SECCOMP_RET_ALLOW`` actions. Attempting + to write ``allow`` to the sysctl will result in an EINVAL being + returned. Adding architecture support =========================== diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index b3526365ea8e340143edd1670bc9d89e3944152d..6f9d7b4189170f25143d852d9cf2699af211f997 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt @@ -117,7 +117,7 @@ nowayout: Watchdog cannot be stopped once started ------------------------------------------------- iTCO_wdt: heartbeat: Watchdog heartbeat in seconds. - (5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30) + (2 +M: Len Brown +R: Andy Shevchenko +R: Mika Westerberg +L: linux-acpi@vger.kernel.org +Q: https://patchwork.kernel.org/project/linux-acpi/list/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm +B: https://bugzilla.kernel.org +S: Supported +F: drivers/acpi/pmic/ + ACPI THERMAL DRIVER M: Zhang Rui L: linux-acpi@vger.kernel.org @@ -644,6 +656,11 @@ ALPS PS/2 TOUCHPAD DRIVER R: Pali Rohár F: drivers/input/mouse/alps.* +ALTERA I2C CONTROLLER DRIVER +M: Thor Thayer +S: Maintained +F: drivers/i2c/busses/i2c-altera.c + ALTERA MAILBOX DRIVER M: Ley Foon Tan L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) @@ -2848,7 +2865,6 @@ S: Supported F: drivers/scsi/bnx2i/ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER -M: Yuval Mintz M: Ariel Elior M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org @@ -6638,8 +6654,8 @@ M: Alexander Aring M: Stefan Schmidt L: linux-wpan@vger.kernel.org W: http://wpan.cakelab.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git -T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git S: Maintained F: net/ieee802154/ F: net/mac802154/ @@ -7712,6 +7728,7 @@ M: John Crispin L: linux-mips@linux-mips.org S: Maintained F: arch/mips/lantiq +F: drivers/soc/lantiq LAPB module L: linux-x25@vger.kernel.org @@ -8580,6 +8597,12 @@ M: Sean Wang S: Maintained F: drivers/media/rc/mtk-cir.c +MEDIATEK PMIC LED DRIVER +M: Sean Wang +S: Maintained +F: drivers/leds/leds-mt6323.c +F: Documentation/devicetree/bindings/leds/leds-mt6323.txt + MEDIATEK ETHERNET DRIVER M: Felix Fietkau M: John Crispin @@ -8977,6 +9000,7 @@ M: Paul Burton L: linux-mips@linux-mips.org S: Supported F: arch/mips/generic/ +F: arch/mips/tools/generic-board-config.sh MIPS/LOONGSON1 ARCHITECTURE M: Keguang Zhang @@ -8987,6 +9011,13 @@ F: arch/mips/include/asm/mach-loongson32/ F: drivers/*/*loongson1* F: drivers/*/*/*loongson1* +MIPS RINT INSTRUCTION EMULATION +M: Aleksandar Markovic +L: linux-mips@linux-mips.org +S: Supported +F: arch/mips/math-emu/sp_rint.c +F: arch/mips/math-emu/dp_rint.c + MIROSOUND PCM20 FM RADIO RECEIVER DRIVER M: Hans Verkuil L: 
linux-media@vger.kernel.org @@ -9864,6 +9895,12 @@ F: drivers/regulator/twl-regulator.c F: drivers/regulator/twl6030-regulator.c F: include/linux/i2c-omap.h +ONION OMEGA2+ BOARD +M: Harvey Hunt +L: linux-mips@linux-mips.org +S: Maintained +F: arch/mips/boot/dts/ralink/omega2p.dts + OMFS FILESYSTEM M: Bob Copeland L: linux-karma-devel@lists.sourceforge.net @@ -11027,7 +11064,6 @@ S: Supported F: drivers/scsi/qedi/ QLOGIC QL4xxx ETHERNET DRIVER -M: Yuval Mintz M: Ariel Elior M: everest-linux-l2@cavium.com L: netdev@vger.kernel.org @@ -11415,6 +11451,8 @@ RENESAS ETHERNET DRIVERS R: Sergei Shtylyov L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org +F: Documentation/devicetree/bindings/net/renesas,*.txt +F: Documentation/devicetree/bindings/net/sh_eth.txt F: drivers/net/ethernet/renesas/ F: include/linux/sh_eth.h @@ -14385,6 +14423,12 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/vmxnet3/ +VOCORE VOCORE2 BOARD +M: Harvey Hunt +L: linux-mips@linux-mips.org +S: Maintained +F: arch/mips/boot/dts/ralink/vocore2.dts + VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood M: Mark Brown diff --git a/Makefile b/Makefile index ab067d51ddf15786df7daef7d388e4ce7c8ac3b3..cf007a31d575d6312f3f5a860e9591dc329a86a4 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 -PATCHLEVEL = 13 +PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc3 NAME = Fearless Coyote # *DOCUMENTATION* @@ -130,8 +130,8 @@ endif ifneq ($(KBUILD_OUTPUT),) # check that the output directory actually exists saved-output := $(KBUILD_OUTPUT) -KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \ - && /bin/pwd) +$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT)) +KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT)) $(if $(KBUILD_OUTPUT),, \ $(error failed to create output directory "$(saved-output)")) @@ -978,7 +978,7 @@ ifdef CONFIG_HEADERS_CHECK $(Q)$(MAKE) -f $(srctree)/Makefile headers_check endif ifdef CONFIG_GDB_SCRIPTS - $(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py + $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py) endif ifdef CONFIG_TRIM_UNUSED_KSYMS $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ @@ -1128,16 +1128,6 @@ headerdep: $(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \ $(srctree)/scripts/headerdep.pl -I$(srctree)/include -# --------------------------------------------------------------------------- -# Firmware install -INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware -export INSTALL_FW_PATH - -PHONY += firmware_install -firmware_install: - @mkdir -p $(objtree)/firmware - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install - # --------------------------------------------------------------------------- # Kernel headers @@ -1182,11 +1172,11 @@ headers_check: headers_install PHONY += kselftest kselftest: - $(Q)$(MAKE) -C tools/testing/selftests run_tests + $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests PHONY += kselftest-clean kselftest-clean: - $(Q)$(MAKE) -C tools/testing/selftests clean + $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean PHONY += kselftest-merge kselftest-merge: @@ -1216,7 +1206,6 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order @$(kecho) ' Building modules, stage 2.'; $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild 
modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) $(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin @@ -1238,7 +1227,7 @@ _modinst_: @rm -rf $(MODLIB)/kernel @rm -f $(MODLIB)/source @mkdir -p $(MODLIB)/kernel - @ln -s `cd $(srctree) && /bin/pwd` $(MODLIB)/source + @ln -s $(abspath $(srctree)) $(MODLIB)/source @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \ rm -f $(MODLIB)/build ; \ ln -s $(CURDIR) $(MODLIB)/build ; \ @@ -1252,7 +1241,6 @@ _modinst_: # boot script depmod is the master version. PHONY += _modinst_post _modinst_post: _modinst_ - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst $(call cmd,depmod) ifeq ($(CONFIG_MODULE_SIG), y) @@ -1375,8 +1363,6 @@ help: @echo '* vmlinux - Build the bare kernel' @echo '* modules - Build all modules' @echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)' - @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH' - @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)' @echo ' dir/ - Build all files in dir and below' @echo ' dir/file.[ois] - Build specified target only' @echo ' dir/file.ll - Build the LLVM assembly file' @@ -1630,11 +1616,11 @@ image_name: # Clear a bunch of variables before executing the submake tools/: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(src)/tools/ tools/%: FORCE $(Q)mkdir -p $(objtree)/tools - $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $* + $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(src)/tools/ $* # Single targets # --------------------------------------------------------------------------- diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index d400a2161935de5442c0ae1ddc7159ae16c003a4..8ee41e9881690b648bf1d4e5c385ddeef0a860c2 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -78,9 +78,6 @@ struct task_struct; #endif -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index c4ffb441716c405d61cc16fd74e6fe071076940f..877cec8f5ea21256e233f4f353882ff02097537a 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -510,7 +510,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) goto done; } - str = (char *)__get_free_page(GFP_TEMPORARY); + str = (char *)__get_free_page(GFP_KERNEL); if (!str) goto done; diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 7e94476f39943026eda5522f281d50233b4d8d79..7d8c1d6c2f60f918e95f2cafc81c90c028684b9a 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -178,7 +178,7 @@ void show_regs(struct pt_regs *regs) struct callee_regs *cregs; char *buf; - buf = (char *)__get_free_page(GFP_TEMPORARY); + buf = (char *)__get_free_page(GFP_KERNEL); if (!buf) return; diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index cb47ae79a5f9e87ed1e9ce84b148b6829c9e2935..1b0bd72945f21337d1e89c3e1636bbfb5cabeecd 100644 
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -267,15 +267,19 @@ clock-frequency = <400000>; as3645a@30 { + #address-cells = <1>; + #size-cells = <0>; reg = <0x30>; compatible = "ams,as3645a"; - flash { + flash@0 { + reg = <0x0>; flash-timeout-us = <150000>; flash-max-microamp = <320000>; led-max-microamp = <60000>; - peak-current-limit = <1750000>; + ams,input-max-microamp = <1750000>; }; - indicator { + indicator@1 { + reg = <0x1>; led-max-microamp = <10000>; }; }; diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 1d468b527b7b6928d2bb21c2152a3a282a7a366d..776757d1604ab3901996bb24bb02748e54c2aee7 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -139,11 +139,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_UPROBE 3 /* breakpointed or singlestepping */ -#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */ -#define TIF_SYSCALL_TRACE 5 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ -#define TIF_SYSCALL_TRACEPOINT 7 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ +#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 @@ -154,7 +153,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_UPROBE (1 << TIF_UPROBE) -#define _TIF_FSCHECK (1 << TIF_FSCHECK) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) @@ -168,9 +166,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, /* * Change these and you break ASM code in entry-common.S */ -#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_FSCHECK) +#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 87936dd5d1510572993d4bcc9b4fa1222a4dd3b2..0bf2347495f13e4db7fdc1c560b0976ec1fbd619 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -70,8 +70,6 @@ static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; modify_domain(DOMAIN_KERNEL, fs ? 
DOMAIN_CLIENT : DOMAIN_MANAGER); - /* On user-mode return, check fs is correct */ - set_thread_flag(TIF_FSCHECK); } #define segment_eq(a, b) ((a) == (b)) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index ca3614dc6938e1595f71a095f53456eb26701bec..99c908226065cf6331324309edeadfa618597874 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_AEABI #include #endif @@ -48,12 +49,14 @@ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq_notrace @ disable interrupts + ldr r2, [tsk, #TI_ADDR_LIMIT] + cmp r2, #TASK_SIZE + blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne fast_work_pending - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK bne fast_work_pending + /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr @@ -76,16 +79,16 @@ ret_fast_syscall: UNWIND(.cantunwind ) str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 disable_irq_notrace @ disable interrupts + ldr r2, [tsk, #TI_ADDR_LIMIT] + cmp r2, #TASK_SIZE + blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK - bne fast_work_pending - tst r1, #_TIF_WORK_MASK + tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK beq no_work_pending UNWIND(.fnend ) ENDPROC(ret_fast_syscall) /* Slower path - fall through to work_pending */ -fast_work_pending: #endif tst r1, #_TIF_SYSCALL_WORK @@ -111,6 +114,9 @@ ENTRY(ret_to_user) ret_slow_syscall: disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) + ldr r2, [tsk, #TI_ADDR_LIMIT] + cmp r2, #TASK_SIZE + blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK bne slow_work_pending diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index e2de50bf87425e4a55baa66ef68d1fe99e53e1f8..b67ae12503f30c3ba113f1d8c7a943d17e9db717 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -614,10 +614,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) * Update the trace code with the current status. 
*/ trace_hardirqs_off(); - - /* Check valid user FS if needed */ - addr_limit_user_check(); - do { if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); @@ -678,3 +674,9 @@ struct page *get_signal_page(void) return page; } + +/* Defer to generic check */ +asmlinkage void addr_limit_check_failed(void) +{ + addr_limit_user_check(); +} diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 9b41f1e3b1a039cd45fe842e10abff0181186fdf..939b310913cf38cd7ca3136128fb5440340d7d12 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -50,17 +50,22 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) +KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) +KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) + ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB LD += -EB +LDFLAGS += -maarch64linuxb UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL LD += -EL +LDFLAGS += -maarch64linux UTS_MACHINE := aarch64 endif diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 636c1bced7d4a9dd60d3a6489997ebd82b83d96e..1b266292f0bee00ce1952005479053e820b5ca0e 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -1,7 +1,7 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H -#define __ALIGN .align 4 -#define __ALIGN_STR ".align 4" +#define __ALIGN .align 2 +#define __ALIGN_STR ".align 2" #endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index bc4e92337d1690045b7b5c97efd11ad4fb8bb453..b46e54c2399b58b6451ea9dacc5c033115b05c64 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) /* Find an entry in the third-level page table. */ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t)) +#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 3a68cf38a6b36712e1a5a6a8463a19d32bbdb4f5..f444f374bd7b3ccea1b097cc13d9229db5c8f2c3 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -321,6 +321,8 @@ void kernel_neon_end(void) } EXPORT_SYMBOL(kernel_neon_end); +#ifdef CONFIG_EFI + static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); @@ -370,6 +372,8 @@ void __efi_fpsimd_end(void) kernel_neon_end(); } +#endif /* CONFIG_EFI */ + #endif /* CONFIG_KERNEL_MODE_NEON */ #ifdef CONFIG_CPU_PM diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7434ec0c7a271632adce1a497bc097306199e580..0b243ecaf7ac87faa81fed0a09db996757d71f4e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -384,6 +384,7 @@ ENTRY(kimage_vaddr) * booted in EL1 or EL2 respectively. 
*/ ENTRY(el2_setup) + msr SPsel, #1 // We want to use SP_EL{1,2} mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq 1f diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index c45214f8fb5497fdaed8665b8b066bbfc0118ed2..0bdc96c61bc0f2b14f627142a2778409c9c6a7d0 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -751,10 +751,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, */ trace_hardirqs_off(); - /* Check valid user FS if needed */ - addr_limit_user_check(); - do { + /* Check valid user FS if needed */ + addr_limit_user_check(); + if (thread_flags & _TIF_NEED_RESCHED) { schedule(); } else { diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 3144584617e7a474cc9c15e829d294bb4315ebb0..76809ccd309ccaf330e45ac4b814922ae5949624 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -140,7 +140,8 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +static noinline void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) { struct stack_trace_data data; struct stackframe frame; @@ -150,15 +151,16 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) data.trace = trace; data.skip = trace->skip; + data.no_sched_functions = nosched; if (tsk != current) { - data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); frame.pc = thread_saved_pc(tsk); } else { - data.no_sched_functions = 0; + /* We don't want this function nor the caller */ + data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)save_stack_trace_tsk; + frame.pc = (unsigned long)__save_stack_trace; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER frame.graph = tsk->curr_ret_stack; @@ -172,9 +174,15 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} + void save_stack_trace(struct stack_trace *trace) { - save_stack_trace_tsk(current, trace); + __save_stack_trace(current, trace, 0); } + EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 89993c4be1befe3d7629bf45174f10081027d2a2..2069e9bc0fca674ba1d6fe769be05e6f731ef93d 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -651,7 +651,7 @@ static const struct fault_info fault_info[] = { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, - { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, { do_bad, SIGBUS, 0, "unknown 8" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h index 7c87b5be53b5b74c76fbe03829034b1e96ac3c7c..8f7cce829f8e2630cb88e81cdb024a8375a59e4f 100644 --- a/arch/c6x/include/asm/processor.h +++ b/arch/c6x/include/asm/processor.h @@ -92,9 +92,6 @@ static inline void release_thread(struct task_struct *dead_task) { } 
-#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - /* * saved kernel SP and DP of a blocked thread. */ diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h index e4d08d74ed9f8dc4f81140fcf60442378f02cf58..021cce78b4010d7b5c727d1e86812f85e81671bf 100644 --- a/arch/frv/include/asm/processor.h +++ b/arch/frv/include/asm/processor.h @@ -92,10 +92,6 @@ static inline void release_thread(struct task_struct *dead_task) extern asmlinkage void save_user_regs(struct user_context *target); extern asmlinkage void *restore_user_regs(const struct user_context *target, ...); -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) -#define forget_segments() do { } while (0) - unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc) diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug index de9d507ba0fd466d8b466f54d1143a46d20caaa0..4763887ba3682112f5fe5020622391c81c830d73 100644 --- a/arch/ia64/Kconfig.debug +++ b/arch/ia64/Kconfig.debug @@ -56,9 +56,4 @@ config IA64_DEBUG_IRQ and restore instructions. It's useful for tracking down spinlock problems, but slow! If you're unsure, select N. -config SYSVIPC_COMPAT - bool - depends on COMPAT && SYSVIPC - default y - endmenu diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h index 657874eeeccc262c11268094e0f0ba530e05dd92..c70fa9ac71690d2529d97e1040a87ed01a8a35da 100644 --- a/arch/m32r/include/asm/processor.h +++ b/arch/m32r/include/asm/processor.h @@ -118,14 +118,6 @@ struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Copy and release all segment info associated with a VM */ -extern void copy_segments(struct task_struct *p, struct mm_struct * mm); -extern void release_segments(struct mm_struct * mm); - -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.lr) #define KSTK_ESP(tsk) ((tsk)->thread.sp) diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index ec6a49076980fff90dc04ece4b389ea1782449b9..8ae92d6abfd2e24d9e6d767fab2fc83db8720fcf 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -131,9 +131,6 @@ static inline void release_thread(struct task_struct *dead_task) { } -#define copy_segments(tsk, mm) do { } while (0) -#define release_segments(mm) do { } while (0) - /* * Return saved PC of a blocked thread. */ diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 9d26abdf0dc1c2349e5822b01416f0f0cd154b58..4f798aa671ddd2f481c35689f03be1ea73318ae8 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -39,7 +39,7 @@ config MICROBLAZE # Endianness selection choice prompt "Endianness selection" - default CPU_BIG_ENDIAN + default CPU_LITTLE_ENDIAN help microblaze architectures can be configured for either little or big endian formats. Be sure to select the appropriate mode. 
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index e77a596f3f1efcfe94c4bc89b9b3cc860d589210..06609ca361150ab77529bf0999d8e258ad25d62c 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += fcntl.h generic-y += ioctl.h generic-y += ioctls.h generic-y += ipcbuf.h +generic-y += kvm_para.h generic-y += mman.h generic-y += msgbuf.h generic-y += param.h diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index e45ada8fb00669a8889284db230908ed64f2dda1..94700c5270a91e69cb0acb7b32b20f272899a1a6 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -165,7 +165,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, unsigned long attrs) { #ifdef CONFIG_MMU - unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; unsigned long pfn; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 48d91d5be4e9b6cf56c1cd0849830e2708967aa2..cb7fcc4216fdfa5d25e6a76265efc0b66b81e0ad 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1627,14 +1627,6 @@ config CPU_R5500 NEC VR5500 and VR5500A series processors implement 64-bit MIPS IV instruction set. -config CPU_R6000 - bool "R6000" - depends on SYS_HAS_CPU_R6000 - select CPU_SUPPORTS_32BIT_KERNEL - help - MIPS Technologies R6000 and R6000A series processors. Note these - processors are extremely rare and the support for them is incomplete. - config CPU_NEVADA bool "RM52xx" depends on SYS_HAS_CPU_NEVADA @@ -1950,9 +1942,6 @@ config SYS_HAS_CPU_R5432 config SYS_HAS_CPU_R5500 bool -config SYS_HAS_CPU_R6000 - bool - config SYS_HAS_CPU_NEVADA bool @@ -2180,7 +2169,7 @@ config PAGE_SIZE_32KB config PAGE_SIZE_64KB bool "64kB" - depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000 + depends on !CPU_R3000 && !CPU_TX39XX help Using 64kB page size will result in higher performance kernel at the price of higher memory consumption. 
This option is available on @@ -2248,11 +2237,11 @@ config CPU_HAS_PREFETCH config CPU_GENERIC_DUMP_TLB bool - default y if !(CPU_R3000 || CPU_R6000 || CPU_R8000 || CPU_TX39XX) + default y if !(CPU_R3000 || CPU_R8000 || CPU_TX39XX) config CPU_R4K_FPU bool - default y if !(CPU_R3000 || CPU_R6000 || CPU_TX39XX || CPU_CAVIUM_OCTEON) + default y if !(CPU_R3000 || CPU_TX39XX) config CPU_R4K_CACHE_TLB bool @@ -2260,6 +2249,7 @@ config CPU_R4K_CACHE_TLB config MIPS_MT_SMP bool "MIPS MT SMP support (1 TC on each available VPE)" + default y depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI @@ -2376,7 +2366,6 @@ config MIPS_CPS bool "MIPS Coherent Processing System support" depends on SYS_SUPPORTS_MIPS_CPS select MIPS_CM - select MIPS_CPC select MIPS_CPS_PM if HOTPLUG_CPU select SMP select SYNC_R4K if (CEVT_R4K || CSRC_R4K) @@ -2393,11 +2382,11 @@ config MIPS_CPS config MIPS_CPS_PM depends on MIPS_CPS - select MIPS_CPC bool config MIPS_CM bool + select MIPS_CPC config MIPS_CPC bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index bc2708c9ada40cf4206cd228d84a6ebf634161b3..a96d97a806c9906e826ae8a1086be7eb7a8ff65f 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -151,7 +151,6 @@ cflags-y += -fno-stack-check # cflags-$(CONFIG_CPU_R3000) += -march=r3000 cflags-$(CONFIG_CPU_TX39XX) += -march=r3900 -cflags-$(CONFIG_CPU_R6000) += -march=r6000 -Wa,--trap cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap @@ -291,7 +290,8 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ VMLINUX_ENTRY_ADDRESS=$(entry-y) \ - PLATFORM="$(platform-y)" + PLATFORM="$(platform-y)" \ + ITS_INPUTS="$(its-y)" ifdef CONFIG_32BIT bootvars-y += ADDR_BITS=32 endif @@ -299,6 +299,10 @@ ifdef CONFIG_64BIT bootvars-y += ADDR_BITS=64 endif +# This is required to get dwarf unwinding tables into .debug_frame +# instead of .eh_frame so we don't discard them. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables + LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS @@ -500,8 +504,14 @@ $(eval $(call gen_generic_defconfigs,micro32,r2,eb el)) .PHONY: $(generic_defconfigs) $(generic_defconfigs): $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \ - -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \ - $(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config) + -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ | \ + grep -Ev '^#' + $(Q)cp $(KCONFIG_CONFIG) $(objtree)/.config.$@ + $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig \ + KCONFIG_CONFIG=$(objtree)/.config.$@ >/dev/null + $(Q)$(CONFIG_SHELL) $(srctree)/arch/$(ARCH)/tools/generic-board-config.sh \ + $(srctree) $(objtree) $(objtree)/.config.$@ $(KCONFIG_CONFIG) \ + "$(origin BOARDS)" $(BOARDS) $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig # @@ -509,6 +519,19 @@ $(generic_defconfigs): # $(generic_config_dir)/%.config: ; +# +# Prevent direct use of generic_defconfig, which is intended to be used as the +# basis of the various ISA-specific targets generated above. 
+# +.PHONY: generic_defconfig +generic_defconfig: + $(Q)echo "generic_defconfig is not intended for direct use, but should instead be" + $(Q)echo "used via an ISA-specific target from the following list:" + $(Q)echo + $(Q)for cfg in $(generic_defconfigs); do echo " $${cfg}"; done + $(Q)echo + $(Q)false + # # Legacy defconfig compatibility - these targets used to be real defconfigs but # now that the boards have been converted to use the generic kernel they are diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index 83831002c832b8be8d03d1ea8bd259324b0fac65..da76637704253e3610f4f15b69a42e75af99d516 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -344,28 +344,32 @@ static struct platform_device db1200_ide_dev = { /* SD carddetects: they're supposed to be edge-triggered, but ack * doesn't seem to work (CPLD Rev 2). Instead, the screaming one - * is disabled and its counterpart enabled. The 500ms timeout is - * because the carddetect isn't debounced in hardware. + * is disabled and its counterpart enabled. The 200ms timeout is + * because the carddetect usually triggers twice, after debounce. */ static irqreturn_t db1200_mmc_cd(int irq, void *ptr) { - void(*mmc_cd)(struct mmc_host *, unsigned long); + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} - if (irq == DB1200_SD0_INSERT_INT) { - disable_irq_nosync(DB1200_SD0_INSERT_INT); - enable_irq(DB1200_SD0_EJECT_INT); - } else { - disable_irq_nosync(DB1200_SD0_EJECT_INT); - enable_irq(DB1200_SD0_INSERT_INT); - } +static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr) +{ + void (*mmc_cd)(struct mmc_host *, unsigned long); /* link against CONFIG_MMC=m */ mmc_cd = symbol_get(mmc_detect_change); if (mmc_cd) { - mmc_cd(ptr, msecs_to_jiffies(500)); + mmc_cd(ptr, msecs_to_jiffies(200)); symbol_put(mmc_detect_change); } + msleep(100); /* debounce */ + if (irq == DB1200_SD0_INSERT_INT) + enable_irq(DB1200_SD0_EJECT_INT); + else + enable_irq(DB1200_SD0_INSERT_INT); + return IRQ_HANDLED; } @@ -374,13 +378,13 @@ static int db1200_mmc_cd_setup(void *mmc_host, int en) int ret; if (en) { - ret = request_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd, - 0, "sd_insert", mmc_host); + ret = request_threaded_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd, + db1200_mmc_cdfn, 0, "sd_insert", mmc_host); if (ret) goto out; - ret = request_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd, - 0, "sd_eject", mmc_host); + ret = request_threaded_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd, + db1200_mmc_cdfn, 0, "sd_eject", mmc_host); if (ret) { free_irq(DB1200_SD0_INSERT_INT, mmc_host); goto out; @@ -436,23 +440,27 @@ static struct led_classdev db1200_mmc_led = { static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr) { - void(*mmc_cd)(struct mmc_host *, unsigned long); + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} - if (irq == PB1200_SD1_INSERT_INT) { - disable_irq_nosync(PB1200_SD1_INSERT_INT); - enable_irq(PB1200_SD1_EJECT_INT); - } else { - disable_irq_nosync(PB1200_SD1_EJECT_INT); - enable_irq(PB1200_SD1_INSERT_INT); - } +static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr) +{ + void (*mmc_cd)(struct mmc_host *, unsigned long); /* link against CONFIG_MMC=m */ mmc_cd = symbol_get(mmc_detect_change); if (mmc_cd) { - mmc_cd(ptr, msecs_to_jiffies(500)); + mmc_cd(ptr, msecs_to_jiffies(200)); symbol_put(mmc_detect_change); } + msleep(100); /* debounce */ + if (irq == PB1200_SD1_INSERT_INT) + enable_irq(PB1200_SD1_EJECT_INT); + else + enable_irq(PB1200_SD1_INSERT_INT); + return IRQ_HANDLED; } @@ 
-461,13 +469,13 @@ static int pb1200_mmc1_cd_setup(void *mmc_host, int en) int ret; if (en) { - ret = request_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd, 0, - "sd1_insert", mmc_host); + ret = request_threaded_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd, + pb1200_mmc1_cdfn, 0, "sd1_insert", mmc_host); if (ret) goto out; - ret = request_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd, 0, - "sd1_eject", mmc_host); + ret = request_threaded_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd, + pb1200_mmc1_cdfn, 0, "sd1_eject", mmc_host); if (ret) { free_irq(PB1200_SD1_INSERT_INT, mmc_host); goto out; diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index 3e7fbdbdb3c450c75ce425bb10a57f71bbf21d78..cd1ae29f95a378e76fea3d8f189ee904197bde99 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -450,24 +450,27 @@ static struct platform_device db1300_ide_dev = { static irqreturn_t db1300_mmc_cd(int irq, void *ptr) { - void(*mmc_cd)(struct mmc_host *, unsigned long); + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} - /* disable the one currently screaming. No other way to shut it up */ - if (irq == DB1300_SD1_INSERT_INT) { - disable_irq_nosync(DB1300_SD1_INSERT_INT); - enable_irq(DB1300_SD1_EJECT_INT); - } else { - disable_irq_nosync(DB1300_SD1_EJECT_INT); - enable_irq(DB1300_SD1_INSERT_INT); - } +static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr) +{ + void (*mmc_cd)(struct mmc_host *, unsigned long); /* link against CONFIG_MMC=m. We can only be called once MMC core has * initialized the controller, so symbol_get() should always succeed. */ mmc_cd = symbol_get(mmc_detect_change); - mmc_cd(ptr, msecs_to_jiffies(500)); + mmc_cd(ptr, msecs_to_jiffies(200)); symbol_put(mmc_detect_change); + msleep(100); /* debounce */ + if (irq == DB1300_SD1_INSERT_INT) + enable_irq(DB1300_SD1_EJECT_INT); + else + enable_irq(DB1300_SD1_INSERT_INT); + return IRQ_HANDLED; } @@ -487,13 +490,13 @@ static int db1300_mmc_cd_setup(void *mmc_host, int en) int ret; if (en) { - ret = request_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, 0, - "sd_insert", mmc_host); + ret = request_threaded_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, + db1300_mmc_cdfn, 0, "sd_insert", mmc_host); if (ret) goto out; - ret = request_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, 0, - "sd_eject", mmc_host); + ret = request_threaded_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, + db1300_mmc_cdfn, 0, "sd_eject", mmc_host); if (ret) { free_irq(DB1300_SD1_INSERT_INT, mmc_host); goto out; diff --git a/arch/mips/alchemy/devboards/db1xxx.c b/arch/mips/alchemy/devboards/db1xxx.c index 2d47f951121a710fb5960fdae14d28a777e07997..c9ad28995cd2d637ca4bc37ffeec5e7f853d26bc 100644 --- a/arch/mips/alchemy/devboards/db1xxx.c +++ b/arch/mips/alchemy/devboards/db1xxx.c @@ -2,6 +2,7 @@ * Alchemy DB/PB1xxx board support. 
*/ +#include #include #include @@ -97,6 +98,7 @@ arch_initcall(db1xxx_arch_init); static int __init db1xxx_dev_init(void) { + mips_set_machine_name(board_type_str()); switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) { case BCSR_WHOAMI_DB1000: case BCSR_WHOAMI_DB1500: diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c index dda422a0f36cdc17f9c897bfbdb5455b72e7e072..0137656107a9c5b58c2dcdcaa6bac2132fb1957d 100644 --- a/arch/mips/ar7/clock.c +++ b/arch/mips/ar7/clock.c @@ -430,6 +430,9 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } EXPORT_SYMBOL(clk_get_rate); diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c index fa845953f736f87af7f62100a0b4ac149b3f331b..6b1000b6a6a6717a2e18032a644feef99666a9b4 100644 --- a/arch/mips/ath79/clock.c +++ b/arch/mips/ath79/clock.c @@ -487,17 +487,16 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np) { struct clk *ref_clk; void __iomem *pll_base; - const char *dnfn = of_node_full_name(np); ref_clk = of_clk_get(np, 0); if (IS_ERR(ref_clk)) { - pr_err("%s: of_clk_get failed\n", dnfn); + pr_err("%pOF: of_clk_get failed\n", np); goto err; } pll_base = of_iomap(np, 0); if (!pll_base) { - pr_err("%s: can't map pll registers\n", dnfn); + pr_err("%pOF: can't map pll registers\n", np); goto err_clk; } @@ -506,12 +505,12 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np) else if (of_device_is_compatible(np, "qca,ar9330-pll")) ar9330_clk_init(ref_clk, pll_base); else { - pr_err("%s: could not find any appropriate clk_init()\n", dnfn); + pr_err("%pOF: could not find any appropriate clk_init()\n", np); goto err_iounmap; } if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) { - pr_err("%s: could not register clk provider\n", dnfn); + pr_err("%pOF: could not register clk provider\n", np); goto err_iounmap; } diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c index 730c0b03060d0c941eaf29ee394f3c55be121954..b816cb4a25ff95eb7ca737f3296a71c5aea54830 100644 --- a/arch/mips/ath79/pci.c +++ b/arch/mips/ath79/pci.c @@ -22,10 +22,10 @@ #include "pci.h" static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); -static const struct ath79_pci_irq *ath79_pci_irq_map __initdata; -static unsigned ath79_pci_nr_irqs __initdata; +static const struct ath79_pci_irq *ath79_pci_irq_map; +static unsigned ath79_pci_nr_irqs; -static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = { +static const struct ath79_pci_irq ar71xx_pci_irq_map[] = { { .slot = 17, .pin = 1, @@ -41,7 +41,7 @@ static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = { } }; -static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = { +static const struct ath79_pci_irq ar724x_pci_irq_map[] = { { .slot = 0, .pin = 1, @@ -49,7 +49,7 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = { } }; -static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { +static const struct ath79_pci_irq qca955x_pci_irq_map[] = { { .bus = 0, .slot = 0, @@ -64,7 +64,7 @@ static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { }, }; -int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) +int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) { int irq = -1; int i; diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 73626040e4d6a1150f8ac02719eef9b6f7a31d4d..19577f771c1f0ce4ecc5376c40c14f4d138520e7 100644 --- a/arch/mips/bcm63xx/clk.c +++ 
b/arch/mips/bcm63xx/clk.c @@ -339,6 +339,9 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 145b5ce8eb7e660dda67f6a53aa472f74a413af0..1bd5c4f00d19bd7ff4aee34789e0fbb371240a4d 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -118,6 +118,12 @@ ifeq ($(ADDR_BITS),64) itb_addr_cells = 2 endif +quiet_cmd_its_cat = CAT $@ + cmd_its_cat = cat $^ >$@ + +$(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) + $(call if_changed,its_cat) + quiet_cmd_cpp_its_S = ITS $@ cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ @@ -128,19 +134,19 @@ quiet_cmd_cpp_its_S = ITS $@ -DADDR_BITS=$(ADDR_BITS) \ -DADDR_CELLS=$(itb_addr_cells) -$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) -$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.gz.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) -$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.bz2.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) -$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.lzma.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) -$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE +$(obj)/vmlinux.lzo.its: $(obj)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) quiet_cmd_itb-image = ITB $@ diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile index b9db49203e0c08a1d427fee24864074891c146b2..cbac26ce063ef2954472d9ec0cae42855c7b35d7 100644 --- a/arch/mips/boot/dts/Makefile +++ b/arch/mips/boot/dts/Makefile @@ -5,6 +5,7 @@ dts-dirs += ingenic dts-dirs += lantiq dts-dirs += mti dts-dirs += netlogic +dts-dirs += ni dts-dirs += pic32 dts-dirs += qca dts-dirs += ralink diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index fd138d9978c11fb68812f7a2a97b9ec464a8a9ab..6c381844929cf41c5fedba979c1eff2b2b0450b4 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -1,6 +1,7 @@ /dts-v1/; #include "jz4780.dtsi" +#include / { compatible = "img,ci20", "ingenic,jz4780"; @@ -21,6 +22,13 @@ reg = <0x0 0x10000000 0x30000000 0x30000000>; }; + + eth0_power: fixedregulator@0 { + compatible = "regulator-fixed"; + regulator-name = "eth0_power"; + gpio = <&gpb 25 GPIO_ACTIVE_LOW>; + enable-active-high; + }; }; &ext { @@ -123,6 +131,29 @@ }; }; }; + + dm9000@6 { + compatible = "davicom,dm9000"; + davicom,no-eeprom; + + pinctrl-names = "default"; + pinctrl-0 = <&pins_nemc_cs6>; + + reg = <6 0 1 /* addr */ + 6 2 1>; /* data */ + + ingenic,nemc-tAS = <15>; + ingenic,nemc-tAH = <10>; + ingenic,nemc-tBP = <20>; + ingenic,nemc-tAW = <50>; + ingenic,nemc-tSTRV = <100>; + + reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>; + vcc-supply = <ð0_power>; + + interrupt-parent = <&gpe>; + interrupts = <19 4>; + }; }; &bch { @@ -165,4 +196,10 @@ groups = "nemc-cs1"; bias-disable; }; + + pins_nemc_cs6: nemc-cs6 { + function = "nemc-cs6"; + groups = "nemc-cs6"; + 
bias-disable; + }; }; diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index 4853ef67b3ab3c556abe4b0fa7876689cab39ebf..e906134ecaef3a194296645aac76a7dcab599d9c 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -44,6 +44,17 @@ #clock-cells = <1>; }; + rtc_dev: rtc@10003000 { + compatible = "ingenic,jz4780-rtc"; + reg = <0x10003000 0x4c>; + + interrupt-parent = <&intc>; + interrupts = <32>; + + clocks = <&cgu JZ4780_CLK_RTCLK>; + clock-names = "rtc"; + }; + pinctrl: pin-controller@10010000 { compatible = "ingenic,jz4780-pinctrl"; reg = <0x10010000 0x600>; diff --git a/arch/mips/boot/dts/ni/169445.dts b/arch/mips/boot/dts/ni/169445.dts new file mode 100644 index 0000000000000000000000000000000000000000..5389ef46c48092471928902dfadbb38dfb8edcfe --- /dev/null +++ b/arch/mips/boot/dts/ni/169445.dts @@ -0,0 +1,100 @@ +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ni,169445"; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + device_type = "cpu"; + compatible = "mti,mips14KEc"; + clocks = <&baseclk>; + reg = <0>; + }; + }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x10000000>; + }; + + baseclk: baseclock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <50000000>; + }; + + cpu_intc: interrupt-controller { + #address-cells = <0>; + compatible = "mti,cpu-interrupt-controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + ahb@1f300000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x1f300000 0x80FFF>; + + gpio1: gpio@10 { + compatible = "ni,169445-nand-gpio"; + reg = <0x10 0x4>; + reg-names = "dat"; + gpio-controller; + #gpio-cells = <2>; + }; + + gpio2: gpio@14 { + compatible = "ni,169445-nand-gpio"; + reg = <0x14 0x4>; + reg-names = "dat"; + gpio-controller; + #gpio-cells = <2>; + no-output; + }; + + nand@0 { + compatible = "gpio-control-nand"; + nand-on-flash-bbt; + nand-ecc-mode = "soft_bch"; + nand-ecc-step-size = <512>; + nand-ecc-strength = <4>; + reg = <0x0 4>; + gpios = <&gpio2 0 0>, /* rdy */ + <&gpio1 1 0>, /* nce */ + <&gpio1 2 0>, /* ale */ + <&gpio1 3 0>, /* cle */ + <&gpio1 4 0>; /* nwp */ + }; + + serial@80000 { + compatible = "ns16550a"; + reg = <0x80000 0x1000>; + interrupt-parent = <&cpu_intc>; + interrupts = <6>; + clocks = <&baseclk>; + reg-shift = <0>; + }; + + ethernet@40000 { + compatible = "snps,dwmac-4.10a"; + interrupt-parent = <&cpu_intc>; + interrupts = <5>; + interrupt-names = "macirq"; + reg = <0x40000 0x2000>; + clock-names = "stmmaceth", "pclk"; + clocks = <&baseclk>, <&baseclk>; + + phy-mode = "rgmii"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; +}; diff --git a/arch/mips/boot/dts/ni/Makefile b/arch/mips/boot/dts/ni/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..66cfdffc51c2d193a62b41dc1b3b8a2d64bcc018 --- /dev/null +++ b/arch/mips/boot/dts/ni/Makefile @@ -0,0 +1,7 @@ +dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445) += 169445.dtb + +# Force kbuild to make empty built-in.o if necessary +obj- += dummy.o + +always := $(dtb-y) +clean-files := *.dtb *.dtb.S diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile index 2a7225954bf6be48c9a7c024f9177de33787c3be..55e2937b61f3810c5654713dc55ef5b01694421e 100644 --- a/arch/mips/boot/dts/ralink/Makefile +++ b/arch/mips/boot/dts/ralink/Makefile @@ -2,6 +2,8 @@ dtb-$(CONFIG_DTB_RT2880_EVAL) += rt2880_eval.dtb 
dtb-$(CONFIG_DTB_RT305X_EVAL) += rt3052_eval.dtb dtb-$(CONFIG_DTB_RT3883_EVAL) += rt3883_eval.dtb dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb +dtb-$(CONFIG_DTB_OMEGA2P) += omega2p.dtb +dtb-$(CONFIG_DTB_VOCORE2) += vocore2.dtb obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) diff --git a/arch/mips/boot/dts/ralink/mt7628a.dtsi b/arch/mips/boot/dts/ralink/mt7628a.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..9ff7e8faaeccafa948c81df8cd98fed60c339426 --- /dev/null +++ b/arch/mips/boot/dts/ralink/mt7628a.dtsi @@ -0,0 +1,126 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,mt7628a-soc"; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + compatible = "mti,mips24KEc"; + device_type = "cpu"; + reg = <0>; + }; + }; + + resetc: reset-controller { + compatible = "ralink,rt2880-reset"; + #reset-cells = <1>; + }; + + cpuintc: interrupt-controller { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@10000000 { + compatible = "palmbus"; + reg = <0x10000000 0x200000>; + ranges = <0x0 0x10000000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc: system-controller@0 { + compatible = "ralink,mt7620a-sysc", "syscon"; + reg = <0x0 0x100>; + }; + + intc: interrupt-controller@200 { + compatible = "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + resets = <&resetc 9>; + reset-names = "intc"; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + + ralink,intc-registers = <0x9c 0xa0 + 0x6c 0xa4 + 0x80 0x78>; + }; + + memory-controller@300 { + compatible = "ralink,mt7620a-memc"; + reg = <0x300 0x100>; + }; + + uart0: uartlite@c00 { + compatible = "ns16550a"; + reg = <0xc00 0x100>; + + resets = <&resetc 12>; + reset-names = "uart0"; + + interrupt-parent = <&intc>; + interrupts = <20>; + + reg-shift = <2>; + }; + + uart1: uart1@d00 { + compatible = "ns16550a"; + reg = <0xd00 0x100>; + + resets = <&resetc 19>; + reset-names = "uart1"; + + interrupt-parent = <&intc>; + interrupts = <21>; + + reg-shift = <2>; + }; + + uart2: uart2@e00 { + compatible = "ns16550a"; + reg = <0xe00 0x100>; + + resets = <&resetc 20>; + reset-names = "uart2"; + + interrupt-parent = <&intc>; + interrupts = <22>; + + reg-shift = <2>; + }; + }; + + usb_phy: usb-phy@10120000 { + compatible = "mediatek,mt7628-usbphy"; + reg = <0x10120000 0x1000>; + + #phy-cells = <0>; + + ralink,sysctl = <&sysc>; + resets = <&resetc 22 &resetc 25>; + reset-names = "host", "device"; + }; + + ehci@101c0000 { + compatible = "generic-ehci"; + reg = <0x101c0000 0x1000>; + + phys = <&usb_phy>; + phy-names = "usb"; + + interrupt-parent = <&intc>; + interrupts = <18>; + }; +}; diff --git a/arch/mips/boot/dts/ralink/omega2p.dts b/arch/mips/boot/dts/ralink/omega2p.dts new file mode 100644 index 0000000000000000000000000000000000000000..5884fd48f59a62988f5b2246cc609745812ecbea --- /dev/null +++ b/arch/mips/boot/dts/ralink/omega2p.dts @@ -0,0 +1,18 @@ +/dts-v1/; + +/include/ "mt7628a.dtsi" + +/ { + compatible = "onion,omega2+", "ralink,mt7688a-soc", "ralink,mt7628a-soc"; + model = "Onion Omega2+"; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x8000000>; + }; + + chosen { + bootargs = "console=ttyS0,115200"; + stdout-path = &uart0; + }; +}; diff --git a/arch/mips/boot/dts/ralink/vocore2.dts b/arch/mips/boot/dts/ralink/vocore2.dts new file mode 100644 index 0000000000000000000000000000000000000000..fa8a5f8f236a87cb3dde5c8482cd4eecd6ac6cb0 
--- /dev/null +++ b/arch/mips/boot/dts/ralink/vocore2.dts @@ -0,0 +1,18 @@ +/dts-v1/; + +#include "mt7628a.dtsi" + +/ { + compatible = "vocore,vocore2", "ralink,mt7628a-soc"; + model = "VoCore2"; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x8000000>; + }; + + chosen { + bootargs = "console=ttyS2,115200"; + stdout-path = &uart2; + }; +}; diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index b6d6e841a984ea61270581b4fed097dcd68897bf..50b427879465ef85a48aceb9b7efd65842473a0f 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile @@ -16,4 +16,4 @@ obj-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \ cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \ cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o -obj-y += cvmx-helper-errata.o cvmx-helper-jtag.o +obj-y += cvmx-helper-errata.o cvmx-helper-jtag.o cvmx-boot-vector.o diff --git a/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c b/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c new file mode 100644 index 0000000000000000000000000000000000000000..b7019d21808e04719e9913270dbab0c90438eadb --- /dev/null +++ b/arch/mips/cavium-octeon/executive/cvmx-boot-vector.c @@ -0,0 +1,167 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2017 Cavium, Inc. + */ + + +/* + We install this program at the bootvector: +------------------------------------ + .set noreorder + .set nomacro + .set noat +reset_vector: + dmtc0 $k0, $31, 0 # Save $k0 to DESAVE + dmtc0 $k1, $31, 3 # Save $k1 to KScratch2 + + mfc0 $k0, $12, 0 # Status + mfc0 $k1, $15, 1 # Ebase + + ori $k0, 0x84 # Enable 64-bit addressing, set + # ERL (should already be set) + andi $k1, 0x3ff # mask out core ID + + mtc0 $k0, $12, 0 # Status + sll $k1, 5 + + lui $k0, 0xbfc0 + cache 17, 0($0) # Core-14345, clear L1 Dcache virtual + # tags if the core hit an NMI + + ld $k0, 0x78($k0) # k0 <- (bfc00078) pointer to the reset vector + synci 0($0) # Invalidate ICache to get coherent + # view of target code. 
+ + daddu $k0, $k0, $k1 + nop + + ld $k0, 0($k0) # k0 <- core specific target address + dmfc0 $k1, $31, 3 # Restore $k1 from KScratch2 + + beqz $k0, wait_loop # Spin in wait loop + nop + + jr $k0 + nop + + nop # NOPs needed here to fill delay slots + nop # on endian reversal of previous instructions + +wait_loop: + wait + nop + + b wait_loop + nop + + nop + nop +------------------------------------ + +0000000000000000 : + 0: 40baf800 dmtc0 k0,c0_desave + 4: 40bbf803 dmtc0 k1,c0_kscratch2 + + 8: 401a6000 mfc0 k0,c0_status + c: 401b7801 mfc0 k1,c0_ebase + + 10: 375a0084 ori k0,k0,0x84 + 14: 337b03ff andi k1,k1,0x3ff + + 18: 409a6000 mtc0 k0,c0_status + 1c: 001bd940 sll k1,k1,0x5 + + 20: 3c1abfc0 lui k0,0xbfc0 + 24: bc110000 cache 0x11,0(zero) + + 28: df5a0078 ld k0,120(k0) + 2c: 041f0000 synci 0(zero) + + 30: 035bd02d daddu k0,k0,k1 + 34: 00000000 nop + + 38: df5a0000 ld k0,0(k0) + 3c: 403bf803 dmfc0 k1,c0_kscratch2 + + 40: 13400005 beqz k0,58 + 44: 00000000 nop + + 48: 03400008 jr k0 + 4c: 00000000 nop + + 50: 00000000 nop + 54: 00000000 nop + +0000000000000058 : + 58: 42000020 wait + 5c: 00000000 nop + + 60: 1000fffd b 58 + 64: 00000000 nop + + 68: 00000000 nop + 6c: 00000000 nop + + */ + +#include + +static unsigned long long _cvmx_bootvector_data[16] = { + 0x40baf80040bbf803ull, /* patch low order 8-bits if no KScratch*/ + 0x401a6000401b7801ull, + 0x375a0084337b03ffull, + 0x409a6000001bd940ull, + 0x3c1abfc0bc110000ull, + 0xdf5a0078041f0000ull, + 0x035bd02d00000000ull, + 0xdf5a0000403bf803ull, /* patch low order 8-bits if no KScratch*/ + 0x1340000500000000ull, + 0x0340000800000000ull, + 0x0000000000000000ull, + 0x4200002000000000ull, + 0x1000fffd00000000ull, + 0x0000000000000000ull, + OCTEON_BOOT_MOVEABLE_MAGIC1, + 0 /* To be filled in with address of vector block*/ +}; + +/* 2^10 CPUs */ +#define VECTOR_TABLE_SIZE (1024 * sizeof(struct cvmx_boot_vector_element)) + +static void cvmx_boot_vector_init(void *mem) +{ + uint64_t kseg0_mem; + int i; + + memset(mem, 0, VECTOR_TABLE_SIZE); + kseg0_mem = cvmx_ptr_to_phys(mem) | 0x8000000000000000ull; + + for (i = 0; i < 15; i++) { + uint64_t v = _cvmx_bootvector_data[i]; + + if (OCTEON_IS_OCTEON1PLUS() && (i == 0 || i == 7)) + v &= 0xffffffff00000000ull; /* KScratch not availble. */ + cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v); + } + cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, 15 * 8); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, kseg0_mem); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000); +} + +/** + * Get a pointer to the per-core table of reset vector pointers + * + */ +struct cvmx_boot_vector_element *cvmx_boot_vector_get(void) +{ + struct cvmx_boot_vector_element *ret; + + ret = cvmx_bootmem_alloc_named_range_once(VECTOR_TABLE_SIZE, 0, + (1ull << 32) - 1, 8, "__boot_vector1__", cvmx_boot_vector_init); + return ret; +} +EXPORT_SYMBOL(cvmx_boot_vector_get); diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c index 8d54d774933c560526831f5a3d76402cc2484820..94d97ebfa03604d28fe30c5c71ebde2c3a93cd9d 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c +++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c @@ -44,6 +44,55 @@ static struct cvmx_bootmem_desc *cvmx_bootmem_desc; /* See header file for descriptions of functions */ +/** + * This macro returns the size of a member of a structure. + * Logically it is the same as "sizeof(s::field)" in C++, but + * C lacks the "::" operator. 
+ */ +#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field) + +/** + * This macro returns a member of the + * cvmx_bootmem_named_block_desc_t structure. These members can't + * be directly addressed as they might be in memory not directly + * reachable. In the case where bootmem is compiled with + * LINUX_HOST, the structure itself might be located on a remote + * Octeon. The argument "field" is the member name of the + * cvmx_bootmem_named_block_desc_t to read. Regardless of the type + * of the field, the return type is always a uint64_t. The "addr" + * parameter is the physical address of the structure. + */ +#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \ + __cvmx_bootmem_desc_get(addr, \ + offsetof(struct cvmx_bootmem_named_block_desc, field), \ + SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field)) + +/** + * This function is the implementation of the get macros defined + * for individual structure members. The argument are generated + * by the macros inorder to read only the needed memory. + * + * @param base 64bit physical address of the complete structure + * @param offset Offset from the beginning of the structure to the member being + * accessed. + * @param size Size of the structure member. + * + * @return Value of the structure member promoted into a uint64_t. + */ +static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset, + int size) +{ + base = (1ull << 63) | (base + offset); + switch (size) { + case 4: + return cvmx_read64_uint32(base); + case 8: + return cvmx_read64_uint64(base); + default: + return 0; + } +} + /* * Wrapper functions are provided for reading/writing the size and * next block values as these may not be directly addressible (in 32 @@ -98,6 +147,42 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment) return cvmx_bootmem_alloc_range(size, alignment, 0, 0); } +void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name, + void (*init) (void *)) +{ + int64_t addr; + void *ptr; + uint64_t named_block_desc_addr; + + named_block_desc_addr = (uint64_t) + cvmx_bootmem_phy_named_block_find(name, + (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); + + if (named_block_desc_addr) { + addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr, + base_addr); + return cvmx_phys_to_ptr(addr); + } + + addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, + align, name, + (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING); + + if (addr < 0) + return NULL; + ptr = cvmx_phys_to_ptr(addr); + + if (init) + init(ptr); + else + memset(ptr, 0, size); + + return ptr; +} +EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once); + void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name) diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index c1eb1ff7c80017f53e545229fdd6acbb1c91969d..5b3a3f6a9ad31fd845bd18cdd3197d9c75833c84 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2963,3 +2963,12 @@ void octeon_fixup_irqs(void) } #endif /* CONFIG_HOTPLUG_CPU */ + +struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block) +{ + struct octeon_ciu3_info *ciu3_info; + + ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK]; + return ciu3_info->domain[block]; +} +EXPORT_SYMBOL(octeon_irq_get_block_domain); diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 
3de786545ded10ac64f5fa77d2025fa3a9955fd9..75e7c862565983b34f6cc127332f6e8b0f5832c7 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -205,7 +205,7 @@ int plat_post_relocation(long offset) * Firmware CPU startup hook * */ -static void octeon_boot_secondary(int cpu, struct task_struct *idle) +static int octeon_boot_secondary(int cpu, struct task_struct *idle) { int count; @@ -223,8 +223,12 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle) udelay(1); count--; } - if (count == 0) + if (count == 0) { pr_err("Secondary boot timeout\n"); + return -ETIMEDOUT; + } + + return 0; } /** @@ -408,7 +412,7 @@ late_initcall(register_cavium_notifier); #endif /* CONFIG_HOTPLUG_CPU */ -struct plat_smp_ops octeon_smp_ops = { +const struct plat_smp_ops octeon_smp_ops = { .send_ipi_single = octeon_send_ipi_single, .send_ipi_mask = octeon_send_ipi_mask, .init_secondary = octeon_init_secondary, @@ -485,7 +489,7 @@ static void octeon_78xx_send_ipi_mask(const struct cpumask *mask, octeon_78xx_send_ipi_single(cpu, action); } -static struct plat_smp_ops octeon_78xx_smp_ops = { +static const struct plat_smp_ops octeon_78xx_smp_ops = { .send_ipi_single = octeon_78xx_send_ipi_single, .send_ipi_mask = octeon_78xx_send_ipi_mask, .init_secondary = octeon_init_secondary, @@ -501,7 +505,7 @@ static struct plat_smp_ops octeon_78xx_smp_ops = { void __init octeon_setup_smp(void) { - struct plat_smp_ops *ops; + const struct plat_smp_ops *ops; if (octeon_has_feature(OCTEON_FEATURE_CIU3)) ops = &octeon_78xx_smp_ops; diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index e5b18f1a31a078b5a26e5caeae6059f7b2efec94..490b12af103c1285043ecfec912b1839c5586f06 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -60,11 +60,8 @@ CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_AHCI_OCTEON=y CONFIG_PATA_OCTEON_CF=y -CONFIG_SATA_SIL=y CONFIG_NETDEVICES=y -CONFIG_MII=y # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_ALTEON is not set @@ -121,22 +118,30 @@ CONFIG_SPI=y CONFIG_SPI_OCTEON=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y -CONFIG_USB=m -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_HCD_PLATFORM=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_OHCI_HCD_PLATFORM=m +CONFIG_USB=y +# CONFIG_USB_PCI is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set -# CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_CAVIUM_OCTEON=y +CONFIG_EDAC=y +CONFIG_EDAC_OCTEON_PC=y +CONFIG_EDAC_OCTEON_L2C=y +CONFIG_EDAC_OCTEON_LMC=y +CONFIG_EDAC_OCTEON_PCI=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y CONFIG_STAGING=y CONFIG_OCTEON_ETHERNET=y -CONFIG_OCTEON_USB=m # CONFIG_IOMMU_SUPPORT is not set +CONFIG_RAS=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index b42cfa7865f981e86e9fb6218cc3f292e1edaa34..5ea3104a3aca9891a10be74c5c6757a192febf71 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -91,6 +91,7 @@ CONFIG_SERIAL_OF_PLATFORM=y CONFIG_I2C=y CONFIG_I2C_JZ4780=y CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_INGENIC=y # CONFIG_HWMON is not set CONFIG_REGULATOR=y CONFIG_REGULATOR_DEBUG=y @@ -99,6 +100,8 @@ 
CONFIG_REGULATOR_FIXED_VOLTAGE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_MMC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_JZ4740=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_MEMORY=y # CONFIG_DNOTIFY is not set diff --git a/arch/mips/configs/generic/board-ni169445.config b/arch/mips/configs/generic/board-ni169445.config new file mode 100644 index 0000000000000000000000000000000000000000..f72223b366cabc4294a4ec40c9e3eb5987cfb55f --- /dev/null +++ b/arch/mips/configs/generic/board-ni169445.config @@ -0,0 +1,30 @@ +# require CONFIG_CPU_MIPS32_R2=y +# require CONFIG_CPU_LITTLE_ENDIAN=y + +CONFIG_FIT_IMAGE_FDT_NI169445=y + +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y + +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_GENERIC_PLATFORM=y + +CONFIG_MTD=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CMDLINE_PARTS=y + +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_NAND_ECC_BCH=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_GPIO=y +CONFIG_MTD_NAND_IDS=y + +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BLOCK=y + +CONFIG_NETDEVICES=y +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y +CONFIG_DWMAC_GENERIC=y diff --git a/arch/mips/configs/generic/board-sead-3.config b/arch/mips/configs/generic/board-sead-3.config index 3b5e1ac579ebb78e38dd1abdcc6803b29a51bb0e..df49a592dbb5b288e9091b3b59e6fef7c3b1af4d 100644 --- a/arch/mips/configs/generic/board-sead-3.config +++ b/arch/mips/configs/generic/board-sead-3.config @@ -1,3 +1,5 @@ +# require CONFIG_32BIT=y + CONFIG_LEGACY_BOARD_SEAD3=y CONFIG_AUXDISPLAY=y diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig index 91aacf2ef26dd9572a2d2e3dac27bce59b253511..26b1cd5ffbf550d33b7ff1afe567b30bbfe265b5 100644 --- a/arch/mips/configs/generic_defconfig +++ b/arch/mips/configs/generic_defconfig @@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_MIPS_CPS=y CONFIG_CPU_HAS_MSA=y CONFIG_HIGHMEM=y -CONFIG_NR_CPUS=2 +CONFIG_NR_CPUS=16 CONFIG_MIPS_O32_FP64_SUPPORT=y CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y @@ -61,7 +61,6 @@ CONFIG_HID_KENSINGTON=y CONFIG_HID_LOGITECH=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y -# CONFIG_USB_SUPPORT is not set # CONFIG_MIPS_PLATFORM_DEVICES is not set # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT4_FS=y diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index b1911816337c30561f421ed93f7b95196444011c..55438fc9991ec53715b392f074dbb3d864dc197c 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -111,12 +111,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 1ec8ed8d05d117c11e73ea467f6486a66550555e..02be95c1b7128436b1a30e0680298a384c098caa 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -37,7 +37,6 @@ CONFIG_PM=y CONFIG_HIBERNATION=y CONFIG_PM_STD_PARTITION="/dev/hda3" CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEBUG=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 078ecac071aba6f9a6a69510151144d8bea3da0e..396408404487e1a248bd331b228bc1d4199dc111 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_MALTA=y 
CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_NR_CPUS=8 CONFIG_HZ_100=y CONFIG_SYSVIPC=y diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 80ecd94ed1263f4ce85494f361d1592989624b2c..5691673a332772dedd4af5c80af14c2102e79d88 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_NR_CPUS=8 CONFIG_HZ_100=y CONFIG_SYSVIPC=y diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig index 35ad1f8d1a79cd015141fbf7c97f053f43bb6f61..e9cadb37d684f74321c508abf61e0df2b4951353 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -3,6 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_KVM_GUEST=y CONFIG_PAGE_SIZE_16KB=y +# CONFIG_MIPS_MT_SMP is not set CONFIG_HZ_100=y CONFIG_SYSVIPC=y CONFIG_NO_HZ=y diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig index 55b68b981b057c56e2216fe6fc85545606b0088b..d8c8f5fb89180f956221cc3f913e7867d703b88a 100644 --- a/arch/mips/configs/maltasmvp_defconfig +++ b/arch/mips/configs/maltasmvp_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_SCHED_SMT=y CONFIG_MIPS_CPS=y CONFIG_NR_CPUS=8 diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig index 5ca590cf163581e69c0be8417536ea4d2ca8c550..04827bc9f87f97dd5aad599cc10fa9eb4fd22720 100644 --- a/arch/mips/configs/maltasmvp_eva_defconfig +++ b/arch/mips/configs/maltasmvp_eva_defconfig @@ -3,7 +3,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_CPU_MIPS32_3_5_FEATURES=y CONFIG_PAGE_SIZE_16KB=y -CONFIG_MIPS_MT_SMP=y CONFIG_SCHED_SMT=y CONFIG_MIPS_CPS=y CONFIG_NR_CPUS=8 diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 4011f1869e726eb4965bb80b2356cd0ddb33ee77..c3d0d0a6e04483247baeec72c6704966a4ee559c 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -146,12 +146,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_CBQ=m diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig index 5720ce23e9aae0ac6f92380ceddbe15ea9a44d8c..7357248b3d7aa75df627dfb05f25a87ad64b5c58 100644 --- a/arch/mips/configs/nlm_xlp_defconfig +++ b/arch/mips/configs/nlm_xlp_defconfig @@ -259,7 +259,6 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m CONFIG_WAN_ROUTER=m diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig index fea56c535d9237c4d121a48d4668556b615d2914..1e18fd7de209bc9be10006589cf03b11870b0c99 100644 --- a/arch/mips/configs/nlm_xlr_defconfig +++ b/arch/mips/configs/nlm_xlr_defconfig @@ -240,12 +240,8 @@ CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m CONFIG_IPDDP=m CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y CONFIG_X25=m CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y CONFIG_WAN_ROUTER=m CONFIG_PHONET=m CONFIG_IEEE802154=m diff --git 
a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..e2731c3cc7e7053a5f77311af9f689265e768b51 --- /dev/null +++ b/arch/mips/configs/omega2p_defconfig @@ -0,0 +1,129 @@ +CONFIG_RALINK=y +CONFIG_SOC_MT7620=y +CONFIG_DTB_OMEGA2P=y +CONFIG_CPU_MIPS32_R2=y +# CONFIG_COMPACTION is not set +CONFIG_HZ_100=y +CONFIG_PREEMPT=y +# CONFIG_SECCOMP is not set +CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_SUSPEND is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_NETDEVICES=y +# CONFIG_ETHERNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=2 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=3 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_MMC=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_MEMORY=y +CONFIG_PHY_RALINK_USB=y +# CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +# CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=y +CONFIG_NLS_CODEPAGE_775=y +CONFIG_NLS_CODEPAGE_850=y +CONFIG_NLS_CODEPAGE_852=y +CONFIG_NLS_CODEPAGE_855=y +CONFIG_NLS_CODEPAGE_857=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_CODEPAGE_861=y +CONFIG_NLS_CODEPAGE_862=y +CONFIG_NLS_CODEPAGE_863=y +CONFIG_NLS_CODEPAGE_864=y +CONFIG_NLS_CODEPAGE_865=y +CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_869=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_CODEPAGE_949=y +CONFIG_NLS_CODEPAGE_874=y +CONFIG_NLS_ISO8859_8=y +CONFIG_NLS_CODEPAGE_1250=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=y +CONFIG_NLS_ISO8859_3=y +CONFIG_NLS_ISO8859_4=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_ISO8859_6=y +CONFIG_NLS_ISO8859_7=y +CONFIG_NLS_ISO8859_9=y +CONFIG_NLS_ISO8859_13=y +CONFIG_NLS_ISO8859_14=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_KOI8_U=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=10 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_STACKTRACE=y +# CONFIG_FTRACE is not set 
+CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRC16=y +CONFIG_XZ_DEC=y diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig index 3598d58aac3066a94955c545b7dc205507a625d3..b22a3cf149b6f04911a40da454917ce2403dd78a 100644 --- a/arch/mips/configs/pistachio_defconfig +++ b/arch/mips/configs/pistachio_defconfig @@ -47,6 +47,8 @@ CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -292,7 +294,8 @@ CONFIG_SQUASHFS_LZO=y CONFIG_PSTORE=y CONFIG_PSTORE_CONSOLE=y CONFIG_PSTORE_RAM=y -# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=m CONFIG_NLS_ASCII=m diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..9121e4194a63c096a64ca99d6a2441f0b391d0be --- /dev/null +++ b/arch/mips/configs/vocore2_defconfig @@ -0,0 +1,129 @@ +CONFIG_RALINK=y +CONFIG_SOC_MT7620=y +CONFIG_DTB_VOCORE2=y +CONFIG_CPU_MIPS32_R2=y +# CONFIG_COMPACTION is not set +CONFIG_HZ_100=y +CONFIG_PREEMPT=y +# CONFIG_SECCOMP is not set +CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_SUSPEND is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +# CONFIG_FW_LOADER is not set +# CONFIG_ALLOW_DEV_COREDUMP is not set +CONFIG_NETDEVICES=y +# CONFIG_ETHERNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=2 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=3 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_MMC=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_MEMORY=y +CONFIG_PHY_RALINK_USB=y +# CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +# CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=y +CONFIG_NLS_CODEPAGE_775=y +CONFIG_NLS_CODEPAGE_850=y +CONFIG_NLS_CODEPAGE_852=y +CONFIG_NLS_CODEPAGE_855=y +CONFIG_NLS_CODEPAGE_857=y +CONFIG_NLS_CODEPAGE_860=y +CONFIG_NLS_CODEPAGE_861=y +CONFIG_NLS_CODEPAGE_862=y +CONFIG_NLS_CODEPAGE_863=y +CONFIG_NLS_CODEPAGE_864=y +CONFIG_NLS_CODEPAGE_865=y 
+CONFIG_NLS_CODEPAGE_866=y +CONFIG_NLS_CODEPAGE_869=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=y +CONFIG_NLS_CODEPAGE_949=y +CONFIG_NLS_CODEPAGE_874=y +CONFIG_NLS_ISO8859_8=y +CONFIG_NLS_CODEPAGE_1250=y +CONFIG_NLS_CODEPAGE_1251=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=y +CONFIG_NLS_ISO8859_3=y +CONFIG_NLS_ISO8859_4=y +CONFIG_NLS_ISO8859_5=y +CONFIG_NLS_ISO8859_6=y +CONFIG_NLS_ISO8859_7=y +CONFIG_NLS_ISO8859_9=y +CONFIG_NLS_ISO8859_13=y +CONFIG_NLS_ISO8859_14=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_KOI8_R=y +CONFIG_NLS_KOI8_U=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=10 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_STACKTRACE=y +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRC16=y +CONFIG_XZ_DEC=y diff --git a/arch/mips/fw/arc/init.c b/arch/mips/fw/arc/init.c index 629b24db0d3a1f46a94d5ec402965e133a51c98d..008555969534236082ea61b1ecab5d627ff7a6d0 100644 --- a/arch/mips/fw/arc/init.c +++ b/arch/mips/fw/arc/init.c @@ -51,7 +51,7 @@ void __init prom_init(void) #endif #ifdef CONFIG_SGI_IP27 { - extern struct plat_smp_ops ip27_smp_ops; + extern const struct plat_smp_ops ip27_smp_ops; register_smp_ops(&ip27_smp_ops); } diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig index 51ffbbaddee2e27c770eeb1da89e6dcc3514e8df..e0436aaf7f389e9a6f04d042975eaa29d45020c1 100644 --- a/arch/mips/generic/Kconfig +++ b/arch/mips/generic/Kconfig @@ -36,4 +36,10 @@ config FIT_IMAGE_FDT_BOSTON enable this if you wish to boot on a MIPS Boston board, as it is expected by the bootloader. +config FIT_IMAGE_FDT_NI169445 + bool "Include FDT for NI 169445" + help + Enable this to include the FDT for the 169445 platform from + National Instruments in the FIT kernel image. 
+ endif diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform index 9a30d69e22816e887c5080a69bdd0b8c918b86f3..f5312dfa81840e3506806c9f4912c9ebd325267d 100644 --- a/arch/mips/generic/Platform +++ b/arch/mips/generic/Platform @@ -12,3 +12,7 @@ platform-$(CONFIG_MIPS_GENERIC) += generic/ cflags-$(CONFIG_MIPS_GENERIC) += -I$(srctree)/arch/mips/include/asm/mach-generic load-$(CONFIG_MIPS_GENERIC) += 0xffffffff80100000 all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb + +its-y := vmlinux.its.S +its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S +its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S new file mode 100644 index 0000000000000000000000000000000000000000..a7f51f97b9102f79dd4e75fa1894ee42e24e099e --- /dev/null +++ b/arch/mips/generic/board-boston.its.S @@ -0,0 +1,22 @@ +/ { + images { + fdt@boston { + description = "img,boston Device Tree"; + data = /incbin/("boot/dts/img/boston.dtb"); + type = "flat_dt"; + arch = "mips"; + compression = "none"; + hash@0 { + algo = "sha1"; + }; + }; + }; + + configurations { + conf@boston { + description = "Boston Linux kernel"; + kernel = "kernel@0"; + fdt = "fdt@boston"; + }; + }; +}; diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S new file mode 100644 index 0000000000000000000000000000000000000000..d12e12fe90bec6f4d0290b63ba8a8b2154bff4bc --- /dev/null +++ b/arch/mips/generic/board-ni169445.its.S @@ -0,0 +1,22 @@ +{ + images { + fdt@ni169445 { + description = "NI 169445 device tree"; + data = /incbin/("boot/dts/ni/169445.dtb"); + type = "flat_dt"; + arch = "mips"; + compression = "none"; + hash@0 { + algo = "sha1"; + }; + }; + }; + + configurations { + conf@ni169445 { + description = "NI 169445 Linux Kernel"; + kernel = "kernel@0"; + fdt = "fdt@ni169445"; + }; + }; +}; diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c index 3f32b376d30e482774f55aa2cbcc3664a488ddfb..15a7fb8e2a2e80b28e8bca9d73bef2a64a8bf29a 100644 --- a/arch/mips/generic/init.c +++ b/arch/mips/generic/init.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -88,6 +89,8 @@ void __init *plat_get_fdt(void) return (void *)fdt; } +#ifdef CONFIG_RELOCATABLE + void __init plat_fdt_relocated(void *new_location) { /* @@ -101,6 +104,8 @@ void __init plat_fdt_relocated(void *new_location) fw_arg1 = (unsigned long)new_location; } +#endif /* CONFIG_RELOCATABLE */ + void __init plat_mem_setup(void) { if (mach && mach->fixup_fdt) diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c index 14064bdd91ddcc3e3cd990f457242871af0af524..5322d09dd51b5563d1bf58aa0b31b927c982d080 100644 --- a/arch/mips/generic/irq.c +++ b/arch/mips/generic/irq.c @@ -12,10 +12,11 @@ #include #include #include -#include #include #include +#include +#include int get_c0_fdc_int(void) { @@ -23,7 +24,7 @@ int get_c0_fdc_int(void) if (cpu_has_veic) panic("Unimplemented!"); - else if (gic_present) + else if (mips_gic_present()) mips_cpu_fdc_irq = gic_get_c0_fdc_int(); else if (cp0_fdc_irq >= 0) mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; @@ -39,7 +40,7 @@ int get_c0_perfcount_int(void) if (cpu_has_veic) panic("Unimplemented!"); - else if (gic_present) + else if (mips_gic_present()) mips_cpu_perf_irq = gic_get_c0_perfcount_int(); else if (cp0_perfcount_irq >= 0) mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; @@ -55,7 +56,7 @@ unsigned int get_c0_compare_int(void) if (cpu_has_veic) panic("Unimplemented!"); - else 
if (gic_present) + else if (mips_gic_present()) mips_cpu_timer_irq = gic_get_c0_compare_int(); else mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S index 3390e2f80b8014a9a8c6c8c39555de460cd5bb48..f67fbf1c85417cc2e2bb07abec86d84b466b8196 100644 --- a/arch/mips/generic/vmlinux.its.S +++ b/arch/mips/generic/vmlinux.its.S @@ -29,28 +29,3 @@ }; }; }; - -#ifdef CONFIG_FIT_IMAGE_FDT_BOSTON -/ { - images { - fdt@boston { - description = "img,boston Device Tree"; - data = /incbin/("boot/dts/img/boston.dtb"); - type = "flat_dt"; - arch = "mips"; - compression = "none"; - hash@0 { - algo = "sha1"; - }; - }; - }; - - configurations { - conf@boston { - description = "Boston Linux kernel"; - kernel = "kernel@0"; - fdt = "fdt@boston"; - }; - }; -}; -#endif /* CONFIG_FIT_IMAGE_FDT_BOSTON */ diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 859cf7048347bf4b0ebad1f619507202976f74b3..81fae23ce7cdb41b523dd356a64f4da18c833237 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -55,6 +55,7 @@ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, 0, ra; \ + .cfi_startproc; \ .insn /* @@ -66,12 +67,14 @@ symbol: .frame sp, 0, ra; \ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, framesize, rpc; \ + .cfi_startproc; \ .insn /* * END - mark end of function */ #define END(function) \ + .cfi_endproc; \ .end function; \ .size function, .-function diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index a92aee7b977acb56539f74f7f9db2d9d0c1291a5..b3e2975f83d36e021a57988672ef26c09701f879 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -48,8 +48,8 @@ #include #include -extern struct plat_smp_ops bmips43xx_smp_ops; -extern struct plat_smp_ops bmips5000_smp_ops; +extern const struct plat_smp_ops bmips43xx_smp_ops; +extern const struct plat_smp_ops bmips5000_smp_ops; static inline int register_bmips_smp_ops(void) { diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index cd6efb07c9808594c143d16fcc005c2ce9afb0ea..a41059d47d31c52d377aa39694a0da3ba3e07af5 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -15,6 +15,8 @@ #include #include +#include + /* * Descriptor for a cache */ @@ -77,16 +79,9 @@ struct cpuinfo_mips { struct cache_desc tcache; /* Tertiary/split secondary cache */ int srsets; /* Shadow register sets */ int package;/* physical package number */ - int core; /* physical core number */ + unsigned int globalnumber; #ifdef CONFIG_64BIT int vmbits; /* Virtual memory size in bits */ -#endif -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) - /* - * There is not necessarily a 1:1 mapping of VPE num to CPU number - * in particular on multi-core systems. 
- */ - int vpe_id; /* Virtual Processor number */ #endif void *data; /* Additional data */ unsigned int watch_reg_count; /* Number that exist */ @@ -144,11 +139,52 @@ struct proc_cpuinfo_notifier_args { unsigned long n; }; -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) -# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) -#else -# define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; }) -#endif +static inline unsigned int cpu_cluster(struct cpuinfo_mips *cpuinfo) +{ + /* Optimisation for systems where multiple clusters aren't used */ + if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) + return 0; + + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CLUSTER) >> + MIPS_GLOBALNUMBER_CLUSTER_SHF; +} + +static inline unsigned int cpu_core(struct cpuinfo_mips *cpuinfo) +{ + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CORE) >> + MIPS_GLOBALNUMBER_CORE_SHF; +} + +static inline unsigned int cpu_vpe_id(struct cpuinfo_mips *cpuinfo) +{ + /* Optimisation for systems where VP(E)s aren't used */ + if (!IS_ENABLED(CONFIG_MIPS_MT_SMP) && !IS_ENABLED(CONFIG_CPU_MIPSR6)) + return 0; + + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_VP) >> + MIPS_GLOBALNUMBER_VP_SHF; +} + +extern void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster); +extern void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core); +extern void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe); + +static inline bool cpus_are_siblings(int cpua, int cpub) +{ + struct cpuinfo_mips *infoa = &cpu_data[cpua]; + struct cpuinfo_mips *infob = &cpu_data[cpub]; + unsigned int gnuma, gnumb; + + if (infoa->package != infob->package) + return false; + + gnuma = infoa->globalnumber & ~MIPS_GLOBALNUMBER_VP; + gnumb = infob->globalnumber & ~MIPS_GLOBALNUMBER_VP; + if (gnuma != gnumb) + return false; + + return true; +} static inline unsigned long cpu_asid_inc(void) { diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index 175fe565f4e1e6e078ead8d42da079cb779d7340..a45af3de075d900cf819071c390820c7f749f47b 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -151,11 +151,6 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_R5500: #endif -#ifdef CONFIG_SYS_HAS_CPU_R6000 - case CPU_R6000: - case CPU_R6000A: -#endif - #ifdef CONFIG_SYS_HAS_CPU_NEVADA case CPU_NEVADA: #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index d0c152b989f8eea1f6d8380386f7997483329f70..ece9b84f3bcbd78511903c97269795dddd8f111d 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -285,11 +285,6 @@ enum cpu_type_enum { CPU_R2000, CPU_R3000, CPU_R3000A, CPU_R3041, CPU_R3051, CPU_R3052, CPU_R3081, CPU_R3081E, - /* - * R6000 class processors - */ - CPU_R6000, CPU_R6000A, - /* * R4000 class processors */ diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h index d75aed36480a843cfd7703741209e518638ca56f..021d09ae5670a1649f3766927826e342ec35deb3 100644 --- a/arch/mips/include/asm/floppy.h +++ b/arch/mips/include/asm/floppy.h @@ -10,11 +10,11 @@ #ifndef _ASM_FLOPPY_H #define _ASM_FLOPPY_H -#include +#include static inline void fd_cacheflush(char * addr, long size) { - dma_cache_sync(NULL, addr, size, DMA_BIDIRECTIONAL); + dma_cache_wback_inv((unsigned long)addr, size); } #define MAX_BUFFER_SECTORS 24 diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index c05369e0b8d60352dc1833880b026e15ac09ba17..b36097d3cbf43ae9d6c5a27abf10b0f01e9e1636 100644 --- 
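The cpu-info.h change above replaces the separate core and vpe_id fields with a single packed globalnumber, decoded by cpu_cluster(), cpu_core() and cpu_vpe_id(), and compared with the VP bits masked off in cpus_are_siblings(). A stand-alone sketch of that decomposition follows; the GN_* names and field widths are invented for the example, while the real layout comes from the MIPS_GLOBALNUMBER_* definitions in asm/mipsregs.h.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative field layout only; not the real MIPS_GLOBALNUMBER_* values. */
#define GN_CLUSTER_SHF	16
#define GN_CLUSTER	(0xffu << GN_CLUSTER_SHF)
#define GN_CORE_SHF	8
#define GN_CORE		(0xffu << GN_CORE_SHF)
#define GN_VP_SHF	0
#define GN_VP		(0xffu << GN_VP_SHF)

struct cpuinfo {
	unsigned int globalnumber;
	int package;
};

static unsigned int gn_core(const struct cpuinfo *c)
{
	return (c->globalnumber & GN_CORE) >> GN_CORE_SHF;
}

/* Siblings share a package and agree on everything except the VP bits. */
static bool are_siblings(const struct cpuinfo *a, const struct cpuinfo *b)
{
	if (a->package != b->package)
		return false;
	return (a->globalnumber & ~GN_VP) == (b->globalnumber & ~GN_VP);
}

int main(void)
{
	struct cpuinfo a = { .globalnumber = (1u << GN_CLUSTER_SHF) | (2u << GN_CORE_SHF) | 0u };
	struct cpuinfo b = { .globalnumber = (1u << GN_CLUSTER_SHF) | (2u << GN_CORE_SHF) | 1u };

	printf("core=%u siblings=%d\n", gn_core(&a), are_siblings(&a, &b));
	return 0;
}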
a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -36,6 +36,7 @@ struct mips_fpu_emulator_stats { unsigned long emulated; unsigned long loads; unsigned long stores; + unsigned long branches; unsigned long cp1ops; unsigned long cp1xops; unsigned long errors; @@ -45,6 +46,121 @@ struct mips_fpu_emulator_stats { unsigned long ieee754_zerodiv; unsigned long ieee754_invalidop; unsigned long ds_emul; + + unsigned long abs_s; + unsigned long abs_d; + unsigned long add_s; + unsigned long add_d; + unsigned long bc1eqz; + unsigned long bc1nez; + unsigned long ceil_w_s; + unsigned long ceil_w_d; + unsigned long ceil_l_s; + unsigned long ceil_l_d; + unsigned long class_s; + unsigned long class_d; + unsigned long cmp_af_s; + unsigned long cmp_af_d; + unsigned long cmp_eq_s; + unsigned long cmp_eq_d; + unsigned long cmp_le_s; + unsigned long cmp_le_d; + unsigned long cmp_lt_s; + unsigned long cmp_lt_d; + unsigned long cmp_ne_s; + unsigned long cmp_ne_d; + unsigned long cmp_or_s; + unsigned long cmp_or_d; + unsigned long cmp_ueq_s; + unsigned long cmp_ueq_d; + unsigned long cmp_ule_s; + unsigned long cmp_ule_d; + unsigned long cmp_ult_s; + unsigned long cmp_ult_d; + unsigned long cmp_un_s; + unsigned long cmp_un_d; + unsigned long cmp_une_s; + unsigned long cmp_une_d; + unsigned long cmp_saf_s; + unsigned long cmp_saf_d; + unsigned long cmp_seq_s; + unsigned long cmp_seq_d; + unsigned long cmp_sle_s; + unsigned long cmp_sle_d; + unsigned long cmp_slt_s; + unsigned long cmp_slt_d; + unsigned long cmp_sne_s; + unsigned long cmp_sne_d; + unsigned long cmp_sor_s; + unsigned long cmp_sor_d; + unsigned long cmp_sueq_s; + unsigned long cmp_sueq_d; + unsigned long cmp_sule_s; + unsigned long cmp_sule_d; + unsigned long cmp_sult_s; + unsigned long cmp_sult_d; + unsigned long cmp_sun_s; + unsigned long cmp_sun_d; + unsigned long cmp_sune_s; + unsigned long cmp_sune_d; + unsigned long cvt_d_l; + unsigned long cvt_d_s; + unsigned long cvt_d_w; + unsigned long cvt_l_s; + unsigned long cvt_l_d; + unsigned long cvt_s_d; + unsigned long cvt_s_l; + unsigned long cvt_s_w; + unsigned long cvt_w_s; + unsigned long cvt_w_d; + unsigned long div_s; + unsigned long div_d; + unsigned long floor_w_s; + unsigned long floor_w_d; + unsigned long floor_l_s; + unsigned long floor_l_d; + unsigned long maddf_s; + unsigned long maddf_d; + unsigned long max_s; + unsigned long max_d; + unsigned long maxa_s; + unsigned long maxa_d; + unsigned long min_s; + unsigned long min_d; + unsigned long mina_s; + unsigned long mina_d; + unsigned long mov_s; + unsigned long mov_d; + unsigned long msubf_s; + unsigned long msubf_d; + unsigned long mul_s; + unsigned long mul_d; + unsigned long neg_s; + unsigned long neg_d; + unsigned long recip_s; + unsigned long recip_d; + unsigned long rint_s; + unsigned long rint_d; + unsigned long round_w_s; + unsigned long round_w_d; + unsigned long round_l_s; + unsigned long round_l_d; + unsigned long rsqrt_s; + unsigned long rsqrt_d; + unsigned long sel_s; + unsigned long sel_d; + unsigned long seleqz_s; + unsigned long seleqz_d; + unsigned long selnez_s; + unsigned long selnez_d; + unsigned long sqrt_s; + unsigned long sqrt_d; + unsigned long sub_s; + unsigned long sub_d; + unsigned long trunc_w_s; + unsigned long trunc_w_d; + unsigned long trunc_l_s; + unsigned long trunc_l_d; }; DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); @@ -62,7 +178,7 @@ do { \ extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, - void *__user 
*fault_addr); + void __user **fault_addr); void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, struct task_struct *tsk); int process_fpemu_return(int sig, void __user *fault_addr, diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index ecabc00c1e665ae237faf60a10f07599d04a65c3..0cbf3af37ecad9d195847c49ddffb15a6165fc4d 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -632,4 +632,6 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); */ #define xlate_dev_kmem_ptr(p) p +void __ioread64_copy(void *to, const void __iomem *from, size_t count); + #endif /* _ASM_IO_H */ diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h index bace5b9ae4df27e1b419095305f08790cdd296ec..f439cf9cf9d123f7bd0a3c1fa08dd8465617b52a 100644 --- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h @@ -8,12 +8,16 @@ #define __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H #define cpu_has_tlb 1 +#define cpu_has_ftlb 0 #define cpu_has_tlbinv 0 #define cpu_has_segments 0 #define cpu_has_eva 0 #define cpu_has_htw 0 +#define cpu_has_ldpte 0 #define cpu_has_rixiex 0 #define cpu_has_maar 0 +#define cpu_has_rw_llb 0 +#define cpu_has_3kex 0 #define cpu_has_4kex 1 #define cpu_has_3k_cache 0 #define cpu_has_4k_cache 1 @@ -30,6 +34,12 @@ #define cpu_has_mcheck 1 #define cpu_has_ejtag 1 #define cpu_has_llsc 1 +#define cpu_has_guestctl0ext 0 +#define cpu_has_guestctl1 0 +#define cpu_has_guestctl2 0 +#define cpu_has_guestid 0 +#define cpu_has_drg 0 +#define cpu_has_bp_ghist 0 #define cpu_has_mips16 0 #define cpu_has_mips16e2 0 #define cpu_has_mdmx 0 @@ -37,17 +47,23 @@ #define cpu_has_smartmips 0 #define cpu_has_rixi 0 #define cpu_has_mmips 0 +#define cpu_has_lpa 0 +#define cpu_has_mhv 0 #define cpu_has_vtag_icache 0 #define cpu_has_dc_aliases 0 #define cpu_has_ic_fills_f_dc 1 #define cpu_has_pindexed_dcache 0 #define cpu_has_mips32r1 1 #define cpu_has_mips32r2 0 +#define cpu_has_mips32r6 0 #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 +#define cpu_has_mips64r6 0 #define cpu_has_dsp 0 #define cpu_has_dsp2 0 +#define cpu_has_dsp3 0 #define cpu_has_mipsmt 0 +#define cpu_has_vp 0 #define cpu_has_userlocal 0 #define cpu_has_nofpuex 0 #define cpu_has_64bits 0 @@ -58,9 +74,19 @@ #define cpu_dcache_line_size() 32 #define cpu_icache_line_size() 32 +#define cpu_scache_line_size() 0 #define cpu_has_perf_cntr_intr_bit 0 #define cpu_has_vz 0 #define cpu_has_msa 0 +#define cpu_has_fre 0 +#define cpu_has_cdmm 0 +#define cpu_has_small_pages 0 +#define cpu_has_nan_legacy 1 +#define cpu_has_nan_2008 1 +#define cpu_has_ebase_wg 0 +#define cpu_has_badinstr 0 +#define cpu_has_badinstrp 0 +#define cpu_has_contextconfig 0 #endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 5035f09c54274a50d89fdb81426743872a51ad66..24080af570f9ae3d202062f8f6ffc45980446ad6 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -710,7 +710,7 @@ /* Broadcom 6345 ENET DMA definitions */ #define ENETDMA_6345_CHANCFG_REG (0x00) -#define ENETDMA_6345_MAXBURST_REG (0x40) +#define ENETDMA_6345_MAXBURST_REG (0x04) #define ENETDMA_6345_RSTART_REG (0x08) diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h 
b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index bd8b9bbe17719df3fce2c8c9484fd9dad5eca4d4..a4f798629c3d8147655b411cdcd1cf23f10e49de 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -46,9 +46,9 @@ #define cpu_has_64bits 1 #define cpu_has_octeon_cache 1 #define cpu_has_saa octeon_has_saa() -#define cpu_has_mips32r1 0 -#define cpu_has_mips32r2 0 -#define cpu_has_mips64r1 0 +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 1 #define cpu_has_mips64r2 1 #define cpu_has_dsp 0 #define cpu_has_dsp2 0 diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h index defd135e7ac8d726f320962cc97cb403aba56d63..3fb7a0e094943c36cb096a129e361f063778c6ba 100644 --- a/arch/mips/include/asm/mach-ip27/topology.h +++ b/arch/mips/include/asm/mach-ip27/topology.h @@ -23,7 +23,6 @@ struct cpuinfo_ip27 { extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS]; #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) -#define parent_node(node) (node) #define cpumask_of_node(node) ((node) == -1 ? \ cpu_all_mask : \ &hub_data(node)->h_cpus) diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h index 8064d7a4b33d7ae88d0889fa32ab0e92c5197c8b..d750f93232e40ab7d9f3c88c273485ead724926b 100644 --- a/arch/mips/include/asm/mach-lantiq/lantiq.h +++ b/arch/mips/include/asm/mach-lantiq/lantiq.h @@ -46,8 +46,6 @@ extern struct clk *clk_get_ppe(void); /* find out what bootsource we have */ extern unsigned char ltq_boot_select(void); -/* find out what caused the last cpu reset */ -extern int ltq_reset_cause(void); /* find out the soc type */ extern int ltq_soc_type(void); diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h index c68c0cc879c6b22756c0f20ea66d469ee56969b1..d0ae5d55413b68f48c8aaa3703bdb3217c9919c1 100644 --- a/arch/mips/include/asm/mach-loongson64/loongson.h +++ b/arch/mips/include/asm/mach-loongson64/loongson.h @@ -26,7 +26,7 @@ extern void mach_prepare_shutdown(void); /* environment arguments from bootloader */ extern u32 cpu_clock_freq; extern u32 memsize, highmemsize; -extern struct plat_smp_ops loongson3_smp_ops; +extern const struct plat_smp_ops loongson3_smp_ops; /* loongson-specific command line, env and memory initialization */ extern void __init prom_init_memory(void); diff --git a/arch/mips/include/asm/mach-loongson64/topology.h b/arch/mips/include/asm/mach-loongson64/topology.h index 0d8f3b55bdbc710da31c666ffc3b4f4b0ce5f65d..bcb885615fcad2e1c79690c85b78224e6589ccf0 100644 --- a/arch/mips/include/asm/mach-loongson64/topology.h +++ b/arch/mips/include/asm/mach-loongson64/topology.h @@ -4,7 +4,6 @@ #ifdef CONFIG_NUMA #define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2) -#define parent_node(node) (node) #define cpumask_of_node(node) (&__node_data[(node)]->cpumask) struct pci_bus; diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index 987ff580466b8961225dbc934155ef44309b441b..817698abf2eb089f2aac42fef1a6c7f6aa4e2f38 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h @@ -10,8 +10,6 @@ #ifndef _MIPS_MALTAINT_H #define _MIPS_MALTAINT_H -#include - /* * Interrupts 0..15 are used for Malta ISA compatible interrupts */ @@ -62,7 +60,4 @@ #define MSC01E_INT_PERFCTR 10 #define MSC01E_INT_CPUCTR 11 -/* GIC external 
interrupts */ -#define GIC_INT_I8259A GIC_SHARED_TO_HWIRQ(3) - #endif /* !(_MIPS_MALTAINT_H) */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index cfdbab0157697f74cf0eedf048d2791f2b4dd643..f6231b91b7247fdf39cd81d05036c7c33eb1f534 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -8,16 +8,18 @@ * option) any later version. */ +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-cm.h +#endif + #ifndef __MIPS_ASM_MIPS_CM_H__ #define __MIPS_ASM_MIPS_CM_H__ #include #include -#include -#include /* The base address of the CM GCR block */ -extern void __iomem *mips_cm_base; +extern void __iomem *mips_gcr_base; /* The base address of the CM L2-only sync region */ extern void __iomem *mips_cm_l2sync_base; @@ -80,7 +82,7 @@ static inline int mips_cm_probe(void) static inline bool mips_cm_present(void) { #ifdef CONFIG_MIPS_CM - return mips_cm_base != NULL; + return mips_gcr_base != NULL; #else return false; #endif @@ -112,321 +114,219 @@ static inline bool mips_cm_has_l2sync(void) /* Size of the L2-only sync region */ #define MIPS_CM_L2SYNC_SIZE 0x1000 -/* Macros to ease the creation of register access functions */ -#define BUILD_CM_R_(name, off) \ -static inline unsigned long __iomem *addr_gcr_##name(void) \ -{ \ - return (unsigned long __iomem *)(mips_cm_base + (off)); \ -} \ - \ -static inline u32 read32_gcr_##name(void) \ -{ \ - return __raw_readl(addr_gcr_##name()); \ -} \ - \ -static inline u64 read64_gcr_##name(void) \ -{ \ - void __iomem *addr = addr_gcr_##name(); \ - u64 ret; \ - \ - if (mips_cm_is64) { \ - ret = __raw_readq(addr); \ - } else { \ - ret = __raw_readl(addr); \ - ret |= (u64)__raw_readl(addr + 0x4) << 32; \ - } \ - \ - return ret; \ -} \ - \ -static inline unsigned long read_gcr_##name(void) \ -{ \ - if (mips_cm_is64) \ - return read64_gcr_##name(); \ - else \ - return read32_gcr_##name(); \ -} - -#define BUILD_CM__W(name, off) \ -static inline void write32_gcr_##name(u32 value) \ -{ \ - __raw_writel(value, addr_gcr_##name()); \ -} \ - \ -static inline void write64_gcr_##name(u64 value) \ -{ \ - __raw_writeq(value, addr_gcr_##name()); \ -} \ - \ -static inline void write_gcr_##name(unsigned long value) \ -{ \ - if (mips_cm_is64) \ - write64_gcr_##name(value); \ - else \ - write32_gcr_##name(value); \ -} - -#define BUILD_CM_RW(name, off) \ - BUILD_CM_R_(name, off) \ - BUILD_CM__W(name, off) - -#define BUILD_CM_Cx_R_(name, off) \ - BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off)) \ - BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off)) - -#define BUILD_CM_Cx__W(name, off) \ - BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off)) \ - BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off)) - -#define BUILD_CM_Cx_RW(name, off) \ - BUILD_CM_Cx_R_(name, off) \ - BUILD_CM_Cx__W(name, off) - -/* GCB register accessor functions */ -BUILD_CM_R_(config, MIPS_CM_GCB_OFS + 0x00) -BUILD_CM_RW(base, MIPS_CM_GCB_OFS + 0x08) -BUILD_CM_RW(access, MIPS_CM_GCB_OFS + 0x20) -BUILD_CM_R_(rev, MIPS_CM_GCB_OFS + 0x30) -BUILD_CM_RW(err_control, MIPS_CM_GCB_OFS + 0x38) -BUILD_CM_RW(error_mask, MIPS_CM_GCB_OFS + 0x40) -BUILD_CM_RW(error_cause, MIPS_CM_GCB_OFS + 0x48) -BUILD_CM_RW(error_addr, MIPS_CM_GCB_OFS + 0x50) -BUILD_CM_RW(error_mult, MIPS_CM_GCB_OFS + 0x58) -BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70) -BUILD_CM_RW(gic_base, MIPS_CM_GCB_OFS + 0x80) -BUILD_CM_RW(cpc_base, MIPS_CM_GCB_OFS + 0x88) -BUILD_CM_RW(reg0_base, MIPS_CM_GCB_OFS + 0x90) -BUILD_CM_RW(reg0_mask, MIPS_CM_GCB_OFS + 0x98) 
-BUILD_CM_RW(reg1_base, MIPS_CM_GCB_OFS + 0xa0) -BUILD_CM_RW(reg1_mask, MIPS_CM_GCB_OFS + 0xa8) -BUILD_CM_RW(reg2_base, MIPS_CM_GCB_OFS + 0xb0) -BUILD_CM_RW(reg2_mask, MIPS_CM_GCB_OFS + 0xb8) -BUILD_CM_RW(reg3_base, MIPS_CM_GCB_OFS + 0xc0) -BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) -BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) -BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) -BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) -BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) -BUILD_CM_RW(l2_pft_control, MIPS_CM_GCB_OFS + 0x300) -BUILD_CM_RW(l2_pft_control_b, MIPS_CM_GCB_OFS + 0x308) -BUILD_CM_RW(bev_base, MIPS_CM_GCB_OFS + 0x680) - -/* Core Local & Core Other register accessor functions */ -BUILD_CM_Cx_RW(reset_release, 0x00) -BUILD_CM_Cx_RW(coherence, 0x08) -BUILD_CM_Cx_R_(config, 0x10) -BUILD_CM_Cx_RW(other, 0x18) -BUILD_CM_Cx_RW(reset_base, 0x20) -BUILD_CM_Cx_R_(id, 0x28) -BUILD_CM_Cx_RW(reset_ext_base, 0x30) -BUILD_CM_Cx_R_(tcid_0_priority, 0x40) -BUILD_CM_Cx_R_(tcid_1_priority, 0x48) -BUILD_CM_Cx_R_(tcid_2_priority, 0x50) -BUILD_CM_Cx_R_(tcid_3_priority, 0x58) -BUILD_CM_Cx_R_(tcid_4_priority, 0x60) -BUILD_CM_Cx_R_(tcid_5_priority, 0x68) -BUILD_CM_Cx_R_(tcid_6_priority, 0x70) -BUILD_CM_Cx_R_(tcid_7_priority, 0x78) -BUILD_CM_Cx_R_(tcid_8_priority, 0x80) - -/* GCR_CONFIG register fields */ -#define CM_GCR_CONFIG_NUMIOCU_SHF 8 -#define CM_GCR_CONFIG_NUMIOCU_MSK (_ULCAST_(0xf) << 8) -#define CM_GCR_CONFIG_PCORES_SHF 0 -#define CM_GCR_CONFIG_PCORES_MSK (_ULCAST_(0xff) << 0) - -/* GCR_BASE register fields */ -#define CM_GCR_BASE_GCRBASE_SHF 15 -#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15) -#define CM_GCR_BASE_CMDEFTGT_SHF 0 -#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0) +#define GCR_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name) + +#define GCR_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name) + +#define GCR_CX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name) + +#define GCR_CX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name) + +/* GCR_CONFIG - Information about the system */ +GCR_ACCESSOR_RO(64, 0x000, config) +#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE BIT_ULL(43) +#define CM_GCR_CONFIG_CLUSTER_ID GENMASK_ULL(39, 32) +#define CM_GCR_CONFIG_NUM_CLUSTERS GENMASK(29, 23) +#define CM_GCR_CONFIG_NUMIOCU GENMASK(15, 8) +#define CM_GCR_CONFIG_PCORES GENMASK(7, 0) + +/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */ +GCR_ACCESSOR_RW(64, 0x008, base) +#define CM_GCR_BASE_GCRBASE GENMASK_ULL(47, 15) +#define CM_GCR_BASE_CMDEFTGT GENMASK(1, 0) #define CM_GCR_BASE_CMDEFTGT_DISABLED 0 #define CM_GCR_BASE_CMDEFTGT_MEM 1 #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 -/* GCR_RESET_EXT_BASE register fields */ -#define CM_GCR_RESET_EXT_BASE_EVARESET BIT(31) -#define CM_GCR_RESET_EXT_BASE_UEB BIT(30) - -/* GCR_ACCESS register fields */ -#define CM_GCR_ACCESS_ACCESSEN_SHF 0 -#define CM_GCR_ACCESS_ACCESSEN_MSK (_ULCAST_(0xff) << 0) +/* GCR_ACCESS - Controls core/IOCU access to GCRs */ +GCR_ACCESSOR_RW(32, 0x020, access) +#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0) -/* GCR_REV register fields */ -#define 
CM_GCR_REV_MAJOR_SHF 8 -#define CM_GCR_REV_MAJOR_MSK (_ULCAST_(0xff) << 8) -#define CM_GCR_REV_MINOR_SHF 0 -#define CM_GCR_REV_MINOR_MSK (_ULCAST_(0xff) << 0) +/* GCR_REV - Indicates the Coherence Manager revision */ +GCR_ACCESSOR_RO(32, 0x030, rev) +#define CM_GCR_REV_MAJOR GENMASK(15, 8) +#define CM_GCR_REV_MINOR GENMASK(7, 0) #define CM_ENCODE_REV(major, minor) \ - (((major) << CM_GCR_REV_MAJOR_SHF) | \ - ((minor) << CM_GCR_REV_MINOR_SHF)) + (((major) << __ffs(CM_GCR_REV_MAJOR)) | \ + ((minor) << __ffs(CM_GCR_REV_MINOR))) #define CM_REV_CM2 CM_ENCODE_REV(6, 0) #define CM_REV_CM2_5 CM_ENCODE_REV(7, 0) #define CM_REV_CM3 CM_ENCODE_REV(8, 0) - -/* GCR_ERR_CONTROL register fields */ -#define CM_GCR_ERR_CONTROL_L2_ECC_EN_SHF 1 -#define CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK (_ULCAST_(0x1) << 1) -#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_SHF 0 -#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK (_ULCAST_(0x1) << 0) - -/* GCR_ERROR_CAUSE register fields */ -#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF 27 -#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK (_ULCAST_(0x1f) << 27) -#define CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF 58 -#define CM3_GCR_ERROR_CAUSE_ERRTYPE_MSK GENMASK_ULL(63, 58) -#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF 0 -#define CM_GCR_ERROR_CAUSE_ERRINGO_MSK (_ULCAST_(0x7ffffff) << 0) - -/* GCR_ERROR_MULT register fields */ -#define CM_GCR_ERROR_MULT_ERR2ND_SHF 0 -#define CM_GCR_ERROR_MULT_ERR2ND_MSK (_ULCAST_(0x1f) << 0) - -/* GCR_L2_ONLY_SYNC_BASE register fields */ -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF 12 -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK (_ULCAST_(0xfffff) << 12) -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF 0 -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_GIC_BASE register fields */ -#define CM_GCR_GIC_BASE_GICBASE_SHF 17 -#define CM_GCR_GIC_BASE_GICBASE_MSK (_ULCAST_(0x7fff) << 17) -#define CM_GCR_GIC_BASE_GICEN_SHF 0 -#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_CPC_BASE register fields */ -#define CM_GCR_CPC_BASE_CPCBASE_SHF 15 -#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x1ffff) << 15) -#define CM_GCR_CPC_BASE_CPCEN_SHF 0 -#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_GIC_STATUS register fields */ -#define CM_GCR_GIC_STATUS_GICEX_SHF 0 -#define CM_GCR_GIC_STATUS_GICEX_MSK (_ULCAST_(0x1) << 0) - -/* GCR_REGn_BASE register fields */ -#define CM_GCR_REGn_BASE_BASEADDR_SHF 16 -#define CM_GCR_REGn_BASE_BASEADDR_MSK (_ULCAST_(0xffff) << 16) - -/* GCR_REGn_MASK register fields */ -#define CM_GCR_REGn_MASK_ADDRMASK_SHF 16 -#define CM_GCR_REGn_MASK_ADDRMASK_MSK (_ULCAST_(0xffff) << 16) -#define CM_GCR_REGn_MASK_CCAOVR_SHF 5 -#define CM_GCR_REGn_MASK_CCAOVR_MSK (_ULCAST_(0x3) << 5) -#define CM_GCR_REGn_MASK_CCAOVREN_SHF 4 -#define CM_GCR_REGn_MASK_CCAOVREN_MSK (_ULCAST_(0x1) << 4) -#define CM_GCR_REGn_MASK_DROPL2_SHF 2 -#define CM_GCR_REGn_MASK_DROPL2_MSK (_ULCAST_(0x1) << 2) -#define CM_GCR_REGn_MASK_CMTGT_SHF 0 -#define CM_GCR_REGn_MASK_CMTGT_MSK (_ULCAST_(0x3) << 0) -#define CM_GCR_REGn_MASK_CMTGT_DISABLED (_ULCAST_(0x0) << 0) -#define CM_GCR_REGn_MASK_CMTGT_MEM (_ULCAST_(0x1) << 0) -#define CM_GCR_REGn_MASK_CMTGT_IOCU0 (_ULCAST_(0x2) << 0) -#define CM_GCR_REGn_MASK_CMTGT_IOCU1 (_ULCAST_(0x3) << 0) - -/* GCR_GIC_STATUS register fields */ -#define CM_GCR_GIC_STATUS_EX_SHF 0 -#define CM_GCR_GIC_STATUS_EX_MSK (_ULCAST_(0x1) << 0) - -/* GCR_CPC_STATUS register fields */ -#define CM_GCR_CPC_STATUS_EX_SHF 0 -#define CM_GCR_CPC_STATUS_EX_MSK (_ULCAST_(0x1) << 0) - -/* GCR_L2_CONFIG register fields */ -#define 
CM_GCR_L2_CONFIG_BYPASS_SHF 20 -#define CM_GCR_L2_CONFIG_BYPASS_MSK (_ULCAST_(0x1) << 20) -#define CM_GCR_L2_CONFIG_SET_SIZE_SHF 12 -#define CM_GCR_L2_CONFIG_SET_SIZE_MSK (_ULCAST_(0xf) << 12) -#define CM_GCR_L2_CONFIG_LINE_SIZE_SHF 8 -#define CM_GCR_L2_CONFIG_LINE_SIZE_MSK (_ULCAST_(0xf) << 8) -#define CM_GCR_L2_CONFIG_ASSOC_SHF 0 -#define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) - -/* GCR_SYS_CONFIG2 register fields */ -#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 -#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) - -/* GCR_L2_PFT_CONTROL register fields */ -#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_SHF 12 -#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK (_ULCAST_(0xfffff) << 12) -#define CM_GCR_L2_PFT_CONTROL_PFTEN_SHF 8 -#define CM_GCR_L2_PFT_CONTROL_PFTEN_MSK (_ULCAST_(0x1) << 8) -#define CM_GCR_L2_PFT_CONTROL_NPFT_SHF 0 -#define CM_GCR_L2_PFT_CONTROL_NPFT_MSK (_ULCAST_(0xff) << 0) - -/* GCR_L2_PFT_CONTROL_B register fields */ -#define CM_GCR_L2_PFT_CONTROL_B_CEN_SHF 8 -#define CM_GCR_L2_PFT_CONTROL_B_CEN_MSK (_ULCAST_(0x1) << 8) -#define CM_GCR_L2_PFT_CONTROL_B_PORTID_SHF 0 -#define CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK (_ULCAST_(0xff) << 0) - -/* GCR_Cx_COHERENCE register fields */ -#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 -#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) -#define CM3_GCR_Cx_COHERENCE_COHEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_Cx_CONFIG register fields */ -#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10 -#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK (_ULCAST_(0x3) << 10) -#define CM_GCR_Cx_CONFIG_PVPE_SHF 0 -#define CM_GCR_Cx_CONFIG_PVPE_MSK (_ULCAST_(0x3ff) << 0) - -/* GCR_Cx_OTHER register fields */ -#define CM_GCR_Cx_OTHER_CORENUM_SHF 16 -#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16) -#define CM3_GCR_Cx_OTHER_CORE_SHF 8 -#define CM3_GCR_Cx_OTHER_CORE_MSK (_ULCAST_(0x3f) << 8) -#define CM3_GCR_Cx_OTHER_VP_SHF 0 -#define CM3_GCR_Cx_OTHER_VP_MSK (_ULCAST_(0x7) << 0) - -/* GCR_Cx_RESET_BASE register fields */ -#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12 -#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK (_ULCAST_(0xfffff) << 12) - -/* GCR_Cx_RESET_EXT_BASE register fields */ -#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF 31 -#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK (_ULCAST_(0x1) << 31) -#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF 30 -#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK (_ULCAST_(0x1) << 30) -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF 20 -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK (_ULCAST_(0xff) << 20) -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF 1 -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK (_ULCAST_(0x7f) << 1) -#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF 0 -#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK (_ULCAST_(0x1) << 0) - -/** - * mips_cm_numcores - return the number of cores present in the system - * - * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or - * zero if no Coherence Manager is present. - */ -static inline unsigned mips_cm_numcores(void) -{ - if (!mips_cm_present()) - return 0; - - return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK) - >> CM_GCR_CONFIG_PCORES_SHF) + 1; -} - -/** - * mips_cm_numiocu - return the number of IOCUs present in the system - * - * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero - * if no Coherence Manager is present. 
- */ -static inline unsigned mips_cm_numiocu(void) -{ - if (!mips_cm_present()) - return 0; - - return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK) - >> CM_GCR_CONFIG_NUMIOCU_SHF; -} +#define CM_REV_CM3_5 CM_ENCODE_REV(9, 0) + +/* GCR_ERR_CONTROL - Control error checking logic */ +GCR_ACCESSOR_RW(32, 0x038, err_control) +#define CM_GCR_ERR_CONTROL_L2_ECC_EN BIT(1) +#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT BIT(0) + +/* GCR_ERR_MASK - Control which errors are reported as interrupts */ +GCR_ACCESSOR_RW(64, 0x040, error_mask) + +/* GCR_ERR_CAUSE - Indicates the type of error that occurred */ +GCR_ACCESSOR_RW(64, 0x048, error_cause) +#define CM_GCR_ERROR_CAUSE_ERRTYPE GENMASK(31, 27) +#define CM3_GCR_ERROR_CAUSE_ERRTYPE GENMASK_ULL(63, 58) +#define CM_GCR_ERROR_CAUSE_ERRINFO GENMASK(26, 0) + +/* GCR_ERR_ADDR - Indicates the address associated with an error */ +GCR_ACCESSOR_RW(64, 0x050, error_addr) + +/* GCR_ERR_MULT - Indicates when multiple errors have occurred */ +GCR_ACCESSOR_RW(64, 0x058, error_mult) +#define CM_GCR_ERROR_MULT_ERR2ND GENMASK(4, 0) + +/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */ +GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE GENMASK(31, 12) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN BIT(0) + +/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */ +GCR_ACCESSOR_RW(64, 0x080, gic_base) +#define CM_GCR_GIC_BASE_GICBASE GENMASK(31, 17) +#define CM_GCR_GIC_BASE_GICEN BIT(0) + +/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */ +GCR_ACCESSOR_RW(64, 0x088, cpc_base) +#define CM_GCR_CPC_BASE_CPCBASE GENMASK(31, 15) +#define CM_GCR_CPC_BASE_CPCEN BIT(0) + +/* GCR_REGn_BASE - Base addresses of CM address regions */ +GCR_ACCESSOR_RW(64, 0x090, reg0_base) +GCR_ACCESSOR_RW(64, 0x0a0, reg1_base) +GCR_ACCESSOR_RW(64, 0x0b0, reg2_base) +GCR_ACCESSOR_RW(64, 0x0c0, reg3_base) +#define CM_GCR_REGn_BASE_BASEADDR GENMASK(31, 16) + +/* GCR_REGn_MASK - Size & destination of CM address regions */ +GCR_ACCESSOR_RW(64, 0x098, reg0_mask) +GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask) +GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask) +GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask) +#define CM_GCR_REGn_MASK_ADDRMASK GENMASK(31, 16) +#define CM_GCR_REGn_MASK_CCAOVR GENMASK(7, 5) +#define CM_GCR_REGn_MASK_CCAOVREN BIT(4) +#define CM_GCR_REGn_MASK_DROPL2 BIT(2) +#define CM_GCR_REGn_MASK_CMTGT GENMASK(1, 0) +#define CM_GCR_REGn_MASK_CMTGT_DISABLED 0x0 +#define CM_GCR_REGn_MASK_CMTGT_MEM 0x1 +#define CM_GCR_REGn_MASK_CMTGT_IOCU0 0x2 +#define CM_GCR_REGn_MASK_CMTGT_IOCU1 0x3 + +/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */ +GCR_ACCESSOR_RO(32, 0x0d0, gic_status) +#define CM_GCR_GIC_STATUS_EX BIT(0) + +/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */ +GCR_ACCESSOR_RO(32, 0x0f0, cpc_status) +#define CM_GCR_CPC_STATUS_EX BIT(0) + +/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */ +GCR_ACCESSOR_RW(32, 0x130, l2_config) +#define CM_GCR_L2_CONFIG_BYPASS BIT(20) +#define CM_GCR_L2_CONFIG_SET_SIZE GENMASK(15, 12) +#define CM_GCR_L2_CONFIG_LINE_SIZE GENMASK(11, 8) +#define CM_GCR_L2_CONFIG_ASSOC GENMASK(7, 0) + +/* GCR_SYS_CONFIG2 - Further information about the system */ +GCR_ACCESSOR_RO(32, 0x150, sys_config2) +#define CM_GCR_SYS_CONFIG2_MAXVPW GENMASK(3, 0) + +/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */ +GCR_ACCESSOR_RW(32, 0x300, l2_pft_control) +#define CM_GCR_L2_PFT_CONTROL_PAGEMASK GENMASK(31, 12) +#define 
CM_GCR_L2_PFT_CONTROL_PFTEN BIT(8) +#define CM_GCR_L2_PFT_CONTROL_NPFT GENMASK(7, 0) + +/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */ +GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b) +#define CM_GCR_L2_PFT_CONTROL_B_CEN BIT(8) +#define CM_GCR_L2_PFT_CONTROL_B_PORTID GENMASK(7, 0) + +/* GCR_L2SM_COP - L2 cache op state machine control */ +GCR_ACCESSOR_RW(32, 0x620, l2sm_cop) +#define CM_GCR_L2SM_COP_PRESENT BIT(31) +#define CM_GCR_L2SM_COP_RESULT GENMASK(8, 6) +#define CM_GCR_L2SM_COP_RESULT_DONTCARE 0 +#define CM_GCR_L2SM_COP_RESULT_DONE_OK 1 +#define CM_GCR_L2SM_COP_RESULT_DONE_ERROR 2 +#define CM_GCR_L2SM_COP_RESULT_ABORT_OK 3 +#define CM_GCR_L2SM_COP_RESULT_ABORT_ERROR 4 +#define CM_GCR_L2SM_COP_RUNNING BIT(5) +#define CM_GCR_L2SM_COP_TYPE GENMASK(4, 2) +#define CM_GCR_L2SM_COP_TYPE_IDX_WBINV 0 +#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAG 1 +#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA 2 +#define CM_GCR_L2SM_COP_TYPE_HIT_INV 4 +#define CM_GCR_L2SM_COP_TYPE_HIT_WBINV 5 +#define CM_GCR_L2SM_COP_TYPE_HIT_WB 6 +#define CM_GCR_L2SM_COP_TYPE_FETCHLOCK 7 +#define CM_GCR_L2SM_COP_CMD GENMASK(1, 0) +#define CM_GCR_L2SM_COP_CMD_START 1 /* only when idle */ +#define CM_GCR_L2SM_COP_CMD_ABORT 3 /* only when running */ + +/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */ +GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop) +#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES GENMASK_ULL(63, 48) +#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG GENMASK_ULL(47, 6) + +/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */ +GCR_ACCESSOR_RW(64, 0x680, bev_base) + +/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */ +GCR_CX_ACCESSOR_RW(32, 0x000, reset_release) + +/* GCR_Cx_COHERENCE - Controls core coherence */ +GCR_CX_ACCESSOR_RW(32, 0x008, coherence) +#define CM_GCR_Cx_COHERENCE_COHDOMAINEN GENMASK(7, 0) +#define CM3_GCR_Cx_COHERENCE_COHEN BIT(0) + +/* GCR_Cx_CONFIG - Information about a core's configuration */ +GCR_CX_ACCESSOR_RO(32, 0x010, config) +#define CM_GCR_Cx_CONFIG_IOCUTYPE GENMASK(11, 10) +#define CM_GCR_Cx_CONFIG_PVPE GENMASK(9, 0) + +/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */ +GCR_CX_ACCESSOR_RW(32, 0x018, other) +#define CM_GCR_Cx_OTHER_CORENUM GENMASK(31, 16) /* CM < 3 */ +#define CM_GCR_Cx_OTHER_CLUSTER_EN BIT(31) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_GIC_EN BIT(30) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_BLOCK GENMASK(25, 24) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_BLOCK_LOCAL 0 +#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL 1 +#define CM_GCR_Cx_OTHER_BLOCK_USER 2 +#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH 3 +#define CM_GCR_Cx_OTHER_CLUSTER GENMASK(21, 16) /* CM >= 3.5 */ +#define CM3_GCR_Cx_OTHER_CORE GENMASK(13, 8) /* CM >= 3 */ +#define CM_GCR_Cx_OTHER_CORE_CM 32 +#define CM3_GCR_Cx_OTHER_VP GENMASK(2, 0) /* CM >= 3 */ + +/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */ +GCR_CX_ACCESSOR_RW(32, 0x020, reset_base) +#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12) + +/* GCR_Cx_ID - Identify the current core */ +GCR_CX_ACCESSOR_RO(32, 0x028, id) +#define CM_GCR_Cx_ID_CLUSTER GENMASK(15, 8) +#define CM_GCR_Cx_ID_CORE GENMASK(7, 0) + +/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */ +GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base) +#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET BIT(31) +#define CM_GCR_Cx_RESET_EXT_BASE_UEB BIT(30) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK GENMASK(27, 20) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA GENMASK(7, 1) +#define 
CM_GCR_Cx_RESET_EXT_BASE_PRESENT BIT(0) /** * mips_cm_l2sync - perform an L2-only sync operation @@ -469,7 +369,7 @@ static inline unsigned int mips_cm_max_vp_width(void) uint32_t cfg; if (mips_cm_revision() >= CM_REV_CM3) - return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW; if (mips_cm_present()) { /* @@ -477,8 +377,8 @@ static inline unsigned int mips_cm_max_vp_width(void) * number of VP(E)s, and if that ever changes then this will * need revisiting. */ - cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; - return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; + cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE; + return (cfg >> __ffs(CM_GCR_Cx_CONFIG_PVPE)) + 1; } if (IS_ENABLED(CONFIG_SMP)) @@ -499,7 +399,7 @@ static inline unsigned int mips_cm_max_vp_width(void) */ static inline unsigned int mips_cm_vp_id(unsigned int cpu) { - unsigned int core = cpu_data[cpu].core; + unsigned int core = cpu_core(&cpu_data[cpu]); unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); return (core * mips_cm_max_vp_width()) + vp; @@ -508,29 +408,56 @@ static inline unsigned int mips_cm_vp_id(unsigned int cpu) #ifdef CONFIG_MIPS_CM /** - * mips_cm_lock_other - lock access to another core + * mips_cm_lock_other - lock access to redirect/other region + * @cluster: the other cluster to be accessed * @core: the other core to be accessed * @vp: the VP within the other core to be accessed + * @block: the register block to be accessed * - * Call before operating upon a core via the 'other' register region in - * order to prevent the region being moved during access. Must be followed - * by a call to mips_cm_unlock_other. + * Configure the redirect/other region for the local core/VP (depending upon + * the CM revision) to target the specified @cluster, @core, @vp & register + * @block. Must be called before using the redirect/other region, and followed + * by a call to mips_cm_unlock_other() when access to the redirect/other region + * is complete. + * + * This function acquires a spinlock such that code between it & + * mips_cm_unlock_other() calls cannot be pre-empted by anything which may + * reconfigure the redirect/other region, and cannot be interfered with by + * another VP in the core. As such calls to this function should not be nested. */ -extern void mips_cm_lock_other(unsigned int core, unsigned int vp); +extern void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block); /** - * mips_cm_unlock_other - unlock access to another core + * mips_cm_unlock_other - unlock access to redirect/other region * - * Call after operating upon another core via the 'other' register region. - * Must be called after mips_cm_lock_other. + * Must be called after mips_cm_lock_other() once all required access to the + * redirect/other region has been completed. */ extern void mips_cm_unlock_other(void); #else /* !CONFIG_MIPS_CM */ -static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { } +static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block) { } static inline void mips_cm_unlock_other(void) { } #endif /* !CONFIG_MIPS_CM */ +/** + * mips_cm_lock_other_cpu - lock access to redirect/other region + * @cpu: the other CPU whose register we want to access + * + * Configure the redirect/other region for the local core/VP (depending upon + * the CM revision) to target the specified @cpu & register @block. 
This is + * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number + * for convenience. + */ +static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block) +{ + struct cpuinfo_mips *d = &cpu_data[cpu]; + + mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block); +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h index 8c519f9827a3036c3a16f3beb60ba182b71665fd..f885051a837876ab19bdd110099a2c3a83ec4ba2 100644 --- a/arch/mips/include/asm/mips-cpc.h +++ b/arch/mips/include/asm/mips-cpc.h @@ -8,11 +8,15 @@ * option) any later version. */ +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-cpc.h +#endif + #ifndef __MIPS_ASM_MIPS_CPC_H__ #define __MIPS_ASM_MIPS_CPC_H__ -#include -#include +#include +#include /* The base address of the CPC registers */ extern void __iomem *mips_cpc_base; @@ -61,89 +65,92 @@ static inline bool mips_cpc_present(void) #define MIPS_CPC_CLCB_OFS 0x2000 #define MIPS_CPC_COCB_OFS 0x4000 -/* Macros to ease the creation of register access functions */ -#define BUILD_CPC_R_(name, off) \ -static inline u32 *addr_cpc_##name(void) \ -{ \ - return (u32 *)(mips_cpc_base + (off)); \ -} \ - \ -static inline u32 read_cpc_##name(void) \ -{ \ - return __raw_readl(mips_cpc_base + (off)); \ -} - -#define BUILD_CPC__W(name, off) \ -static inline void write_cpc_##name(u32 value) \ -{ \ - __raw_writel(value, mips_cpc_base + (off)); \ -} - -#define BUILD_CPC_RW(name, off) \ - BUILD_CPC_R_(name, off) \ - BUILD_CPC__W(name, off) - -#define BUILD_CPC_Cx_R_(name, off) \ - BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \ - BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off)) - -#define BUILD_CPC_Cx__W(name, off) \ - BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \ - BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off)) - -#define BUILD_CPC_Cx_RW(name, off) \ - BUILD_CPC_Cx_R_(name, off) \ - BUILD_CPC_Cx__W(name, off) - -/* GCB register accessor functions */ -BUILD_CPC_RW(access, MIPS_CPC_GCB_OFS + 0x00) -BUILD_CPC_RW(seqdel, MIPS_CPC_GCB_OFS + 0x08) -BUILD_CPC_RW(rail, MIPS_CPC_GCB_OFS + 0x10) -BUILD_CPC_RW(resetlen, MIPS_CPC_GCB_OFS + 0x18) -BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20) - -/* Core Local & Core Other accessor functions */ -BUILD_CPC_Cx_RW(cmd, 0x00) -BUILD_CPC_Cx_RW(stat_conf, 0x08) -BUILD_CPC_Cx_RW(other, 0x10) -BUILD_CPC_Cx_RW(vp_stop, 0x20) -BUILD_CPC_Cx_RW(vp_run, 0x28) -BUILD_CPC_Cx_RW(vp_running, 0x30) - -/* CPC_Cx_CMD register fields */ -#define CPC_Cx_CMD_SHF 0 -#define CPC_Cx_CMD_MSK (_ULCAST_(0xf) << 0) -#define CPC_Cx_CMD_CLOCKOFF (_ULCAST_(0x1) << 0) -#define CPC_Cx_CMD_PWRDOWN (_ULCAST_(0x2) << 0) -#define CPC_Cx_CMD_PWRUP (_ULCAST_(0x3) << 0) -#define CPC_Cx_CMD_RESET (_ULCAST_(0x4) << 0) - -/* CPC_Cx_STAT_CONF register fields */ -#define CPC_Cx_STAT_CONF_PWRUPE_SHF 23 -#define CPC_Cx_STAT_CONF_PWRUPE_MSK (_ULCAST_(0x1) << 23) -#define CPC_Cx_STAT_CONF_SEQSTATE_SHF 19 -#define CPC_Cx_STAT_CONF_SEQSTATE_MSK (_ULCAST_(0xf) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D0 (_ULCAST_(0x0) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U0 (_ULCAST_(0x1) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U1 (_ULCAST_(0x2) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U2 (_ULCAST_(0x3) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U3 (_ULCAST_(0x4) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U4 (_ULCAST_(0x5) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U5 (_ULCAST_(0x6) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U6 (_ULCAST_(0x7) << 19) 
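[Editor's note, not part of the patch] With the new signature documented above, a caller brackets any use of the core-other/redirect region between mips_cm_lock_other() and mips_cm_unlock_other(), now passing the cluster, core, VP and register block explicitly. A hedged sketch of reading another core's GCR_Cx_CONFIG this way; the helper name is hypothetical, but the pattern mirrors mips_cps_numvps() later in this patch:

	#include <asm/mips-cps.h>

	/* Illustrative only: read GCR_Cx_CONFIG of @core in @cluster via the
	 * core-other/redirect register block. */
	static unsigned int read_other_core_gcr_config(unsigned int cluster,
						       unsigned int core)
	{
		unsigned int cfg;

		mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		cfg = read_gcr_co_config();
		mips_cm_unlock_other();

		return cfg;
	}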
-#define CPC_Cx_STAT_CONF_SEQSTATE_D1 (_ULCAST_(0x8) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D3 (_ULCAST_(0x9) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D2 (_ULCAST_(0xa) << 19) -#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF 17 -#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK (_ULCAST_(0x1) << 17) -#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF 16 -#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK (_ULCAST_(0x1) << 16) -#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF 15 -#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK (_ULCAST_(0x1) << 15) - -/* CPC_Cx_OTHER register fields */ -#define CPC_Cx_OTHER_CORENUM_SHF 16 -#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) +#define CPC_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name) + +#define CPC_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name) + +#define CPC_CX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name) + +#define CPC_CX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name) + +/* CPC_ACCESS - Control core/IOCU access to CPC registers prior to CM 3 */ +CPC_ACCESSOR_RW(32, 0x000, access) + +/* CPC_SEQDEL - Configure delays between command sequencer steps */ +CPC_ACCESSOR_RW(32, 0x008, seqdel) + +/* CPC_RAIL - Configure the delay from rail power-up to stability */ +CPC_ACCESSOR_RW(32, 0x010, rail) + +/* CPC_RESETLEN - Configure the length of reset sequences */ +CPC_ACCESSOR_RW(32, 0x018, resetlen) + +/* CPC_REVISION - Indicates the revision of the CPC */ +CPC_ACCESSOR_RO(32, 0x020, revision) + +/* CPC_PWRUP_CTL - Control power to the Coherence Manager (CM) */ +CPC_ACCESSOR_RW(32, 0x030, pwrup_ctl) +#define CPC_PWRUP_CTL_CM_PWRUP BIT(0) + +/* CPC_CONFIG - Mirrors GCR_CONFIG */ +CPC_ACCESSOR_RW(64, 0x138, config) + +/* CPC_SYS_CONFIG - Control cluster endianness */ +CPC_ACCESSOR_RW(32, 0x140, sys_config) +#define CPC_SYS_CONFIG_BE_IMMEDIATE BIT(2) +#define CPC_SYS_CONFIG_BE_STATUS BIT(1) +#define CPC_SYS_CONFIG_BE BIT(0) + +/* CPC_Cx_CMD - Instruct the CPC to take action on a core */ +CPC_CX_ACCESSOR_RW(32, 0x000, cmd) +#define CPC_Cx_CMD GENMASK(3, 0) +#define CPC_Cx_CMD_CLOCKOFF 0x1 +#define CPC_Cx_CMD_PWRDOWN 0x2 +#define CPC_Cx_CMD_PWRUP 0x3 +#define CPC_Cx_CMD_RESET 0x4 + +/* CPC_Cx_STAT_CONF - Indicates core configuration & state */ +CPC_CX_ACCESSOR_RW(32, 0x008, stat_conf) +#define CPC_Cx_STAT_CONF_PWRUPE BIT(23) +#define CPC_Cx_STAT_CONF_SEQSTATE GENMASK(22, 19) +#define CPC_Cx_STAT_CONF_SEQSTATE_D0 0x0 +#define CPC_Cx_STAT_CONF_SEQSTATE_U0 0x1 +#define CPC_Cx_STAT_CONF_SEQSTATE_U1 0x2 +#define CPC_Cx_STAT_CONF_SEQSTATE_U2 0x3 +#define CPC_Cx_STAT_CONF_SEQSTATE_U3 0x4 +#define CPC_Cx_STAT_CONF_SEQSTATE_U4 0x5 +#define CPC_Cx_STAT_CONF_SEQSTATE_U5 0x6 +#define CPC_Cx_STAT_CONF_SEQSTATE_U6 0x7 +#define CPC_Cx_STAT_CONF_SEQSTATE_D1 0x8 +#define CPC_Cx_STAT_CONF_SEQSTATE_D3 0x9 +#define CPC_Cx_STAT_CONF_SEQSTATE_D2 0xa +#define CPC_Cx_STAT_CONF_CLKGAT_IMPL BIT(17) +#define CPC_Cx_STAT_CONF_PWRDN_IMPL BIT(16) +#define CPC_Cx_STAT_CONF_EJTAG_PROBE BIT(15) + +/* CPC_Cx_OTHER - Configure the core-other register block prior to CM 3 */ +CPC_CX_ACCESSOR_RW(32, 0x010, other) +#define CPC_Cx_OTHER_CORENUM GENMASK(23, 16) + +/* CPC_Cx_VP_STOP - Stop Virtual Processors 
(VPs) within a core from running */ +CPC_CX_ACCESSOR_RW(32, 0x020, vp_stop) + +/* CPC_Cx_VP_START - Start Virtual Processors (VPs) within a core running */ +CPC_CX_ACCESSOR_RW(32, 0x028, vp_run) + +/* CPC_Cx_VP_RUNNING - Indicate which Virtual Processors (VPs) are running */ +CPC_CX_ACCESSOR_RW(32, 0x030, vp_running) + +/* CPC_Cx_CONFIG - Mirrors GCR_Cx_CONFIG */ +CPC_CX_ACCESSOR_RW(32, 0x090, config) #ifdef CONFIG_MIPS_CPC diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h new file mode 100644 index 0000000000000000000000000000000000000000..bf02b5070a989cc11ee39c85ce21ece9ee67990c --- /dev/null +++ b/arch/mips/include/asm/mips-cps.h @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPS_H__ +#define __MIPS_ASM_MIPS_CPS_H__ + +#include +#include + +extern unsigned long __cps_access_bad_size(void) + __compiletime_error("Bad size for CPS accessor"); + +#define CPS_ACCESSOR_A(unit, off, name) \ +static inline void *addr_##unit##_##name(void) \ +{ \ + return mips_##unit##_base + (off); \ +} + +#define CPS_ACCESSOR_R(unit, sz, name) \ +static inline uint##sz##_t read_##unit##_##name(void) \ +{ \ + uint64_t val64; \ + \ + switch (sz) { \ + case 32: \ + return __raw_readl(addr_##unit##_##name()); \ + \ + case 64: \ + if (mips_cm_is64) \ + return __raw_readq(addr_##unit##_##name()); \ + \ + val64 = __raw_readl(addr_##unit##_##name() + 4); \ + val64 <<= 32; \ + val64 |= __raw_readl(addr_##unit##_##name()); \ + return val64; \ + \ + default: \ + return __cps_access_bad_size(); \ + } \ +} + +#define CPS_ACCESSOR_W(unit, sz, name) \ +static inline void write_##unit##_##name(uint##sz##_t val) \ +{ \ + switch (sz) { \ + case 32: \ + __raw_writel(val, addr_##unit##_##name()); \ + break; \ + \ + case 64: \ + if (mips_cm_is64) { \ + __raw_writeq(val, addr_##unit##_##name()); \ + break; \ + } \ + \ + __raw_writel((uint64_t)val >> 32, \ + addr_##unit##_##name() + 4); \ + __raw_writel(val, addr_##unit##_##name()); \ + break; \ + \ + default: \ + __cps_access_bad_size(); \ + break; \ + } \ +} + +#define CPS_ACCESSOR_M(unit, sz, name) \ +static inline void change_##unit##_##name(uint##sz##_t mask, \ + uint##sz##_t val) \ +{ \ + uint##sz##_t reg_val = read_##unit##_##name(); \ + reg_val &= ~mask; \ + reg_val |= val; \ + write_##unit##_##name(reg_val); \ +} \ + \ +static inline void set_##unit##_##name(uint##sz##_t val) \ +{ \ + change_##unit##_##name(val, val); \ +} \ + \ +static inline void clear_##unit##_##name(uint##sz##_t val) \ +{ \ + change_##unit##_##name(val, 0); \ +} + +#define CPS_ACCESSOR_RO(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_R(unit, sz, name) + +#define CPS_ACCESSOR_WO(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_W(unit, sz, name) + +#define CPS_ACCESSOR_RW(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_R(unit, sz, name) \ + CPS_ACCESSOR_W(unit, sz, name) \ + CPS_ACCESSOR_M(unit, sz, name) + +#include +#include +#include + +/** + * mips_cps_numclusters - return the number of clusters present in the system + * + * Returns the number of clusters in the system. 
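[Editor's note, not part of the patch] For each register, CPS_ACCESSOR_RW() above generates addr_*/read_*/write_* helpers plus the change_*/set_*/clear_* modifiers from CPS_ACCESSOR_M(), so callers avoid open-coded read-modify-write sequences. A sketch of how that could look for GCR_ERR_CONTROL as declared earlier in this patch, assuming the GCR_ACCESSOR_RW() wrapper follows the same read_gcr_*/set_gcr_* naming scheme as the CPC wrappers shown here (illustrative only, function name hypothetical):

	/* Enable L2 ECC reporting only if the CM advertises support for it. */
	static void cm_l2_ecc_enable_sketch(void)
	{
		if (read_gcr_err_control() & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT)
			set_gcr_err_control(CM_GCR_ERR_CONTROL_L2_ECC_EN);
		else
			clear_gcr_err_control(CM_GCR_ERR_CONTROL_L2_ECC_EN);
	}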
+ */ +static inline unsigned int mips_cps_numclusters(void) +{ + unsigned int num_clusters; + + if (mips_cm_revision() < CM_REV_CM3_5) + return 1; + + num_clusters = read_gcr_config() & CM_GCR_CONFIG_NUM_CLUSTERS; + num_clusters >>= __ffs(CM_GCR_CONFIG_NUM_CLUSTERS); + return num_clusters; +} + +/** + * mips_cps_cluster_config - return (GCR|CPC)_CONFIG from a cluster + * @cluster: the ID of the cluster whose config we want + * + * Read the value of GCR_CONFIG (or its CPC_CONFIG mirror) from a @cluster. + * + * Returns the value of GCR_CONFIG. + */ +static inline uint64_t mips_cps_cluster_config(unsigned int cluster) +{ + uint64_t config; + + if (mips_cm_revision() < CM_REV_CM3_5) { + /* + * Prior to CM 3.5 we don't have the notion of multiple + * clusters so we can trivially read the GCR_CONFIG register + * within this cluster. + */ + WARN_ON(cluster != 0); + config = read_gcr_config(); + } else { + /* + * From CM 3.5 onwards we read the CPC_CONFIG mirror of + * GCR_CONFIG via the redirect region, since the CPC is always + * powered up allowing us not to need to power up the CM. + */ + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + config = read_cpc_redir_config(); + mips_cm_unlock_other(); + } + + return config; +} + +/** + * mips_cps_numcores - return the number of cores present in a cluster + * @cluster: the ID of the cluster whose core count we want + * + * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or + * zero if no Coherence Manager is present. + */ +static inline unsigned int mips_cps_numcores(unsigned int cluster) +{ + if (!mips_cm_present()) + return 0; + + /* Add one before masking to handle 0xff indicating no cores */ + return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; +} + +/** + * mips_cps_numiocu - return the number of IOCUs present in a cluster + * @cluster: the ID of the cluster whose IOCU count we want + * + * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero + * if no Coherence Manager is present. + */ +static inline unsigned int mips_cps_numiocu(unsigned int cluster) +{ + unsigned int num_iocu; + + if (!mips_cm_present()) + return 0; + + num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU; + num_iocu >>= __ffs(CM_GCR_CONFIG_NUMIOCU); + return num_iocu; +} + +/** + * mips_cps_numvps - return the number of VPs (threads) supported by a core + * @cluster: the ID of the cluster containing the core we want to examine + * @core: the ID of the core whose VP count we want + * + * Returns the number of Virtual Processors (VPs, ie. hardware threads) that + * are supported by the given @core in the given @cluster. If the core or the + * kernel do not support hardware multi-threading this returns 1. + */ +static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core) +{ + unsigned int cfg; + + if (!mips_cm_present()) + return 1; + + if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) + && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) + return 1; + + mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + + if (mips_cm_revision() < CM_REV_CM3_5) { + /* + * Prior to CM 3.5 we can only have one cluster & don't have + * CPC_Cx_CONFIG, so we read GCR_Cx_CONFIG. + */ + cfg = read_gcr_co_config(); + } else { + /* + * From CM 3.5 onwards we read CPC_Cx_CONFIG because the CPC is + * always powered, which allows us to not worry about powering + * up the cluster's CM here. 
+ */ + cfg = read_cpc_co_config(); + } + + mips_cm_unlock_other(); + + return (cfg + 1) & CM_GCR_Cx_CONFIG_PVPE; +} + +#endif /* __MIPS_ASM_MIPS_CPS_H__ */ diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h new file mode 100644 index 0000000000000000000000000000000000000000..a2badf572632d147b1d470f728c029b36fbcf8b4 --- /dev/null +++ b/arch/mips/include/asm/mips-gic.h @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-gic.h +#endif + +#ifndef __MIPS_ASM_MIPS_GIC_H__ +#define __MIPS_ASM_MIPS_GIC_H__ + +#include + +/* The base address of the GIC registers */ +extern void __iomem *mips_gic_base; + +/* Offsets from the GIC base address to various control blocks */ +#define MIPS_GIC_SHARED_OFS 0x00000 +#define MIPS_GIC_SHARED_SZ 0x08000 +#define MIPS_GIC_LOCAL_OFS 0x08000 +#define MIPS_GIC_LOCAL_SZ 0x04000 +#define MIPS_GIC_REDIR_OFS 0x0c000 +#define MIPS_GIC_REDIR_SZ 0x04000 +#define MIPS_GIC_USER_OFS 0x10000 +#define MIPS_GIC_USER_SZ 0x10000 + +/* For read-only shared registers */ +#define GIC_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_SHARED_OFS + off, name) + +/* For read-write shared registers */ +#define GIC_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_SHARED_OFS + off, name) + +/* For read-only local registers */ +#define GIC_VX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name) + +/* For read-write local registers */ +#define GIC_VX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name) + +/* For read-only shared per-interrupt registers */ +#define GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ +static inline void __iomem *addr_gic_##name(unsigned int intr) \ +{ \ + return mips_gic_base + (off) + (intr * (stride)); \ +} \ + \ +static inline unsigned int read_gic_##name(unsigned int intr) \ +{ \ + BUILD_BUG_ON(sz != 32); \ + return __raw_readl(addr_gic_##name(intr)); \ +} + +/* For read-write shared per-interrupt registers */ +#define GIC_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ + \ +static inline void write_gic_##name(unsigned int intr, \ + unsigned int val) \ +{ \ + BUILD_BUG_ON(sz != 32); \ + __raw_writel(val, addr_gic_##name(intr)); \ +} + +/* For read-only local per-interrupt registers */ +#define GIC_VX_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \ + stride, vl_##name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \ + stride, vo_##name) + +/* For read-write local per-interrupt registers */ +#define GIC_VX_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \ + stride, vl_##name) \ + GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \ + stride, vo_##name) + +/* For read-only shared bit-per-interrupt registers */ +#define GIC_ACCESSOR_RO_INTR_BIT(off, name) \ +static inline void __iomem *addr_gic_##name(void) \ +{ \ + return 
mips_gic_base + (off); \ +} \ + \ +static inline unsigned int read_gic_##name(unsigned int intr) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + unsigned int val; \ + \ + if (mips_cm_is64) { \ + addr += (intr / 64) * sizeof(uint64_t); \ + val = __raw_readq(addr) >> intr % 64; \ + } else { \ + addr += (intr / 32) * sizeof(uint32_t); \ + val = __raw_readl(addr) >> intr % 32; \ + } \ + \ + return val & 0x1; \ +} + +/* For read-write shared bit-per-interrupt registers */ +#define GIC_ACCESSOR_RW_INTR_BIT(off, name) \ + GIC_ACCESSOR_RO_INTR_BIT(off, name) \ + \ +static inline void write_gic_##name(unsigned int intr) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + \ + if (mips_cm_is64) { \ + addr += (intr / 64) * sizeof(uint64_t); \ + __raw_writeq(BIT(intr % 64), addr); \ + } else { \ + addr += (intr / 32) * sizeof(uint32_t); \ + __raw_writel(BIT(intr % 32), addr); \ + } \ +} \ + \ +static inline void change_gic_##name(unsigned int intr, \ + unsigned int val) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + \ + if (mips_cm_is64) { \ + uint64_t _val; \ + \ + addr += (intr / 64) * sizeof(uint64_t); \ + _val = __raw_readq(addr); \ + _val &= ~BIT_ULL(intr % 64); \ + _val |= (uint64_t)val << (intr % 64); \ + __raw_writeq(_val, addr); \ + } else { \ + uint32_t _val; \ + \ + addr += (intr / 32) * sizeof(uint32_t); \ + _val = __raw_readl(addr); \ + _val &= ~BIT(intr % 32); \ + _val |= val << (intr % 32); \ + __raw_writel(_val, addr); \ + } \ +} + +/* For read-only local bit-per-interrupt registers */ +#define GIC_VX_ACCESSOR_RO_INTR_BIT(sz, off, name) \ + GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \ + vl_##name) \ + GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \ + vo_##name) + +/* For read-write local bit-per-interrupt registers */ +#define GIC_VX_ACCESSOR_RW_INTR_BIT(sz, off, name) \ + GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \ + vl_##name) \ + GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \ + vo_##name) + +/* GIC_SH_CONFIG - Information about the GIC configuration */ +GIC_ACCESSOR_RW(32, 0x000, config) +#define GIC_CONFIG_COUNTSTOP BIT(28) +#define GIC_CONFIG_COUNTBITS GENMASK(27, 24) +#define GIC_CONFIG_NUMINTERRUPTS GENMASK(23, 16) +#define GIC_CONFIG_PVPS GENMASK(6, 0) + +/* GIC_SH_COUNTER - Shared global counter value */ +GIC_ACCESSOR_RW(64, 0x010, counter) +GIC_ACCESSOR_RW(32, 0x010, counter_32l) +GIC_ACCESSOR_RW(32, 0x014, counter_32h) + +/* GIC_SH_POL_* - Configures interrupt polarity */ +GIC_ACCESSOR_RW_INTR_BIT(0x100, pol) +#define GIC_POL_ACTIVE_LOW 0 /* when level triggered */ +#define GIC_POL_ACTIVE_HIGH 1 /* when level triggered */ +#define GIC_POL_FALLING_EDGE 0 /* when single-edge triggered */ +#define GIC_POL_RISING_EDGE 1 /* when single-edge triggered */ + +/* GIC_SH_TRIG_* - Configures interrupts to be edge or level triggered */ +GIC_ACCESSOR_RW_INTR_BIT(0x180, trig) +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_EDGE 1 + +/* GIC_SH_DUAL_* - Configures whether interrupts trigger on both edges */ +GIC_ACCESSOR_RW_INTR_BIT(0x200, dual) +#define GIC_DUAL_SINGLE 0 /* when edge-triggered */ +#define GIC_DUAL_DUAL 1 /* when edge-triggered */ + +/* GIC_SH_WEDGE - Write an 'edge', ie. 
trigger an interrupt */ +GIC_ACCESSOR_RW(32, 0x280, wedge) +#define GIC_WEDGE_RW BIT(31) +#define GIC_WEDGE_INTR GENMASK(7, 0) + +/* GIC_SH_RMASK_* - Reset/clear shared interrupt mask bits */ +GIC_ACCESSOR_RW_INTR_BIT(0x300, rmask) + +/* GIC_SH_SMASK_* - Set shared interrupt mask bits */ +GIC_ACCESSOR_RW_INTR_BIT(0x380, smask) + +/* GIC_SH_MASK_* - Read the current shared interrupt mask */ +GIC_ACCESSOR_RO_INTR_BIT(0x400, mask) + +/* GIC_SH_PEND_* - Read currently pending shared interrupts */ +GIC_ACCESSOR_RO_INTR_BIT(0x480, pend) + +/* GIC_SH_MAPx_PIN - Map shared interrupts to a particular CPU pin */ +GIC_ACCESSOR_RW_INTR_REG(32, 0x500, 0x4, map_pin) +#define GIC_MAP_PIN_MAP_TO_PIN BIT(31) +#define GIC_MAP_PIN_MAP_TO_NMI BIT(30) +#define GIC_MAP_PIN_MAP GENMASK(5, 0) + +/* GIC_SH_MAPx_VP - Map shared interrupts to a particular Virtual Processor */ +GIC_ACCESSOR_RW_INTR_REG(32, 0x2000, 0x20, map_vp) + +/* GIC_Vx_CTL - VP-level interrupt control */ +GIC_VX_ACCESSOR_RW(32, 0x000, ctl) +#define GIC_VX_CTL_FDC_ROUTABLE BIT(4) +#define GIC_VX_CTL_SWINT_ROUTABLE BIT(3) +#define GIC_VX_CTL_PERFCNT_ROUTABLE BIT(2) +#define GIC_VX_CTL_TIMER_ROUTABLE BIT(1) +#define GIC_VX_CTL_EIC BIT(0) + +/* GIC_Vx_PEND - Read currently pending local interrupts */ +GIC_VX_ACCESSOR_RO(32, 0x004, pend) + +/* GIC_Vx_MASK - Read the current local interrupt mask */ +GIC_VX_ACCESSOR_RO(32, 0x008, mask) + +/* GIC_Vx_RMASK - Reset/clear local interrupt mask bits */ +GIC_VX_ACCESSOR_RW(32, 0x00c, rmask) + +/* GIC_Vx_SMASK - Set local interrupt mask bits */ +GIC_VX_ACCESSOR_RW(32, 0x010, smask) + +/* GIC_Vx_*_MAP - Route local interrupts to the desired pins */ +GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x040, 0x4, map) + +/* GIC_Vx_WD_MAP - Route the local watchdog timer interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x040, wd_map) + +/* GIC_Vx_COMPARE_MAP - Route the local count/compare interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x044, compare_map) + +/* GIC_Vx_TIMER_MAP - Route the local CPU timer (cp0 count/compare) interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x048, timer_map) + +/* GIC_Vx_FDC_MAP - Route the local fast debug channel interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x04c, fdc_map) + +/* GIC_Vx_PERFCTR_MAP - Route the local performance counter interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x050, perfctr_map) + +/* GIC_Vx_SWINT0_MAP - Route the local software interrupt 0 */ +GIC_VX_ACCESSOR_RW(32, 0x054, swint0_map) + +/* GIC_Vx_SWINT1_MAP - Route the local software interrupt 1 */ +GIC_VX_ACCESSOR_RW(32, 0x058, swint1_map) + +/* GIC_Vx_OTHER - Configure access to other Virtual Processor registers */ +GIC_VX_ACCESSOR_RW(32, 0x080, other) +#define GIC_VX_OTHER_VPNUM GENMASK(5, 0) + +/* GIC_Vx_IDENT - Retrieve the local Virtual Processor's ID */ +GIC_VX_ACCESSOR_RO(32, 0x088, ident) +#define GIC_VX_IDENT_VPNUM GENMASK(5, 0) + +/* GIC_Vx_COMPARE - Value to compare with GIC_SH_COUNTER */ +GIC_VX_ACCESSOR_RW(64, 0x0a0, compare) + +/* GIC_Vx_EIC_SHADOW_SET_BASE - Set shadow register set for each interrupt */ +GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x100, 0x4, eic_shadow_set) + +/** + * enum mips_gic_local_interrupt - GIC local interrupts + * @GIC_LOCAL_INT_WD: GIC watchdog timer interrupt + * @GIC_LOCAL_INT_COMPARE: GIC count/compare interrupt + * @GIC_LOCAL_INT_TIMER: CP0 count/compare interrupt + * @GIC_LOCAL_INT_PERFCTR: Performance counter interrupt + * @GIC_LOCAL_INT_SWINT0: Software interrupt 0 + * @GIC_LOCAL_INT_SWINT1: Software interrupt 1 + * @GIC_LOCAL_INT_FDC: Fast debug channel interrupt + * @GIC_NUM_LOCAL_INTRS: The number of local interrupts + * + * 
Enumerates interrupts provided by the GIC that are local to a VP. + */ +enum mips_gic_local_interrupt { + GIC_LOCAL_INT_WD, + GIC_LOCAL_INT_COMPARE, + GIC_LOCAL_INT_TIMER, + GIC_LOCAL_INT_PERFCTR, + GIC_LOCAL_INT_SWINT0, + GIC_LOCAL_INT_SWINT1, + GIC_LOCAL_INT_FDC, + GIC_NUM_LOCAL_INTRS +}; + +/** + * mips_gic_present() - Determine whether a GIC is present + * + * Determines whether a MIPS Global Interrupt Controller (GIC) is present in + * the system that the kernel is running on. + * + * Return true if a GIC is present, else false. + */ +static inline bool mips_gic_present(void) +{ + return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base; +} + +/** + * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq + * + * Determine the virq number to use for the coprocessor 0 count/compare + * interrupt, which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_compare_int(void); + +/** + * gic_get_c0_perfcount_int() - Return performance counter interrupt virq + * + * Determine the virq number to use for CPU performance counter interrupts, + * which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_perfcount_int(void); + +/** + * gic_get_c0_fdc_int() - Return fast debug channel interrupt virq + * + * Determine the virq number to use for fast debug channel (FDC) interrupts, + * which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_fdc_int(void); + +#endif /* __MIPS_ASM_MIPS_CPS_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index dbb0eceda2c6d5cf05b347f561fc54cf19f7e635..a6810923b3f0214dfa36eab4a28320a65bba7790 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -48,6 +48,7 @@ #define CP0_ENTRYLO0 $2 #define CP0_ENTRYLO1 $3 #define CP0_CONF $3 +#define CP0_GLOBALNUMBER $3, 1 #define CP0_CONTEXT $4 #define CP0_PAGEMASK $5 #define CP0_SEGCTL0 $5, 2 @@ -147,6 +148,16 @@ #define MIPS_ENTRYLO_XI (_ULCAST_(1) << (BITS_PER_LONG - 2)) #define MIPS_ENTRYLO_RI (_ULCAST_(1) << (BITS_PER_LONG - 1)) +/* + * MIPSr6+ GlobalNumber register definitions + */ +#define MIPS_GLOBALNUMBER_VP_SHF 0 +#define MIPS_GLOBALNUMBER_VP (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_VP_SHF) +#define MIPS_GLOBALNUMBER_CORE_SHF 8 +#define MIPS_GLOBALNUMBER_CORE (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_CORE_SHF) +#define MIPS_GLOBALNUMBER_CLUSTER_SHF 16 +#define MIPS_GLOBALNUMBER_CLUSTER (_ULCAST_(0xf) << MIPS_GLOBALNUMBER_CLUSTER_SHF) + /* * Values for PageMask register */ @@ -1366,29 +1377,32 @@ do { \ #define __write_64bit_c0_split(source, sel, val) \ do { \ + unsigned long long __tmp; \ unsigned long __flags; \ \ local_irq_save(__flags); \ if (sel == 0) \ __asm__ __volatile__( \ ".set\tmips64\n\t" \ - "dsll\t%L0, %L0, 32\n\t" \ + "dsll\t%L0, %L1, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ - "dsll\t%M0, %M0, 32\n\t" \ + "dsll\t%M0, %M1, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source "\n\t" \ ".set\tmips0" \ - : : "r" (val)); \ + : "=&r,r" (__tmp) \ + : "r,0" (val)); \ else \ __asm__ __volatile__( \ ".set\tmips64\n\t" \ - "dsll\t%L0, %L0, 32\n\t" \ + "dsll\t%L0, %L1, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \ - "dsll\t%M0, %M0, 32\n\t" \ + "dsll\t%M0, %M1, 32\n\t" \ "or\t%L0, %L0, %M0\n\t" \ "dmtc0\t%L0, " #source ", " #sel "\n\t" \ ".set\tmips0" \ - : : "r" (val)); \ + : "=&r,r" (__tmp) \ + : "r,0" (val)); \ local_irq_restore(__flags); \ } while (0) @@ -1446,6 
+1460,8 @@ do { \ #define read_c0_conf() __read_32bit_c0_register($3, 0) #define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) +#define read_c0_globalnumber() __read_32bit_c0_register($3, 1) + #define read_c0_context() __read_ulong_c0_register($4, 0) #define write_c0_context(val) __write_ulong_c0_register($4, 0, val) diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index e51add184717f4283ba68b5cc3e446af364e1b2e..06552a965cf4b1c9f848cf831e11d424145439e0 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -114,8 +114,6 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "R5432 " #elif defined CONFIG_CPU_R5500 #define MODULE_PROC_FAMILY "R5500 " -#elif defined CONFIG_CPU_R6000 -#define MODULE_PROC_FAMILY "R6000 " #elif defined CONFIG_CPU_NEVADA #define MODULE_PROC_FAMILY "NEVADA " #elif defined CONFIG_CPU_R8000 diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index e0717d10e650fd0ebcbf734bf7f59ddb0954cc50..a6e6cbebe046e9631055ef3fdaa65ea720b71730 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -84,7 +84,7 @@ nlm_set_nmi_handler(void *handler) */ void nlm_init_boot_cpu(void); unsigned int nlm_get_cpu_frequency(void); -extern struct plat_smp_ops nlm_smp_ops; +extern const struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; /* SWIOTLB */ diff --git a/arch/mips/include/asm/octeon/cvmx-boot-vector.h b/arch/mips/include/asm/octeon/cvmx-boot-vector.h new file mode 100644 index 0000000000000000000000000000000000000000..8db08241d53c54bebc8793df688b7660aaee869d --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-boot-vector.h @@ -0,0 +1,53 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003-2017 Cavium, Inc. + */ + +#ifndef __CVMX_BOOT_VECTOR_H__ +#define __CVMX_BOOT_VECTOR_H__ + +#include + +/* + * The boot vector table is made up of an array of 1024 elements of + * struct cvmx_boot_vector_element. There is one entry for each + * possible MIPS CPUNum, indexed by the CPUNum. + * + * Once cvmx_boot_vector_get() returns a non-NULL value (indicating + * success), NMI to a core will cause execution to transfer to the + * target_ptr location for that core's entry in the vector table. + * + * The struct cvmx_boot_vector_element fields app0, app1, and app2 can + * be used by the application that has set the target_ptr in any + * application specific manner, they are not touched by the vectoring + * code. + * + * The boot vector code clobbers the CP0_DESAVE register, and on + * OCTEON II and later CPUs also clobbers CP0_KScratch2. All GP + * registers are preserved, except on pre-OCTEON II CPUs, where k1 is + * clobbered. + * + */ + + +/* + * Applications install the boot bus code in cvmx-boot-vector.c, which + * uses this magic: + */ +#define OCTEON_BOOT_MOVEABLE_MAGIC1 0xdb00110ad358eacdull + +struct cvmx_boot_vector_element { + /* kseg0 or xkphys address of target code. */ + uint64_t target_ptr; + /* Three application specific arguments. 
*/ + uint64_t app0; + uint64_t app1; + uint64_t app2; +}; + +struct cvmx_boot_vector_element *cvmx_boot_vector_get(void); + +#endif /* __CVMX_BOOT_VECTOR_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h index 374562507d0befef0394316d353340190be9a826..72d2e403a6e4fcc17062a90bd1c7559987a46bb1 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootmem.h +++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h @@ -255,6 +255,34 @@ extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name); +/** + * Allocate if needed a block of memory from a specific range of the + * free list that was passed to the application by the bootloader, and + * assign it a name in the global named block table. (part of the + * cvmx_bootmem_descriptor_t structure) Named blocks can later be + * freed. If the requested name block is already allocated, return + * the pointer to block of memory. If request cannot be satisfied + * within the address range specified, NULL is returned + * + * @param size Size in bytes of block to allocate + * @param min_addr minimum address of range + * @param max_addr maximum address of range + * @param align Alignment of memory to be allocated. (must be a power of 2) + * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * @param init Initialization function + * + * The initialization function is optional, if omitted the named block + * is initialized to all zeros when it is created, i.e. once. + * + * @return pointer to block of memory, NULL on error + */ +void *cvmx_bootmem_alloc_named_range_once(uint64_t size, + uint64_t min_addr, + uint64_t max_addr, + uint64_t align, + char *name, + void (*init) (void *)); + extern int cvmx_bootmem_free_named(char *name); /** diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h index 0dd0e40c96d4d076c3cc51a27f28c33b86bb0edd..6e61792d9248b4af91b0e64978205c463a24aa49 100644 --- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h @@ -128,6 +128,7 @@ static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset) case OCTEON_CN52XX & OCTEON_FAMILY_MASK: case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: case OCTEON_CN61XX & OCTEON_FAMILY_MASK: + case OCTEON_CN70XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: case OCTEON_CN50XX & OCTEON_FAMILY_MASK: @@ -143,6 +144,10 @@ static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset) return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8; + case OCTEON_CNF75XX & OCTEON_FAMILY_MASK: + case OCTEON_CN73XX & OCTEON_FAMILY_MASK: + case OCTEON_CN78XX & OCTEON_FAMILY_MASK: + return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8; } return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; } @@ -180,6 +185,7 @@ static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset) case OCTEON_CN52XX & OCTEON_FAMILY_MASK: case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: case OCTEON_CN61XX & OCTEON_FAMILY_MASK: + case OCTEON_CN70XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: case OCTEON_CN50XX & OCTEON_FAMILY_MASK: @@ -195,6 +201,10 @@ static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset) return 
CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070100100000ull) + (offset) * 8; + case OCTEON_CNF75XX & OCTEON_FAMILY_MASK: + case OCTEON_CN73XX & OCTEON_FAMILY_MASK: + case OCTEON_CN78XX & OCTEON_FAMILY_MASK: + return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8; } return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; } diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index e638735cc3ac56c84fc861f293e6f2217b7ba3cb..205ab2ce10f808cffa420bf8e671643f5df27620 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -357,6 +357,34 @@ static inline unsigned int cvmx_get_local_core_num(void) return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1); } +#define CVMX_NODE_BITS (2) /* Number of bits to define a node */ +#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS) +#define CVMX_NODE_IO_SHIFT (36) +#define CVMX_NODE_MEM_SHIFT (40) +#define CVMX_NODE_IO_MASK ((uint64_t)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT) + +static inline void cvmx_write_csr_node(uint64_t node, uint64_t csr_addr, + uint64_t val) +{ + uint64_t composite_csr_addr, node_addr; + + node_addr = (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT; + composite_csr_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | node_addr; + + cvmx_write64_uint64(composite_csr_addr, val); + if (((csr_addr >> 40) & 0x7ffff) == (0x118)) + cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT | node_addr); +} + +static inline uint64_t cvmx_read_csr_node(uint64_t node, uint64_t csr_addr) +{ + uint64_t node_addr; + + node_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | + (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT; + return cvmx_read_csr(node_addr); +} + /** * Returns the number of bits set in the provided value. * Simple wrapper for POP instruction. diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 07c0516ef4d5331096be4dd45182ed938a9a96d3..c99c4b6a79f44d3c4bbb4d2144d76fe008e0deaf 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -362,4 +362,6 @@ extern void octeon_fixup_irqs(void); extern struct semaphore octeon_bootbus_sem; +struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block); + #endif /* __ASM_OCTEON_OCTEON_H */ diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index db7c322f057f936d9c658498a5e7d397303efb09..53b2cb8e59666c268c634866819ebe43aa401810 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -13,7 +13,7 @@ #include -#include +#include #ifdef CONFIG_SMP @@ -26,7 +26,7 @@ struct plat_smp_ops { void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); void (*init_secondary)(void); void (*smp_finish)(void); - void (*boot_secondary)(int cpu, struct task_struct *idle); + int (*boot_secondary)(int cpu, struct task_struct *idle); void (*smp_setup)(void); void (*prepare_cpus)(unsigned int max_cpus); #ifdef CONFIG_HOTPLUG_CPU @@ -35,11 +35,11 @@ struct plat_smp_ops { #endif }; -extern void register_smp_ops(struct plat_smp_ops *ops); +extern void register_smp_ops(const struct plat_smp_ops *ops); static inline void plat_smp_setup(void) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->smp_setup(); } @@ -57,7 +57,7 @@ static inline void plat_smp_setup(void) /* UP, nothing to do ... 
*/ } -static inline void register_smp_ops(struct plat_smp_ops *ops) +static inline void register_smp_ops(const struct plat_smp_ops *ops) { } @@ -66,7 +66,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) static inline int register_up_smp_ops(void) { #ifdef CONFIG_SMP_UP - extern struct plat_smp_ops up_smp_ops; + extern const struct plat_smp_ops up_smp_ops; register_smp_ops(&up_smp_ops); @@ -79,7 +79,7 @@ static inline int register_up_smp_ops(void) static inline int register_cmp_smp_ops(void) { #ifdef CONFIG_MIPS_CMP - extern struct plat_smp_ops cmp_smp_ops; + extern const struct plat_smp_ops cmp_smp_ops; if (!mips_cm_present()) return -ENODEV; @@ -95,7 +95,7 @@ static inline int register_cmp_smp_ops(void) static inline int register_vsmp_smp_ops(void) { #ifdef CONFIG_MIPS_MT_SMP - extern struct plat_smp_ops vsmp_smp_ops; + extern const struct plat_smp_ops vsmp_smp_ops; register_smp_ops(&vsmp_smp_ops); diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index bab3d41e5987eea7c56af1e08563d65461eb61fb..9e494f8d9c03a13325bc135725ebfec45c01e24c 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -58,7 +58,7 @@ extern void calculate_cpu_foreign_map(void); */ static inline void smp_send_reschedule(int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); } @@ -66,14 +66,14 @@ static inline void smp_send_reschedule(int cpu) #ifdef CONFIG_HOTPLUG_CPU static inline int __cpu_disable(void) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ return mp_ops->cpu_disable(); } static inline void __cpu_die(unsigned int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->cpu_die(cpu); } @@ -97,14 +97,14 @@ int mips_smp_ipi_free(const struct cpumask *mask); static inline void arch_send_call_function_single_ipi(int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); } static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); } diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index eaa5a4d7d5e572aa91a0e08ab059796a270556bc..5d3563c55e0c224bc62d20815568d61953f9902c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -19,20 +19,43 @@ #include #include +/* Make the addition of cfi info a little easier. 
*/ + .macro cfi_rel_offset reg offset=0 docfi=0 + .if \docfi + .cfi_rel_offset \reg, \offset + .endif + .endm + + .macro cfi_st reg offset=0 docfi=0 + LONG_S \reg, \offset(sp) + cfi_rel_offset \reg, \offset, \docfi + .endm + + .macro cfi_restore reg offset=0 docfi=0 + .if \docfi + .cfi_restore \reg + .endif + .endm + + .macro cfi_ld reg offset=0 docfi=0 + LONG_L \reg, \offset(sp) + cfi_restore \reg \offset \docfi + .endm + #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) #define STATMASK 0x3f #else #define STATMASK 0x1f #endif - .macro SAVE_AT + .macro SAVE_AT docfi=0 .set push .set noat - LONG_S $1, PT_R1(sp) + cfi_st $1, PT_R1, \docfi .set pop .endm - .macro SAVE_TEMP + .macro SAVE_TEMP docfi=0 #ifdef CONFIG_CPU_HAS_SMARTMIPS mflhxu v1 LONG_S v1, PT_LO(sp) @@ -44,20 +67,20 @@ mfhi v1 #endif #ifdef CONFIG_32BIT - LONG_S $8, PT_R8(sp) - LONG_S $9, PT_R9(sp) + cfi_st $8, PT_R8, \docfi + cfi_st $9, PT_R9, \docfi #endif - LONG_S $10, PT_R10(sp) - LONG_S $11, PT_R11(sp) - LONG_S $12, PT_R12(sp) + cfi_st $10, PT_R10, \docfi + cfi_st $11, PT_R11, \docfi + cfi_st $12, PT_R12, \docfi #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_HI(sp) mflo v1 #endif - LONG_S $13, PT_R13(sp) - LONG_S $14, PT_R14(sp) - LONG_S $15, PT_R15(sp) - LONG_S $24, PT_R24(sp) + cfi_st $13, PT_R13, \docfi + cfi_st $14, PT_R14, \docfi + cfi_st $15, PT_R15, \docfi + cfi_st $24, PT_R24, \docfi #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_LO(sp) #endif @@ -71,20 +94,28 @@ #endif .endm - .macro SAVE_STATIC - LONG_S $16, PT_R16(sp) - LONG_S $17, PT_R17(sp) - LONG_S $18, PT_R18(sp) - LONG_S $19, PT_R19(sp) - LONG_S $20, PT_R20(sp) - LONG_S $21, PT_R21(sp) - LONG_S $22, PT_R22(sp) - LONG_S $23, PT_R23(sp) - LONG_S $30, PT_R30(sp) + .macro SAVE_STATIC docfi=0 + cfi_st $16, PT_R16, \docfi + cfi_st $17, PT_R17, \docfi + cfi_st $18, PT_R18, \docfi + cfi_st $19, PT_R19, \docfi + cfi_st $20, PT_R20, \docfi + cfi_st $21, PT_R21, \docfi + cfi_st $22, PT_R22, \docfi + cfi_st $23, PT_R23, \docfi + cfi_st $30, PT_R30, \docfi .endm +/* + * get_saved_sp returns the SP for the current CPU by looking in the + * kernelsp array for it. If tosp is set, it stores the current sp in + * k0 and loads the new value in sp. If not, it clobbers k0 and + * stores the new value in k1, leaving sp unaffected. 
+ */ #ifdef CONFIG_SMP - .macro get_saved_sp /* SMP variation */ + + /* SMP variation */ + .macro get_saved_sp docfi=0 tosp=0 ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(kernelsp) @@ -97,7 +128,15 @@ #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 + .if \tosp + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif + LONG_L sp, %lo(kernelsp)(k1) + .else LONG_L k1, %lo(kernelsp)(k1) + .endif .endm .macro set_saved_sp stackp temp temp2 @@ -106,7 +145,8 @@ LONG_S \stackp, kernelsp(\temp) .endm #else /* !CONFIG_SMP */ - .macro get_saved_sp /* Uniprocessor variation */ + /* Uniprocessor variation */ + .macro get_saved_sp docfi=0 tosp=0 #ifdef CONFIG_CPU_JUMP_WORKAROUNDS /* * Clear BTB (branch target buffer), forbid RAS (return address @@ -135,7 +175,15 @@ daddiu k1, %hi(kernelsp) dsll k1, k1, 16 #endif + .if \tosp + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif + LONG_L sp, %lo(kernelsp)(k1) + .else LONG_L k1, %lo(kernelsp)(k1) + .endif .endm .macro set_saved_sp stackp temp temp2 @@ -143,7 +191,7 @@ .endm #endif - .macro SAVE_SOME + .macro SAVE_SOME docfi=0 .set push .set noat .set reorder @@ -151,7 +199,6 @@ sll k0, 3 /* extract cu0 bit */ .set noreorder bltz k0, 8f - move k1, sp #ifdef CONFIG_EVA /* * Flush interAptiv's Return Prediction Stack (RPS) by writing @@ -178,20 +225,26 @@ MTC0 k0, CP0_ENTRYHI #endif .set reorder + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif /* Called from user mode, new stack. */ - get_saved_sp -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS -8: move k0, sp - PTR_SUBU sp, k1, PT_SIZE -#else - .set at=k0 -8: PTR_SUBU k1, PT_SIZE + get_saved_sp docfi=\docfi tosp=1 +8: +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS + .set at=k1 +#endif + PTR_SUBU sp, PT_SIZE +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS .set noat - move k0, sp - move sp, k1 #endif - LONG_S k0, PT_R29(sp) - LONG_S $3, PT_R3(sp) + .if \docfi + .cfi_def_cfa sp,0 + .endif + cfi_st k0, PT_R29, \docfi + cfi_rel_offset sp, PT_R29, \docfi + cfi_st v1, PT_R3, \docfi /* * You might think that you don't need to save $0, * but the FPU emulator and gdb remote debug stub @@ -199,23 +252,26 @@ */ LONG_S $0, PT_R0(sp) mfc0 v1, CP0_STATUS - LONG_S $2, PT_R2(sp) + cfi_st v0, PT_R2, \docfi LONG_S v1, PT_STATUS(sp) - LONG_S $4, PT_R4(sp) + cfi_st $4, PT_R4, \docfi mfc0 v1, CP0_CAUSE - LONG_S $5, PT_R5(sp) + cfi_st $5, PT_R5, \docfi LONG_S v1, PT_CAUSE(sp) - LONG_S $6, PT_R6(sp) - MFC0 v1, CP0_EPC - LONG_S $7, PT_R7(sp) + cfi_st $6, PT_R6, \docfi + cfi_st ra, PT_R31, \docfi + MFC0 ra, CP0_EPC + cfi_st $7, PT_R7, \docfi #ifdef CONFIG_64BIT - LONG_S $8, PT_R8(sp) - LONG_S $9, PT_R9(sp) + cfi_st $8, PT_R8, \docfi + cfi_st $9, PT_R9, \docfi #endif - LONG_S v1, PT_EPC(sp) - LONG_S $25, PT_R25(sp) - LONG_S $28, PT_R28(sp) - LONG_S $31, PT_R31(sp) + LONG_S ra, PT_EPC(sp) + .if \docfi + .cfi_rel_offset ra, PT_EPC + .endif + cfi_st $25, PT_R25, \docfi + cfi_st $28, PT_R28, \docfi /* Set thread_info if we're coming from user mode */ mfc0 k0, CP0_STATUS @@ -232,21 +288,21 @@ .set pop .endm - .macro SAVE_ALL - SAVE_SOME - SAVE_AT - SAVE_TEMP - SAVE_STATIC + .macro SAVE_ALL docfi=0 + SAVE_SOME \docfi + SAVE_AT \docfi + SAVE_TEMP \docfi + SAVE_STATIC \docfi .endm - .macro RESTORE_AT + .macro RESTORE_AT docfi=0 .set push .set noat - LONG_L $1, PT_R1(sp) + cfi_ld $1, PT_R1, \docfi .set pop .endm - .macro RESTORE_TEMP + .macro RESTORE_TEMP docfi=0 #ifdef CONFIG_CPU_CAVIUM_OCTEON /* Restore the Octeon multiplier state */ jal octeon_mult_restore @@ -265,33 +321,37 @@ mthi $24 
#endif #ifdef CONFIG_32BIT - LONG_L $8, PT_R8(sp) - LONG_L $9, PT_R9(sp) + cfi_ld $8, PT_R8, \docfi + cfi_ld $9, PT_R9, \docfi #endif - LONG_L $10, PT_R10(sp) - LONG_L $11, PT_R11(sp) - LONG_L $12, PT_R12(sp) - LONG_L $13, PT_R13(sp) - LONG_L $14, PT_R14(sp) - LONG_L $15, PT_R15(sp) - LONG_L $24, PT_R24(sp) + cfi_ld $10, PT_R10, \docfi + cfi_ld $11, PT_R11, \docfi + cfi_ld $12, PT_R12, \docfi + cfi_ld $13, PT_R13, \docfi + cfi_ld $14, PT_R14, \docfi + cfi_ld $15, PT_R15, \docfi + cfi_ld $24, PT_R24, \docfi .endm - .macro RESTORE_STATIC - LONG_L $16, PT_R16(sp) - LONG_L $17, PT_R17(sp) - LONG_L $18, PT_R18(sp) - LONG_L $19, PT_R19(sp) - LONG_L $20, PT_R20(sp) - LONG_L $21, PT_R21(sp) - LONG_L $22, PT_R22(sp) - LONG_L $23, PT_R23(sp) - LONG_L $30, PT_R30(sp) + .macro RESTORE_STATIC docfi=0 + cfi_ld $16, PT_R16, \docfi + cfi_ld $17, PT_R17, \docfi + cfi_ld $18, PT_R18, \docfi + cfi_ld $19, PT_R19, \docfi + cfi_ld $20, PT_R20, \docfi + cfi_ld $21, PT_R21, \docfi + cfi_ld $22, PT_R22, \docfi + cfi_ld $23, PT_R23, \docfi + cfi_ld $30, PT_R30, \docfi + .endm + + .macro RESTORE_SP docfi=0 + cfi_ld sp, PT_R29, \docfi .endm #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) - .macro RESTORE_SOME + .macro RESTORE_SOME docfi=0 .set push .set reorder .set noat @@ -306,30 +366,30 @@ and v0, v1 or v0, a0 mtc0 v0, CP0_STATUS - LONG_L $31, PT_R31(sp) - LONG_L $28, PT_R28(sp) - LONG_L $25, PT_R25(sp) - LONG_L $7, PT_R7(sp) - LONG_L $6, PT_R6(sp) - LONG_L $5, PT_R5(sp) - LONG_L $4, PT_R4(sp) - LONG_L $3, PT_R3(sp) - LONG_L $2, PT_R2(sp) + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $3, PT_R3, \docfi + cfi_ld $2, PT_R2, \docfi .set pop .endm - .macro RESTORE_SP_AND_RET + .macro RESTORE_SP_AND_RET docfi=0 .set push .set noreorder LONG_L k0, PT_EPC(sp) - LONG_L sp, PT_R29(sp) + RESTORE_SP \docfi jr k0 rfe .set pop .endm #else - .macro RESTORE_SOME + .macro RESTORE_SOME docfi=0 .set push .set reorder .set noat @@ -346,24 +406,24 @@ mtc0 v0, CP0_STATUS LONG_L v1, PT_EPC(sp) MTC0 v1, CP0_EPC - LONG_L $31, PT_R31(sp) - LONG_L $28, PT_R28(sp) - LONG_L $25, PT_R25(sp) + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi #ifdef CONFIG_64BIT - LONG_L $8, PT_R8(sp) - LONG_L $9, PT_R9(sp) + cfi_ld $8, PT_R8, \docfi + cfi_ld $9, PT_R9, \docfi #endif - LONG_L $7, PT_R7(sp) - LONG_L $6, PT_R6(sp) - LONG_L $5, PT_R5(sp) - LONG_L $4, PT_R4(sp) - LONG_L $3, PT_R3(sp) - LONG_L $2, PT_R2(sp) + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $3, PT_R3, \docfi + cfi_ld $2, PT_R2, \docfi .set pop .endm - .macro RESTORE_SP_AND_RET - LONG_L sp, PT_R29(sp) + .macro RESTORE_SP_AND_RET docfi=0 + RESTORE_SP \docfi #ifdef CONFIG_CPU_MIPSR6 eretnc #else @@ -375,16 +435,12 @@ #endif - .macro RESTORE_SP - LONG_L sp, PT_R29(sp) - .endm - - .macro RESTORE_ALL - RESTORE_TEMP - RESTORE_STATIC - RESTORE_AT - RESTORE_SOME - RESTORE_SP + .macro RESTORE_ALL docfi=0 + RESTORE_TEMP \docfi + RESTORE_STATIC \docfi + RESTORE_AT \docfi + RESTORE_SOME \docfi + RESTORE_SP \docfi .endm /* diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h index 780ee2c2a2ac54afb0f4adad265387cfe2285f7d..10c4e9c84448cc098e77f4ecf0aa44bfb5f9b365 100644 --- a/arch/mips/include/asm/stacktrace.h +++ b/arch/mips/include/asm/stacktrace.h @@ -2,6 +2,8 @@ #define _ASM_STACKTRACE_H 
#include +#include +#include #ifdef CONFIG_KALLSYMS extern int raw_show_trace; @@ -20,6 +22,14 @@ static inline unsigned long unwind_stack(struct task_struct *task, } #endif +#define STR_PTR_LA __stringify(PTR_LA) +#define STR_LONG_S __stringify(LONG_S) +#define STR_LONG_L __stringify(LONG_L) +#define STR_LONGSIZE __stringify(LONGSIZE) + +#define STORE_ONE_REG(r) \ + STR_LONG_S " $" __stringify(r)",("STR_LONGSIZE"*"__stringify(r)")(%1)\n\t" + static __always_inline void prepare_frametrace(struct pt_regs *regs) { #ifndef CONFIG_KALLSYMS @@ -32,21 +42,47 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) __asm__ __volatile__( ".set push\n\t" ".set noat\n\t" -#ifdef CONFIG_64BIT - "1: dla $1, 1b\n\t" - "sd $1, %0\n\t" - "sd $29, %1\n\t" - "sd $31, %2\n\t" -#else - "1: la $1, 1b\n\t" - "sw $1, %0\n\t" - "sw $29, %1\n\t" - "sw $31, %2\n\t" -#endif + /* Store $1 so we can use it */ + STR_LONG_S " $1,"STR_LONGSIZE"(%1)\n\t" + /* Store the PC */ + "1: " STR_PTR_LA " $1, 1b\n\t" + STR_LONG_S " $1,%0\n\t" + STORE_ONE_REG(2) + STORE_ONE_REG(3) + STORE_ONE_REG(4) + STORE_ONE_REG(5) + STORE_ONE_REG(6) + STORE_ONE_REG(7) + STORE_ONE_REG(8) + STORE_ONE_REG(9) + STORE_ONE_REG(10) + STORE_ONE_REG(11) + STORE_ONE_REG(12) + STORE_ONE_REG(13) + STORE_ONE_REG(14) + STORE_ONE_REG(15) + STORE_ONE_REG(16) + STORE_ONE_REG(17) + STORE_ONE_REG(18) + STORE_ONE_REG(19) + STORE_ONE_REG(20) + STORE_ONE_REG(21) + STORE_ONE_REG(22) + STORE_ONE_REG(23) + STORE_ONE_REG(24) + STORE_ONE_REG(25) + STORE_ONE_REG(26) + STORE_ONE_REG(27) + STORE_ONE_REG(28) + STORE_ONE_REG(29) + STORE_ONE_REG(30) + STORE_ONE_REG(31) + /* Restore $1 */ + STR_LONG_L " $1,"STR_LONGSIZE"(%1)\n\t" ".set pop\n\t" - : "=m" (regs->cp0_epc), - "=m" (regs->regs[29]), "=m" (regs->regs[31]) - : : "memory"); + : "=m" (regs->cp0_epc) + : "r" (regs->regs) + : "memory"); } #endif /* _ASM_STACKTRACE_H */ diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 7afda4150a59d928537cddd33e8ed0105d90629b..0673d2d0f2e6dd02ed14d650e5af7b8a3c162b6f 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h @@ -13,7 +13,7 @@ #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) (cpu_data[cpu].package) -#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_id(cpu) (cpu_core(&cpu_data[cpu])) #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) #define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu]) #endif diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index d618975359263fd4262de42f3117980175d6533b..6abea5183d7ca160688089cc92741f872de15c4f 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -981,7 +981,7 @@ struct mm16_r3_format { /* Load from global pointer format */ struct mm16_r5_format { /* Load/store from stack pointer format */ __BITFIELD_FIELD(unsigned int opcode : 6, __BITFIELD_FIELD(unsigned int rt : 5, - __BITFIELD_FIELD(signed int simmediate : 5, + __BITFIELD_FIELD(unsigned int imm : 5, __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;)))) }; diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 46c0581256f1d7d226b7b49f2968966d4d39ed49..07f0f4a4b56236cead0ce920035d45922ccdeea8 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -35,11 +35,15 @@ obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o -obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o 
-obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o -obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o -obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o +sw-y := r4k_switch.o +sw-$(CONFIG_CPU_R3000) := r2300_switch.o +sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o +sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o +obj-y += $(sw-y) + +obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o +obj-$(CONFIG_CPU_R3000) += r2300_fpu.o +obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index b849fe6aad941bef458f3b3c7ffe1d6f29d46173..d173b49f212daf65701b9b66e9a110745d091150 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -327,8 +327,8 @@ LEAF(mips_cps_get_bootcfg) * to handle contiguous VP numbering, but no such systems yet * exist. */ - mfc0 t9, $3, 1 - andi t9, t9, 0xff + mfc0 t9, CP0_GLOBALNUMBER + andi t9, t9, MIPS_GLOBALNUMBER_VP #elif defined(CONFIG_MIPS_MT_SMP) has_mt ta2, 1f diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index d08afc7dc5072db9c55d4ca056a4422c03ab927e..cf3fd549e16d05a6e666ee8d96e4bde06878c650 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -326,7 +326,7 @@ static int __init fpu_disable(char *s) __setup("nofpu", fpu_disable); -int mips_dsp_disabled; +static int mips_dsp_disabled; static int __init dsp_disable(char *s) { @@ -919,9 +919,12 @@ static void decode_configs(struct cpuinfo_mips *c) #ifndef CONFIG_MIPS_CPS if (cpu_has_mips_r2_r6) { - c->core = get_ebase_cpunum(); + unsigned int core; + + core = get_ebase_cpunum(); if (cpu_has_mipsmt) - c->core >>= fls(core_nvpes()) - 1; + core >>= fls(core_nvpes()) - 1; + cpu_set_core(c, core); } #endif } @@ -1394,24 +1397,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_DIVEC | MIPS_CPU_LLSC; c->tlbsize = 48; break; - case PRID_IMP_R6000: - c->cputype = CPU_R6000; - __cpu_name[cpu] = "R6000"; - set_isa(c, MIPS_CPU_ISA_II); - c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; - c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | - MIPS_CPU_LLSC; - c->tlbsize = 32; - break; - case PRID_IMP_R6000A: - c->cputype = CPU_R6000A; - __cpu_name[cpu] = "R6000A"; - set_isa(c, MIPS_CPU_ISA_II); - c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; - c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | - MIPS_CPU_LLSC; - c->tlbsize = 32; - break; case PRID_IMP_RM7000: c->cputype = CPU_RM7000; __cpu_name[cpu] = "RM7000"; @@ -2113,3 +2098,35 @@ void cpu_report(void) if (cpu_has_msa) pr_info("MSA revision is: %08x\n", c->msa_id); } + +void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster) +{ + /* Ensure the core number fits in the field */ + WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >> + MIPS_GLOBALNUMBER_CLUSTER_SHF)); + + cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER; + cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF; +} + +void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core) +{ + /* Ensure the core number fits in the field */ + WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF)); + + cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE; + cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF; +} + +void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe) +{ + /* Ensure the VP(E) ID fits in the field */ + WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF)); + + /* Ensure we're not 
using VP(E)s without support */ + WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) && + !IS_ENABLED(CONFIG_CPU_MIPSR6)); + + cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP; + cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF; +} diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 5c429d70e17f6f24cbcfd0fa912c67eccd11f28f..0828d6d963b7294291203500caa95592d962981c 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -87,6 +87,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, bool elf32; u32 flags; int ret; + loff_t pos; elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32; flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags; @@ -108,21 +109,16 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, if (phdr32->p_filesz < sizeof(abiflags)) return -EINVAL; - - ret = kernel_read(elf, phdr32->p_offset, - (char *)&abiflags, - sizeof(abiflags)); + pos = phdr32->p_offset; } else { if (phdr64->p_type != PT_MIPS_ABIFLAGS) return 0; if (phdr64->p_filesz < sizeof(abiflags)) return -EINVAL; - - ret = kernel_read(elf, phdr64->p_offset, - (char *)&abiflags, - sizeof(abiflags)); + pos = phdr64->p_offset; } + ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos); if (ret < 0) return ret; if (ret != sizeof(abiflags)) diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index ae810da4d499e6282638d8edadb722078b5f04cb..37b9383eacd3e84872b1d2d7d1361cf1b637522f 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -150,6 +150,7 @@ LEAF(__r4k_wait) .align 5 BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) + .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS /* * Check to see if the interrupted code has just disabled @@ -181,7 +182,7 @@ NESTED(handle_int, PT_SIZE, sp) 1: .set pop #endif - SAVE_ALL + SAVE_ALL docfi=1 CLI TRACE_IRQS_OFF @@ -269,8 +270,8 @@ NESTED(except_vec_ejtag_debug, 0, sp) */ BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) - SAVE_SOME - SAVE_AT + SAVE_SOME docfi=1 + SAVE_AT docfi=1 .set push .set noreorder PTR_LA v1, except_vec_vi_handler @@ -396,6 +397,7 @@ NESTED(except_vec_nmi, 0, sp) __FINIT NESTED(nmi_handler, PT_SIZE, sp) + .cfi_signal_frame .set push .set noat /* @@ -478,6 +480,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .macro __BUILD_HANDLER exception handler clear verbose ext .align 5 NESTED(handle_\exception, PT_SIZE, sp) + .cfi_signal_frame .set noat SAVE_ALL FEXPORT(handle_\exception\ext) @@ -485,8 +488,8 @@ NESTED(nmi_handler, PT_SIZE, sp) .set at __BUILD_\verbose \exception move a0, sp - PTR_LA ra, ret_from_exception - j do_\handler + jal do_\handler + j ret_from_exception END(handle_\exception) .endm diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 60ab4c44d30557b285a90cc28e7b8ae1dd2ca21a..7c246b69c5458344fb1dc7b03c79d54fef53f528 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -11,6 +11,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ +#include #include #include #include diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index cb0c57f860d468fab42cc60aefc80c1c1efe161a..e91c8c4e2eb59e69c76c516bd3fbc99f23f21c8b 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -12,10 +12,10 @@ #include #include -#include +#include #include -void __iomem *mips_cm_base; +void __iomem *mips_gcr_base; void __iomem *mips_cm_l2sync_base; int mips_cm_is64; @@ -167,8 +167,8 @@ phys_addr_t __mips_cm_l2sync_phys_base(void) * current location. */ base_reg = read_gcr_l2_only_sync_base(); - if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK) - return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK; + if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN) + return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE; /* Default to following the CM */ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; @@ -183,19 +183,19 @@ static void mips_cm_probe_l2sync(void) phys_addr_t addr; /* L2-only sync was introduced with CM major revision 6 */ - major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >> - CM_GCR_REV_MAJOR_SHF; + major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >> + __ffs(CM_GCR_REV_MAJOR); if (major_rev < 6) return; /* Find a location for the L2 sync region */ addr = mips_cm_l2sync_phys_base(); - BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr); + BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr); if (!addr) return; /* Set the region base address & enable it */ - write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK); + write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN); /* Map the region */ mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE); @@ -211,41 +211,39 @@ int mips_cm_probe(void) * No need to probe again if we have already been * here before. 
*/ - if (mips_cm_base) + if (mips_gcr_base) return 0; addr = mips_cm_phys_base(); - BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr); + BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr); if (!addr) return -ENODEV; - mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE); - if (!mips_cm_base) + mips_gcr_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE); + if (!mips_gcr_base) return -ENXIO; /* sanity check that we're looking at a CM */ base_reg = read_gcr_base(); - if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) { + if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) { pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n", (unsigned long)addr); - mips_cm_base = NULL; + mips_gcr_base = NULL; return -ENODEV; } /* set default target to memory */ - base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK; - base_reg |= CM_GCR_BASE_CMDEFTGT_MEM; - write_gcr_base(base_reg); + change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM); /* disable CM regions */ - write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); - write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); - write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); - write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK); - write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); + write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK); /* probe for an L2-only sync region */ mips_cm_probe_l2sync(); @@ -259,16 +257,27 @@ int mips_cm_probe(void) return 0; } -void mips_cm_lock_other(unsigned int core, unsigned int vp) +void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block) { - unsigned curr_core; + unsigned int curr_core, cm_rev; u32 val; + cm_rev = mips_cm_revision(); preempt_disable(); - if (mips_cm_revision() >= CM_REV_CM3) { - val = core << CM3_GCR_Cx_OTHER_CORE_SHF; - val |= vp << CM3_GCR_Cx_OTHER_VP_SHF; + if (cm_rev >= CM_REV_CM3) { + val = core << __ffs(CM3_GCR_Cx_OTHER_CORE); + val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP); + + if (cm_rev >= CM_REV_CM3_5) { + val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER); + val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK); + } else { + WARN_ON(cluster != 0); + WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); + } /* * We need to disable interrupts in SMP systems in order to @@ -282,18 +291,20 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp) spin_lock_irqsave(this_cpu_ptr(&cm_core_lock), *this_cpu_ptr(&cm_core_lock_flags)); } else { + WARN_ON(cluster != 0); WARN_ON(vp != 0); + WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* * We only have a GCR_CL_OTHER per core in systems with * CM 2.5 & older, so have to ensure other VP(E)s don't * race with us. 
*/ - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), per_cpu(cm_core_lock_flags, curr_core)); - val = core << CM_GCR_Cx_OTHER_CORENUM_SHF; + val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM); } write_gcr_cl_other(val); @@ -310,7 +321,7 @@ void mips_cm_unlock_other(void) unsigned int curr_core; if (mips_cm_revision() < CM_REV_CM3) { - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), per_cpu(cm_core_lock_flags, curr_core)); } else { @@ -332,13 +343,13 @@ void mips_cm_error_report(void) return; revision = mips_cm_revision(); + cm_error = read_gcr_error_cause(); + cm_addr = read_gcr_error_addr(); + cm_other = read_gcr_error_mult(); if (revision < CM_REV_CM3) { /* CM2 */ - cm_error = read_gcr_error_cause(); - cm_addr = read_gcr_error_addr(); - cm_other = read_gcr_error_mult(); - cause = cm_error >> CM_GCR_ERROR_CAUSE_ERRTYPE_SHF; - ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF; + cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE); + ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND); if (!cause) return; @@ -380,11 +391,8 @@ void mips_cm_error_report(void) ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits; ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit; - cm_error = read64_gcr_error_cause(); - cm_addr = read64_gcr_error_addr(); - cm_other = read64_gcr_error_mult(); - cause = cm_error >> CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF; - ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF; + cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE); + ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND); if (!cause) return; diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index a4964c334cab66eba2e41e34c25db281a1729555..f66b05ebf637d4c6542df58c4d510e16b88a2b8c 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -12,8 +12,7 @@ #include #include -#include -#include +#include void __iomem *mips_cpc_base; @@ -40,13 +39,13 @@ static phys_addr_t mips_cpc_phys_base(void) if (!mips_cm_present()) return 0; - if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK)) + if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX)) return 0; /* If the CPC is already enabled, leave it so */ cpc_base = read_gcr_cpc_base(); - if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK) - return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK; + if (cpc_base & CM_GCR_CPC_BASE_CPCEN) + return cpc_base & CM_GCR_CPC_BASE_CPCBASE; /* Otherwise, use the default address */ cpc_base = mips_cpc_default_phys_base(); @@ -54,7 +53,7 @@ static phys_addr_t mips_cpc_phys_base(void) return cpc_base; /* Enable the CPC, mapped at the default address */ - write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK); + write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN); return cpc_base; } @@ -86,10 +85,10 @@ void mips_cpc_lock_other(unsigned int core) return; preempt_disable(); - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), per_cpu(cpc_core_lock_flags, curr_core)); - write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); + write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM)); /* * Ensure the core-other region reflects the appropriate core & @@ -106,7 +105,7 @@ void mips_cpc_unlock_other(void) /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */ return; - curr_core = current_cpu_data.core; + curr_core = cpu_core(¤t_cpu_data); spin_unlock_irqrestore(&per_cpu(cpc_core_lock, 
curr_core), per_cpu(cpc_core_lock_flags, curr_core)); preempt_enable(); diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index ae64c8f56a8c788450876bd368bc6dba8cab54a7..eb18b186e858c6bec7113785beca3a8a905fdb0e 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -46,9 +46,11 @@ #define LL "ll " #define SC "sc " -DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); -DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); -DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); +#ifdef CONFIG_DEBUG_FS +static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); +static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); +static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); +#endif extern const unsigned int fpucondbit[8]; @@ -600,7 +602,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir) } /* R6 removed instructions for the SPECIAL opcode */ -static struct r2_decoder_table spec_op_table[] = { +static const struct r2_decoder_table spec_op_table[] = { { 0xfc1ff83f, 0x00000008, jr_func }, { 0xfc00ffff, 0x00000018, mult_func }, { 0xfc00ffff, 0x00000019, multu_func }, @@ -867,7 +869,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir) } /* R6 removed instructions for the SPECIAL2 opcode */ -static struct r2_decoder_table spec2_op_table[] = { +static const struct r2_decoder_table spec2_op_table[] = { { 0xfc00ffff, 0x70000000, madd_func }, { 0xfc00ffff, 0x70000001, maddu_func }, { 0xfc0007ff, 0x70000002, mul_func }, @@ -881,9 +883,9 @@ static struct r2_decoder_table spec2_op_table[] = { }; static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, - struct r2_decoder_table *table) + const struct r2_decoder_table *table) { - struct r2_decoder_table *p; + const struct r2_decoder_table *p; int err; for (p = table; p->func; p++) { diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 3375745b91980013d76ad2e7d9cd632d31b26af9..e42113fe2762b5e8298f40a64d0d635799334d5d 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -10,12 +10,13 @@ * Copyright (C) 2000 MIPS Technologies, Inc. 
* written by Carsten Langgaard, carstenl@mips.com */ +#include +#include +#include +#include +#include +#include -#define USE_ALTERNATE_RESUME_IMPL 1 - .set push - .set arch=mips64r2 -#include "r4k_switch.S" - .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 9e6c74bf66c485cc3c1a23ee1d298c1b6f81dad5..6668f67a61c3a2bb09fc23d9e2172d7a738020e1 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event) return -ENOENT; } - if ((unsigned int)event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) + if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV; if (!atomic_inc_not_zero(&active_events)) { diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index d99416094ba964a356bfdf83946ac945f9d600d5..4655017f2377d0246cc5c9604eba39bbc98366c2 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -17,8 +17,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -49,7 +48,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], nc_asm_enter); /* Bitmap indicating which states are supported by the system */ -DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); +static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); /* * Indicates the number of coupled VPEs ready to operate in a non-coherent @@ -114,7 +113,7 @@ static void coupled_barrier(atomic_t *a, unsigned online) int cps_pm_enter_state(enum cps_pm_state state) { unsigned cpu = smp_processor_id(); - unsigned core = current_cpu_data.core; + unsigned core = cpu_core(¤t_cpu_data); unsigned online, left; cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); u32 *core_ready_count, *nc_core_ready_count; @@ -486,7 +485,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * defined by the interAptiv & proAptiv SUMs as ensuring that the * operation resulting from the preceding store is complete. */ - uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); + uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu])); uasm_i_sw(&p, t0, 0, r_pcohctl); uasm_i_lw(&p, t0, 0, r_pcohctl); @@ -569,8 +568,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * rest will just be performing a rather unusual nop. */ uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3 - ? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK - : CM3_GCR_Cx_COHERENCE_COHEN_MSK); + ? 
CM_GCR_Cx_COHERENCE_COHDOMAINEN + : CM3_GCR_Cx_COHERENCE_COHEN); uasm_i_sw(&p, t0, 0, r_pcohctl); uasm_i_lw(&p, t0, 0, r_pcohctl); @@ -640,7 +639,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) static int cps_pm_online_cpu(unsigned int cpu) { enum cps_pm_state state; - unsigned core = cpu_data[cpu].core; + unsigned core = cpu_core(&cpu_data[cpu]); void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { @@ -692,7 +691,7 @@ static int __init cps_pm_init(void) /* Detect whether a CPC is present */ if (mips_cpc_present()) { /* Detect whether clock gating is implemented */ - if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) + if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL) set_bit(CPS_PM_CLOCK_GATED, state_support); else pr_warn("pm-cps: CPC does not support clock gating\n"); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 70604c753aa4e3175087193fb74d2a65a07f098d..bd9bf528f19bae31896568ce00c5b41c1694af14 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -134,13 +134,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "kscratch registers\t: %d\n", hweight8(cpu_data[n].kscratch_mask)); seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); - seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); + seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n])); #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) if (cpu_has_mipsmt) - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); + seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); else if (cpu_has_vp) - seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id); + seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); #endif sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 5351e1f3950d158aaa5ff2590c32734862a79834..c5ff6bfe2825b95bdbf5042ed636ac71b3820615 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -208,13 +208,13 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) * * microMIPS is way more fun... */ - if (mm_insn_16bit(ip->halfword[1])) { + if (mm_insn_16bit(ip->word >> 16)) { switch (ip->mm16_r5_format.opcode) { case mm_swsp16_op: if (ip->mm16_r5_format.rt != 31) return 0; - *poff = ip->mm16_r5_format.simmediate; + *poff = ip->mm16_r5_format.imm; *poff = (*poff << 2) / sizeof(ulong); return 1; @@ -287,7 +287,7 @@ static inline int is_jump_ins(union mips_instruction *ip) * * microMIPS is kind of more fun... */ - if (mm_insn_16bit(ip->halfword[1])) { + if (mm_insn_16bit(ip->word >> 16)) { if ((ip->mm16_r5_format.opcode == mm_pool16c_op && (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) return 1; @@ -313,9 +313,11 @@ static inline int is_jump_ins(union mips_instruction *ip) #endif } -static inline int is_sp_move_ins(union mips_instruction *ip) +static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) { #ifdef CONFIG_CPU_MICROMIPS + unsigned short tmp; + /* * addiusp -imm * addius5 sp,-imm @@ -324,21 +326,40 @@ static inline int is_sp_move_ins(union mips_instruction *ip) * * microMIPS is not more fun... 
*/ - if (mm_insn_16bit(ip->halfword[1])) { - return (ip->mm16_r3_format.opcode == mm_pool16d_op && - ip->mm16_r3_format.simmediate && mm_addiusp_func) || - (ip->mm16_r5_format.opcode == mm_pool16d_op && - ip->mm16_r5_format.rt == 29); + if (mm_insn_16bit(ip->word >> 16)) { + if (ip->mm16_r3_format.opcode == mm_pool16d_op && + ip->mm16_r3_format.simmediate & mm_addiusp_func) { + tmp = ip->mm_b0_format.simmediate >> 1; + tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100; + if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */ + tmp ^= 0x100; + *frame_size = -(signed short)(tmp << 2); + return 1; + } + if (ip->mm16_r5_format.opcode == mm_pool16d_op && + ip->mm16_r5_format.rt == 29) { + tmp = ip->mm16_r5_format.imm >> 1; + *frame_size = -(signed short)(tmp & 0xf); + return 1; + } + return 0; } - return ip->mm_i_format.opcode == mm_addiu32_op && - ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; + if (ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) { + *frame_size = -ip->i_format.simmediate; + return 1; + } #else /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) return 0; - if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) + + if (ip->i_format.opcode == addiu_op || + ip->i_format.opcode == daddiu_op) { + *frame_size = -ip->i_format.simmediate; return 1; + } #endif return 0; } @@ -348,7 +369,9 @@ static int get_frame_info(struct mips_frame_info *info) bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); union mips_instruction insn, *ip, *ip_end; const unsigned int max_insns = 128; + unsigned int last_insn_size = 0; unsigned int i; + bool saw_jump = false; info->pc_offset = -1; info->frame_size = 0; @@ -359,47 +382,44 @@ static int get_frame_info(struct mips_frame_info *info) ip_end = (void *)ip + info->func_size; - for (i = 0; i < max_insns && ip < ip_end; i++, ip++) { + for (i = 0; i < max_insns && ip < ip_end; i++) { + ip = (void *)ip + last_insn_size; if (is_mmips && mm_insn_16bit(ip->halfword[0])) { - insn.halfword[0] = 0; - insn.halfword[1] = ip->halfword[0]; + insn.word = ip->halfword[0] << 16; + last_insn_size = 2; } else if (is_mmips) { - insn.halfword[0] = ip->halfword[1]; - insn.halfword[1] = ip->halfword[0]; + insn.word = ip->halfword[0] << 16 | ip->halfword[1]; + last_insn_size = 4; } else { insn.word = ip->word; + last_insn_size = 4; } - if (is_jump_ins(&insn)) - break; - if (!info->frame_size) { - if (is_sp_move_ins(&insn)) - { -#ifdef CONFIG_CPU_MICROMIPS - if (mm_insn_16bit(ip->halfword[0])) - { - unsigned short tmp; - - if (ip->halfword[0] & mm_addiusp_func) - { - tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2); - info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0)); - } else { - tmp = (ip->halfword[0] >> 1); - info->frame_size = -(signed short)(tmp & 0xf); - } - ip = (void *) &ip->halfword[1]; - ip--; - } else -#endif - info->frame_size = - ip->i_format.simmediate; - } + is_sp_move_ins(&insn, &info->frame_size); + continue; + } else if (!saw_jump && is_jump_ins(ip)) { + /* + * If we see a jump instruction, we are finished + * with the frame save. + * + * Some functions can have a shortcut return at + * the beginning of the function, so don't start + * looking for jump instruction until we see the + * frame setup. + * + * The RA save instruction can get put into the + * delay slot of the jump instruction, so look + * at the next instruction, too. 
+ */ + saw_jump = true; continue; } if (info->pc_offset == -1 && is_ra_save_ins(&insn, &info->pc_offset)) break; + if (saw_jump) + break; } if (info->frame_size && info->pc_offset >= 0) /* nested */ return 0; diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 918f2f6d3861a87dccc85f5256bf9b89cc1d56d3..3062ba66c563549010d02682118807abb34f41f4 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -12,7 +12,9 @@ * Copyright (c) 1998 Harald Koerfgen */ #include +#include #include +#include #include #include #include @@ -31,9 +33,85 @@ PTR 9b+4,bad_stack; \ .previous - .set noreorder .set mips1 +/* + * Save a thread's fp context. + */ +LEAF(_save_fp) +EXPORT_SYMBOL(_save_fp) + fpu_save_single a0, t1 # clobbers t1 + jr ra + END(_save_fp) + +/* + * Restore a thread's fp context. + */ +LEAF(_restore_fp) + fpu_restore_single a0, t1 # clobbers t1 + jr ra + END(_restore_fp) + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * The value to initialize fcr31 to comes in $a0. + */ + + .set push + SET_HARDFLOAT + +LEAF(_init_fpu) + mfc0 t0, CP0_STATUS + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_STATUS + + ctc1 a0, fcr31 + + li t0, -1 + + mtc1 t0, $f0 + mtc1 t0, $f1 + mtc1 t0, $f2 + mtc1 t0, $f3 + mtc1 t0, $f4 + mtc1 t0, $f5 + mtc1 t0, $f6 + mtc1 t0, $f7 + mtc1 t0, $f8 + mtc1 t0, $f9 + mtc1 t0, $f10 + mtc1 t0, $f11 + mtc1 t0, $f12 + mtc1 t0, $f13 + mtc1 t0, $f14 + mtc1 t0, $f15 + mtc1 t0, $f16 + mtc1 t0, $f17 + mtc1 t0, $f18 + mtc1 t0, $f19 + mtc1 t0, $f20 + mtc1 t0, $f21 + mtc1 t0, $f22 + mtc1 t0, $f23 + mtc1 t0, $f24 + mtc1 t0, $f25 + mtc1 t0, $f26 + mtc1 t0, $f27 + mtc1 t0, $f28 + mtc1 t0, $f29 + mtc1 t0, $f30 + mtc1 t0, $f31 + jr ra + END(_init_fpu) + + .set pop + + .set noreorder + /** * _save_fp_context() - save FP context from the FPU * @a0 - pointer to fpregs field of sigcontext diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 1049eeafd97d79ac8198e2666987b69f2deb36ba..e57703b1de50264814ce3a96483721b2556c77b2 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -25,12 +25,6 @@ .set mips1 .align 5 -/* - * Offset to the current process status flags, the first 32 bytes of the - * stack are not used. - */ -#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) - /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) @@ -68,78 +62,3 @@ LEAF(resume) move v0, a0 jr ra END(resume) - -/* - * Save a thread's fp context. - */ -LEAF(_save_fp) -EXPORT_SYMBOL(_save_fp) - fpu_save_single a0, t1 # clobbers t1 - jr ra - END(_save_fp) - -/* - * Restore a thread's fp context. - */ -LEAF(_restore_fp) - fpu_restore_single a0, t1 # clobbers t1 - jr ra - END(_restore_fp) - -/* - * Load the FPU with signalling NANS. This bit pattern we're using has - * the property that no matter whether considered as single or as double - * precision represents signaling NANS. - * - * The value to initialize fcr31 to comes in $a0. 
- */ - - .set push - SET_HARDFLOAT - -LEAF(_init_fpu) - mfc0 t0, CP0_STATUS - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_STATUS - - ctc1 a0, fcr31 - - li t0, -1 - - mtc1 t0, $f0 - mtc1 t0, $f1 - mtc1 t0, $f2 - mtc1 t0, $f3 - mtc1 t0, $f4 - mtc1 t0, $f5 - mtc1 t0, $f6 - mtc1 t0, $f7 - mtc1 t0, $f8 - mtc1 t0, $f9 - mtc1 t0, $f10 - mtc1 t0, $f11 - mtc1 t0, $f12 - mtc1 t0, $f13 - mtc1 t0, $f14 - mtc1 t0, $f15 - mtc1 t0, $f16 - mtc1 t0, $f17 - mtc1 t0, $f18 - mtc1 t0, $f19 - mtc1 t0, $f20 - mtc1 t0, $f21 - mtc1 t0, $f22 - mtc1 t0, $f23 - mtc1 t0, $f24 - mtc1 t0, $f25 - mtc1 t0, $f26 - mtc1 t0, $f27 - mtc1 t0, $f28 - mtc1 t0, $f29 - mtc1 t0, $f30 - mtc1 t0, $f31 - jr ra - END(_init_fpu) - - .set pop diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 56d86b09c917b0de80efca96ac357ef3e8a63c7e..0a83b1708b3cbed5bba0f4781107122cc0c77569 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,201 @@ .previous .endm +/* + * Save a thread's fp context. + */ +LEAF(_save_fp) +EXPORT_SYMBOL(_save_fp) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) + mfc0 t0, CP0_STATUS +#endif + fpu_save_double a0 t0 t1 # clobbers t1 + jr ra + END(_save_fp) + +/* + * Restore a thread's fp context. + */ +LEAF(_restore_fp) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) + mfc0 t0, CP0_STATUS +#endif + fpu_restore_double a0 t0 t1 # clobbers t1 + jr ra + END(_restore_fp) + +#ifdef CONFIG_CPU_HAS_MSA + +/* + * Save a thread's MSA vector context. + */ +LEAF(_save_msa) +EXPORT_SYMBOL(_save_msa) + msa_save_all a0 + jr ra + END(_save_msa) + +/* + * Restore a thread's MSA vector context. + */ +LEAF(_restore_msa) + msa_restore_all a0 + jr ra + END(_restore_msa) + +LEAF(_init_msa_upper) + msa_init_all_upper + jr ra + END(_init_msa_upper) + +#endif + +/* + * Load the FPU with signalling NANS. This bit pattern we're using has + * the property that no matter whether considered as single or as double + * precision represents signaling NANS. + * + * The value to initialize fcr31 to comes in $a0. + */ + + .set push + SET_HARDFLOAT + +LEAF(_init_fpu) + mfc0 t0, CP0_STATUS + li t1, ST0_CU1 + or t0, t1 + mtc0 t0, CP0_STATUS + enable_fpu_hazard + + ctc1 a0, fcr31 + + li t1, -1 # SNaN + +#ifdef CONFIG_64BIT + sll t0, t0, 5 + bgez t0, 1f # 16 / 32 register mode? + + dmtc1 t1, $f1 + dmtc1 t1, $f3 + dmtc1 t1, $f5 + dmtc1 t1, $f7 + dmtc1 t1, $f9 + dmtc1 t1, $f11 + dmtc1 t1, $f13 + dmtc1 t1, $f15 + dmtc1 t1, $f17 + dmtc1 t1, $f19 + dmtc1 t1, $f21 + dmtc1 t1, $f23 + dmtc1 t1, $f25 + dmtc1 t1, $f27 + dmtc1 t1, $f29 + dmtc1 t1, $f31 +1: +#endif + +#ifdef CONFIG_CPU_MIPS32 + mtc1 t1, $f0 + mtc1 t1, $f1 + mtc1 t1, $f2 + mtc1 t1, $f3 + mtc1 t1, $f4 + mtc1 t1, $f5 + mtc1 t1, $f6 + mtc1 t1, $f7 + mtc1 t1, $f8 + mtc1 t1, $f9 + mtc1 t1, $f10 + mtc1 t1, $f11 + mtc1 t1, $f12 + mtc1 t1, $f13 + mtc1 t1, $f14 + mtc1 t1, $f15 + mtc1 t1, $f16 + mtc1 t1, $f17 + mtc1 t1, $f18 + mtc1 t1, $f19 + mtc1 t1, $f20 + mtc1 t1, $f21 + mtc1 t1, $f22 + mtc1 t1, $f23 + mtc1 t1, $f24 + mtc1 t1, $f25 + mtc1 t1, $f26 + mtc1 t1, $f27 + mtc1 t1, $f28 + mtc1 t1, $f29 + mtc1 t1, $f30 + mtc1 t1, $f31 + +#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) + .set push + .set MIPS_ISA_LEVEL_RAW + .set fp=64 + sll t0, t0, 5 # is Status.FR set? 
+ bgez t0, 1f # no: skip setting upper 32b + + mthc1 t1, $f0 + mthc1 t1, $f1 + mthc1 t1, $f2 + mthc1 t1, $f3 + mthc1 t1, $f4 + mthc1 t1, $f5 + mthc1 t1, $f6 + mthc1 t1, $f7 + mthc1 t1, $f8 + mthc1 t1, $f9 + mthc1 t1, $f10 + mthc1 t1, $f11 + mthc1 t1, $f12 + mthc1 t1, $f13 + mthc1 t1, $f14 + mthc1 t1, $f15 + mthc1 t1, $f16 + mthc1 t1, $f17 + mthc1 t1, $f18 + mthc1 t1, $f19 + mthc1 t1, $f20 + mthc1 t1, $f21 + mthc1 t1, $f22 + mthc1 t1, $f23 + mthc1 t1, $f24 + mthc1 t1, $f25 + mthc1 t1, $f26 + mthc1 t1, $f27 + mthc1 t1, $f28 + mthc1 t1, $f29 + mthc1 t1, $f30 + mthc1 t1, $f31 +1: .set pop +#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ +#else + .set MIPS_ISA_ARCH_LEVEL_RAW + dmtc1 t1, $f0 + dmtc1 t1, $f2 + dmtc1 t1, $f4 + dmtc1 t1, $f6 + dmtc1 t1, $f8 + dmtc1 t1, $f10 + dmtc1 t1, $f12 + dmtc1 t1, $f14 + dmtc1 t1, $f16 + dmtc1 t1, $f18 + dmtc1 t1, $f20 + dmtc1 t1, $f22 + dmtc1 t1, $f24 + dmtc1 t1, $f26 + dmtc1 t1, $f28 + dmtc1 t1, $f30 +#endif + jr ra + END(_init_fpu) + + .set pop /* SET_HARDFLOAT */ + .set noreorder /** diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 7b386d54fd656cd6a2789fe79a72466ee8b0695b..17cf9341c1cf0c1ad34dc0362b85ee550a82f9ca 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -12,8 +12,6 @@ */ #include #include -#include -#include #include #include #include @@ -22,10 +20,6 @@ #include -/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ -#undef fp - -#ifndef USE_ALTERNATE_RESUME_IMPL /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti) @@ -63,200 +57,3 @@ move v0, a0 jr ra END(resume) - -#endif /* USE_ALTERNATE_RESUME_IMPL */ - -/* - * Save a thread's fp context. - */ -LEAF(_save_fp) -EXPORT_SYMBOL(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) - mfc0 t0, CP0_STATUS -#endif - fpu_save_double a0 t0 t1 # clobbers t1 - jr ra - END(_save_fp) - -/* - * Restore a thread's fp context. - */ -LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) - mfc0 t0, CP0_STATUS -#endif - fpu_restore_double a0 t0 t1 # clobbers t1 - jr ra - END(_restore_fp) - -#ifdef CONFIG_CPU_HAS_MSA - -/* - * Save a thread's MSA vector context. - */ -LEAF(_save_msa) -EXPORT_SYMBOL(_save_msa) - msa_save_all a0 - jr ra - END(_save_msa) - -/* - * Restore a thread's MSA vector context. - */ -LEAF(_restore_msa) - msa_restore_all a0 - jr ra - END(_restore_msa) - -LEAF(_init_msa_upper) - msa_init_all_upper - jr ra - END(_init_msa_upper) - -#endif - -/* - * Load the FPU with signalling NANS. This bit pattern we're using has - * the property that no matter whether considered as single or as double - * precision represents signaling NANS. - * - * The value to initialize fcr31 to comes in $a0. - */ - - .set push - SET_HARDFLOAT - -LEAF(_init_fpu) - mfc0 t0, CP0_STATUS - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_STATUS - enable_fpu_hazard - - ctc1 a0, fcr31 - - li t1, -1 # SNaN - -#ifdef CONFIG_64BIT - sll t0, t0, 5 - bgez t0, 1f # 16 / 32 register mode? 
- - dmtc1 t1, $f1 - dmtc1 t1, $f3 - dmtc1 t1, $f5 - dmtc1 t1, $f7 - dmtc1 t1, $f9 - dmtc1 t1, $f11 - dmtc1 t1, $f13 - dmtc1 t1, $f15 - dmtc1 t1, $f17 - dmtc1 t1, $f19 - dmtc1 t1, $f21 - dmtc1 t1, $f23 - dmtc1 t1, $f25 - dmtc1 t1, $f27 - dmtc1 t1, $f29 - dmtc1 t1, $f31 -1: -#endif - -#ifdef CONFIG_CPU_MIPS32 - mtc1 t1, $f0 - mtc1 t1, $f1 - mtc1 t1, $f2 - mtc1 t1, $f3 - mtc1 t1, $f4 - mtc1 t1, $f5 - mtc1 t1, $f6 - mtc1 t1, $f7 - mtc1 t1, $f8 - mtc1 t1, $f9 - mtc1 t1, $f10 - mtc1 t1, $f11 - mtc1 t1, $f12 - mtc1 t1, $f13 - mtc1 t1, $f14 - mtc1 t1, $f15 - mtc1 t1, $f16 - mtc1 t1, $f17 - mtc1 t1, $f18 - mtc1 t1, $f19 - mtc1 t1, $f20 - mtc1 t1, $f21 - mtc1 t1, $f22 - mtc1 t1, $f23 - mtc1 t1, $f24 - mtc1 t1, $f25 - mtc1 t1, $f26 - mtc1 t1, $f27 - mtc1 t1, $f28 - mtc1 t1, $f29 - mtc1 t1, $f30 - mtc1 t1, $f31 - -#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) - .set push - .set MIPS_ISA_LEVEL_RAW - .set fp=64 - sll t0, t0, 5 # is Status.FR set? - bgez t0, 1f # no: skip setting upper 32b - - mthc1 t1, $f0 - mthc1 t1, $f1 - mthc1 t1, $f2 - mthc1 t1, $f3 - mthc1 t1, $f4 - mthc1 t1, $f5 - mthc1 t1, $f6 - mthc1 t1, $f7 - mthc1 t1, $f8 - mthc1 t1, $f9 - mthc1 t1, $f10 - mthc1 t1, $f11 - mthc1 t1, $f12 - mthc1 t1, $f13 - mthc1 t1, $f14 - mthc1 t1, $f15 - mthc1 t1, $f16 - mthc1 t1, $f17 - mthc1 t1, $f18 - mthc1 t1, $f19 - mthc1 t1, $f20 - mthc1 t1, $f21 - mthc1 t1, $f22 - mthc1 t1, $f23 - mthc1 t1, $f24 - mthc1 t1, $f25 - mthc1 t1, $f26 - mthc1 t1, $f27 - mthc1 t1, $f28 - mthc1 t1, $f29 - mthc1 t1, $f30 - mthc1 t1, $f31 -1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ -#else - .set MIPS_ISA_ARCH_LEVEL_RAW - dmtc1 t1, $f0 - dmtc1 t1, $f2 - dmtc1 t1, $f4 - dmtc1 t1, $f6 - dmtc1 t1, $f8 - dmtc1 t1, $f10 - dmtc1 t1, $f12 - dmtc1 t1, $f14 - dmtc1 t1, $f16 - dmtc1 t1, $f18 - dmtc1 t1, $f20 - dmtc1 t1, $f22 - dmtc1 t1, $f24 - dmtc1 t1, $f26 - dmtc1 t1, $f28 - dmtc1 t1, $f30 -#endif - jr ra - END(_init_fpu) - - .set pop /* SET_HARDFLOAT */ diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S deleted file mode 100644 index 9cc7bfab3419a80e02ae344b6bd154e154370864..0000000000000000000000000000000000000000 --- a/arch/mips/kernel/r6000_fpu.S +++ /dev/null @@ -1,99 +0,0 @@ -/* - * r6000_fpu.S: Save/restore floating point context for signal handlers. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1996 by Ralf Baechle - * - * Multi-arch abstraction and asm macros for easier reading: - * Copyright (C) 1996 David S. Miller (davem@davemloft.net) - */ -#include -#include -#include -#include -#include - - .set noreorder - .set mips2 - .set push - SET_HARDFLOAT - -/** - * _save_fp_context() - save FP context from the FPU - * @a0 - pointer to fpregs field of sigcontext - * @a1 - pointer to fpc_csr field of sigcontext - * - * Save FP context, including the 32 FP data registers and the FP - * control & status register, from the FPU to signal context. 
- */ - LEAF(_save_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - bgez t0,1f - nop - - cfc1 t1,fcr31 - /* Store the 16 double precision registers */ - sdc1 $f0,0(a0) - sdc1 $f2,16(a0) - sdc1 $f4,32(a0) - sdc1 $f6,48(a0) - sdc1 $f8,64(a0) - sdc1 $f10,80(a0) - sdc1 $f12,96(a0) - sdc1 $f14,112(a0) - sdc1 $f16,128(a0) - sdc1 $f18,144(a0) - sdc1 $f20,160(a0) - sdc1 $f22,176(a0) - sdc1 $f24,192(a0) - sdc1 $f26,208(a0) - sdc1 $f28,224(a0) - sdc1 $f30,240(a0) - jr ra - sw t0,(a1) -1: jr ra - nop - END(_save_fp_context) - -/** - * _restore_fp_context() - restore FP context to the FPU - * @a0 - pointer to fpregs field of sigcontext - * @a1 - pointer to fpc_csr field of sigcontext - * - * Restore FP context, including the 32 FP data registers and the FP - * control & status register, from signal context to the FPU. - */ - LEAF(_restore_fp_context) - mfc0 t0,CP0_STATUS - sll t0,t0,2 - - bgez t0,1f - lw t0,(a1) - /* Restore the 16 double precision registers */ - ldc1 $f0,0(a0) - ldc1 $f2,16(a0) - ldc1 $f4,32(a0) - ldc1 $f6,48(a0) - ldc1 $f8,64(a0) - ldc1 $f10,80(a0) - ldc1 $f12,96(a0) - ldc1 $f14,112(a0) - ldc1 $f16,128(a0) - ldc1 $f18,144(a0) - ldc1 $f20,160(a0) - ldc1 $f22,176(a0) - ldc1 $f24,192(a0) - ldc1 $f26,208(a0) - ldc1 $f28,224(a0) - ldc1 $f30,240(a0) - jr ra - ctc1 t0,fcr31 -1: jr ra - nop - END(_restore_fp_context) - - .set pop /* SET_HARDFLOAT */ diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 1b070a76fcdd4c2e5f62a84bfff8e40ad0a8721b..406072e26752052c83b04ffacc254d901a668509 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -179,7 +179,7 @@ static void bmips_prepare_cpus(unsigned int max_cpus) /* * Tell the hardware to boot CPUx - runs on CPU0 */ -static void bmips_boot_secondary(int cpu, struct task_struct *idle) +static int bmips_boot_secondary(int cpu, struct task_struct *idle) { bmips_smp_boot_sp = __KSTK_TOS(idle); bmips_smp_boot_gp = (unsigned long)task_thread_info(idle); @@ -231,6 +231,8 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) } cpumask_set_cpu(cpu, &bmips_booted_mask); } + + return 0; } /* @@ -245,7 +247,7 @@ static void bmips_init_secondary(void) break; case CPU_BMIPS5000: write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); - current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3; + cpu_set_core(¤t_cpu_data, (read_c0_brcm_config() >> 25) & 3); break; } } @@ -409,7 +411,7 @@ void __ref play_dead(void) #endif /* CONFIG_HOTPLUG_CPU */ -struct plat_smp_ops bmips43xx_smp_ops = { +const struct plat_smp_ops bmips43xx_smp_ops = { .smp_setup = bmips_smp_setup, .prepare_cpus = bmips_prepare_cpus, .boot_secondary = bmips_boot_secondary, @@ -423,7 +425,7 @@ struct plat_smp_ops bmips43xx_smp_ops = { #endif }; -struct plat_smp_ops bmips5000_smp_ops = { +const struct plat_smp_ops bmips5000_smp_ops = { .smp_setup = bmips_smp_setup, .prepare_cpus = bmips_prepare_cpus, .boot_secondary = bmips_boot_secondary, diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 76923349b4fe16135a97247734a9d803788ae5ca..05295a4909f14affe37487c634b2a12c32c18e16 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -78,7 +77,7 @@ static void cmp_smp_finish(void) * __KSTK_TOS(idle) is apparently the stack pointer * (unsigned long)idle->thread_info the gp */ -static void cmp_boot_secondary(int cpu, struct task_struct *idle) +static int cmp_boot_secondary(int cpu, struct task_struct *idle) { struct 
thread_info *gp = task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); @@ -95,6 +94,7 @@ static void cmp_boot_secondary(int cpu, struct task_struct *idle) #endif amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); + return 0; } /* @@ -148,7 +148,7 @@ void __init cmp_prepare_cpus(unsigned int max_cpus) } -struct plat_smp_ops cmp_smp_ops = { +const struct plat_smp_ops cmp_smp_ops = { .send_ipi_single = mips_smp_send_ipi_single, .send_ipi_mask = mips_smp_send_ipi_mask, .init_secondary = cmp_init_secondary, diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index f832e99ad4c3879052d924c88750c79c97dc0f78..0063122c85da809853e9bf8ab598495f1f7bb43d 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -19,8 +18,7 @@ #include #include -#include -#include +#include #include #include #include @@ -41,55 +39,58 @@ static int __init setup_nothreads(char *s) } early_param("nothreads", setup_nothreads); -static unsigned core_vpe_count(unsigned core) +static unsigned core_vpe_count(unsigned int cluster, unsigned core) { - unsigned cfg; - if (threads_disabled) return 1; - if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) - && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) - return 1; - - mips_cm_lock_other(core, 0); - cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; - mips_cm_unlock_other(); - return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; + return mips_cps_numvps(cluster, core); } static void __init cps_smp_setup(void) { - unsigned int ncores, nvpes, core_vpes; + unsigned int nclusters, ncores, nvpes, core_vpes; unsigned long core_entry; - int c, v; + int cl, c, v; /* Detect & record VPE topology */ - ncores = mips_cm_numcores(); + nvpes = 0; + nclusters = mips_cps_numclusters(); pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE"); - for (c = nvpes = 0; c < ncores; c++) { - core_vpes = core_vpe_count(c); - pr_cont("%c%u", c ? 
',' : '{', core_vpes); - - /* Use the number of VPEs in core 0 for smp_num_siblings */ - if (!c) - smp_num_siblings = core_vpes; - - for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { - cpu_data[nvpes + v].core = c; -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) - cpu_data[nvpes + v].vpe_id = v; -#endif + for (cl = 0; cl < nclusters; cl++) { + if (cl > 0) + pr_cont(","); + pr_cont("{"); + + ncores = mips_cps_numcores(cl); + for (c = 0; c < ncores; c++) { + core_vpes = core_vpe_count(cl, c); + + if (c > 0) + pr_cont(","); + pr_cont("%u", core_vpes); + + /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ + if (!cl && !c) + smp_num_siblings = core_vpes; + + for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { + cpu_set_cluster(&cpu_data[nvpes + v], cl); + cpu_set_core(&cpu_data[nvpes + v], c); + cpu_set_vpe_id(&cpu_data[nvpes + v], v); + } + + nvpes += core_vpes; } - nvpes += core_vpes; + pr_cont("}"); } - pr_cont("} total %u\n", nvpes); + pr_cont(" total %u\n", nvpes); /* Indicate present CPUs (CPU being synonymous with VPE) */ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { - set_cpu_possible(v, true); - set_cpu_present(v, true); + set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0); + set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0); __cpu_number_map[v] = v; __cpu_logical_map[v] = v; } @@ -121,7 +122,7 @@ static void __init cps_smp_setup(void) static void __init cps_prepare_cpus(unsigned int max_cpus) { unsigned ncores, core_vpes, c, cca; - bool cca_unsuitable; + bool cca_unsuitable, cores_limited; u32 *entry_code; mips_mt_set_cpuoptions(); @@ -141,19 +142,22 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) } /* Warn the user if the CCA prevents multi-core */ - ncores = mips_cm_numcores(); - if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) { + cores_limited = false; + if (cca_unsuitable || cpu_has_dc_aliases) { + for_each_present_cpu(c) { + if (cpus_are_siblings(smp_processor_id(), c)) + continue; + + set_cpu_present(c, false); + cores_limited = true; + } + } + if (cores_limited) pr_warn("Using only one core due to %s%s%s\n", cca_unsuitable ? "unsuitable CCA" : "", (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", cpu_has_dc_aliases ? 
"dcache aliasing" : ""); - for_each_present_cpu(c) { - if (cpu_data[c].core) - set_cpu_present(c, false); - } - } - /* * Patch the start of mips_cps_core_entry to provide: * @@ -168,6 +172,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) __sync(); /* Allocate core boot configuration structs */ + ncores = mips_cps_numcores(0); mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), GFP_KERNEL); if (!mips_cps_core_bootcfg) { @@ -177,7 +182,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) /* Allocate VPE boot configuration structs */ for (c = 0; c < ncores; c++) { - core_vpes = core_vpe_count(c); + core_vpes = core_vpe_count(0, c); mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, sizeof(*mips_cps_core_bootcfg[c].vpe_config), GFP_KERNEL); @@ -189,7 +194,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) } /* Mark this CPU as booted */ - atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, + atomic_set(&mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)].vpe_mask, 1 << cpu_vpe_id(¤t_cpu_data)); return; @@ -212,11 +217,11 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) static void boot_core(unsigned int core, unsigned int vpe_id) { - u32 access, stat, seq_state; + u32 stat, seq_state; unsigned timeout; /* Select the appropriate core */ - mips_cm_lock_other(core, 0); + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* Set its reset vector */ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); @@ -225,12 +230,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id) write_gcr_co_coherence(0); /* Start it with the legacy memory map and exception base */ - write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB); + write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); /* Ensure the core can access the GCRs */ - access = read_gcr_access(); - access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); - write_gcr_access(access); + set_gcr_access(1 << core); if (mips_cpc_present()) { /* Reset the core */ @@ -253,7 +256,8 @@ static void boot_core(unsigned int core, unsigned int vpe_id) timeout = 100; while (true) { stat = read_cpc_co_stat_conf(); - seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK; + seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE; + seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); /* U6 == coherent execution, ie. 
the core is up */ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6) @@ -285,15 +289,15 @@ static void boot_core(unsigned int core, unsigned int vpe_id) static void remote_vpe_boot(void *dummy) { - unsigned core = current_cpu_data.core; + unsigned core = cpu_core(¤t_cpu_data); struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; mips_cps_boot_vpes(core_cfg, cpu_vpe_id(¤t_cpu_data)); } -static void cps_boot_secondary(int cpu, struct task_struct *idle) +static int cps_boot_secondary(int cpu, struct task_struct *idle) { - unsigned core = cpu_data[cpu].core; + unsigned core = cpu_core(&cpu_data[cpu]); unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; @@ -301,6 +305,10 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) unsigned int remote; int err; + /* We don't yet support booting CPUs in other clusters */ + if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(¤t_cpu_data)) + return -ENOSYS; + vpe_cfg->pc = (unsigned long)&smp_bootstrap; vpe_cfg->sp = __KSTK_TOS(idle); vpe_cfg->gp = (unsigned long)task_thread_info(idle); @@ -316,16 +324,16 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) } if (cpu_has_vp) { - mips_cm_lock_other(core, vpe_id); + mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); write_gcr_co_reset_base(core_entry); mips_cm_unlock_other(); } - if (core != current_cpu_data.core) { + if (!cpus_are_siblings(cpu, smp_processor_id())) { /* Boot a VPE on another powered up core */ for (remote = 0; remote < NR_CPUS; remote++) { - if (cpu_data[remote].core != core) + if (!cpus_are_siblings(cpu, remote)) continue; if (cpu_online(remote)) break; @@ -349,6 +357,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) mips_cps_boot_vpes(core_cfg, vpe_id); out: preempt_enable(); + return 0; } static void cps_init_secondary(void) @@ -358,7 +367,7 @@ static void cps_init_secondary(void) dmt(); if (mips_cm_revision() >= CM_REV_CM3) { - unsigned ident = gic_read_local_vp_id(); + unsigned int ident = read_gic_vl_ident(); /* * Ensure that our calculation of the VP ID matches up with @@ -402,7 +411,7 @@ static int cps_cpu_disable(void) if (!cps_pm_support_state(CPS_PM_POWER_GATED)) return -EINVAL; - core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; + core_cfg = &mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)]; atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); smp_mb__after_atomic(); set_cpu_online(cpu, false); @@ -424,15 +433,17 @@ void play_dead(void) local_irq_disable(); idle_task_exit(); cpu = smp_processor_id(); - core = cpu_data[cpu].core; + core = cpu_core(&cpu_data[cpu]); cpu_death = CPU_DEATH_POWER; pr_debug("CPU%d going offline\n", cpu); if (cpu_has_mipsmt || cpu_has_vp) { + core = cpu_core(&cpu_data[cpu]); + /* Look for another online VPE within the core */ for_each_online_cpu(cpu_death_sibling) { - if (cpu_data[cpu_death_sibling].core != core) + if (!cpus_are_siblings(cpu, cpu_death_sibling)) continue; /* @@ -488,7 +499,7 @@ static void wait_for_sibling_halt(void *ptr_cpu) static void cps_cpu_die(unsigned int cpu) { - unsigned core = cpu_data[cpu].core; + unsigned core = cpu_core(&cpu_data[cpu]); unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); ktime_t fail_time; unsigned stat; @@ -519,10 +530,11 @@ static void cps_cpu_die(unsigned int cpu) */ fail_time = ktime_add_ms(ktime_get(), 2000); do { - 
mips_cm_lock_other(core, 0); + mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); mips_cpc_lock_other(core); stat = read_cpc_co_stat_conf(); - stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; + stat &= CPC_Cx_STAT_CONF_SEQSTATE; + stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); mips_cpc_unlock_other(); mips_cm_unlock_other(); @@ -544,7 +556,7 @@ static void cps_cpu_die(unsigned int cpu) */ if (WARN(ktime_after(ktime_get(), fail_time), "CPU%u hasn't powered down, seq. state %u\n", - cpu, stat >> CPC_Cx_STAT_CONF_SEQSTATE_SHF)) + cpu, stat)) break; } while (1); @@ -562,7 +574,7 @@ static void cps_cpu_die(unsigned int cpu) panic("Failed to call remote sibling CPU\n"); } else if (cpu_has_vp) { do { - mips_cm_lock_other(core, vpe_id); + mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); stat = read_cpc_co_vp_running(); mips_cm_unlock_other(); } while (stat & (1 << vpe_id)); @@ -571,7 +583,7 @@ static void cps_cpu_die(unsigned int cpu) #endif /* CONFIG_HOTPLUG_CPU */ -static struct plat_smp_ops cps_smp_ops = { +static const struct plat_smp_ops cps_smp_ops = { .smp_setup = cps_smp_setup, .prepare_cpus = cps_prepare_cpus, .boot_secondary = cps_boot_secondary, @@ -587,7 +599,7 @@ static struct plat_smp_ops cps_smp_ops = { bool mips_cps_smp_in_use(void) { - extern struct plat_smp_ops *mp_ops; + extern const struct plat_smp_ops *mp_ops; return mp_ops == &cps_smp_ops; } @@ -599,7 +611,7 @@ int register_cps_smp_ops(void) } /* check we have a GIC - we need one for IPIs */ - if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) { + if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) { pr_warn("MIPS CPS SMP unable to proceed without a GIC\n"); return -ENODEV; } diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index ed6b4df583ea0d420e9db7c75267fa419a4c4c8f..94ab3276b48c491cb0fcc12f167fb15080e48595 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -36,6 +35,7 @@ #include #include #include +#include static void __init smvp_copy_vpe_config(void) { @@ -83,7 +83,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, if (tc != 0) smvp_copy_vpe_config(); - cpu_data[ncpu].vpe_id = tc; + cpu_set_vpe_id(&cpu_data[ncpu], tc); return ncpu; } @@ -118,14 +118,12 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0) static void vsmp_init_secondary(void) { -#ifdef CONFIG_MIPS_GIC /* This is Malta specific: IPI,performance and timer interrupts */ - if (gic_present) + if (mips_gic_present()) change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); else -#endif change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7); } @@ -152,7 +150,7 @@ static void vsmp_smp_finish(void) * (unsigned long)idle->thread_info the gp * assumes a 1:1 mapping of TC => VPE */ -static void vsmp_boot_secondary(int cpu, struct task_struct *idle) +static int vsmp_boot_secondary(int cpu, struct task_struct *idle) { struct thread_info *gp = task_thread_info(idle); dvpe(); @@ -184,6 +182,8 @@ static void vsmp_boot_secondary(int cpu, struct task_struct *idle) clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(EVPE_ENABLE); + + return 0; } /* @@ -239,7 +239,7 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus) mips_mt_set_cpuoptions(); } -struct plat_smp_ops vsmp_smp_ops = { +const struct plat_smp_ops vsmp_smp_ops = { .send_ipi_single = mips_smp_send_ipi_single, .send_ipi_mask = 
mips_smp_send_ipi_mask, .init_secondary = vsmp_init_secondary, diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c index 17878d71ef2bc9f5238c7062b0f09cd5d1ca644b..525d3196f793f254d2620cd8a3f7a828547be3fa 100644 --- a/arch/mips/kernel/smp-up.c +++ b/arch/mips/kernel/smp-up.c @@ -39,8 +39,9 @@ static void up_smp_finish(void) /* * Firmware CPU startup hook */ -static void up_boot_secondary(int cpu, struct task_struct *idle) +static int up_boot_secondary(int cpu, struct task_struct *idle) { + return 0; } static void __init up_smp_setup(void) @@ -63,7 +64,7 @@ static void up_cpu_die(unsigned int cpu) } #endif -struct plat_smp_ops up_smp_ops = { +const struct plat_smp_ops up_smp_ops = { .send_ipi_single = up_send_ipi_single, .send_ipi_mask = up_send_ipi_mask, .init_secondary = up_init_secondary, diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index c7cbddfcdc3b88d07a7538611e4d377588384a92..bbe19b64def5b6c863e9d2e11f0ad36609d0bfde 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -96,8 +96,7 @@ static inline void set_cpu_sibling_map(int cpu) if (smp_num_siblings > 1) { for_each_cpu(i, &cpu_sibling_setup_map) { - if (cpu_data[cpu].package == cpu_data[i].package && - cpu_data[cpu].core == cpu_data[i].core) { + if (cpus_are_siblings(cpu, i)) { cpumask_set_cpu(i, &cpu_sibling_map[cpu]); cpumask_set_cpu(cpu, &cpu_sibling_map[i]); } @@ -134,8 +133,7 @@ void calculate_cpu_foreign_map(void) for_each_online_cpu(i) { core_present = 0; for_each_cpu(k, &temp_foreign_map) - if (cpu_data[i].package == cpu_data[k].package && - cpu_data[i].core == cpu_data[k].core) + if (cpus_are_siblings(i, k)) core_present = 1; if (!core_present) cpumask_set_cpu(i, &temp_foreign_map); @@ -146,10 +144,10 @@ void calculate_cpu_foreign_map(void) &temp_foreign_map, &cpu_sibling_map[i]); } -struct plat_smp_ops *mp_ops; +const struct plat_smp_ops *mp_ops; EXPORT_SYMBOL(mp_ops); -void register_smp_ops(struct plat_smp_ops *ops) +void register_smp_ops(const struct plat_smp_ops *ops) { if (mp_ops) printk(KERN_WARNING "Overriding previously set SMP ops\n"); @@ -186,13 +184,13 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action) if (mips_cpc_present()) { for_each_cpu(cpu, mask) { - core = cpu_data[cpu].core; - - if (core == current_cpu_data.core) + if (cpus_are_siblings(cpu, smp_processor_id())) continue; + core = cpu_core(&cpu_data[cpu]); + while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { - mips_cm_lock_other(core, 0); + mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL); mips_cpc_lock_other(core); write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); mips_cpc_unlock_other(); @@ -441,7 +439,11 @@ void smp_prepare_boot_cpu(void) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - mp_ops->boot_secondary(cpu, tidle); + int err; + + err = mp_ops->boot_secondary(cpu, tidle); + if (err) + return err; /* * We must check for timeout here, as the CPU will not be marked diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index c036157fb891ff7e9a76c461b5ed0873c56743b9..a6ebc8135112e5c9c22e5598bf25556469f9dbb4 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -72,20 +72,6 @@ EXPORT_SYMBOL(perf_irq); unsigned int mips_hpt_frequency; EXPORT_SYMBOL_GPL(mips_hpt_frequency); -/* - * This function exists in order to cause an error due to a duplicate - * definition if platform code should have its own implementation. The hook - * to use instead is plat_time_init. plat_time_init does not receive the - * irqaction pointer argument anymore. 
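The SMP hunks above convert every backend's boot_secondary() hook from void to int so that __cpu_up() can report a real failure (for example the -ENOSYS that cps_boot_secondary() now returns for CPUs in other clusters) instead of waiting for a timeout. Below is a minimal sketch of that pattern under hypothetical names (my_smp_ops, my_boot_secondary, my_cpu_up); it is an illustration of the error-propagation change, not the kernel's actual plat_smp_ops code.

#include <linux/errno.h>
#include <linux/sched.h>

/* Hypothetical ops table: the boot hook now reports failure via an int. */
struct my_smp_ops {
        int (*boot_secondary)(int cpu, struct task_struct *idle);
};

static int my_boot_secondary(int cpu, struct task_struct *idle)
{
        if (cpu < 0)                    /* backend cannot handle this CPU */
                return -ENOSYS;
        /* ... kick the secondary CPU here ... */
        return 0;
}

static int my_cpu_up(const struct my_smp_ops *ops, int cpu,
                     struct task_struct *idle)
{
        int err = ops->boot_secondary(cpu, idle);

        if (err)                        /* propagate instead of timing out */
                return err;
        /* ... wait for the CPU to be marked online ... */
        return 0;
}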
This is because any function which - * initializes an interrupt timer now takes care of its own request_irq rsp. - * setup_irq calls and each clock_event_device should use its own - * struct irqrequest. - */ -void __init plat_timer_setup(void) -{ - BUG(); -} - static __init int cpu_has_mfc0_count_bug(void) { switch (current_cpu_type()) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 2bf41499334777e7a8682453222427dd084765ee..5669d3b8bd382760aa4f7cefa243a13195035928 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -50,9 +50,8 @@ #include #include #include -#include +#include #include -#include #include #include #include @@ -734,8 +733,7 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, si.si_code = FPE_FLTUND; else if (fcr31 & FPU_CSR_INE_X) si.si_code = FPE_FLTRES; - else - return; /* Broken hardware? */ + force_sig_info(SIGFPE, &si, tsk); } @@ -1673,7 +1671,7 @@ static inline void parity_protection_init(void) /* Probe L2 ECC support */ gcr_ectl = read_gcr_err_control(); - if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK) || + if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) || !(cp0_ectl & ERRCTL_PE)) { /* * One of L1 or L2 ECC checking isn't supported, @@ -1693,12 +1691,12 @@ static inline void parity_protection_init(void) /* Configure L2 ECC checking */ if (l2parity) - gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK; + gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN; else - gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK; + gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN; write_gcr_err_control(gcr_ectl); gcr_ectl = read_gcr_err_control(); - gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK; + gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; WARN_ON(!!gcr_ectl != l2parity); pr_info("Cache parity protection %sabled\n", @@ -2428,21 +2426,6 @@ void __init trap_init(void) set_except_vector(EXCCODE_TR, handle_tr); set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe); - if (current_cpu_type() == CPU_R6000 || - current_cpu_type() == CPU_R6000A) { - /* - * The R6000 is the only R-series CPU that features a machine - * check exception (similar to the R4000 cache error) and - * unaligned ldc1/sdc1 exception. The handlers have not been - * written yet. Well, anyway there is no R6000 machine on the - * current list of targets for Linux/MIPS. - * (Duh, crap, there is someone with a triple R6k machine) - */ - //set_except_vector(14, handle_mc); - //set_except_vector(15, handle_ndc); - } - - if (board_nmi_handler_setup) board_nmi_handler_setup(); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 5eaf2578ac0407b076b78bd7f8842a49e0d1ba82..2d0b912f9e3e40a987a5a481c7066bf46dcf4acb 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -1378,7 +1378,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ -const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; +static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; static void emulate_load_store_microMIPS(struct pt_regs *regs, void __user *addr) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 093517e85a6cdca8a550042dbd06a7c4b2a93ae5..019035d7225c4fd942c96c6628b6605f8d2af1b4 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -13,13 +13,13 @@ #include #include #include -#include #include #include #include #include #include +#include #include /* Kernel-provided data used by the VDSO. 
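In the parity_protection_init() hunk above, L2 ECC checking is enabled or disabled with a read-modify-write of the CM error-control register and the bit is then read back, because the hardware may not honour the request; WARN_ON(!!gcr_ectl != l2parity) flags a mismatch. A sketch of that write-then-verify idiom, using made-up accessors read_ctl()/write_ctl() and a made-up MY_ECC_EN bit:

#include <linux/bits.h>
#include <linux/bug.h>

#define MY_ECC_EN       BIT(1)          /* hypothetical enable bit */

static void set_l2_ecc(bool enable)
{
        u32 ctl = read_ctl();

        if (enable)
                ctl |= MY_ECC_EN;
        else
                ctl &= ~MY_ECC_EN;
        write_ctl(ctl);

        /* Read back: the implementation may ignore or hard-wire the bit. */
        ctl = read_ctl() & MY_ECC_EN;
        WARN_ON(!!ctl != enable);
}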
*/ @@ -99,9 +99,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr; + unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; struct vm_area_struct *vma; - struct resource gic_res; int ret; if (down_write_killable(&mm->mmap_sem)) @@ -125,7 +124,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * only map a page even though the total area is 64K, as we only need * the counter registers at the start. */ - gic_size = gic_present ? PAGE_SIZE : 0; + gic_size = mips_gic_present() ? PAGE_SIZE : 0; vvar_size = gic_size + PAGE_SIZE; size = vvar_size + image->size; @@ -148,13 +147,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map GIC user page. */ if (gic_size) { - ret = gic_get_usm_range(&gic_res); - if (ret) - goto out; + gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; - ret = io_remap_pfn_range(vma, base, - gic_res.start >> PAGE_SHIFT, - gic_size, + ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(PAGE_READONLY)); if (ret) goto out; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index bce2a6431430366ee626ddc4f6282665caeb7f06..d535edc01434117a8809fc21fb152226e0b46521 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -514,7 +514,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, dvcpu->arch.wait = 0; - if (swait_active(&dvcpu->wq)) + if (swq_has_sleeper(&dvcpu->wq)) swake_up(&dvcpu->wq); return 0; @@ -1179,7 +1179,7 @@ static void kvm_mips_comparecount_func(unsigned long data) kvm_mips_callbacks->queue_timer_int(vcpu); vcpu->arch.wait = 0; - if (swait_active(&vcpu->wq)) + if (swq_has_sleeper(&vcpu->wq)) swake_up(&vcpu->wq); } diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index 177769dbb0e85df724bd5da31951a46bfe544842..35bc69b78268eff635182d3144f01cd40d41b117 100644 --- a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -17,6 +17,8 @@ config SOC_XWAY bool "XWAY" select SOC_TYPE_XWAY select HW_HAS_PCI + select MFD_SYSCON + select MFD_CORE config SOC_FALCON bool "FALCON" diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c index 7a535d72f541afbe03f4c413afc92756f505e634..058b85578cf780e767ae59c39ba959a6aa15ec0c 100644 --- a/arch/mips/lantiq/falcon/reset.c +++ b/arch/mips/lantiq/falcon/reset.c @@ -15,27 +15,14 @@ #include -/* CPU0 Reset Source Register */ -#define SYS1_CPU0RS 0x0040 -/* reset cause mask */ -#define CPU0RS_MASK 0x0003 -/* CPU0 Boot Mode Register */ -#define SYS1_BM 0x00a0 -/* boot mode mask */ -#define BM_MASK 0x0005 - -/* allow platform code to find out what surce we booted from */ +/* + * Dummy implementation. 
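The vdso.c hunk near the top of this chunk stops asking the GIC driver for a resource and instead computes the user-visible page's PFN with virt_to_phys() on mips_gic_base, then maps that single page read-only and uncached into the new VMA with io_remap_pfn_range(). A sketch of that mapping call in isolation; the function name and the 'phys' parameter are assumptions made for the example:

#include <linux/mm.h>

/* Map one uncached, read-only MMIO page at 'phys' into a user VMA. */
static int map_user_mmio_page(struct vm_area_struct *vma,
                              unsigned long uaddr, phys_addr_t phys)
{
        return io_remap_pfn_range(vma, uaddr, phys >> PAGE_SHIFT, PAGE_SIZE,
                                  pgprot_noncached(PAGE_READONLY));
}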
Used to allow platform code to find out what + * source was booted from + */ unsigned char ltq_boot_select(void) { - return ltq_sys1_r32(SYS1_BM) & BM_MASK; -} - -/* allow the watchdog driver to find out what the boot reason was */ -int ltq_reset_cause(void) -{ - return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK; + return BS_SPI; } -EXPORT_SYMBOL_GPL(ltq_reset_cause); #define BOOT_REG_BASE (KSEG1 | 0x1F200000) #define BOOT_PW1_REG (BOOT_REG_BASE | 0x20) diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 33728b7af4267b26902781ef2691a064536c49dd..f0bc3312ed1103bea83c69338e1cf4cc010619cf 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -61,10 +61,6 @@ /* we have a cascade of 8 irqs */ #define MIPS_CPU_IRQ_CASCADE 8 -#ifdef CONFIG_MIPS_MT_SMP -int gic_present; -#endif - static int exin_avail; static u32 ltq_eiu_irq[MAX_EIU]; static void __iomem *ltq_icu_membase[MAX_IM]; diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 96773bed8a8a52cdbe33308cd23f9cd6069665bd..9ff7ccde9de0e8d9899018d3d9152e4a243408e8 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -117,7 +117,7 @@ void __init prom_init(void) int __init plat_of_setup(void) { - return __dt_register_buses(soc_info.compatible, "simple-bus"); + return of_platform_default_populate(NULL, NULL, NULL); } arch_initcall(plat_of_setup); diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile index a2edc538f4779df646822b52cb7737ad13b64dcb..fbb0747c70b78b26231a6b51e3eb2f3267c94dd6 100644 --- a/arch/mips/lantiq/xway/Makefile +++ b/arch/mips/lantiq/xway/Makefile @@ -1,5 +1,3 @@ -obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o +obj-y := prom.o sysctrl.o clk.o dma.o gptu.o dcdc.o obj-y += vmmc.o - -obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c deleted file mode 100644 index 83fd65d76e81d836b982534054f2afe1e7676172..0000000000000000000000000000000000000000 --- a/arch/mips/lantiq/xway/reset.c +++ /dev/null @@ -1,387 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include "../prom.h" - -/* reset request register */ -#define RCU_RST_REQ 0x0010 -/* reset status register */ -#define RCU_RST_STAT 0x0014 -/* vr9 gphy registers */ -#define RCU_GFS_ADD0_XRX200 0x0020 -#define RCU_GFS_ADD1_XRX200 0x0068 -/* xRX300 gphy registers */ -#define RCU_GFS_ADD0_XRX300 0x0020 -#define RCU_GFS_ADD1_XRX300 0x0058 -#define RCU_GFS_ADD2_XRX300 0x00AC -/* xRX330 gphy registers */ -#define RCU_GFS_ADD0_XRX330 0x0020 -#define RCU_GFS_ADD1_XRX330 0x0058 -#define RCU_GFS_ADD2_XRX330 0x00AC -#define RCU_GFS_ADD3_XRX330 0x0264 - -/* xbar BE flag */ -#define RCU_AHB_ENDIAN 0x004C -#define RCU_VR9_BE_AHB1S 0x00000008 - -/* reboot bit */ -#define RCU_RD_GPHY0_XRX200 BIT(31) -#define RCU_RD_SRST BIT(30) -#define RCU_RD_GPHY1_XRX200 BIT(29) -/* xRX300 bits */ -#define RCU_RD_GPHY0_XRX300 BIT(31) -#define RCU_RD_GPHY1_XRX300 BIT(29) -#define RCU_RD_GPHY2_XRX300 BIT(28) -/* xRX330 bits */ -#define RCU_RD_GPHY0_XRX330 BIT(31) -#define RCU_RD_GPHY1_XRX330 BIT(29) -#define RCU_RD_GPHY2_XRX330 BIT(28) -#define RCU_RD_GPHY3_XRX330 BIT(10) - -/* reset cause */ -#define RCU_STAT_SHIFT 26 -/* boot selection */ -#define RCU_BOOT_SEL(x) ((x >> 18) & 0x7) -#define RCU_BOOT_SEL_XRX200(x) (((x >> 17) & 0xf) | ((x >> 8) & 0x10)) - -/* dwc2 USB configuration registers */ -#define RCU_USB1CFG 0x0018 -#define RCU_USB2CFG 0x0034 - -/* USB DMA endianness bits */ -#define RCU_USBCFG_HDSEL_BIT BIT(11) -#define RCU_USBCFG_HOST_END_BIT BIT(10) -#define RCU_USBCFG_SLV_END_BIT BIT(9) - -/* USB reset bits */ -#define RCU_USBRESET 0x0010 - -#define USBRESET_BIT BIT(4) - -#define RCU_USBRESET2 0x0048 - -#define USB1RESET_BIT BIT(4) -#define USB2RESET_BIT BIT(5) - -#define RCU_CFG1A 0x0038 -#define RCU_CFG1B 0x003C - -/* USB PMU devices */ -#define PMU_AHBM BIT(15) -#define PMU_USB0 BIT(6) -#define PMU_USB1 BIT(27) - -/* USB PHY PMU devices */ -#define PMU_USB0_P BIT(0) -#define PMU_USB1_P BIT(26) - -/* remapped base addr of the reset control unit */ -static void __iomem *ltq_rcu_membase; -static struct device_node *ltq_rcu_np; -static DEFINE_SPINLOCK(ltq_rcu_lock); - -static void ltq_rcu_w32(uint32_t val, uint32_t reg_off) -{ - ltq_w32(val, ltq_rcu_membase + reg_off); -} - -static uint32_t ltq_rcu_r32(uint32_t reg_off) -{ - return ltq_r32(ltq_rcu_membase + reg_off); -} - -static void ltq_rcu_w32_mask(uint32_t clr, uint32_t set, uint32_t reg_off) -{ - unsigned long flags; - - spin_lock_irqsave(<q_rcu_lock, flags); - ltq_rcu_w32((ltq_rcu_r32(reg_off) & ~(clr)) | (set), reg_off); - spin_unlock_irqrestore(<q_rcu_lock, flags); -} - -/* This function is used by the watchdog driver */ -int ltq_reset_cause(void) -{ - u32 val = ltq_rcu_r32(RCU_RST_STAT); - return val >> RCU_STAT_SHIFT; -} -EXPORT_SYMBOL_GPL(ltq_reset_cause); - -/* allow platform code to find out what source we booted from */ -unsigned char ltq_boot_select(void) -{ - u32 val = ltq_rcu_r32(RCU_RST_STAT); - - if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) - return RCU_BOOT_SEL_XRX200(val); - - return RCU_BOOT_SEL(val); -} - -struct ltq_gphy_reset { - u32 rd; - u32 addr; -}; - -/* reset / boot a gphy */ -static struct ltq_gphy_reset xrx200_gphy[] = { - {RCU_RD_GPHY0_XRX200, RCU_GFS_ADD0_XRX200}, - {RCU_RD_GPHY1_XRX200, RCU_GFS_ADD1_XRX200}, -}; - -/* reset / boot a gphy */ -static struct ltq_gphy_reset xrx300_gphy[] = { 
- {RCU_RD_GPHY0_XRX300, RCU_GFS_ADD0_XRX300}, - {RCU_RD_GPHY1_XRX300, RCU_GFS_ADD1_XRX300}, - {RCU_RD_GPHY2_XRX300, RCU_GFS_ADD2_XRX300}, -}; - -/* reset / boot a gphy */ -static struct ltq_gphy_reset xrx330_gphy[] = { - {RCU_RD_GPHY0_XRX330, RCU_GFS_ADD0_XRX330}, - {RCU_RD_GPHY1_XRX330, RCU_GFS_ADD1_XRX330}, - {RCU_RD_GPHY2_XRX330, RCU_GFS_ADD2_XRX330}, - {RCU_RD_GPHY3_XRX330, RCU_GFS_ADD3_XRX330}, -}; - -static void xrx200_gphy_boot_addr(struct ltq_gphy_reset *phy_regs, - dma_addr_t dev_addr) -{ - ltq_rcu_w32_mask(0, phy_regs->rd, RCU_RST_REQ); - ltq_rcu_w32(dev_addr, phy_regs->addr); - ltq_rcu_w32_mask(phy_regs->rd, 0, RCU_RST_REQ); -} - -/* reset and boot a gphy. these phys only exist on xrx200 SoC */ -int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr) -{ - struct clk *clk; - - if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) { - dev_err(dev, "this SoC has no GPHY\n"); - return -EINVAL; - } - - if (of_machine_is_compatible("lantiq,vr9")) { - clk = clk_get_sys("1f203000.rcu", "gphy"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - clk_enable(clk); - } - - dev_info(dev, "booting GPHY%u firmware at %X\n", id, dev_addr); - - if (of_machine_is_compatible("lantiq,vr9")) { - if (id >= ARRAY_SIZE(xrx200_gphy)) { - dev_err(dev, "%u is an invalid gphy id\n", id); - return -EINVAL; - } - xrx200_gphy_boot_addr(&xrx200_gphy[id], dev_addr); - } else if (of_machine_is_compatible("lantiq,ar10")) { - if (id >= ARRAY_SIZE(xrx300_gphy)) { - dev_err(dev, "%u is an invalid gphy id\n", id); - return -EINVAL; - } - xrx200_gphy_boot_addr(&xrx300_gphy[id], dev_addr); - } else if (of_machine_is_compatible("lantiq,grx390")) { - if (id >= ARRAY_SIZE(xrx330_gphy)) { - dev_err(dev, "%u is an invalid gphy id\n", id); - return -EINVAL; - } - xrx200_gphy_boot_addr(&xrx330_gphy[id], dev_addr); - } - return 0; -} - -/* reset a io domain for u micro seconds */ -void ltq_reset_once(unsigned int module, ulong u) -{ - ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ); - udelay(u); - ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ); -} - -static int ltq_assert_device(struct reset_controller_dev *rcdev, - unsigned long id) -{ - u32 val; - - if (id < 8) - return -1; - - val = ltq_rcu_r32(RCU_RST_REQ); - val |= BIT(id); - ltq_rcu_w32(val, RCU_RST_REQ); - - return 0; -} - -static int ltq_deassert_device(struct reset_controller_dev *rcdev, - unsigned long id) -{ - u32 val; - - if (id < 8) - return -1; - - val = ltq_rcu_r32(RCU_RST_REQ); - val &= ~BIT(id); - ltq_rcu_w32(val, RCU_RST_REQ); - - return 0; -} - -static int ltq_reset_device(struct reset_controller_dev *rcdev, - unsigned long id) -{ - ltq_assert_device(rcdev, id); - return ltq_deassert_device(rcdev, id); -} - -static const struct reset_control_ops reset_ops = { - .reset = ltq_reset_device, - .assert = ltq_assert_device, - .deassert = ltq_deassert_device, -}; - -static struct reset_controller_dev reset_dev = { - .ops = &reset_ops, - .owner = THIS_MODULE, - .nr_resets = 32, - .of_reset_n_cells = 1, -}; - -void ltq_rst_init(void) -{ - reset_dev.of_node = of_find_compatible_node(NULL, NULL, - "lantiq,xway-reset"); - if (!reset_dev.of_node) - pr_err("Failed to find reset controller node"); - else - reset_controller_register(&reset_dev); -} - -static void ltq_machine_restart(char *command) -{ - u32 val = ltq_rcu_r32(RCU_RST_REQ); - - if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) - val |= RCU_RD_GPHY1_XRX200 | RCU_RD_GPHY0_XRX200; - - val |= RCU_RD_SRST; - - local_irq_disable(); - ltq_rcu_w32(val, 
RCU_RST_REQ); - unreachable(); -} - -static void ltq_machine_halt(void) -{ - local_irq_disable(); - unreachable(); -} - -static void ltq_machine_power_off(void) -{ - local_irq_disable(); - unreachable(); -} - -static void ltq_usb_init(void) -{ - /* Power for USB cores 1 & 2 */ - ltq_pmu_enable(PMU_AHBM); - ltq_pmu_enable(PMU_USB0); - ltq_pmu_enable(PMU_USB1); - - ltq_rcu_w32(ltq_rcu_r32(RCU_CFG1A) | BIT(0), RCU_CFG1A); - ltq_rcu_w32(ltq_rcu_r32(RCU_CFG1B) | BIT(0), RCU_CFG1B); - - /* Enable USB PHY power for cores 1 & 2 */ - ltq_pmu_enable(PMU_USB0_P); - ltq_pmu_enable(PMU_USB1_P); - - /* Configure cores to host mode */ - ltq_rcu_w32(ltq_rcu_r32(RCU_USB1CFG) & ~RCU_USBCFG_HDSEL_BIT, - RCU_USB1CFG); - ltq_rcu_w32(ltq_rcu_r32(RCU_USB2CFG) & ~RCU_USBCFG_HDSEL_BIT, - RCU_USB2CFG); - - /* Select DMA endianness (Host-endian: big-endian) */ - ltq_rcu_w32((ltq_rcu_r32(RCU_USB1CFG) & ~RCU_USBCFG_SLV_END_BIT) - | RCU_USBCFG_HOST_END_BIT, RCU_USB1CFG); - ltq_rcu_w32(ltq_rcu_r32((RCU_USB2CFG) & ~RCU_USBCFG_SLV_END_BIT) - | RCU_USBCFG_HOST_END_BIT, RCU_USB2CFG); - - /* Hard reset USB state machines */ - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET) | USBRESET_BIT, RCU_USBRESET); - udelay(50 * 1000); - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET) & ~USBRESET_BIT, RCU_USBRESET); - - /* Soft reset USB state machines */ - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET2) - | USB1RESET_BIT | USB2RESET_BIT, RCU_USBRESET2); - udelay(50 * 1000); - ltq_rcu_w32(ltq_rcu_r32(RCU_USBRESET2) - & ~(USB1RESET_BIT | USB2RESET_BIT), RCU_USBRESET2); -} - -static int __init mips_reboot_setup(void) -{ - struct resource res; - - ltq_rcu_np = of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway"); - if (!ltq_rcu_np) - ltq_rcu_np = of_find_compatible_node(NULL, NULL, - "lantiq,rcu-xrx200"); - - /* check if all the reset register range is available */ - if (!ltq_rcu_np) - panic("Failed to load reset resources from devicetree"); - - if (of_address_to_resource(ltq_rcu_np, 0, &res)) - panic("Failed to get rcu memory range"); - - if (!request_mem_region(res.start, resource_size(&res), res.name)) - pr_err("Failed to request rcu memory"); - - ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res)); - if (!ltq_rcu_membase) - panic("Failed to remap core memory"); - - if (of_machine_is_compatible("lantiq,ar9") || - of_machine_is_compatible("lantiq,vr9")) - ltq_usb_init(); - - if (of_machine_is_compatible("lantiq,vr9")) - ltq_rcu_w32(ltq_rcu_r32(RCU_AHB_ENDIAN) | RCU_VR9_BE_AHB1S, - RCU_AHB_ENDIAN); - - _machine_restart = ltq_machine_restart; - _machine_halt = ltq_machine_halt; - pm_power_off = ltq_machine_power_off; - - return 0; -} - -arch_initcall(mips_reboot_setup); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 95bec460b651fd1cdad1b1e262463408742c7795..7611c3013793feac519b07025b70206efa320397 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -145,15 +145,7 @@ static u32 pmu_clk_cr_b[] = { #define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y)) #define pmu_r32(x) ltq_r32(pmu_membase + (x)) -#define XBAR_ALWAYS_LAST 0x430 -#define XBAR_FPI_BURST_EN BIT(1) -#define XBAR_AHB_BURST_EN BIT(2) - -#define xbar_w32(x, y) ltq_w32((x), ltq_xbar_membase + (y)) -#define xbar_r32(x) ltq_r32(ltq_xbar_membase + (x)) - static void __iomem *pmu_membase; -static void __iomem *ltq_xbar_membase; void __iomem *ltq_cgu_membase; void __iomem *ltq_ebu_membase; @@ -293,16 +285,6 @@ static void pci_ext_disable(struct clk *clk) ltq_cgu_w32((1 << 31) | (1 << 30), pcicr); } -static void 
xbar_fpi_burst_disable(void) -{ - u32 reg; - - /* bit 1 as 1 --burst; bit 1 as 0 -- single */ - reg = xbar_r32(XBAR_ALWAYS_LAST); - reg &= ~XBAR_FPI_BURST_EN; - xbar_w32(reg, XBAR_ALWAYS_LAST); -} - /* enable a clockout source */ static int clkout_enable(struct clk *clk) { @@ -459,26 +441,6 @@ void __init ltq_soc_init(void) if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase) panic("Failed to remap core resources"); - if (of_machine_is_compatible("lantiq,vr9")) { - struct resource res_xbar; - struct device_node *np_xbar = - of_find_compatible_node(NULL, NULL, - "lantiq,xbar-xway"); - - if (!np_xbar) - panic("Failed to load xbar nodes from devicetree"); - if (of_address_to_resource(np_xbar, 0, &res_xbar)) - panic("Failed to get xbar resources"); - if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), - res_xbar.name)) - panic("Failed to get xbar resources"); - - ltq_xbar_membase = ioremap_nocache(res_xbar.start, - resource_size(&res_xbar)); - if (!ltq_xbar_membase) - panic("Failed to remap xbar resources"); - } - /* make sure to unprotect the memory region where flash is located */ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0); @@ -507,8 +469,8 @@ void __init ltq_soc_init(void) if (of_machine_is_compatible("lantiq,grx390") || of_machine_is_compatible("lantiq,ar10")) { - clkdev_add_pmu("1e101000.usb", "phy", 1, 2, PMU_ANALOG_USB0_P); - clkdev_add_pmu("1e106000.usb", "phy", 1, 2, PMU_ANALOG_USB1_P); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P); /* rc 0 */ clkdev_add_pmu("1d900000.pcie", "phy", 1, 2, PMU_ANALOG_PCIE0_P); clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI); @@ -528,8 +490,8 @@ void __init ltq_soc_init(void) else clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M, CLOCK_133M); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE); clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY); clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY); @@ -538,8 +500,8 @@ void __init ltq_soc_init(void) } else if (of_machine_is_compatible("lantiq,grx390")) { clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(), ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); /* rc 2 */ clkdev_add_pmu("1a800000.pcie", "phy", 1, 2, PMU_ANALOG_PCIE2_P); clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI); @@ -551,22 +513,23 @@ void __init ltq_soc_init(void) } else if (of_machine_is_compatible("lantiq,ar10")) { clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(), ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY); + clkdev_add_pmu("1f203020.gphy", NULL, 1, 0, PMU_GPHY); + 
clkdev_add_pmu("1f203068.gphy", NULL, 1, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); } else if (of_machine_is_compatible("lantiq,vr9")) { clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(), ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0 | PMU_AHBM); - clkdev_add_pmu("1e106000.usb", "phy", 1, 0, PMU_USB1_P); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1 | PMU_AHBM); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM); clkdev_add_pmu("1d900000.pcie", "phy", 1, 1, PMU1_PCIE_PHY); clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK); clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI); @@ -579,17 +542,18 @@ void __init ltq_soc_init(void) PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | PMU_PPE_QSB | PMU_PPE_TOP); - clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY); + clkdev_add_pmu("1f203020.gphy", NULL, 0, 0, PMU_GPHY); + clkdev_add_pmu("1f203068.gphy", NULL, 0, 0, PMU_GPHY); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); } else if (of_machine_is_compatible("lantiq,ar9")) { clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), ltq_ar9_fpi_hz(), CLOCK_250M); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); - clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1); - clkdev_add_pmu("1e106000.usb", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); @@ -598,14 +562,11 @@ void __init ltq_soc_init(void) } else { clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(), ltq_danube_fpi_hz(), ltq_danube_pp32_hz()); - clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0); - clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P); + clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0); + clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0); } - - if (of_machine_is_compatible("lantiq,vr9")) - xbar_fpi_burst_disable(); } diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c deleted file mode 100644 index f0a0f2d431b2335fc9f851a41766ec089ea7073d..0000000000000000000000000000000000000000 --- a/arch/mips/lantiq/xway/xrx200_phy_fw.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Lantiq XRX200 PHY Firmware Loader - * Author: John Crispin - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 
2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2012 John Crispin - */ - -#include -#include -#include -#include - -#include - -#define XRX200_GPHY_FW_ALIGN (16 * 1024) - -static dma_addr_t xway_gphy_load(struct platform_device *pdev) -{ - const struct firmware *fw; - dma_addr_t dev_addr = 0; - const char *fw_name; - void *fw_addr; - size_t size; - - if (of_get_property(pdev->dev.of_node, "firmware1", NULL) || - of_get_property(pdev->dev.of_node, "firmware2", NULL)) { - switch (ltq_soc_type()) { - case SOC_TYPE_VR9: - if (of_property_read_string(pdev->dev.of_node, - "firmware1", &fw_name)) { - dev_err(&pdev->dev, - "failed to load firmware filename\n"); - return 0; - } - break; - case SOC_TYPE_VR9_2: - if (of_property_read_string(pdev->dev.of_node, - "firmware2", &fw_name)) { - dev_err(&pdev->dev, - "failed to load firmware filename\n"); - return 0; - } - break; - } - } else if (of_property_read_string(pdev->dev.of_node, - "firmware", &fw_name)) { - dev_err(&pdev->dev, "failed to load firmware filename\n"); - return 0; - } - - dev_info(&pdev->dev, "requesting %s\n", fw_name); - if (request_firmware(&fw, fw_name, &pdev->dev)) { - dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name); - return 0; - } - - /* - * GPHY cores need the firmware code in a persistent and contiguous - * memory area with a 16 kB boundary aligned start address - */ - size = fw->size + XRX200_GPHY_FW_ALIGN; - - fw_addr = dma_alloc_coherent(&pdev->dev, size, &dev_addr, GFP_KERNEL); - if (fw_addr) { - fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); - dev_addr = ALIGN(dev_addr, XRX200_GPHY_FW_ALIGN); - memcpy(fw_addr, fw->data, fw->size); - } else { - dev_err(&pdev->dev, "failed to alloc firmware memory\n"); - } - - release_firmware(fw); - return dev_addr; -} - -static int xway_phy_fw_probe(struct platform_device *pdev) -{ - dma_addr_t fw_addr; - struct property *pp; - unsigned char *phyids; - int i, ret = 0; - - fw_addr = xway_gphy_load(pdev); - if (!fw_addr) - return -EINVAL; - pp = of_find_property(pdev->dev.of_node, "phys", NULL); - if (!pp) - return -ENOENT; - phyids = pp->value; - for (i = 0; i < pp->length && !ret; i++) - ret = xrx200_gphy_boot(&pdev->dev, phyids[i], fw_addr); - if (!ret) - mdelay(100); - return ret; -} - -static const struct of_device_id xway_phy_match[] = { - { .compatible = "lantiq,phy-xrx200" }, - {}, -}; - -static struct platform_driver xway_phy_driver = { - .probe = xway_phy_fw_probe, - .driver = { - .name = "phy-xrx200", - .of_match_table = xway_phy_match, - }, -}; -builtin_platform_driver(xway_phy_driver); diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index a37fe3d1ee2fbd6b0b0c52fb954daed9f8dfc03e..6ab430d245758abeb74533b9aa9d85c123fad454 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -6,7 +6,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ mips-atomic.o strncpy_user.o \ strnlen_user.o uncached.o -obj-y += iomap.o +obj-y += iomap.o iomap_copy.o obj-$(CONFIG_PCI) += iomap-pci.o lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index 2307a3cb2714fca0819cfb7727eca41650f51fbf..68c495ed71e303f4bb83bd5cac88d04c73b2a2a9 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -8,6 +8,7 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2007, 2014 Maciej W. 
Rozycki */ +#include #include #include #include diff --git a/arch/mips/lib/iomap_copy.c b/arch/mips/lib/iomap_copy.c new file mode 100644 index 0000000000000000000000000000000000000000..368bb38267c58a5ddab15d02ae8909fd7591d630 --- /dev/null +++ b/arch/mips/lib/iomap_copy.c @@ -0,0 +1,42 @@ +/* + * This file is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include + +/** + * __ioread64_copy - copy data from MMIO space, in 64-bit units + * @to: destination (must be 64-bit aligned) + * @from: source, in MMIO space (must be 64-bit aligned) + * @count: number of 64-bit quantities to copy + * + * Copy data from MMIO space to kernel space, in units of 32 or 64 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __ioread64_copy(void *to, const void __iomem *from, size_t count) +{ +#ifdef CONFIG_64BIT + u64 *dst = to; + const u64 __iomem *src = from; + const u64 __iomem *end = src + count; + + while (src < end) + *dst++ = __raw_readq(src++); +#else + __ioread32_copy(to, from, count * 2); +#endif +} +EXPORT_SYMBOL_GPL(__ioread64_copy); diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c index a78fb657068cb39fc785e241f9b70aeaa412a09b..8281334df9c86429039501dcd361aeca44141c83 100644 --- a/arch/mips/loongson64/lemote-2f/clock.c +++ b/arch/mips/loongson64/lemote-2f/clock.c @@ -80,6 +80,9 @@ EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return (unsigned long)clk->rate; } EXPORT_SYMBOL(clk_get_rate); diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index b7a355c3c40813b45aa29940471bfa7148aa643a..8501109bb0f0f5bb70f7a4907431ff0c54c13e65 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -319,8 +319,8 @@ static void loongson3_init_secondary(void) loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]); per_cpu(cpu_state, cpu) = CPU_ONLINE; - cpu_data[cpu].core = - cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; + cpu_set_core(&cpu_data[cpu], + cpu_logical_map(cpu) % loongson_sysconf.cores_per_package); cpu_data[cpu].package = cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; @@ -386,7 +386,8 @@ static void __init loongson3_smp_setup(void) ipi_status0_regs_init(); ipi_en0_regs_init(); ipi_mailbox_buf_init(); - cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; + cpu_set_core(&cpu_data[0], + cpu_logical_map(0) % loongson_sysconf.cores_per_package); cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; } @@ -399,7 +400,7 @@ static void __init loongson3_prepare_cpus(unsigned int max_cpus) /* * Setup the PC, SP, and GP of a secondary processor and start it runing! 
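The new arch/mips/lib/iomap_copy.c above adds __ioread64_copy(), which copies 'count' 64-bit quantities out of MMIO space (falling back to two 32-bit reads per quantity on 32-bit kernels). A short usage sketch; the device, register block and counter count are invented for illustration:

#include <linux/io.h>

#define NUM_COUNTERS    8       /* hypothetical 64-bit counter registers */

static void snapshot_counters(void __iomem *regs, u64 *buf)
{
        /* Pull all eight 64-bit counters into a kernel buffer. */
        __ioread64_copy(buf, regs, NUM_COUNTERS);
}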
*/ -static void loongson3_boot_secondary(int cpu, struct task_struct *idle) +static int loongson3_boot_secondary(int cpu, struct task_struct *idle) { unsigned long startargs[4]; @@ -422,6 +423,7 @@ static void loongson3_boot_secondary(int cpu, struct task_struct *idle) (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8)); loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0)); + return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -697,7 +699,7 @@ void play_dead(void) static int loongson3_disable_clock(unsigned int cpu) { - uint64_t core_id = cpu_data[cpu].core; + uint64_t core_id = cpu_core(&cpu_data[cpu]); uint64_t package_id = cpu_data[cpu].package; if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) { @@ -711,7 +713,7 @@ static int loongson3_disable_clock(unsigned int cpu) static int loongson3_enable_clock(unsigned int cpu) { - uint64_t core_id = cpu_data[cpu].core; + uint64_t core_id = cpu_core(&cpu_data[cpu]); uint64_t package_id = cpu_data[cpu].package; if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) { @@ -734,7 +736,7 @@ early_initcall(register_loongson3_notifier); #endif -struct plat_smp_ops loongson3_smp_ops = { +const struct plat_smp_ops loongson3_smp_ops = { .send_ipi_single = loongson3_send_ipi_single, .send_ipi_mask = loongson3_send_ipi_mask, .init_secondary = loongson3_init_secondary, diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile index e9bbc2a6526fa57072b0b4299ec1a230bfe01c29..e9f10b88b695baa01ec6f3cc52bd46d3742994ff 100644 --- a/arch/mips/math-emu/Makefile +++ b/arch/mips/math-emu/Makefile @@ -4,9 +4,11 @@ obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \ dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \ - dp_tint.o dp_fint.o dp_maddf.o dp_2008class.o dp_fmin.o dp_fmax.o \ + dp_tint.o dp_fint.o dp_rint.o dp_maddf.o dp_2008class.o dp_fmin.o \ + dp_fmax.o \ sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \ - sp_tint.o sp_fint.o sp_maddf.o sp_2008class.o sp_fmin.o sp_fmax.o \ + sp_tint.o sp_fint.o sp_rint.o sp_maddf.o sp_2008class.o sp_fmin.o \ + sp_fmax.o \ dsemul.o lib-y += ieee754d.o \ diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index f08a7b4facb9d2011d045030e58cef82e49d4fdb..192542dbd9724788838a74cb5ed4b8404b0d83fc 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -58,7 +58,7 @@ static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *, mips_instruction); static int fpux_emu(struct pt_regs *, - struct mips_fpu_struct *, mips_instruction, void *__user *); + struct mips_fpu_struct *, mips_instruction, void __user **); /* Control registers */ @@ -830,12 +830,12 @@ do { \ } while (0) #define DIFROMREG(di, x) \ - ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0)) + ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0)) #define DITOREG(di, x) \ do { \ unsigned fpr, i; \ - fpr = (x) & ~(cop1_64bit(xcp) == 0); \ + fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \ set_fpr64(&ctx->fpr[fpr], 0, di); \ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \ set_fpr64(&ctx->fpr[fpr], i, 0); \ @@ -973,7 +973,7 @@ static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx, */ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - struct mm_decoded_insn dec_insn, void *__user *fault_addr) + struct mm_decoded_insn dec_insn, void __user **fault_addr) { unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc; unsigned int cond, cbit, bit0; @@ -1195,9 +1195,11 @@ static int 
cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, bit0 = get_fpr32(fpr, 0) & 0x1; switch (MIPSInst_RS(ir)) { case bc1eqz_op: + MIPS_FPU_EMU_INC_STATS(bc1eqz); cond = bit0 == 0; break; case bc1nez_op: + MIPS_FPU_EMU_INC_STATS(bc1nez); cond = bit0 != 0; break; } @@ -1230,6 +1232,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } branch_common: + MIPS_FPU_EMU_INC_STATS(branches); set_delay_slot(xcp); if (cond) { /* @@ -1460,7 +1463,7 @@ DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg); DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg); static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - mips_instruction ir, void *__user *fault_addr) + mips_instruction ir, void __user **fault_addr) { unsigned rcsr = 0; /* resulting csr */ @@ -1682,15 +1685,19 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { /* binary ops */ case fadd_op: + MIPS_FPU_EMU_INC_STATS(add_s); handler.b = ieee754sp_add; goto scopbop; case fsub_op: + MIPS_FPU_EMU_INC_STATS(sub_s); handler.b = ieee754sp_sub; goto scopbop; case fmul_op: + MIPS_FPU_EMU_INC_STATS(mul_s); handler.b = ieee754sp_mul; goto scopbop; case fdiv_op: + MIPS_FPU_EMU_INC_STATS(div_s); handler.b = ieee754sp_div; goto scopbop; @@ -1699,6 +1706,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sqrt_s); handler.u = ieee754sp_sqrt; goto scopuop; @@ -1711,6 +1719,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rsqrt_s); handler.u = fpemu_sp_rsqrt; goto scopuop; @@ -1718,6 +1727,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(recip_s); handler.u = fpemu_sp_recip; goto scopuop; @@ -1754,6 +1764,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(seleqz_s); SPFROMREG(rv.s, MIPSInst_FT(ir)); if (rv.w & 0x1) rv.w = 0; @@ -1765,6 +1776,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(selnez_s); SPFROMREG(rv.s, MIPSInst_FT(ir)); if (rv.w & 0x1) SPFROMREG(rv.s, MIPSInst_FS(ir)); @@ -1778,6 +1790,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maddf_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); @@ -1791,6 +1804,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(msubf_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); @@ -1804,9 +1818,9 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rint_s); SPFROMREG(fs, MIPSInst_FS(ir)); - rv.l = ieee754sp_tlong(fs); - rv.s = ieee754sp_flong(rv.l); + rv.s = ieee754sp_rint(fs); goto copcsr; } @@ -1816,6 +1830,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(class_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_2008class(fs); rfmt = w_fmt; @@ -1828,6 +1843,7 @@ static int fpu_emu(struct pt_regs *xcp, struct 
mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(min_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmin(fs, ft); @@ -1840,6 +1856,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(mina_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmina(fs, ft); @@ -1852,6 +1869,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(max_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmax(fs, ft); @@ -1864,6 +1882,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maxa_s); SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmaxa(fs, ft); @@ -1871,15 +1890,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, } case fabs_op: + MIPS_FPU_EMU_INC_STATS(abs_s); handler.u = ieee754sp_abs; goto scopuop; case fneg_op: + MIPS_FPU_EMU_INC_STATS(neg_s); handler.u = ieee754sp_neg; goto scopuop; case fmov_op: /* an easy one */ + MIPS_FPU_EMU_INC_STATS(mov_s); SPFROMREG(rv.s, MIPSInst_FS(ir)); goto copcsr; @@ -1922,12 +1944,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, return SIGILL; /* not defined */ case fcvtd_op: + MIPS_FPU_EMU_INC_STATS(cvt_d_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fsp(fs); rfmt = d_fmt; goto copcsr; case fcvtw_op: + MIPS_FPU_EMU_INC_STATS(cvt_w_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_tint(fs); rfmt = w_fmt; @@ -1940,6 +1964,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + if (MIPSInst_FUNC(ir) == fceil_op) + MIPS_FPU_EMU_INC_STATS(ceil_w_s); + if (MIPSInst_FUNC(ir) == ffloor_op) + MIPS_FPU_EMU_INC_STATS(floor_w_s); + if (MIPSInst_FUNC(ir) == fround_op) + MIPS_FPU_EMU_INC_STATS(round_w_s); + if (MIPSInst_FUNC(ir) == ftrunc_op) + MIPS_FPU_EMU_INC_STATS(trunc_w_s); + oldrm = ieee754_csr.rm; SPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -1952,6 +1985,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sel_s); SPFROMREG(fd, MIPSInst_FD(ir)); if (fd.bits & 0x1) SPFROMREG(rv.s, MIPSInst_FT(ir)); @@ -1963,6 +1997,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(cvt_l_s); SPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754sp_tlong(fs); rfmt = l_fmt; @@ -1975,6 +2010,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + if (MIPSInst_FUNC(ir) == fceill_op) + MIPS_FPU_EMU_INC_STATS(ceil_l_s); + if (MIPSInst_FUNC(ir) == ffloorl_op) + MIPS_FPU_EMU_INC_STATS(floor_l_s); + if (MIPSInst_FUNC(ir) == froundl_op) + MIPS_FPU_EMU_INC_STATS(round_l_s); + if (MIPSInst_FUNC(ir) == ftruncl_op) + MIPS_FPU_EMU_INC_STATS(trunc_l_s); + oldrm = ieee754_csr.rm; SPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -2016,15 +2060,19 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { /* binary ops */ case fadd_op: + MIPS_FPU_EMU_INC_STATS(add_d); handler.b = ieee754dp_add; goto dcopbop; case fsub_op: + 
MIPS_FPU_EMU_INC_STATS(sub_d); handler.b = ieee754dp_sub; goto dcopbop; case fmul_op: + MIPS_FPU_EMU_INC_STATS(mul_d); handler.b = ieee754dp_mul; goto dcopbop; case fdiv_op: + MIPS_FPU_EMU_INC_STATS(div_d); handler.b = ieee754dp_div; goto dcopbop; @@ -2033,6 +2081,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sqrt_d); handler.u = ieee754dp_sqrt; goto dcopuop; /* @@ -2044,12 +2093,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rsqrt_d); handler.u = fpemu_dp_rsqrt; goto dcopuop; case frecip_op: if (!cpu_has_mips_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(recip_d); handler.u = fpemu_dp_recip; goto dcopuop; case fmovc_op: @@ -2083,6 +2134,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(seleqz_d); DPFROMREG(rv.d, MIPSInst_FT(ir)); if (rv.l & 0x1) rv.l = 0; @@ -2094,6 +2146,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(selnez_d); DPFROMREG(rv.d, MIPSInst_FT(ir)); if (rv.l & 0x1) DPFROMREG(rv.d, MIPSInst_FS(ir)); @@ -2107,6 +2160,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maddf_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); @@ -2120,6 +2174,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(msubf_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); @@ -2133,9 +2188,9 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(rint_d); DPFROMREG(fs, MIPSInst_FS(ir)); - rv.l = ieee754dp_tlong(fs); - rv.d = ieee754dp_flong(rv.l); + rv.d = ieee754dp_rint(fs); goto copcsr; } @@ -2145,9 +2200,10 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(class_d); DPFROMREG(fs, MIPSInst_FS(ir)); - rv.w = ieee754dp_2008class(fs); - rfmt = w_fmt; + rv.l = ieee754dp_2008class(fs); + rfmt = l_fmt; break; } @@ -2157,6 +2213,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(min_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmin(fs, ft); @@ -2169,6 +2226,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(mina_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmina(fs, ft); @@ -2181,6 +2239,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(max_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmax(fs, ft); @@ -2193,6 +2252,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(maxa_d); DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmaxa(fs, ft); @@ -2200,15 +2260,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, } 
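The MIPS_FPU_EMU_INC_STATS() calls added throughout these cp1emu.c hunks bump per-instruction emulation counters, which is why nearly every opcode case gains one line. A generic sketch of that style of per-CPU counter, with invented names rather than the kernel's actual fpuemustats layout:

#include <linux/percpu.h>

struct emu_stats {
        unsigned long add_s;
        unsigned long add_d;
};

static DEFINE_PER_CPU(struct emu_stats, emu_stats);

/* Bump one named counter on the local CPU, e.g. EMU_INC_STATS(add_s). */
#define EMU_INC_STATS(field)    this_cpu_inc(emu_stats.field)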
case fabs_op: + MIPS_FPU_EMU_INC_STATS(abs_d); handler.u = ieee754dp_abs; goto dcopuop; case fneg_op: + MIPS_FPU_EMU_INC_STATS(neg_d); handler.u = ieee754dp_neg; goto dcopuop; case fmov_op: /* an easy one */ + MIPS_FPU_EMU_INC_STATS(mov_d); DPFROMREG(rv.d, MIPSInst_FS(ir)); goto copcsr; @@ -2228,6 +2291,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * unary conv ops */ case fcvts_op: + MIPS_FPU_EMU_INC_STATS(cvt_s_d); DPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fdp(fs); rfmt = s_fmt; @@ -2237,6 +2301,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, return SIGILL; /* not defined */ case fcvtw_op: + MIPS_FPU_EMU_INC_STATS(cvt_w_d); DPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754dp_tint(fs); /* wrong */ rfmt = w_fmt; @@ -2249,6 +2314,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_2_3_4_5_r) return SIGILL; + if (MIPSInst_FUNC(ir) == fceil_op) + MIPS_FPU_EMU_INC_STATS(ceil_w_d); + if (MIPSInst_FUNC(ir) == ffloor_op) + MIPS_FPU_EMU_INC_STATS(floor_w_d); + if (MIPSInst_FUNC(ir) == fround_op) + MIPS_FPU_EMU_INC_STATS(round_w_d); + if (MIPSInst_FUNC(ir) == ftrunc_op) + MIPS_FPU_EMU_INC_STATS(trunc_w_d); + oldrm = ieee754_csr.rm; DPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -2261,6 +2335,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(sel_d); DPFROMREG(fd, MIPSInst_FD(ir)); if (fd.bits & 0x1) DPFROMREG(rv.d, MIPSInst_FT(ir)); @@ -2272,6 +2347,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + MIPS_FPU_EMU_INC_STATS(cvt_l_d); DPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754dp_tlong(fs); rfmt = l_fmt; @@ -2284,6 +2360,15 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, if (!cpu_has_mips_3_4_5_64_r2_r6) return SIGILL; + if (MIPSInst_FUNC(ir) == fceill_op) + MIPS_FPU_EMU_INC_STATS(ceil_l_d); + if (MIPSInst_FUNC(ir) == ffloorl_op) + MIPS_FPU_EMU_INC_STATS(floor_l_d); + if (MIPSInst_FUNC(ir) == froundl_op) + MIPS_FPU_EMU_INC_STATS(round_l_d); + if (MIPSInst_FUNC(ir) == ftruncl_op) + MIPS_FPU_EMU_INC_STATS(trunc_l_d); + oldrm = ieee754_csr.rm; DPFROMREG(fs, MIPSInst_FS(ir)); ieee754_csr.rm = MIPSInst_FUNC(ir); @@ -2325,12 +2410,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { case fcvts_op: /* convert word to single precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_s_w); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fint(fs.bits); rfmt = s_fmt; goto copcsr; case fcvtd_op: /* convert word to double precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_d_w); SPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fint(fs.bits); rfmt = d_fmt; @@ -2350,6 +2437,90 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, (MIPSInst_FUNC(ir) & 0x20)) return SIGILL; + if (!sig) { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_af_s); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_un_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_eq_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ueq_s); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_lt_s); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_ult_s); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_le_s); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_ule_s); + break; + } + } else { + switch (cmpop) { + case 1: + 
MIPS_FPU_EMU_INC_STATS(cmp_or_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_une_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ne_s); + break; + } + } + } else { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_saf_s); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sun_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_seq_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sueq_s); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_slt_s); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_sult_s); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_sle_s); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_sule_s); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sor_s); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_sune_s); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sne_s); + break; + } + } + } + /* fmt is w_fmt for single precision so fix it */ rfmt = s_fmt; /* default to false */ @@ -2394,6 +2565,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } } + break; } case l_fmt: @@ -2406,11 +2578,13 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, switch (MIPSInst_FUNC(ir)) { case fcvts_op: /* convert long to single precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_s_l); rv.s = ieee754sp_flong(bits); rfmt = s_fmt; goto copcsr; case fcvtd_op: /* convert long to double precision real */ + MIPS_FPU_EMU_INC_STATS(cvt_d_l); rv.d = ieee754dp_flong(bits); rfmt = d_fmt; goto copcsr; @@ -2424,6 +2598,90 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, (MIPSInst_FUNC(ir) & 0x20)) return SIGILL; + if (!sig) { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_af_d); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_un_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_eq_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ueq_d); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_lt_d); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_ult_d); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_le_d); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_ule_d); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_or_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_une_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_ne_d); + break; + } + } + } else { + if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) { + switch (cmpop) { + case 0: + MIPS_FPU_EMU_INC_STATS(cmp_saf_d); + break; + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sun_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_seq_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sueq_d); + break; + case 4: + MIPS_FPU_EMU_INC_STATS(cmp_slt_d); + break; + case 5: + MIPS_FPU_EMU_INC_STATS(cmp_sult_d); + break; + case 6: + MIPS_FPU_EMU_INC_STATS(cmp_sle_d); + break; + case 7: + MIPS_FPU_EMU_INC_STATS(cmp_sule_d); + break; + } + } else { + switch (cmpop) { + case 1: + MIPS_FPU_EMU_INC_STATS(cmp_sor_d); + break; + case 2: + MIPS_FPU_EMU_INC_STATS(cmp_sune_d); + break; + case 3: + MIPS_FPU_EMU_INC_STATS(cmp_sne_d); + break; + } + } + } + /* fmt is l_fmt for double precision so fix it */ rfmt = d_fmt; /* default to false */ @@ -2468,6 +2726,8 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } } + break; + default: return SIGILL; } @@ -2553,7 +2813,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * For simplicity we always terminate upon an ISA mode switch. 
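Several prototypes in this chunk change the fault_addr parameter from 'void *__user *' to 'void __user **'. The __user marker belongs on the pointed-to type: the argument is a kernel pointer to a user-space pointer, and sparse only checks it correctly when __user qualifies the pointee, i.e. void __user **. A tiny sketch of the corrected spelling (the function name is made up):

#include <linux/compiler.h>

/* 'slot' lives in kernel memory; what it points to is a user address. */
static void record_fault_addr(void __user **slot, void __user *uaddr)
{
        *slot = uaddr;
}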
*/ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - int has_fpu, void *__user *fault_addr) + int has_fpu, void __user **fault_addr) { unsigned long oldepc, prevepc; struct mm_decoded_insn dec_insn; diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c index fd71b8daaaf20525abfb4470f6892fc1dddc5cd1..5bec64f2884eb14103e53726e42a6e29563ff6b5 100644 --- a/arch/mips/math-emu/dp_fmax.c +++ b/arch/mips/math-emu/dp_fmax.c @@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) return ys ? x : y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y) else if (xs < ys) return x; - /* Compare exponent */ - if (xe > ye) - return x; - else if (xe < ye) - return y; + /* Signs of inputs are equal, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } else { + /* Inputs are both negative */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return y; + return x; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return y; - return x; + return x; + return y; } union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) @@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,6 +202,9 @@ union ieee754dp 
ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754dp_inf(xs & ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): @@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): @@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) return y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y) return y; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) return y; - return x; + else if (xm > ym) + return x; + else if (xs == 0) + return x; + return y; } diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c index c1072b0dfb9519dad2b5e0d8b79d2199012d1213..a287b23818d82ed0cf59f9066032619dea8f991a 100644 --- a/arch/mips/math-emu/dp_fmin.c +++ b/arch/mips/math-emu/dp_fmin.c @@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) return ys ? 
y : x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y) else if (xs < ys) return y; - /* Compare exponent */ - if (xe > ye) - return y; - else if (xe < ye) - return x; + /* Signs of inputs are the same, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } else { + /* Inputs are both negative */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return x; + return y; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return x; - return y; + return y; + return x; } union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) @@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754dp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754dp_inf(xs | ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): - return x; + return y; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): - return y; + return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754dp_zero(1); + return ieee754dp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; @@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y) return x; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) + return x; + else if (xm > ym) + return y; + else if (xs == 1) return x; return y; } diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c index caa62f20a888d120dc22ec187aa5693470b9ec22..e0d9be5fbf4cd541406b93c5d2add6814a82a9eb 100644 --- a/arch/mips/math-emu/dp_maddf.c +++ 
b/arch/mips/math-emu/dp_maddf.c @@ -14,22 +14,45 @@ #include "ieee754dp.h" -enum maddf_flags { - maddf_negate_product = 1 << 0, -}; + +/* 128 bits shift right logical with rounding. */ +void srl128(u64 *hptr, u64 *lptr, int count) +{ + u64 low; + + if (count >= 128) { + *lptr = *hptr != 0 || *lptr != 0; + *hptr = 0; + } else if (count >= 64) { + if (count == 64) { + *lptr = *hptr | (*lptr != 0); + } else { + low = *lptr; + *lptr = *hptr >> (count - 64); + *lptr |= (*hptr << (128 - count)) != 0 || low != 0; + } + *hptr = 0; + } else { + low = *lptr; + *lptr = low >> count | *hptr << (64 - count); + *lptr |= (low << (64 - count)) != 0; + *hptr = *hptr >> count; + } +} static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, union ieee754dp y, enum maddf_flags flags) { int re; int rs; - u64 rm; unsigned lxm; unsigned hxm; unsigned lym; unsigned hym; u64 lrm; u64 hrm; + u64 lzm; + u64 hzm; u64 t; u64 at; int s; @@ -48,52 +71,34 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, ieee754_clearcx(); - switch (zc) { - case IEEE754_CLASS_SNAN: - ieee754_setcx(IEEE754_INVALID_OPERATION); + /* + * Handle the cases when at least one of x, y or z is a NaN. + * Order of precedence is sNaN, qNaN and z, x, y. + */ + if (zc == IEEE754_CLASS_SNAN) return ieee754dp_nanxcpt(z); - case IEEE754_CLASS_DNORM: - DPDNORMZ; - /* QNAN and ZERO cases are handled separately below */ - } - - switch (CLPAIR(xc, yc)) { - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): - return ieee754dp_nanxcpt(y); - - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): + if (xc == IEEE754_CLASS_SNAN) return ieee754dp_nanxcpt(x); - - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): + if (yc == IEEE754_CLASS_SNAN) + return ieee754dp_nanxcpt(y); + if (zc == IEEE754_CLASS_QNAN) + return z; + if (xc == IEEE754_CLASS_QNAN) + return x; + if (yc == IEEE754_CLASS_QNAN) return y; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): - return x; + if (zc == IEEE754_CLASS_DNORM) + DPDNORMZ; + /* ZERO z cases are handled separately below */ + switch (CLPAIR(xc, yc)) { /* * Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; ieee754_setcx(IEEE754_INVALID_OPERATION); return ieee754dp_indef(); @@ -102,9 +107,27 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; - return ieee754dp_inf(xs ^ ys); + if ((zc == 
IEEE754_CLASS_INF) && + ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) { + /* + * Cases of addition of infinities with opposite signs + * or subtraction of infinities with same signs. + */ + ieee754_setcx(IEEE754_INVALID_OPERATION); + return ieee754dp_indef(); + } + /* + * z is here either not an infinity, or an infinity having the + * same sign as product (x*y) (in case of MADDF.D instruction) + * or product -(x*y) (in MSUBF.D case). The result must be an + * infinity, and its sign is determined only by the value of + * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y. + */ + if (flags & MADDF_NEGATE_PRODUCT) + return ieee754dp_inf(1 ^ (xs ^ ys)); + else + return ieee754dp_inf(xs ^ ys); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): @@ -113,32 +136,42 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); - /* Multiplication is 0 so just return z */ + if (zc == IEEE754_CLASS_ZERO) { + /* Handle cases +0 + (-0) and similar ones. */ + if ((!(flags & MADDF_NEGATE_PRODUCT) + && (zs == (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) + && (zs != (xs ^ ys)))) + /* + * Cases of addition of zeros of equal signs + * or subtraction of zeroes of opposite signs. + * The sign of the resulting zero is in any + * such case determined only by the sign of z. + */ + return z; + + return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); + } + /* x*y is here 0, and z is not 0, so just return z */ return z; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); DPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); DPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754dp_inf(zs); /* fall through to real computations */ } @@ -157,7 +190,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, re = xe + ye; rs = xs ^ ys; - if (flags & maddf_negate_product) + if (flags & MADDF_NEGATE_PRODUCT) rs ^= 1; /* shunt to top of word */ @@ -165,7 +198,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, ym <<= 64 - (DP_FBITS + 1); /* - * Multiply 64 bits xm, ym to give high 64 bits rm with stickness. + * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm. */ /* 32 * 32 => 64 */ @@ -195,81 +228,110 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x, hrm = hrm + (t >> 32); - rm = hrm | (lrm != 0); - - /* - * Sticky shift down to normal rounding precision. 
- */ - if ((s64) rm < 0) { - rm = (rm >> (64 - (DP_FBITS + 1 + 3))) | - ((rm << (DP_FBITS + 1 + 3)) != 0); + /* Put explicit bit at bit 126 if necessary */ + if ((int64_t)hrm < 0) { + lrm = (hrm << 63) | (lrm >> 1); + hrm = hrm >> 1; re++; - } else { - rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) | - ((rm << (DP_FBITS + 1 + 3 + 1)) != 0); } - assert(rm & (DP_HIDDEN_BIT << 3)); - if (zc == IEEE754_CLASS_ZERO) - return ieee754dp_format(rs, re, rm); + assert(hrm & (1 << 62)); - /* And now the addition */ - assert(zm & DP_HIDDEN_BIT); + if (zc == IEEE754_CLASS_ZERO) { + /* + * Move explicit bit from bit 126 to bit 55 since the + * ieee754dp_format code expects the mantissa to be + * 56 bits wide (53 + 3 rounding bits). + */ + srl128(&hrm, &lrm, (126 - 55)); + return ieee754dp_format(rs, re, lrm); + } - /* - * Provide guard,round and stick bit space. - */ - zm <<= 3; + /* Move explicit bit from bit 52 to bit 126 */ + lzm = 0; + hzm = zm << 10; + assert(hzm & (1 << 62)); + /* Make the exponents the same */ if (ze > re) { /* * Have to shift y fraction right to align. */ s = ze - re; - rm = XDPSRS(rm, s); + srl128(&hrm, &lrm, s); re += s; } else if (re > ze) { /* * Have to shift x fraction right to align. */ s = re - ze; - zm = XDPSRS(zm, s); + srl128(&hzm, &lzm, s); ze += s; } assert(ze == re); assert(ze <= DP_EMAX); + /* Do the addition */ if (zs == rs) { /* - * Generate 28 bit result of adding two 27 bit numbers - * leaving result in xm, xs and xe. + * Generate 128 bit result by adding two 127 bit numbers + * leaving result in hzm:lzm, zs and ze. */ - zm = zm + rm; - - if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */ - zm = XDPSRS1(zm); + hzm = hzm + hrm + (lzm > (lzm + lrm)); + lzm = lzm + lrm; + if ((int64_t)hzm < 0) { /* carry out */ + srl128(&hzm, &lzm, 1); ze++; } } else { - if (zm >= rm) { - zm = zm - rm; + if (hzm > hrm || (hzm == hrm && lzm >= lrm)) { + hzm = hzm - hrm - (lzm < lrm); + lzm = lzm - lrm; } else { - zm = rm - zm; + hzm = hrm - hzm - (lrm < lzm); + lzm = lrm - lzm; zs = rs; } - if (zm == 0) + if (lzm == 0 && hzm == 0) return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD); /* - * Normalize to rounding precision. + * Put explicit bit at bit 126 if necessary. */ - while ((zm >> (DP_FBITS + 3)) == 0) { - zm <<= 1; - ze--; + if (hzm == 0) { + /* left shift by 63 or 64 bits */ + if ((int64_t)lzm < 0) { + /* MSB of lzm is the explicit bit */ + hzm = lzm >> 1; + lzm = lzm << 63; + ze -= 63; + } else { + hzm = lzm; + lzm = 0; + ze -= 64; + } + } + + t = 0; + while ((hzm >> (62 - t)) == 0) + t++; + + assert(t <= 62); + if (t) { + hzm = hzm << t | lzm >> (64 - t); + lzm = lzm << t; + ze -= t; } } - return ieee754dp_format(zs, ze, zm); + /* + * Move explicit bit from bit 126 to bit 55 since the + * ieee754dp_format code expects the mantissa to be + * 56 bits wide (53 + 3 rounding bits). 
+	 */
+	srl128(&hzm, &lzm, (126 - 55));
+
+	return ieee754dp_format(zs, ze, lzm);
 }
 
 union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
@@ -281,5 +343,5 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
 union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
 				union ieee754dp y)
 {
-	return _dp_maddf(z, x, y, maddf_negate_product);
+	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
 }
diff --git a/arch/mips/math-emu/dp_rint.c b/arch/mips/math-emu/dp_rint.c
new file mode 100644
index 0000000000000000000000000000000000000000..c3b9077ff3575a64b9c123f79ec9afd5794815d3
--- /dev/null
+++ b/arch/mips/math-emu/dp_rint.c
@@ -0,0 +1,89 @@
+/* IEEE754 floating point arithmetic
+ * double precision: common utilities
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ * Copyright (C) 2017 Imagination Technologies, Ltd.
+ * Author: Aleksandar Markovic
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_rint(union ieee754dp x)
+{
+	union ieee754dp ret;
+	u64 residue;
+	int sticky;
+	int round;
+	int odd;
+
+	COMPXDP;
+
+	ieee754_clearcx();
+
+	EXPLODEXDP;
+	FLUSHXDP;
+
+	if (xc == IEEE754_CLASS_SNAN)
+		return ieee754dp_nanxcpt(x);
+
+	if ((xc == IEEE754_CLASS_QNAN) ||
+	    (xc == IEEE754_CLASS_INF) ||
+	    (xc == IEEE754_CLASS_ZERO))
+		return x;
+
+	if (xe >= DP_FBITS)
+		return x;
+
+	if (xe < -1) {
+		residue = xm;
+		round = 0;
+		sticky = residue != 0;
+		xm = 0;
+	} else {
+		residue = xm << (64 - DP_FBITS + xe);
+		round = (residue >> 63) != 0;
+		sticky = (residue << 1) != 0;
+		xm >>= DP_FBITS - xe;
+	}
+
+	odd = (xm & 0x1) != 0x0;
+
+	switch (ieee754_csr.rm) {
+	case FPU_CSR_RN:	/* toward nearest */
+		if (round && (sticky || odd))
+			xm++;
+		break;
+	case FPU_CSR_RZ:	/* toward zero */
+		break;
+	case FPU_CSR_RU:	/* toward +infinity */
+		if ((round || sticky) && !xs)
+			xm++;
+		break;
+	case FPU_CSR_RD:	/* toward -infinity */
+		if ((round || sticky) && xs)
+			xm++;
+		break;
+	}
+
+	if (round || sticky)
+		ieee754_setcx(IEEE754_INEXACT);
+
+	ret = ieee754dp_flong(xm);
+	DPSIGN(ret) = xs;
+
+	return ret;
+}
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index d3be351aed151305396723ddfff661837a114cde..92dc8fa565cb9fe5923cced32efb0e534b957d5a 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -67,6 +67,7 @@ union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y);
 union ieee754sp ieee754sp_fint(int x);
 union ieee754sp ieee754sp_flong(s64 x);
 union ieee754sp ieee754sp_fdp(union ieee754dp x);
+union ieee754sp ieee754sp_rint(union ieee754sp x);
 
 int ieee754sp_tint(union ieee754sp x);
 s64 ieee754sp_tlong(union ieee754sp x);
@@ -101,6 +102,7 @@ union ieee754dp ieee754dp_neg(union ieee754dp x);
 union ieee754dp ieee754dp_fint(int x);
 union ieee754dp ieee754dp_flong(s64 x);
 union ieee754dp ieee754dp_fsp(union ieee754sp x);
+union ieee754dp ieee754dp_rint(union ieee754dp x);
 
 int ieee754dp_tint(union ieee754dp x);
 s64 ieee754dp_tlong(union ieee754dp x);
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 8bc2f6963324de776a3ab66ec33638e2b8598211..dd2071f430e0bd180dce8e79bbf00df77b7318dc 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -26,6 +26,10 @@
 
 #define CLPAIR(x, y)	((x)*6+(y))
 
+enum maddf_flags {
+	MADDF_NEGATE_PRODUCT	= 1 << 0,
+};
+
 static inline void ieee754_clearcx(void)
 {
 	ieee754_csr.cx = 0;
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 8476067075fe013331609ed3db1028b04b9d3e66..0f63e4202cffa214009b0b29c041690645159267 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -45,6 +45,10 @@ static inline int ieee754sp_finite(union ieee754sp x)
 	return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
 }
 
+/* 64 bit right shift with rounding */
+#define XSPSRS64(v, rs)						\
+	(((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0))
+
 /* 3bit extended single precision sticky right shift */
 #define XSPSRS(v, rs)						\
 	((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index be650ed7db596bb8b586c7b65bfdddce56f65b3b..8c0ec154aecc1061a2887bb6d993ed1ced446a99 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -28,14 +28,190 @@ static int fpuemu_stat_get(void *data, u64 *val)
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
 
+/*
+ * Used to obtain names for a debugfs instruction counter, given field name
+ * in fpuemustats structure. For example, for input "cmp_sueq_d", the output
+ * would be "cmp.sueq.d". This is needed since dots are not allowed to be
+ * used in structure field names, and are, on the other hand, desired to be
+ * used in debugfs item names to be clearly associated to corresponding
+ * MIPS FPU instructions.
+ */ +static void adjust_instruction_counter_name(char *out_name, char *in_name) +{ + int i = 0; + + strcpy(out_name, in_name); + while (in_name[i] != '\0') { + if (out_name[i] == '_') + out_name[i] = '.'; + i++; + } +} + +static int fpuemustats_clear_show(struct seq_file *s, void *unused) +{ + __this_cpu_write((fpuemustats).emulated, 0); + __this_cpu_write((fpuemustats).loads, 0); + __this_cpu_write((fpuemustats).stores, 0); + __this_cpu_write((fpuemustats).branches, 0); + __this_cpu_write((fpuemustats).cp1ops, 0); + __this_cpu_write((fpuemustats).cp1xops, 0); + __this_cpu_write((fpuemustats).errors, 0); + __this_cpu_write((fpuemustats).ieee754_inexact, 0); + __this_cpu_write((fpuemustats).ieee754_underflow, 0); + __this_cpu_write((fpuemustats).ieee754_overflow, 0); + __this_cpu_write((fpuemustats).ieee754_zerodiv, 0); + __this_cpu_write((fpuemustats).ieee754_invalidop, 0); + __this_cpu_write((fpuemustats).ds_emul, 0); + + __this_cpu_write((fpuemustats).abs_s, 0); + __this_cpu_write((fpuemustats).abs_d, 0); + __this_cpu_write((fpuemustats).add_s, 0); + __this_cpu_write((fpuemustats).add_d, 0); + __this_cpu_write((fpuemustats).bc1eqz, 0); + __this_cpu_write((fpuemustats).bc1nez, 0); + __this_cpu_write((fpuemustats).ceil_w_s, 0); + __this_cpu_write((fpuemustats).ceil_w_d, 0); + __this_cpu_write((fpuemustats).ceil_l_s, 0); + __this_cpu_write((fpuemustats).ceil_l_d, 0); + __this_cpu_write((fpuemustats).class_s, 0); + __this_cpu_write((fpuemustats).class_d, 0); + __this_cpu_write((fpuemustats).cmp_af_s, 0); + __this_cpu_write((fpuemustats).cmp_af_d, 0); + __this_cpu_write((fpuemustats).cmp_eq_s, 0); + __this_cpu_write((fpuemustats).cmp_eq_d, 0); + __this_cpu_write((fpuemustats).cmp_le_s, 0); + __this_cpu_write((fpuemustats).cmp_le_d, 0); + __this_cpu_write((fpuemustats).cmp_lt_s, 0); + __this_cpu_write((fpuemustats).cmp_lt_d, 0); + __this_cpu_write((fpuemustats).cmp_ne_s, 0); + __this_cpu_write((fpuemustats).cmp_ne_d, 0); + __this_cpu_write((fpuemustats).cmp_or_s, 0); + __this_cpu_write((fpuemustats).cmp_or_d, 0); + __this_cpu_write((fpuemustats).cmp_ueq_s, 0); + __this_cpu_write((fpuemustats).cmp_ueq_d, 0); + __this_cpu_write((fpuemustats).cmp_ule_s, 0); + __this_cpu_write((fpuemustats).cmp_ule_d, 0); + __this_cpu_write((fpuemustats).cmp_ult_s, 0); + __this_cpu_write((fpuemustats).cmp_ult_d, 0); + __this_cpu_write((fpuemustats).cmp_un_s, 0); + __this_cpu_write((fpuemustats).cmp_un_d, 0); + __this_cpu_write((fpuemustats).cmp_une_s, 0); + __this_cpu_write((fpuemustats).cmp_une_d, 0); + __this_cpu_write((fpuemustats).cmp_saf_s, 0); + __this_cpu_write((fpuemustats).cmp_saf_d, 0); + __this_cpu_write((fpuemustats).cmp_seq_s, 0); + __this_cpu_write((fpuemustats).cmp_seq_d, 0); + __this_cpu_write((fpuemustats).cmp_sle_s, 0); + __this_cpu_write((fpuemustats).cmp_sle_d, 0); + __this_cpu_write((fpuemustats).cmp_slt_s, 0); + __this_cpu_write((fpuemustats).cmp_slt_d, 0); + __this_cpu_write((fpuemustats).cmp_sne_s, 0); + __this_cpu_write((fpuemustats).cmp_sne_d, 0); + __this_cpu_write((fpuemustats).cmp_sor_s, 0); + __this_cpu_write((fpuemustats).cmp_sor_d, 0); + __this_cpu_write((fpuemustats).cmp_sueq_s, 0); + __this_cpu_write((fpuemustats).cmp_sueq_d, 0); + __this_cpu_write((fpuemustats).cmp_sule_s, 0); + __this_cpu_write((fpuemustats).cmp_sule_d, 0); + __this_cpu_write((fpuemustats).cmp_sult_s, 0); + __this_cpu_write((fpuemustats).cmp_sult_d, 0); + __this_cpu_write((fpuemustats).cmp_sun_s, 0); + __this_cpu_write((fpuemustats).cmp_sun_d, 0); + __this_cpu_write((fpuemustats).cmp_sune_s, 0); + 
__this_cpu_write((fpuemustats).cmp_sune_d, 0); + __this_cpu_write((fpuemustats).cvt_d_l, 0); + __this_cpu_write((fpuemustats).cvt_d_s, 0); + __this_cpu_write((fpuemustats).cvt_d_w, 0); + __this_cpu_write((fpuemustats).cvt_l_s, 0); + __this_cpu_write((fpuemustats).cvt_l_d, 0); + __this_cpu_write((fpuemustats).cvt_s_d, 0); + __this_cpu_write((fpuemustats).cvt_s_l, 0); + __this_cpu_write((fpuemustats).cvt_s_w, 0); + __this_cpu_write((fpuemustats).cvt_w_s, 0); + __this_cpu_write((fpuemustats).cvt_w_d, 0); + __this_cpu_write((fpuemustats).div_s, 0); + __this_cpu_write((fpuemustats).div_d, 0); + __this_cpu_write((fpuemustats).floor_w_s, 0); + __this_cpu_write((fpuemustats).floor_w_d, 0); + __this_cpu_write((fpuemustats).floor_l_s, 0); + __this_cpu_write((fpuemustats).floor_l_d, 0); + __this_cpu_write((fpuemustats).maddf_s, 0); + __this_cpu_write((fpuemustats).maddf_d, 0); + __this_cpu_write((fpuemustats).max_s, 0); + __this_cpu_write((fpuemustats).max_d, 0); + __this_cpu_write((fpuemustats).maxa_s, 0); + __this_cpu_write((fpuemustats).maxa_d, 0); + __this_cpu_write((fpuemustats).min_s, 0); + __this_cpu_write((fpuemustats).min_d, 0); + __this_cpu_write((fpuemustats).mina_s, 0); + __this_cpu_write((fpuemustats).mina_d, 0); + __this_cpu_write((fpuemustats).mov_s, 0); + __this_cpu_write((fpuemustats).mov_d, 0); + __this_cpu_write((fpuemustats).msubf_s, 0); + __this_cpu_write((fpuemustats).msubf_d, 0); + __this_cpu_write((fpuemustats).mul_s, 0); + __this_cpu_write((fpuemustats).mul_d, 0); + __this_cpu_write((fpuemustats).neg_s, 0); + __this_cpu_write((fpuemustats).neg_d, 0); + __this_cpu_write((fpuemustats).recip_s, 0); + __this_cpu_write((fpuemustats).recip_d, 0); + __this_cpu_write((fpuemustats).rint_s, 0); + __this_cpu_write((fpuemustats).rint_d, 0); + __this_cpu_write((fpuemustats).round_w_s, 0); + __this_cpu_write((fpuemustats).round_w_d, 0); + __this_cpu_write((fpuemustats).round_l_s, 0); + __this_cpu_write((fpuemustats).round_l_d, 0); + __this_cpu_write((fpuemustats).rsqrt_s, 0); + __this_cpu_write((fpuemustats).rsqrt_d, 0); + __this_cpu_write((fpuemustats).sel_s, 0); + __this_cpu_write((fpuemustats).sel_d, 0); + __this_cpu_write((fpuemustats).seleqz_s, 0); + __this_cpu_write((fpuemustats).seleqz_d, 0); + __this_cpu_write((fpuemustats).selnez_s, 0); + __this_cpu_write((fpuemustats).selnez_d, 0); + __this_cpu_write((fpuemustats).sqrt_s, 0); + __this_cpu_write((fpuemustats).sqrt_d, 0); + __this_cpu_write((fpuemustats).sub_s, 0); + __this_cpu_write((fpuemustats).sub_d, 0); + __this_cpu_write((fpuemustats).trunc_w_s, 0); + __this_cpu_write((fpuemustats).trunc_w_d, 0); + __this_cpu_write((fpuemustats).trunc_l_s, 0); + __this_cpu_write((fpuemustats).trunc_l_d, 0); + + return 0; +} + +static int fpuemustats_clear_open(struct inode *inode, struct file *file) +{ + return single_open(file, fpuemustats_clear_show, inode->i_private); +} + +static const struct file_operations fpuemustats_clear_fops = { + .open = fpuemustats_clear_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int __init debugfs_fpuemu(void) { - struct dentry *d, *dir; + struct dentry *fpuemu_debugfs_base_dir; + struct dentry *fpuemu_debugfs_inst_dir; + struct dentry *d, *reset_file; if (!mips_debugfs_dir) return -ENODEV; - dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); - if (!dir) + + fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats", + mips_debugfs_dir); + if (!fpuemu_debugfs_base_dir) + return -ENOMEM; + + reset_file = debugfs_create_file("fpuemustats_clear", 0444, + 
mips_debugfs_dir, NULL, + &fpuemustats_clear_fops); + if (!reset_file) return -ENOMEM; #define FPU_EMU_STAT_OFFSET(m) \ @@ -43,7 +219,7 @@ static int __init debugfs_fpuemu(void) #define FPU_STAT_CREATE(m) \ do { \ - d = debugfs_create_file(#m , S_IRUGO, dir, \ + d = debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \ (void *)FPU_EMU_STAT_OFFSET(m), \ &fops_fpuemu_stat); \ if (!d) \ @@ -53,6 +229,7 @@ do { \ FPU_STAT_CREATE(emulated); FPU_STAT_CREATE(loads); FPU_STAT_CREATE(stores); + FPU_STAT_CREATE(branches); FPU_STAT_CREATE(cp1ops); FPU_STAT_CREATE(cp1xops); FPU_STAT_CREATE(errors); @@ -63,6 +240,139 @@ do { \ FPU_STAT_CREATE(ieee754_invalidop); FPU_STAT_CREATE(ds_emul); + fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions", + fpuemu_debugfs_base_dir); + if (!fpuemu_debugfs_inst_dir) + return -ENOMEM; + +#define FPU_STAT_CREATE_EX(m) \ +do { \ + char name[32]; \ + \ + adjust_instruction_counter_name(name, #m); \ + \ + d = debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \ + (void *)FPU_EMU_STAT_OFFSET(m), \ + &fops_fpuemu_stat); \ + if (!d) \ + return -ENOMEM; \ +} while (0) + + FPU_STAT_CREATE_EX(abs_s); + FPU_STAT_CREATE_EX(abs_d); + FPU_STAT_CREATE_EX(add_s); + FPU_STAT_CREATE_EX(add_d); + FPU_STAT_CREATE_EX(bc1eqz); + FPU_STAT_CREATE_EX(bc1nez); + FPU_STAT_CREATE_EX(ceil_w_s); + FPU_STAT_CREATE_EX(ceil_w_d); + FPU_STAT_CREATE_EX(ceil_l_s); + FPU_STAT_CREATE_EX(ceil_l_d); + FPU_STAT_CREATE_EX(class_s); + FPU_STAT_CREATE_EX(class_d); + FPU_STAT_CREATE_EX(cmp_af_s); + FPU_STAT_CREATE_EX(cmp_af_d); + FPU_STAT_CREATE_EX(cmp_eq_s); + FPU_STAT_CREATE_EX(cmp_eq_d); + FPU_STAT_CREATE_EX(cmp_le_s); + FPU_STAT_CREATE_EX(cmp_le_d); + FPU_STAT_CREATE_EX(cmp_lt_s); + FPU_STAT_CREATE_EX(cmp_lt_d); + FPU_STAT_CREATE_EX(cmp_ne_s); + FPU_STAT_CREATE_EX(cmp_ne_d); + FPU_STAT_CREATE_EX(cmp_or_s); + FPU_STAT_CREATE_EX(cmp_or_d); + FPU_STAT_CREATE_EX(cmp_ueq_s); + FPU_STAT_CREATE_EX(cmp_ueq_d); + FPU_STAT_CREATE_EX(cmp_ule_s); + FPU_STAT_CREATE_EX(cmp_ule_d); + FPU_STAT_CREATE_EX(cmp_ult_s); + FPU_STAT_CREATE_EX(cmp_ult_d); + FPU_STAT_CREATE_EX(cmp_un_s); + FPU_STAT_CREATE_EX(cmp_un_d); + FPU_STAT_CREATE_EX(cmp_une_s); + FPU_STAT_CREATE_EX(cmp_une_d); + FPU_STAT_CREATE_EX(cmp_saf_s); + FPU_STAT_CREATE_EX(cmp_saf_d); + FPU_STAT_CREATE_EX(cmp_seq_s); + FPU_STAT_CREATE_EX(cmp_seq_d); + FPU_STAT_CREATE_EX(cmp_sle_s); + FPU_STAT_CREATE_EX(cmp_sle_d); + FPU_STAT_CREATE_EX(cmp_slt_s); + FPU_STAT_CREATE_EX(cmp_slt_d); + FPU_STAT_CREATE_EX(cmp_sne_s); + FPU_STAT_CREATE_EX(cmp_sne_d); + FPU_STAT_CREATE_EX(cmp_sor_s); + FPU_STAT_CREATE_EX(cmp_sor_d); + FPU_STAT_CREATE_EX(cmp_sueq_s); + FPU_STAT_CREATE_EX(cmp_sueq_d); + FPU_STAT_CREATE_EX(cmp_sule_s); + FPU_STAT_CREATE_EX(cmp_sule_d); + FPU_STAT_CREATE_EX(cmp_sult_s); + FPU_STAT_CREATE_EX(cmp_sult_d); + FPU_STAT_CREATE_EX(cmp_sun_s); + FPU_STAT_CREATE_EX(cmp_sun_d); + FPU_STAT_CREATE_EX(cmp_sune_s); + FPU_STAT_CREATE_EX(cmp_sune_d); + FPU_STAT_CREATE_EX(cvt_d_l); + FPU_STAT_CREATE_EX(cvt_d_s); + FPU_STAT_CREATE_EX(cvt_d_w); + FPU_STAT_CREATE_EX(cvt_l_s); + FPU_STAT_CREATE_EX(cvt_l_d); + FPU_STAT_CREATE_EX(cvt_s_d); + FPU_STAT_CREATE_EX(cvt_s_l); + FPU_STAT_CREATE_EX(cvt_s_w); + FPU_STAT_CREATE_EX(cvt_w_s); + FPU_STAT_CREATE_EX(cvt_w_d); + FPU_STAT_CREATE_EX(div_s); + FPU_STAT_CREATE_EX(div_d); + FPU_STAT_CREATE_EX(floor_w_s); + FPU_STAT_CREATE_EX(floor_w_d); + FPU_STAT_CREATE_EX(floor_l_s); + FPU_STAT_CREATE_EX(floor_l_d); + FPU_STAT_CREATE_EX(maddf_s); + FPU_STAT_CREATE_EX(maddf_d); + FPU_STAT_CREATE_EX(max_s); + FPU_STAT_CREATE_EX(max_d); + 
FPU_STAT_CREATE_EX(maxa_s); + FPU_STAT_CREATE_EX(maxa_d); + FPU_STAT_CREATE_EX(min_s); + FPU_STAT_CREATE_EX(min_d); + FPU_STAT_CREATE_EX(mina_s); + FPU_STAT_CREATE_EX(mina_d); + FPU_STAT_CREATE_EX(mov_s); + FPU_STAT_CREATE_EX(mov_d); + FPU_STAT_CREATE_EX(msubf_s); + FPU_STAT_CREATE_EX(msubf_d); + FPU_STAT_CREATE_EX(mul_s); + FPU_STAT_CREATE_EX(mul_d); + FPU_STAT_CREATE_EX(neg_s); + FPU_STAT_CREATE_EX(neg_d); + FPU_STAT_CREATE_EX(recip_s); + FPU_STAT_CREATE_EX(recip_d); + FPU_STAT_CREATE_EX(rint_s); + FPU_STAT_CREATE_EX(rint_d); + FPU_STAT_CREATE_EX(round_w_s); + FPU_STAT_CREATE_EX(round_w_d); + FPU_STAT_CREATE_EX(round_l_s); + FPU_STAT_CREATE_EX(round_l_d); + FPU_STAT_CREATE_EX(rsqrt_s); + FPU_STAT_CREATE_EX(rsqrt_d); + FPU_STAT_CREATE_EX(sel_s); + FPU_STAT_CREATE_EX(sel_d); + FPU_STAT_CREATE_EX(seleqz_s); + FPU_STAT_CREATE_EX(seleqz_d); + FPU_STAT_CREATE_EX(selnez_s); + FPU_STAT_CREATE_EX(selnez_d); + FPU_STAT_CREATE_EX(sqrt_s); + FPU_STAT_CREATE_EX(sqrt_d); + FPU_STAT_CREATE_EX(sub_s); + FPU_STAT_CREATE_EX(sub_d); + FPU_STAT_CREATE_EX(trunc_w_s); + FPU_STAT_CREATE_EX(trunc_w_d); + FPU_STAT_CREATE_EX(trunc_l_s); + FPU_STAT_CREATE_EX(trunc_l_d); + return 0; } arch_initcall(debugfs_fpuemu); diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c index 4d000844e48e3d1f7560a3e9dead69ffd9743a2c..74a5a00d2f22b87917cae25c02f9b4f568569dd0 100644 --- a/arch/mips/math-emu/sp_fmax.c +++ b/arch/mips/math-emu/sp_fmax.c @@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) return ys ? 
x : y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y) else if (xs < ys) return x; - /* Compare exponent */ - if (xe > ye) - return x; - else if (xe < ye) - return y; + /* Signs of inputs are equal, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } else { + /* Inputs are both negative */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return y; + return x; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return y; - return x; + return x; + return y; } union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) @@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754sp_inf(xs & ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): @@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): @@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) return y; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs & ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y) return y; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) return y; - return x; + else if (xm > ym) + return x; + else if (xs == 0) + return x; + return y; } diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c index 4eb1bb9e9dec7b45e36355f2bd2eac7da9990810..c51385f46b09968611764d26db082cbf680e8744 100644 --- a/arch/mips/math-emu/sp_fmin.c +++ 
b/arch/mips/math-emu/sp_fmin.c @@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) return ys ? y : x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y) else if (xs < ys) return y; - /* Compare exponent */ - if (xe > ye) - return y; - else if (xe < ye) - return x; + /* Signs of inputs are the same, let's compare exponents */ + if (xs == 0) { + /* Inputs are both positive */ + if (xe > ye) + return y; + else if (xe < ye) + return x; + } else { + /* Inputs are both negative */ + if (xe > ye) + return x; + else if (xe < ye) + return y; + } - /* Compare mantissa */ + /* Signs and exponents of inputs are equal, let's compare mantissas */ + if (xs == 0) { + /* Inputs are both positive, with equal signs and exponents */ + if (xm <= ym) + return x; + return y; + } + /* Inputs are both negative, with equal signs and exponents */ if (xm <= ym) - return x; - return y; + return y; + return x; } union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) @@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): return ieee754sp_nanxcpt(x); - /* numbers are preferred to NaNs */ + /* + * Quiet NaN handling + */ + + /* + * The case of both inputs quiet NaNs + */ + case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): + return x; + + /* + * The cases of exactly one input quiet NaN (numbers + * are here preferred as returned values to NaNs) + */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return x; - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): @@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) /* * Infinity and zero handling */ + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): + return ieee754sp_inf(xs | ys); + case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, 
IEEE754_CLASS_ZERO): - return x; + return y; - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): - return y; + return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): - if (xs == ys) - return x; - return ieee754sp_zero(1); + return ieee754sp_zero(xs | ys); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; @@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y) return x; /* Compare mantissa */ - if (xm <= ym) + if (xm < ym) + return x; + else if (xm > ym) + return y; + else if (xs == 1) return x; return y; } diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c index c91d5e5d9b5fa8293312e074350d10add7648a85..7195fe785d81a8f0a2dda6c2139b95175321803c 100644 --- a/arch/mips/math-emu/sp_maddf.c +++ b/arch/mips/math-emu/sp_maddf.c @@ -14,9 +14,6 @@ #include "ieee754sp.h" -enum maddf_flags { - maddf_negate_product = 1 << 0, -}; static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, union ieee754sp y, enum maddf_flags flags) @@ -24,14 +21,8 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, int re; int rs; unsigned rm; - unsigned short lxm; - unsigned short hxm; - unsigned short lym; - unsigned short hym; - unsigned lrm; - unsigned hrm; - unsigned t; - unsigned at; + uint64_t rm64; + uint64_t zm64; int s; COMPXSP; @@ -48,51 +39,35 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, ieee754_clearcx(); - switch (zc) { - case IEEE754_CLASS_SNAN: - ieee754_setcx(IEEE754_INVALID_OPERATION); + /* + * Handle the cases when at least one of x, y or z is a NaN. + * Order of precedence is sNaN, qNaN and z, x, y. 
+ */ + if (zc == IEEE754_CLASS_SNAN) return ieee754sp_nanxcpt(z); - case IEEE754_CLASS_DNORM: - SPDNORMZ; - /* QNAN and ZERO cases are handled separately below */ - } - - switch (CLPAIR(xc, yc)) { - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): + if (xc == IEEE754_CLASS_SNAN) + return ieee754sp_nanxcpt(x); + if (yc == IEEE754_CLASS_SNAN) return ieee754sp_nanxcpt(y); + if (zc == IEEE754_CLASS_QNAN) + return z; + if (xc == IEEE754_CLASS_QNAN) + return x; + if (yc == IEEE754_CLASS_QNAN) + return y; - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): - return ieee754sp_nanxcpt(x); + if (zc == IEEE754_CLASS_DNORM) + SPDNORMZ; + /* ZERO z cases are handled separately below */ - case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): - return y; + switch (CLPAIR(xc, yc)) { - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): - case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): - return x; /* * Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; ieee754_setcx(IEEE754_INVALID_OPERATION); return ieee754sp_indef(); @@ -101,9 +76,27 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): - if (zc == IEEE754_CLASS_QNAN) - return z; - return ieee754sp_inf(xs ^ ys); + if ((zc == IEEE754_CLASS_INF) && + ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) { + /* + * Cases of addition of infinities with opposite signs + * or subtraction of infinities with same signs. + */ + ieee754_setcx(IEEE754_INVALID_OPERATION); + return ieee754sp_indef(); + } + /* + * z is here either not an infinity, or an infinity having the + * same sign as product (x*y) (in case of MADDF.D instruction) + * or product -(x*y) (in MSUBF.D case). The result must be an + * infinity, and its sign is determined only by the value of + * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y. + */ + if (flags & MADDF_NEGATE_PRODUCT) + return ieee754sp_inf(1 ^ (xs ^ ys)); + else + return ieee754sp_inf(xs ^ ys); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): @@ -112,32 +105,42 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); - /* Multiplication is 0 so just return z */ + if (zc == IEEE754_CLASS_ZERO) { + /* Handle cases +0 + (-0) and similar ones. 
*/ + if ((!(flags & MADDF_NEGATE_PRODUCT) + && (zs == (xs ^ ys))) || + ((flags & MADDF_NEGATE_PRODUCT) + && (zs != (xs ^ ys)))) + /* + * Cases of addition of zeros of equal signs + * or subtraction of zeroes of opposite signs. + * The sign of the resulting zero is in any + * such case determined only by the sign of z. + */ + return z; + + return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD); + } + /* x*y is here 0, and z is not 0, so just return z */ return z; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); SPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); SPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): - if (zc == IEEE754_CLASS_QNAN) - return z; - else if (zc == IEEE754_CLASS_INF) + if (zc == IEEE754_CLASS_INF) return ieee754sp_inf(zs); /* fall through to real computations */ } @@ -158,111 +161,93 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x, re = xe + ye; rs = xs ^ ys; - if (flags & maddf_negate_product) + if (flags & MADDF_NEGATE_PRODUCT) rs ^= 1; - /* shunt to top of word */ - xm <<= 32 - (SP_FBITS + 1); - ym <<= 32 - (SP_FBITS + 1); - - /* - * Multiply 32 bits xm, ym to give high 32 bits rm with stickness. - */ - lxm = xm & 0xffff; - hxm = xm >> 16; - lym = ym & 0xffff; - hym = ym >> 16; - - lrm = lxm * lym; /* 16 * 16 => 32 */ - hrm = hxm * hym; /* 16 * 16 => 32 */ - - t = lxm * hym; /* 16 * 16 => 32 */ - at = lrm + (t << 16); - hrm += at < lrm; - lrm = at; - hrm = hrm + (t >> 16); + /* Multiple 24 bit xm and ym to give 48 bit results */ + rm64 = (uint64_t)xm * ym; - t = hxm * lym; /* 16 * 16 => 32 */ - at = lrm + (t << 16); - hrm += at < lrm; - lrm = at; - hrm = hrm + (t >> 16); + /* Shunt to top of word */ + rm64 = rm64 << 16; - rm = hrm | (lrm != 0); - - /* - * Sticky shift down to normal rounding precision. - */ - if ((int) rm < 0) { - rm = (rm >> (32 - (SP_FBITS + 1 + 3))) | - ((rm << (SP_FBITS + 1 + 3)) != 0); + /* Put explicit bit at bit 62 if necessary */ + if ((int64_t) rm64 < 0) { + rm64 = rm64 >> 1; re++; - } else { - rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) | - ((rm << (SP_FBITS + 1 + 3 + 1)) != 0); } - assert(rm & (SP_HIDDEN_BIT << 3)); - - if (zc == IEEE754_CLASS_ZERO) - return ieee754sp_format(rs, re, rm); - /* And now the addition */ + assert(rm64 & (1 << 62)); - assert(zm & SP_HIDDEN_BIT); + if (zc == IEEE754_CLASS_ZERO) { + /* + * Move explicit bit from bit 62 to bit 26 since the + * ieee754sp_format code expects the mantissa to be + * 27 bits wide (24 + 3 rounding bits). + */ + rm = XSPSRS64(rm64, (62 - 26)); + return ieee754sp_format(rs, re, rm); + } - /* - * Provide guard,round and stick bit space. - */ - zm <<= 3; + /* Move explicit bit from bit 23 to bit 62 */ + zm64 = (uint64_t)zm << (62 - 23); + assert(zm64 & (1 << 62)); + /* Make the exponents the same */ if (ze > re) { /* * Have to shift r fraction right to align. */ s = ze - re; - rm = XSPSRS(rm, s); + rm64 = XSPSRS64(rm64, s); re += s; } else if (re > ze) { /* * Have to shift z fraction right to align. 
 	 */
 		s = re - ze;
-		zm = XSPSRS(zm, s);
+		zm64 = XSPSRS64(zm64, s);
 		ze += s;
 	}
 	assert(ze == re);
 	assert(ze <= SP_EMAX);
 
+	/* Do the addition */
 	if (zs == rs) {
 		/*
-		 * Generate 28 bit result of adding two 27 bit numbers
-		 * leaving result in zm, zs and ze.
+		 * Generate 64 bit result by adding two 63 bit numbers
+		 * leaving result in zm64, zs and ze.
 		 */
-		zm = zm + rm;
-
-		if (zm >> (SP_FBITS + 1 + 3)) {	/* carry out */
-			zm = XSPSRS1(zm);
+		zm64 = zm64 + rm64;
+		if ((int64_t)zm64 < 0) {	/* carry out */
+			zm64 = XSPSRS1(zm64);
 			ze++;
 		}
 	} else {
-		if (zm >= rm) {
-			zm = zm - rm;
+		if (zm64 >= rm64) {
+			zm64 = zm64 - rm64;
 		} else {
-			zm = rm - zm;
+			zm64 = rm64 - zm64;
 			zs = rs;
 		}
-		if (zm == 0)
+		if (zm64 == 0)
 			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
 
 		/*
-		 * Normalize in extended single precision
+		 * Put explicit bit at bit 62 if necessary.
 		 */
-		while ((zm >> (SP_MBITS + 3)) == 0) {
-			zm <<= 1;
+		while ((zm64 >> 62) == 0) {
+			zm64 <<= 1;
 			ze--;
 		}
-	}
+
+	/*
+	 * Move explicit bit from bit 62 to bit 26 since the
+	 * ieee754sp_format code expects the mantissa to be
+	 * 27 bits wide (24 + 3 rounding bits).
+	 */
+	zm = XSPSRS64(zm64, (62 - 26));
+
 	return ieee754sp_format(zs, ze, zm);
 }
@@ -275,5 +260,5 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
 union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
 		union ieee754sp y)
 {
-	return _sp_maddf(z, x, y, maddf_negate_product);
+	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
 }
diff --git a/arch/mips/math-emu/sp_rint.c b/arch/mips/math-emu/sp_rint.c
new file mode 100644
index 0000000000000000000000000000000000000000..70765b17e1962f39e098dd9b7731ee2cf038b030
--- /dev/null
+++ b/arch/mips/math-emu/sp_rint.c
@@ -0,0 +1,90 @@
+/* IEEE754 floating point arithmetic
+ * single precision
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 1994-2000 Algorithmics Ltd.
+ * Copyright (C) 2017 Imagination Technologies, Ltd.
+ * Author: Aleksandar Markovic
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_rint(union ieee754sp x)
+{
+	union ieee754sp ret;
+	u32 residue;
+	int sticky;
+	int round;
+	int odd;
+
+	COMPXDP;		/* <-- DP needed for 64-bit mantissa tmp */
+
+	ieee754_clearcx();
+
+	EXPLODEXSP;
+	FLUSHXSP;
+
+	if (xc == IEEE754_CLASS_SNAN)
+		return ieee754sp_nanxcpt(x);
+
+	if ((xc == IEEE754_CLASS_QNAN) ||
+	    (xc == IEEE754_CLASS_INF) ||
+	    (xc == IEEE754_CLASS_ZERO))
+		return x;
+
+	if (xe >= SP_FBITS)
+		return x;
+
+	if (xe < -1) {
+		residue = xm;
+		round = 0;
+		sticky = residue != 0;
+		xm = 0;
+	} else {
+		residue = xm << (xe + 1);
+		residue <<= 31 - SP_FBITS;
+		round = (residue >> 31) != 0;
+		sticky = (residue << 1) != 0;
+		xm >>= SP_FBITS - xe;
+	}
+
+	odd = (xm & 0x1) != 0x0;
+
+	switch (ieee754_csr.rm) {
+	case FPU_CSR_RN:	/* toward nearest */
+		if (round && (sticky || odd))
+			xm++;
+		break;
+	case FPU_CSR_RZ:	/* toward zero */
+		break;
+	case FPU_CSR_RU:	/* toward +infinity */
+		if ((round || sticky) && !xs)
+			xm++;
+		break;
+	case FPU_CSR_RD:	/* toward -infinity */
+		if ((round || sticky) && xs)
+			xm++;
+		break;
+	}
+
+	if (round || sticky)
+		ieee754_setcx(IEEE754_INEXACT);
+
+	ret = ieee754sp_flong(xm);
+	SPSIGN(ret) = xs;
+
+	return ret;
+}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 81d6a15c93d08ba2603d0e9f2dd8302546a97489..6f534b2099717da8c2d7be70bfa035a05ed5aede 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -37,7 +37,7 @@
 #include	/* for run_uncached() */
 #include
 #include
-#include
+#include
 
 /*
  * Bits describing what cache ops an SMP callback function may perform.
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 899e46279902819dcaee11e427411ee11edca9d8..44ac64d5182761d547f5b14a29cd4c9b047d5129 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 /* Cache operations. */
 void (*flush_cache_all)(void);
@@ -44,7 +45,6 @@ void (*__flush_cache_vunmap)(void);
 
 void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
 EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
-void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
 
 /* MIPS specific cache operations */
 void (*flush_cache_sigtramp)(unsigned long addr);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 8e78251eccc25e0a981ede0b89d1df907f9e896a..c01bd20d020810901b9712dc652f283d37d976b3 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -127,23 +127,6 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 	return gfp | dma_flag;
 }
 
-static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp)
-{
-	void *ret;
-
-	gfp = massage_gfp_flags(dev, gfp);
-
-	ret = (void *) __get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = plat_map_dma_mem(dev, ret, size);
-	}
-
-	return ret;
-}
-
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -151,13 +134,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	/*
-	 * XXX: seems like the coherent and non-coherent implementations could
-	 * be consolidated.
- */ - if (attrs & DMA_ATTR_NON_CONSISTENT) - return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); - gfp = massage_gfp_flags(dev, gfp); if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp)) @@ -172,7 +148,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, ret = page_address(page); memset(ret, 0, size); *dma_handle = plat_map_dma_mem(dev, ret, size); - if (!plat_device_is_coherent(dev)) { + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && + !plat_device_is_coherent(dev)) { dma_cache_wback_inv((unsigned long) ret, size); ret = UNCAC_ADDR(ret); } @@ -180,14 +157,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, return ret; } - -static void mips_dma_free_noncoherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - free_pages((unsigned long) vaddr, get_order(size)); -} - static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { @@ -195,14 +164,9 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; - if (attrs & DMA_ATTR_NON_CONSISTENT) { - mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); - return; - } - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - if (!plat_device_is_coherent(dev)) + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev)) addr = CAC_ADDR(addr); page = virt_to_page((void *) addr); @@ -409,12 +373,12 @@ static void mips_dma_sync_sg_for_device(struct device *dev, } } -int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } -int mips_dma_supported(struct device *dev, u64 mask) +static int mips_dma_supported(struct device *dev, u64 mask) { return plat_dma_supported(dev, mask); } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 8ce2983a701526a5f1bdfd243ad61d8b80375b1d..5f6ea7d746de659730598e253aae97dc9234ad6e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 28adeabe851ff22dbea2da9d701208dadcc4925d..33d3251ecd37a257c2cb435a973e612738e0a216 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -7,6 +7,7 @@ * written by Ralf Baechle */ #include +#include #include #include #include diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index c909c334272945c1c52d812462e78a74c2f1824b..acfb89273dad7d9eaa1529f07e94b3b439733309 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include /* * MIPS32/MIPS64 L2 cache handling @@ -63,34 +63,25 @@ static void mips_sc_prefetch_enable(void) * prefetching for both code & data, for all ports. 
*/ pftctl = read_gcr_l2_pft_control(); - if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) { - pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK; - pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK; - pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK; + if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT) { + pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK; + pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK; + pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN; write_gcr_l2_pft_control(pftctl); - pftctl = read_gcr_l2_pft_control_b(); - pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK; - pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK; - write_gcr_l2_pft_control_b(pftctl); + set_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID | + CM_GCR_L2_PFT_CONTROL_B_CEN); } } static void mips_sc_prefetch_disable(void) { - unsigned long pftctl; - if (mips_cm_revision() < CM_REV_CM2_5) return; - pftctl = read_gcr_l2_pft_control(); - pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK; - write_gcr_l2_pft_control(pftctl); - - pftctl = read_gcr_l2_pft_control_b(); - pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK; - pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK; - write_gcr_l2_pft_control_b(pftctl); + clear_gcr_l2_pft_control(CM_GCR_L2_PFT_CONTROL_PFTEN); + clear_gcr_l2_pft_control_b(CM_GCR_L2_PFT_CONTROL_B_PORTID | + CM_GCR_L2_PFT_CONTROL_B_CEN); } static bool mips_sc_prefetch_is_enabled(void) @@ -101,9 +92,9 @@ static bool mips_sc_prefetch_is_enabled(void) return false; pftctl = read_gcr_l2_pft_control(); - if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK)) + if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT)) return false; - return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK); + return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN); } static struct bcache_ops mips_sc_ops = { @@ -160,21 +151,21 @@ static int __init mips_sc_probe_cm3(void) unsigned long cfg = read_gcr_l2_config(); unsigned long sets, line_sz, assoc; - if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK) + if (cfg & CM_GCR_L2_CONFIG_BYPASS) return 0; - sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; - sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; + sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE; + sets >>= __ffs(CM_GCR_L2_CONFIG_SET_SIZE); if (sets) c->scache.sets = 64 << sets; - line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; - line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; + line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE; + line_sz >>= __ffs(CM_GCR_L2_CONFIG_LINE_SIZE); if (line_sz) c->scache.linesz = 2 << line_sz; - assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; - assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; + assoc = cfg & CM_GCR_L2_CONFIG_ASSOC; + assoc >>= __ffs(CM_GCR_L2_CONFIG_ASSOC); c->scache.ways = assoc + 1; c->scache.waysize = c->scache.sets * c->scache.linesz; c->scache.waybit = __ffs(c->scache.waysize); diff --git a/arch/mips/mm/tlbex-fault.S b/arch/mips/mm/tlbex-fault.S index 318855eb5f80309d3e596d33c8130bdbced61bf7..77db401fc62073a119bf79c10da908a07c08c4df 100644 --- a/arch/mips/mm/tlbex-fault.S +++ b/arch/mips/mm/tlbex-fault.S @@ -12,14 +12,15 @@ .macro tlb_do_page_fault, write NESTED(tlb_do_page_fault_\write, PT_SIZE, sp) - SAVE_ALL + .cfi_signal_frame + SAVE_ALL docfi=1 MFC0 a2, CP0_BADVADDR KMODE move a0, sp REG_S a2, PT_BVADDR(sp) li a1, \write - PTR_LA ra, ret_from_exception - j do_page_fault + jal do_page_fault + j ret_from_exception END(tlb_do_page_fault_\write) .endm diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 5aadc69c8ce39d8b894393e5d12e45fec934100c..79b9f2ad3ff514e0e48ab409d6465540a4fe097d 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -2634,11 +2634,6 @@ void build_tlb_refill_handler(void) #endif 
break; - case CPU_R6000: - case CPU_R6000A: - panic("No R6000 TLB refill handler yet"); - break; - case CPU_R8000: panic("No R8000 TLB refill handler yet"); break; diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c index c398582c316fcc9a8b01f567052ac4bb2ef18c26..a6699c15277d9b802536abba8a2d27288f6e0861 100644 --- a/arch/mips/mti-malta/malta-dtshim.c +++ b/arch/mips/mti-malta/malta-dtshim.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #define ROCIT_REG_BASE 0x1f403000 @@ -236,7 +236,7 @@ static void __init remove_gic(void *fdt) /* if we have a CM which reports a GIC is present, leave the DT alone */ err = mips_cm_probe(); - if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_GICEX_MSK)) + if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) return; if (malta_scon() == MIPS_REVISION_SCON_ROCIT) { diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 0f3b881a3190fdcb993ea01234a2c304b1511f18..009f2918b3201f1390cc163f2d8b61b44d1160cb 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -21,8 +21,7 @@ #include #include #include -#include -#include +#include #include #include diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index b0f9b188e83359d18073e08e5ec3dd3105155d51..a840e0c1642cb269425d4e2995cb1715312c456f 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -29,9 +28,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -215,7 +214,7 @@ void __init arch_init_irq(void) msc_nr_irqs); } - if (gic_present) { + if (mips_gic_present()) { corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI; } else if (cpu_has_veic) { set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch); diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index a01d5debfcaf5578a30b6114721db82a1c392e19..de34adb761579d81075f3070794e1bafa5112667 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include @@ -128,7 +128,7 @@ static int __init plat_enable_iocoherency(void) BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); pr_info("Enabled Bonito IOBC coherency\n"); } - } else if (mips_cm_numiocu() != 0) { + } else if (mips_cps_numiocu(0) != 0) { /* Nothing special needs to be done to enable coherency */ pr_info("CMP IOCU detected\n"); cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index cea4ec9098063ed8c82eb41786fba4634f73a4d2..66c866740ff227ed788a4d41f4322aeb5198dd9b 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -40,6 +39,7 @@ #include #include #include +#include #include #include @@ -85,8 +85,8 @@ static void __init estimate_frequencies(void) local_irq_save(flags); - if (gic_present) - gic_start_count(); + if (mips_gic_present()) + clear_gic_config(GIC_CONFIG_COUNTSTOP); /* * Read counters exactly on rising edge of update flag. 
@@ -95,8 +95,8 @@ static void __init estimate_frequencies(void) while (CMOS_READ(RTC_REG_A) & RTC_UIP); while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); start = read_c0_count(); - if (gic_present) - gicstart = gic_read_count(); + if (mips_gic_present()) + gicstart = read_gic_counter(); /* Wait for falling edge before reading RTC. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); @@ -105,8 +105,8 @@ static void __init estimate_frequencies(void) /* Read counters again exactly on rising edge of update flag. */ while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); count = read_c0_count(); - if (gic_present) - giccount = gic_read_count(); + if (mips_gic_present()) + giccount = read_gic_counter(); /* Wait for falling edge before reading RTC again. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); @@ -128,7 +128,7 @@ static void __init estimate_frequencies(void) count /= secs; mips_hpt_frequency = count; - if (gic_present) { + if (mips_gic_present()) { giccount = div_u64(giccount - gicstart, secs); gic_frequency = giccount; } @@ -154,7 +154,7 @@ int get_c0_fdc_int(void) if (cpu_has_veic) return -1; - else if (gic_present) + else if (mips_gic_present()) return gic_get_c0_fdc_int(); else if (cp0_fdc_irq >= 0) return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; @@ -167,7 +167,7 @@ int get_c0_perfcount_int(void) if (cpu_has_veic) { set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch); mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; - } else if (gic_present) { + } else if (mips_gic_present()) { mips_cpu_perf_irq = gic_get_c0_perfcount_int(); } else if (cp0_perfcount_irq >= 0) { mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; @@ -184,7 +184,7 @@ unsigned int get_c0_compare_int(void) if (cpu_has_veic) { set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch); mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; - } else if (gic_present) { + } else if (mips_gic_present()) { mips_cpu_timer_irq = gic_get_c0_compare_int(); } else { mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; @@ -258,8 +258,7 @@ void __init plat_time_init(void) setup_pit_timer(); #endif -#ifdef CONFIG_MIPS_GIC - if (gic_present) { + if (mips_gic_present()) { freq = freqround(gic_frequency, 5000); printk("GIC frequency %d.%02d MHz\n", freq/1000000, (freq%1000000)*100/1000000); @@ -268,5 +267,4 @@ void __init plat_time_init(void) timer_probe(); #endif } -#endif } diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index bddf1ef553a4f695d320d4f88eb7f28b079cec94..39a300bd6cc274e5812fb6450984b5cc41843507 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -122,7 +122,7 @@ static void nlm_init_secondary(void) int hwtid; hwtid = hard_smp_processor_id(); - current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE; + cpu_set_core(¤t_cpu_data, hwtid / NLM_THREADS_PER_CORE); current_cpu_data.package = nlm_nodeid(); nlm_percpu_init(hwtid); nlm_smp_irq_init(hwtid); @@ -147,7 +147,7 @@ unsigned long nlm_next_gp; unsigned long nlm_next_sp; static cpumask_t phys_cpu_present_mask; -void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) +int nlm_boot_secondary(int logical_cpu, struct task_struct *idle) { uint64_t picbase; int hwtid; @@ -161,6 +161,8 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) /* barrier for sp/gp store above */ __sync(); nlm_pic_send_ipi(picbase, hwtid, 1, 1); /* NMI */ + + return 0; } void __init nlm_smp_setup(void) @@ -272,7 +274,7 @@ int nlm_wakeup_secondary_cpus(void) return 0; } -struct plat_smp_ops nlm_smp_ops = { +const struct plat_smp_ops 
nlm_smp_ops = { .send_ipi_single = nlm_send_ipi_single, .send_ipi_mask = nlm_send_ipi_mask, .init_secondary = nlm_init_secondary, diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index c57da6f13929a4e9a52ae6156dd9ddbf38909511..c3e4c18ef8d4d1ca5e4dc74ffd990b800ca0fd1b 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -38,9 +38,9 @@ static int perfcount_irq; #ifdef CONFIG_MIPS_MT_SMP static int cpu_has_mipsmt_pertccounters; #define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \ - M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id)) + M_PERFCTL_VPEID(cpu_vpe_id(¤t_cpu_data))) #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ - 0 : cpu_data[smp_processor_id()].vpe_id) + 0 : cpu_vpe_id(¤t_cpu_data)) /* * The number of bits to shift to convert between counters per core and diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 72eb1a56c64508c732ccf1d7965367b85cf1e867..107d9f90d668c2e5008678f0aa320c969cf5d06a 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -100,11 +100,12 @@ static void paravirt_smp_finish(void) local_irq_enable(); } -static void paravirt_boot_secondary(int cpu, struct task_struct *idle) +static int paravirt_boot_secondary(int cpu, struct task_struct *idle) { paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle); smp_wmb(); paravirt_smp_sp[cpu] = __KSTK_TOS(idle); + return 0; } static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) @@ -133,7 +134,7 @@ static void paravirt_prepare_cpus(unsigned int max_cpus) } } -struct plat_smp_ops paravirt_smp_ops = { +const struct plat_smp_ops paravirt_smp_ops = { .send_ipi_single = paravirt_send_ipi_single, .send_ipi_mask = paravirt_send_ipi_mask, .init_secondary = paravirt_init_secondary, diff --git a/arch/mips/paravirt/setup.c b/arch/mips/paravirt/setup.c index cb8448b373a743f88f1e9a7ff6a592bf8f3b9bc0..d2ffec1409a734ac5d94bcb6513474c95ab8d46b 100644 --- a/arch/mips/paravirt/setup.c +++ b/arch/mips/paravirt/setup.c @@ -14,7 +14,7 @@ #include #include -extern struct plat_smp_ops paravirt_smp_ops; +extern const struct plat_smp_ops paravirt_smp_ops; const char *get_system_type(void) { diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c index 1c02f5737367aa7e75569afeb98aebcd3ebb5327..b4c263f16b15eeb62c4fc93d70ff828cdfa7ac7f 100644 --- a/arch/mips/pci/fixup-capcella.c +++ b/arch/mips/pci/fixup-capcella.c @@ -32,13 +32,13 @@ #define INTC PC104PLUS_INTC_IRQ #define INTD PC104PLUS_INTD_IRQ -static char irq_tab_capcella[][5] __initdata = { +static char irq_tab_capcella[][5] = { [11] = { -1, INT1, INT1, INT1, INT1 }, [12] = { -1, INT2, INT2, INT2, INT2 }, [14] = { -1, INTA, INTB, INTC, INTD } }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_capcella[slot][pin]; } diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c index b3ab59318d91a7a2817e6615efba1ef3fc0a2c37..44be65c3e6bb3d211d8692211b732d5f41f2caa4 100644 --- a/arch/mips/pci/fixup-cobalt.c +++ b/arch/mips/pci/fixup-cobalt.c @@ -147,7 +147,7 @@ static void qube_raq_via_board_id_fixup(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, qube_raq_via_board_id_fixup); -static char irq_tab_qube1[] __initdata = { +static char irq_tab_qube1[] = { [COBALT_PCICONF_CPU] = 0, [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ, [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, 
@@ -156,7 +156,7 @@ static char irq_tab_qube1[] __initdata = { [COBALT_PCICONF_ETH1] = 0 }; -static char irq_tab_cobalt[] __initdata = { +static char irq_tab_cobalt[] = { [COBALT_PCICONF_CPU] = 0, [COBALT_PCICONF_ETH0] = ETH0_IRQ, [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, @@ -165,7 +165,7 @@ static char irq_tab_cobalt[] __initdata = { [COBALT_PCICONF_ETH1] = ETH1_IRQ }; -static char irq_tab_raq2[] __initdata = { +static char irq_tab_raq2[] = { [COBALT_PCICONF_CPU] = 0, [COBALT_PCICONF_ETH0] = ETH0_IRQ, [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ, @@ -174,7 +174,7 @@ static char irq_tab_raq2[] __initdata = { [COBALT_PCICONF_ETH1] = ETH1_IRQ }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (cobalt_board_id <= COBALT_BRD_ID_QUBE1) return irq_tab_qube1[slot]; diff --git a/arch/mips/pci/fixup-emma2rh.c b/arch/mips/pci/fixup-emma2rh.c index 19caf775c20645ada7f1bbc86777c2bd45b23246..c31cb6af1cd07f8e9dd5ad715d30f93abd53a927 100644 --- a/arch/mips/pci/fixup-emma2rh.c +++ b/arch/mips/pci/fixup-emma2rh.c @@ -43,7 +43,7 @@ */ #define MAX_SLOT_NUM 10 -static unsigned char irq_map[][5] __initdata = { +static unsigned char irq_map[][5] = { [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC, MARKEINS_PCI_IRQ_INTD, 0,}, [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,}, @@ -85,7 +85,7 @@ static void emma2rh_pci_host_fixup(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH, emma2rh_pci_host_fixup); -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_map[slot][pin]; } diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c index 50da773faedee8fc5850480857ea5e36a78f24c0..b47c2771dc997663a7046dcb6166d1987957c89e 100644 --- a/arch/mips/pci/fixup-fuloong2e.c +++ b/arch/mips/pci/fixup-fuloong2e.c @@ -19,7 +19,7 @@ /* South bridge slot number is set by the pci probe process */ static u8 sb_slot = 5; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = 0; diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c index 133685e215ee6a2f02a5a244e04e08464543273d..c6ec18a07e63ab2f88e53a5ff25cde90704f59a5 100644 --- a/arch/mips/pci/fixup-ip32.c +++ b/arch/mips/pci/fixup-ip32.c @@ -21,7 +21,7 @@ #define INTB MACEPCI_SHARED0_IRQ #define INTC MACEPCI_SHARED1_IRQ #define INTD MACEPCI_SHARED2_IRQ -static char irq_tab_mace[][5] __initdata = { +static char irq_tab_mace[][5] = { /* Dummy INT#A INT#B INT#C INT#D */ {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ {0, SCSI0, SCSI0, SCSI0, SCSI0}, @@ -39,7 +39,7 @@ static char irq_tab_mace[][5] __initdata = { * irqs. I suppose a device without a pin A will thank us for doing it * right if there exists such a broken piece of crap. 
*/ -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_mace[slot][pin]; } diff --git a/arch/mips/pci/fixup-jmr3927.c b/arch/mips/pci/fixup-jmr3927.c index 0f1069527cbae435775c39d7acbaeb9369561d87..d3102eeea898983c655d7b8e6d7f923cdc56395a 100644 --- a/arch/mips/pci/fixup-jmr3927.c +++ b/arch/mips/pci/fixup-jmr3927.c @@ -31,7 +31,7 @@ #include #include -int __init jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char irq = pin; diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c index 2b5427d3f35c255bc566978b02ff1ac8e057f551..81530a13b34990a38d159ae248aa0a2d453bf332 100644 --- a/arch/mips/pci/fixup-lantiq.c +++ b/arch/mips/pci/fixup-lantiq.c @@ -23,7 +23,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) return 0; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return of_irq_parse_and_map_pci(dev, slot, pin); } diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c index 95ab9a1bd0107384a3faa8e45c9f2872f7c561df..20cdfdc0893875a7556cfbef91db0ab18b8099d2 100644 --- a/arch/mips/pci/fixup-lemote2f.c +++ b/arch/mips/pci/fixup-lemote2f.c @@ -30,7 +30,7 @@ #define PCID 7 /* all the pci device has the PCIA pin, check the datasheet. */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0}, /* 11: Unused */ {0, 0, 0, 0, 0}, /* 12: Unused */ @@ -51,7 +51,7 @@ static char irq_tab[][5] __initdata = { {0, 0, 0, 0, 0}, /* 27: Unused */ }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int virq; diff --git a/arch/mips/pci/fixup-loongson3.c b/arch/mips/pci/fixup-loongson3.c index 2b6d5e196f9990255eb5d35de3e6580a69f6003f..8a741c2c6685ded75b18975613f3e0a8b40867a1 100644 --- a/arch/mips/pci/fixup-loongson3.c +++ b/arch/mips/pci/fixup-loongson3.c @@ -32,7 +32,7 @@ static void print_fixup_info(const struct pci_dev *pdev) pdev->vendor, pdev->device, pdev->irq); } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { print_fixup_info(dev); return dev->irq; diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c index 40e920c653cc31762bcc10e4e6a235a4c53b78fa..3ec85331795ef561ce1bc624ae55cd009fc6a844 100644 --- a/arch/mips/pci/fixup-malta.c +++ b/arch/mips/pci/fixup-malta.c @@ -12,7 +12,7 @@ static char pci_irq[5] = { }; -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */ {0, 0, 0, 0, 0 }, /* 1: Unused */ @@ -38,7 +38,7 @@ static char irq_tab[][5] __initdata = { {0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */ }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int virq; virq = irq_tab[slot][pin]; diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c index 8e4f8288eca2e2fcfc17bd615a26a5e0a6a152de..66eaf456bc89c31031abc74762874d09131a9ed0 100644 --- a/arch/mips/pci/fixup-mpc30x.c +++ b/arch/mips/pci/fixup-mpc30x.c @@ -22,19 +22,19 @@ #include -static const int internal_func_irqs[] __initconst = { +static const int internal_func_irqs[] = { 
VRC4173_CASCADE_IRQ, VRC4173_AC97_IRQ, VRC4173_USB_IRQ, }; -static const int irq_tab_mpc30x[] __initconst = { +static const int irq_tab_mpc30x[] = { [12] = VRC4173_PCMCIA1_IRQ, [13] = VRC4173_PCMCIA2_IRQ, [29] = MQ200_IRQ, }; -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (slot == 30) return internal_func_irqs[PCI_FUNC(dev->devfn)]; diff --git a/arch/mips/pci/fixup-pmcmsp.c b/arch/mips/pci/fixup-pmcmsp.c index fab405c21c2f76f856af004fab4e88e68f81cef2..4ad2ef02087bc0cf8bec3b19835ce88aec73b1f7 100644 --- a/arch/mips/pci/fixup-pmcmsp.c +++ b/arch/mips/pci/fixup-pmcmsp.c @@ -47,7 +47,7 @@ #if defined(CONFIG_PMC_MSP7120_GW) /* Garibaldi Board IRQ wiring to PCI slots */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ @@ -86,7 +86,7 @@ static char irq_tab[][5] __initdata = { #elif defined(CONFIG_PMC_MSP7120_EVAL) /* MSP7120 Eval Board IRQ wiring to PCI slots */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ @@ -125,7 +125,7 @@ static char irq_tab[][5] __initdata = { #else /* Unknown board -- don't assign any IRQs */ -static char irq_tab[][5] __initdata = { +static char irq_tab[][5] = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ @@ -202,7 +202,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) * RETURNS: IRQ number * ****************************************************************************/ -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); diff --git a/arch/mips/pci/fixup-rbtx4927.c b/arch/mips/pci/fixup-rbtx4927.c index 321db265829c0c0e88cd83790edc028d2bec77d0..d6aaed1d6be9ed3f10d9843b068661b3dcc2e18d 100644 --- a/arch/mips/pci/fixup-rbtx4927.c +++ b/arch/mips/pci/fixup-rbtx4927.c @@ -36,7 +36,7 @@ #include #include -int __init rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char irq = pin; diff --git a/arch/mips/pci/fixup-rbtx4938.c b/arch/mips/pci/fixup-rbtx4938.c index a80579af609bc7155f86836a3e4f0e8ed457e7d3..ff22a22db73ee618a30cb7af4e906ce060da7d92 100644 --- a/arch/mips/pci/fixup-rbtx4938.c +++ b/arch/mips/pci/fixup-rbtx4938.c @@ -13,7 +13,7 @@ #include #include -int __init rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = tx4938_pcic1_map_irq(dev, slot); diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c index f67ebeeb42000bb5d4d73961d6bcc4d632375298..adb9a58641e8a1891e409b078e256d846ffa5d14 100644 --- a/arch/mips/pci/fixup-sni.c +++ b/arch/mips/pci/fixup-sni.c @@ -40,7 +40,7 @@ * seem to be a documentation error. At least on my RM200C the Cirrus * Logic CL-GD5434 VGA is device 3. 
*/ -static char irq_tab_rm200[8][5] __initdata = { +static char irq_tab_rm200[8][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ @@ -57,7 +57,7 @@ static char irq_tab_rm200[8][5] __initdata = { * * The VGA card is optional for RM300 systems. */ -static char irq_tab_rm300d[8][5] __initdata = { +static char irq_tab_rm300d[8][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ @@ -69,7 +69,7 @@ static char irq_tab_rm300d[8][5] __initdata = { { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ }; -static char irq_tab_rm300e[5][5] __initdata = { +static char irq_tab_rm300e[5][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ @@ -96,7 +96,7 @@ static char irq_tab_rm300e[5][5] __initdata = { #define INTC PCIT_IRQ_INTC #define INTD PCIT_IRQ_INTD -static char irq_tab_pcit[13][5] __initdata = { +static char irq_tab_pcit[13][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ @@ -113,7 +113,7 @@ static char irq_tab_pcit[13][5] __initdata = { { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ }; -static char irq_tab_pcit_cplus[13][5] __initdata = { +static char irq_tab_pcit_cplus[13][5] = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ @@ -130,7 +130,7 @@ static inline int is_rm300_revd(void) return (csmsr & 0xa0) == 0x20; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (sni_brd_type) { case SNI_BRD_PCI_TOWER_CPLUS: diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c index d0b0083fbd278a529209d41c7259e645d2c37f6c..cc581535f25772daf542445e1e4e59318e94a56f 100644 --- a/arch/mips/pci/fixup-tb0219.c +++ b/arch/mips/pci/fixup-tb0219.c @@ -23,7 +23,7 @@ #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = -1; diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c index 4196ccf3ea3da5e6783de7acd913f68d2fc218bc..b827b5cad5fd80eb8e4fde0a0e66bfbed524751c 100644 --- a/arch/mips/pci/fixup-tb0226.c +++ b/arch/mips/pci/fixup-tb0226.c @@ -23,7 +23,7 @@ #include #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = -1; diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c index 8c5039ed75d74c40df1b739ad37edf177dfad5bf..98f26285f2e3a904c7bff305d5a1cc9d65389f26 100644 --- a/arch/mips/pci/fixup-tb0287.c +++ b/arch/mips/pci/fixup-tb0287.c @@ -22,7 +22,7 @@ #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char bus; int irq = -1; diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index e99ca7702d8ad81660ab011ae4c990c0fb954f96..f15ec98de2de5b5569f051705b74985693406ea5 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c @@ -522,7 +522,7 @@ static int __init alchemy_pci_init(void) arch_initcall(alchemy_pci_init); -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct alchemy_pci_context *ctx = dev->sysdata; if (ctx && 
ctx->board_map_irq) diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c index 76f16eaed0ad3e9851d854d940e871c8c5f69aa9..230d7dd273e21af0668062c648ee2b6962383de1 100644 --- a/arch/mips/pci/pci-bcm47xx.c +++ b/arch/mips/pci/pci-bcm47xx.c @@ -28,7 +28,7 @@ #include #include -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return 0; } diff --git a/arch/mips/pci/pci-lasat.c b/arch/mips/pci/pci-lasat.c index 40d2797d2bc43b255edd050cf17d270cdbc4325d..47f4ee6bbb3bf9bb9874d6d26510d7e1f13cf94e 100644 --- a/arch/mips/pci/pci-lasat.c +++ b/arch/mips/pci/pci-lasat.c @@ -61,7 +61,7 @@ arch_initcall(lasat_pci_setup); #define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7) #define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8) -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (slot) { case 1: diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index fc7726088103c7d48fbc556eab168ce8b53eadc3..0c65c38e05d6cc8ce1730476bb49baf9465a8583 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -139,7 +139,7 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) struct of_pci_range range; struct of_pci_range_parser parser; - pr_info("PCI host bridge %s ranges:\n", node->full_name); + pr_info("PCI host bridge %pOF ranges:\n", node); hose->of_node = node; if (of_pci_range_parser_init(&parser, node)) diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c index cfbbc3e3e914526e8086e156d426a82c043032a2..88e625fb3a4722a08fc34e8e5704d9e26ec7305d 100644 --- a/arch/mips/pci/pci-malta.c +++ b/arch/mips/pci/pci-malta.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include #include @@ -201,7 +201,7 @@ void __init mips_pcibios_init(void) msc_mem_resource.start = start & mask; msc_mem_resource.end = (start & mask) | ~mask; msc_controller.mem_offset = (start & mask) - (map & mask); - if (mips_cm_numiocu()) { + if (mips_cps_numiocu(0)) { write_gcr_reg0_base(start); write_gcr_reg0_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0); @@ -213,7 +213,7 @@ void __init mips_pcibios_init(void) msc_io_resource.end = (map & mask) | ~mask; msc_controller.io_offset = 0; ioport_resource.end = ~mask; - if (mips_cm_numiocu()) { + if (mips_cps_numiocu(0)) { write_gcr_reg1_base(start); write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0); diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index 628c5132b3d8b254ad0c590ef9a606af05412972..90fba9bf98da7104ee5ef74bd0f259653d279aa3 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -291,7 +291,7 @@ static int mt7620_pci_probe(struct platform_device *pdev) IORESOURCE_MEM, 1); u32 val = 0; - rstpcie0 = devm_reset_control_get(&pdev->dev, "pcie0"); + rstpcie0 = devm_reset_control_get_exclusive(&pdev->dev, "pcie0"); if (IS_ERR(rstpcie0)) return PTR_ERR(rstpcie0); @@ -361,7 +361,7 @@ static int mt7620_pci_probe(struct platform_device *pdev) return 0; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u16 cmd; u32 val; diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 9ee01936862ee1dfe11d8a21754bb16572230aac..3e92a06fa77288c2705e8435e9352b6bf8c5e368 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -59,8 +59,7 @@ union octeon_pci_address { } s; }; -int 
__initconst (*octeon_pcibios_map_irq)(const struct pci_dev *dev, - u8 slot, u8 pin); +int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; /** @@ -74,7 +73,7 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; * as it goes through each bridge. * Returns Interrupt number for the device */ -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (octeon_pcibios_map_irq) return octeon_pcibios_map_irq(dev, slot, pin); diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index d6360fe73d058c5733274fb1b163c393a3f0e14c..711cdccdf65ba7df9647f72bd11952432086e33b 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -181,7 +181,7 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) spin_unlock_irqrestore(&rt2880_pci_lock, flags); } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u16 cmd; int irq = -1; diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 3520e9b414e7b91dfedc59abeb37d757579e475a..958899ffe99c14b3e149a88e303fb4cd3d497d81 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -207,8 +207,7 @@ static int rt3883_pci_irq_init(struct device *dev, irq = irq_of_parse_and_map(rpc->intc_of_node, 0); if (irq == 0) { - dev_err(dev, "%s has no IRQ", - of_node_full_name(rpc->intc_of_node)); + dev_err(dev, "%pOF has no IRQ", rpc->intc_of_node); return -EINVAL; } @@ -438,8 +437,8 @@ static int rt3883_pci_probe(struct platform_device *pdev) } if (!rpc->intc_of_node) { - dev_err(dev, "%s has no %s child node", - of_node_full_name(rpc->intc_of_node), + dev_err(dev, "%pOF has no %s child node", + rpc->intc_of_node, "interrupt controller"); return -EINVAL; } @@ -454,8 +453,8 @@ static int rt3883_pci_probe(struct platform_device *pdev) } if (!rpc->pci_controller.of_node) { - dev_err(dev, "%s has no %s child node", - of_node_full_name(rpc->intc_of_node), + dev_err(dev, "%pOF has no %s child node", + rpc->intc_of_node, "PCI host bridge"); err = -EINVAL; goto err_put_intc_node; @@ -565,7 +564,7 @@ static int rt3883_pci_probe(struct platform_device *pdev) return err; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return of_irq_parse_and_map_pci(dev, slot, pin); } diff --git a/arch/mips/pci/pci-tx4938.c b/arch/mips/pci/pci-tx4938.c index 000c0e1f9ef869d884d8c9a0cbb286ba87bae97b..a6418460e3c4447e62028b63655e29233a166c42 100644 --- a/arch/mips/pci/pci-tx4938.c +++ b/arch/mips/pci/pci-tx4938.c @@ -112,7 +112,7 @@ int __init tx4938_pciclk66_setup(void) return pciclk; } -int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) +int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) { if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) { switch (slot) { diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c index 9d6acc00f3489ddbf4558aa1d67568899e7eea9f..09a65f7dbe7ca001d55780d0bf7c2008a9320cdf 100644 --- a/arch/mips/pci/pci-tx4939.c +++ b/arch/mips/pci/pci-tx4939.c @@ -48,7 +48,7 @@ void __init tx4939_report_pci1clk(void) ((pciclk + 50000) / 100000) % 10); } -int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) +int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) { if 
(get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) { switch (slot) { @@ -68,7 +68,7 @@ int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) return -1; } -int __init tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = tx4939_pcic1_map_irq(dev, slot); diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c index 7babf01600cb0fd9cbb97d4bb40f079693b2e8ed..9eff9137f78e52dc60bd4c031e42f1db4001ab6e 100644 --- a/arch/mips/pci/pci-xlp.c +++ b/arch/mips/pci/pci-xlp.c @@ -205,7 +205,7 @@ int xlp_socdev_to_node(const struct pci_dev *lnkdev) return PCI_SLOT(lnkdev->devfn) / 8; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_dev *lnkdev; int lnkfunc, node; diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c index 26d2dabef28152541b709b11d2257cb563fe0349..2a1c81a129ba31ae9a6757ef819f2cab3fe2f93a 100644 --- a/arch/mips/pci/pci-xlr.c +++ b/arch/mips/pci/pci-xlr.c @@ -315,7 +315,7 @@ static void xls_pcie_ack_b(struct irq_data *d) } } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return get_irq_vector(dev); } diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index ad3584dbc9d72bc1d0b845a1089ce41cec494737..fd2887415bc8935acf86c79a864b78dbd5878a03 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -1464,8 +1464,7 @@ static int cvmx_pcie_rc_initialize(int pcie_port) * as it goes through each bridge. * Returns Interrupt number for the device */ -int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, - u8 slot, u8 pin) +int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* * The EBH5600 board with the PCI to PCIe bridge mistakenly diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c index 1c91cad7988fb4b2e63e43eeeeefbc46ac72e7df..0b06c953d29359bffcebad378c565202b3a6c646 100644 --- a/arch/mips/pistachio/init.c +++ b/arch/mips/pistachio/init.c @@ -19,8 +19,7 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/arch/mips/pistachio/irq.c b/arch/mips/pistachio/irq.c index 0a6b24c246528180e35c2cf9e41e5d90b9e03b5d..709a8219073ac772d1aaf6d6ab5bd24a4115b820 100644 --- a/arch/mips/pistachio/irq.c +++ b/arch/mips/pistachio/irq.c @@ -10,7 +10,6 @@ #include #include -#include #include #include diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 17a0f1dec05bd6bff766f5bbbe446b029deb5b5d..8a6af9b76202bd268c267aafe5164c0855670008 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -12,9 +12,9 @@ #include #include #include -#include #include +#include #include unsigned int get_c0_compare_int(void) diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index ffa0f7101a9773ec8e24813f37e3c270d912b5e2..2b08242ade628989e96aa3f24450b406ecdc7e7a 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -22,6 +22,8 @@ #include #include +#include + #ifdef CONFIG_MIPS_MT_SMP #define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */ #define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */ diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index 710b04cf48516f228ffedc113c67c03ad7a016e0..b4627080b828fa2891374b945aed6d7e04bfcb1b 100644 --- 
a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -82,6 +82,16 @@ choice depends on SOC_MT7620 select BUILTIN_DTB + config DTB_OMEGA2P + bool "Onion Omega2+" + depends on SOC_MT7620 + select BUILTIN_DTB + + config DTB_VOCORE2 + bool "VoCore2" + depends on SOC_MT7620 + select BUILTIN_DTB + endchoice endif diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c index eb1c61917eb76baa14b031ead89693f27a1df22d..1b7df115eb607b8e270bda0de02bd5710133eb30 100644 --- a/arch/mips/ralink/clk.c +++ b/arch/mips/ralink/clk.c @@ -53,6 +53,9 @@ EXPORT_SYMBOL_GPL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { + if (!clk) + return 0; + return clk->rate; } EXPORT_SYMBOL_GPL(clk_get_rate); diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c index 2058280450b5e5d5cd867edbe1065b3be407fb60..bda576f2cad8b7d24069e068d1172439d181b568 100644 --- a/arch/mips/ralink/irq-gic.c +++ b/arch/mips/ralink/irq-gic.c @@ -11,7 +11,7 @@ #include #include -#include +#include int get_c0_perfcount_int(void) { diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 0695c2d64e49107141091f8614c2689c18e68663..1b274742077dc191161c95ea62ea15a3b329b59f 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -12,8 +12,7 @@ #include #include -#include -#include +#include #include #include @@ -199,7 +198,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info) mips_cm_probe(); mips_cpc_probe(); - if (mips_cm_numiocu()) { + if (mips_cps_numiocu(0)) { /* * mips_cm_probe() wipes out bootloader * config for CM regions and we have to configure them diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index 4cd47d23d81a76105b5a5fc21a768cc50d478879..545446dfe7faa4f1cf16b0fec3a2369606922ae2 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -195,7 +195,7 @@ static void ip27_smp_finish(void) * set sp to the kernel stack of the newly created idle process, gp to the proc * struct so that current_thread_info() will work. */ -static void ip27_boot_secondary(int cpu, struct task_struct *idle) +static int ip27_boot_secondary(int cpu, struct task_struct *idle) { unsigned long gp = (unsigned long)task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); @@ -203,6 +203,7 @@ static void ip27_boot_secondary(int cpu, struct task_struct *idle) LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu), (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap), 0, (void *) sp, (void *) gp); + return 0; } static void __init ip27_smp_setup(void) @@ -231,7 +232,7 @@ static void __init ip27_prepare_cpus(unsigned int max_cpus) /* We already did everything necessary earlier */ } -struct plat_smp_ops ip27_smp_ops = { +const struct plat_smp_ops ip27_smp_ops = { .send_ipi_single = ip27_send_ipi_single, .send_ipi_mask = ip27_send_ipi_mask, .init_secondary = ip27_init_secondary, diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index d0e94ffcc1b8b8abb058920631c68b500efaa336..90c9d1255ad798a284389fbb2aaeaa5aacca7a09 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -117,7 +117,7 @@ static void bcm1480_smp_finish(void) * Setup the PC, SP, and GP of a secondary processor and start it * running! 
*/ -static void bcm1480_boot_secondary(int cpu, struct task_struct *idle) +static int bcm1480_boot_secondary(int cpu, struct task_struct *idle) { int retval; @@ -126,6 +126,7 @@ static void bcm1480_boot_secondary(int cpu, struct task_struct *idle) (unsigned long)task_thread_info(idle), 0); if (retval != 0) printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); + return retval; } /* @@ -157,7 +158,7 @@ static void __init bcm1480_prepare_cpus(unsigned int max_cpus) { } -struct plat_smp_ops bcm1480_smp_ops = { +const struct plat_smp_ops bcm1480_smp_ops = { .send_ipi_single = bcm1480_send_ipi_single, .send_ipi_mask = bcm1480_send_ipi_mask, .init_secondary = bcm1480_init_secondary, diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index c1a11a11db7f71b0564d1195c9f95f1f556ebbbb..115399202eab18a252f8a304cfcf791836c3e95f 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -229,8 +229,8 @@ static int __init initrd_setup(char *str) #endif -extern struct plat_smp_ops sb_smp_ops; -extern struct plat_smp_ops bcm1480_smp_ops; +extern const struct plat_smp_ops sb_smp_ops; +extern const struct plat_smp_ops bcm1480_smp_ops; /* * prom_init is called just after the cpu type is determined, from setup_arch() diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 0a4a2c3982d86dc6394b222c2632eace7cd83f56..5baabca52f25c3164b9ee4d8d8b6a7c0c5bcec6c 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -106,7 +106,7 @@ static void sb1250_smp_finish(void) * Setup the PC, SP, and GP of a secondary processor and start it * running! */ -static void sb1250_boot_secondary(int cpu, struct task_struct *idle) +static int sb1250_boot_secondary(int cpu, struct task_struct *idle) { int retval; @@ -115,6 +115,7 @@ static void sb1250_boot_secondary(int cpu, struct task_struct *idle) (unsigned long)task_thread_info(idle), 0); if (retval != 0) printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); + return retval; } /* @@ -146,7 +147,7 @@ static void __init sb1250_prepare_cpus(unsigned int max_cpus) { } -struct plat_smp_ops sb_smp_ops = { +const struct plat_smp_ops sb_smp_ops = { .send_ipi_single = sb1250_send_ipi_single, .send_ipi_mask = sb1250_send_ipi_mask, .init_secondary = sb1250_init_secondary, diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh new file mode 100755 index 0000000000000000000000000000000000000000..5c4f936870391195da1d8de70d789650fc11acb5 --- /dev/null +++ b/arch/mips/tools/generic-board-config.sh @@ -0,0 +1,90 @@ +#!/bin/sh +# +# Copyright (C) 2017 Imagination Technologies +# Author: Paul Burton +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 2 of the License, or (at your +# option) any later version. +# +# This script merges configuration fragments for boards supported by the +# generic MIPS kernel. It checks each for requirements specified using +# formatted comments, and then calls merge_config.sh to merge those +# fragments which have no unmet requirements. +# +# An example of requirements in your board config fragment might be: +# +# # require CONFIG_CPU_MIPS32_R2=y +# # require CONFIG_CPU_LITTLE_ENDIAN=y +# +# This would mean that your board is only included in kernels which are +# configured for little endian MIPS32r2 CPUs, and not for example in kernels +# configured for 64 bit or big endian systems. 
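A rough sketch of the requirement check described in the comment above, as it could be run by hand against a reference .config; the fragment name and config option here are illustrative only, not part of this patch:

	# Suppose board-example.config contains the line:
	#   # require CONFIG_CPU_LITTLE_ENDIAN=y
	# The script's per-requirement test then reduces to a grep
	# against the reference configuration:
	if grep -Eq '^CONFIG_CPU_LITTLE_ENDIAN=y$' .config; then
		echo "requirement met: fragment would be merged"
	else
		echo "requirement not met: fragment would be skipped"
	fi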
+# + +srctree="$1" +objtree="$2" +ref_cfg="$3" +cfg="$4" +boards_origin="$5" +shift 5 + +cd "${srctree}" + +# Only print Skipping... lines if the user explicitly specified BOARDS=. In the +# general case it only serves to obscure the useful output about what actually +# was included. +case ${boards_origin} in +"command line") + print_skipped=1 + ;; +environment*) + print_skipped=1 + ;; +*) + print_skipped=0 + ;; +esac + +for board in $@; do + board_cfg="arch/mips/configs/generic/board-${board}.config" + if [ ! -f "${board_cfg}" ]; then + echo "WARNING: Board config '${board_cfg}' not found" + continue + fi + + # For each line beginning with # require, cut out the field following + # it & search for that in the reference config file. If the requirement + # is not found then the subshell will exit with code 1, and we'll + # continue on to the next board. + grep -E '^# require ' "${board_cfg}" | \ + cut -d' ' -f 3- | \ + while read req; do + case ${req} in + *=y) + # If we require something =y then we check that a line + # containing it is present in the reference config. + grep -Eq "^${req}\$" "${ref_cfg}" && continue + ;; + *=n) + # If we require something =n then we just invert that + # check, considering the requirement met if there isn't + # a line containing the value =y in the reference + # config. + grep -Eq "^${req/%=n/=y}\$" "${ref_cfg}" || continue + ;; + *) + echo "WARNING: Unhandled requirement '${req}'" + ;; + esac + + [ ${print_skipped} -eq 1 ] && echo "Skipping ${board_cfg}" + exit 1 + done || continue + + # Merge this board config fragment into our final config file + ./scripts/kconfig/merge_config.sh \ + -m -O ${objtree} ${cfg} ${board_cfg} \ + | grep -Ev '^(#|Using)' +done diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c index 0bd2a1e1ff9ab8e0f0d8c56e31872ac08404c7b6..fb998726bd5d58b31e520d8ac4f8290835db473a 100644 --- a/arch/mips/txx9/generic/pci.c +++ b/arch/mips/txx9/generic/pci.c @@ -386,9 +386,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev) return 0; } -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int (*txx9_pci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - return txx9_board_vec->pci_map_irq(dev, slot, pin); + return txx9_pci_map_irq(dev, slot, pin); } char * (*txx9_board_pcibios_setup)(char *str) __initdata; @@ -424,5 +425,8 @@ char *__init txx9_pcibios_setup(char *str) txx9_pci_err_action = TXX9_PCI_ERR_IGNORE; return NULL; } + + txx9_pci_map_irq = txx9_board_vec->pci_map_irq; + return str; } diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c index e2690d7ca4ddd992e69fffe802cac6355ad14de8..e22b422f282c7a22556ca690413f0dfb83314126 100644 --- a/arch/mips/vdso/gettimeofday.c +++ b/arch/mips/vdso/gettimeofday.c @@ -11,12 +11,10 @@ #include "vdso.h" #include -#include #include #include #include -#include #include #include @@ -126,9 +124,9 @@ static __always_inline u64 read_gic_count(const union mips_vdso_data *data) u32 hi, hi2, lo; do { - hi = __raw_readl(gic + GIC_UMV_SH_COUNTER_63_32_OFS); - lo = __raw_readl(gic + GIC_UMV_SH_COUNTER_31_00_OFS); - hi2 = __raw_readl(gic + GIC_UMV_SH_COUNTER_63_32_OFS); + hi = __raw_readl(gic + sizeof(lo)); + lo = __raw_readl(gic); + hi2 = __raw_readl(gic + sizeof(lo)); } while (hi2 != hi); return (((u64)hi) << 32) + lo; diff --git a/arch/mips/vdso/sigreturn.S b/arch/mips/vdso/sigreturn.S index 715bf599352971067a1c384c601b7bfa4da90d0f..30c6219912ac17224ccb0d033739821c34d8005d 
100644 --- a/arch/mips/vdso/sigreturn.S +++ b/arch/mips/vdso/sigreturn.S @@ -19,31 +19,21 @@ .cfi_sections .debug_frame LEAF(__vdso_rt_sigreturn) - .cfi_startproc - .frame sp, 0, ra - .mask 0x00000000, 0 - .fmask 0x00000000, 0 .cfi_signal_frame li v0, __NR_rt_sigreturn syscall - .cfi_endproc END(__vdso_rt_sigreturn) #if _MIPS_SIM == _MIPS_SIM_ABI32 LEAF(__vdso_sigreturn) - .cfi_startproc - .frame sp, 0, ra - .mask 0x00000000, 0 - .fmask 0x00000000, 0 .cfi_signal_frame li v0, __NR_sigreturn syscall - .cfi_endproc END(__vdso_sigreturn) #endif diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 89e8027e07fb327d39de0170c0da61af177c7160..7c475fd99c468396122e77c8f1dbe738ae368bc0 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -59,10 +59,6 @@ void arch_cpu_idle(void) } #endif -void release_segments(struct mm_struct *mm) -{ -} - void machine_restart(char *cmd) { #ifdef CONFIG_KERNEL_DEBUGGER @@ -112,14 +108,6 @@ void release_thread(struct task_struct *dead_task) { } -/* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. - */ -void copy_segments(struct task_struct *p, struct mm_struct *new_mm) -{ -} - /* * this gets called so that we can store lazy state into memory and copy the * current task into the new thread. diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts index 31c51f9a2f0973c9d9ee38637ff00b988eb280a2..36ccdf05837dec19988d0be5206257d09a92ecd6 100644 --- a/arch/nios2/boot/dts/3c120_devboard.dts +++ b/arch/nios2/boot/dts/3c120_devboard.dts @@ -159,6 +159,7 @@ }; chosen { - bootargs = "debug console=ttyJ0,115200"; + bootargs = "debug earlycon console=ttyJ0,115200"; + stdout-path = &jtag_uart; }; }; diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c index 645129aaa9a016b5e91b2ebc6e42e241ee00f72d..20e86209ef2e03646b49dd1f00324dae0ecedf72 100644 --- a/arch/nios2/kernel/time.c +++ b/arch/nios2/kernel/time.c @@ -107,7 +107,10 @@ static struct nios2_clocksource nios2_cs = { cycles_t get_cycles(void) { - return nios2_timer_read(&nios2_cs.cs); + /* Only read timer if it has been initialized */ + if (nios2_cs.timer.base) + return nios2_timer_read(&nios2_cs.cs); + return 0; } EXPORT_SYMBOL(get_cycles); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index ba7b7ddc38442dab7e9b0679bba03478bf20096c..a57dedbfc7b77802e09104ee115702a9fe6fee13 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -257,6 +257,18 @@ config PARISC_PAGE_SIZE_64KB endchoice +config PARISC_SELF_EXTRACT + bool "Build kernel as self-extracting executable" + default y + help + Say Y if you want to build the parisc kernel as a kind of + self-extracting executable. + + If you say N here, the kernel will be compressed with gzip + which can be loaded by the palo bootloader directly too. + + If you don't know what to do here, say Y. 
+ config SMP bool "Symmetric multi-processing support" ---help--- diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 58fae5d2449daae762fbe09f54a52561e29832f9..01946ebaff72814435cf6128665bd778eb217fb1 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -129,8 +129,13 @@ Image: vmlinux bzImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +ifdef CONFIG_PARISC_SELF_EXTRACT vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ +else +vmlinuz: vmlinux + @gzip -cf -9 $< > $@ +endif install: $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \ diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 5450a11c9d108cf1bfeefef0af4f943e29551a41..7d7e594bda36f0403aef33577c9e75c1817011d4 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -15,7 +15,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs +KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os ifndef CONFIG_64BIT KBUILD_CFLAGS += -mfast-indirect-calls endif diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 13a4bf9ac4dae5eb626c5c9d01c7e6448a232df0..9345b44b86f036572e33721eb80e9bbbe4493aa4 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -24,7 +24,8 @@ /* Symbols defined by linker scripts */ extern char input_data[]; extern int input_len; -extern __le32 output_len; /* at unaligned address, little-endian */ +/* output_len is inserted by the linker possibly at an unaligned address */ +extern __le32 output_len __aligned(1); extern char _text, _end; extern char _bss, _ebss; extern char _startcode_end; diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 26b4455baa8370560bb572d7cb550cdbd17f1101..510341f62d979e0ced75108c4b771380e0866c57 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -280,6 +280,7 @@ void setup_pdc(void); /* in inventory.c */ /* wrapper-functions from pdc.c */ int pdc_add_valid(unsigned long address); +int pdc_instr(unsigned int *instr); int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len); int pdc_chassis_disp(unsigned long disp); int pdc_chassis_warn(unsigned long *warn); diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index a5dc9066c6d8d50cb35f2e4cf88509fa4621b84b..ad9c9c3b4136da6633e5d0c72fb117465376993e 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h @@ -1,6 +1,7 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H +extern int init_per_cpu(int cpuid); #if defined(CONFIG_SMP) diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index ab80e5c6f6517c4f165ab9ce8c3863b2d94e7c05..6d471c00c71af12f6a48c4deebb5d260071d5dd3 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -232,6 +232,26 @@ int pdc_add_valid(unsigned long address) } EXPORT_SYMBOL(pdc_add_valid); +/** + * pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler. + * @instr: Pointer to variable which will get instruction opcode. + * + * The return value is PDC_OK (0) in case call succeeded. 
+ */ +int __init pdc_instr(unsigned int *instr) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result)); + convert_to_wide(pdc_result); + *instr = pdc_result[0]; + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + /** * pdc_chassis_info - Return chassis information. * @result: The return buffer. diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 05730a83895c7f5700760e880a460257e8c372ba..00aed082969b126719a988bbe7c63a210c10ed90 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -216,8 +217,16 @@ void __init pdc_pdt_init(void) } for (i = 0; i < pdt_status.pdt_entries; i++) { + unsigned long addr; + report_mem_err(pdt_entry[i]); + addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK; + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && + addr >= initrd_start && addr < initrd_end) + pr_crit("CRITICAL: initrd possibly broken " + "due to bad memory!\n"); + /* mark memory page bad */ memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); } diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index a778bd3c107c507e5a76d99e06813876aad64bd7..e120d63c1b285ad545c2281de0abbbcc2dde6e08 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -317,7 +317,7 @@ void __init collect_boot_cpu_data(void) * * o Enable CPU profiling hooks. */ -int init_per_cpu(int cpunum) +int __init init_per_cpu(int cpunum) { int ret; struct pdc_coproc_cfg coproc_cfg; diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index dee6f9d6a153ce461ec816e6caeb5a38dd3bb9f2..f7d0c3b33d70a53b5350729f1e3d59b2e803749b 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -48,6 +49,7 @@ #include #include #include +#include static char __initdata command_line[COMMAND_LINE_SIZE]; @@ -115,7 +117,6 @@ void __init dma_ops_init(void) } #endif -extern int init_per_cpu(int cpuid); extern void collect_boot_cpu_data(void); void __init setup_arch(char **cmdline_p) @@ -398,9 +399,8 @@ static int __init parisc_init(void) } arch_initcall(parisc_init); -void start_parisc(void) +void __init start_parisc(void) { - extern void start_kernel(void); extern void early_trap_init(void); int ret, cpunum; diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 63365106ea1907589bbdee1688a1a9be54b1a74d..30c28ab145409b5966f7237ec2b6ca07121adc10 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -255,12 +255,11 @@ void arch_send_call_function_single_ipi(int cpu) static void __init smp_cpu_init(int cpunum) { - extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ /* Set modes and Enable floating point coprocessor */ - (void) init_per_cpu(cpunum); + init_per_cpu(cpunum); disable_sr_hashing(); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 991654c88eec8635056bef3ae9f536a6e8ff8862..230333157fe384c4171749e2b4eab43e3070a89d 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -817,7 +817,7 @@ void __init initialize_ivt(const void *iva) u32 check = 0; u32 *ivap; u32 *hpmcp; - u32 length; + u32 length, instr; if (strcmp((const char *)iva, "cows can fly")) panic("IVT invalid"); @@ -827,6 +827,14 @@ void 
__init initialize_ivt(const void *iva) for (i = 0; i < 8; i++) *ivap++ = 0; + /* + * Use PDC_INSTR firmware function to get instruction that invokes + * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of + * the PA 1.1 Firmware Architecture document. + */ + if (pdc_instr(&instr) == PDC_OK) + ivap[0] = instr; + /* Compute Checksum for HPMC handler */ length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 48dc7d4d20bba5e6fdb8669ae6d03b3ed3cb27c4..caab39dfa95d606564af62d3fd565eef6fc41757 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -279,6 +280,17 @@ static void unwind_frame_regs(struct unwind_frame_info *info) info->prev_sp = sp - 64; info->prev_ip = 0; + + /* The stack is at the end inside the thread_union + * struct. If we reach data, we have reached the + * beginning of the stack and should stop unwinding. */ + if (info->prev_sp >= (unsigned long) task_thread_info(info->t) && + info->prev_sp < ((unsigned long) task_thread_info(info->t) + + THREAD_SZ_ALGN)) { + info->prev_sp = 0; + break; + } + if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) break; info->prev_ip = tmp; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 5b101f6a5607dc1ef9e229b6f2421a78d35355ce..e247edbca68ecd2f56500e467776c9d2fa1f1c11 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -261,7 +262,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, struct task_struct *tsk; struct mm_struct *mm; unsigned long acc_type; - int fault; + int fault = 0; unsigned int flags; if (faulthandler_disabled()) @@ -315,7 +316,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; - else if (fault & VM_FAULT_SIGBUS) + else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| + VM_FAULT_HWPOISON_LARGE)) goto bad_area; BUG(); } @@ -352,8 +354,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, if (user_mode(regs)) { struct siginfo si; - - show_signal_msg(regs, code, address, tsk, vma); + unsigned int lsb = 0; switch (code) { case 15: /* Data TLB miss fault/Data page fault */ @@ -386,6 +387,30 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; break; } + +#ifdef CONFIG_MEMORY_FAILURE + if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + printk(KERN_ERR + "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n", + tsk->comm, tsk->pid, address); + si.si_signo = SIGBUS; + si.si_code = BUS_MCEERR_AR; + } +#endif + + /* + * Either small page or large page may be poisoned. + * In other words, VM_FAULT_HWPOISON_LARGE and + * VM_FAULT_HWPOISON are mutually exclusive. 
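+ * The poison granularity (huge-page shift or PAGE_SHIFT) is reported
+ * back to user space via si_addr_lsb below.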
+ */ + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + else if (fault & VM_FAULT_HWPOISON) + lsb = PAGE_SHIFT; + else + show_signal_msg(regs, code, address, tsk, vma); + si.si_addr_lsb = lsb; + si.si_errno = 0; si.si_addr = (void __user *) address; force_sig_info(si.si_signo, &si, current); diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index e084fa548d73d217b72ca96dbe0783dbf1b2105d..063817fee61cfeea164555a99139de3ffda3aedf 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -138,10 +138,11 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_POWERMAC=m CONFIG_SND_AOA=m CONFIG_SND_AOA_FABRIC_LAYOUT=m diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig index 79bbc8238b325aa5a637b7bc79913ebbc835b3be..805b0f87653c1939f2722929689db2ebb128930f 100644 --- a/arch/powerpc/configs/gamecube_defconfig +++ b/arch/powerpc/configs/gamecube_defconfig @@ -64,11 +64,12 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y # CONFIG_SND_VERBOSE_PROCFS is not set +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQUENCER_OSS=y # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 8cf4a46bef86b00c0d2f5562d3f30262be4f5ecd..6daa56f8895cb22bcd37011328c68932abe50074 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -115,9 +115,10 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y +CONFIG_SND_SEQUENCER=y CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_USB_USX2Y=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 8e798b1fbc9900eba3830878e77ea809a8b56b90..1aab9a62a681d4856aff8f7afaa0f9435832b4d9 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -227,11 +227,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_DUMMY=m CONFIG_SND_POWERMAC=m CONFIG_SND_AOA=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 791db775a09cac4e695fd199f9f9ed060337ea5c..6ddca80c52c3b4c1fef2e1442a5c1d2a31f24e2e 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -222,11 +222,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_POWERMAC=m CONFIG_SND_AOA=m CONFIG_SND_AOA_FABRIC_LAYOUT=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 
d0fe0f8f77c236f03470abb7fc039861e6587a12..41d85cb3c9a2fb65fae203654e0759dde8099080 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -141,11 +141,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_HID_DRAGONRISE=y CONFIG_HID_GYRATION=y CONFIG_HID_TWINHAN=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index ae6eba482d75da632b90be24bb83a89cb7140e78..da0e8d535eb8889efbd937a1d2ba62aac6236400 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -789,17 +789,18 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_DYNAMIC_MINORS=y # CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y CONFIG_SND_DEBUG_VERBOSE=y CONFIG_SND_PCM_XRUN_DEBUG=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m CONFIG_SND_DUMMY=m CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index aef41b17a8bc02d0445bd8f145904f0b9c9454b0..9c7400a19e9df2aca8b005bb658d802c48eb88af 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig @@ -79,11 +79,12 @@ CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y +CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y # CONFIG_SND_VERBOSE_PROCFS is not set +CONFIG_SND_SEQUENCER=y +CONFIG_SND_SEQUENCER_OSS=y CONFIG_HID_APPLE=m CONFIG_HID_WACOM=m CONFIG_MMC=y diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 26b9994d27eef9c806b5c1f683f10325aff85d1c..43ef2515648098f985ff5f7d10a66b2f6d724391 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -341,7 +341,7 @@ int fix_alignment(struct pt_regs *regs) type = op.type & INSTR_TYPE_MASK; if (!OP_IS_LOAD_STORE(type)) { - if (type != CACHEOP + DCBZ) + if (op.type != CACHEOP + DCBZ) return -EINVAL; PPC_WARN_ALIGNMENT(dcbz, regs); r = emulate_dcbz(op.ea, regs); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 9e816787c0d49fd31bc153debe1235e01281a5fa..116000b4553195f44adabf2894b002fcd5e6d6e1 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1019,6 +1019,10 @@ int eeh_init(void) } else if ((ret = eeh_ops->init())) return ret; + /* Initialize PHB PEs */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + eeh_dev_phb_init_dynamic(hose); + /* Initialize EEH event */ ret = eeh_event_init(); if (ret) diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index ad04ecd63c207def1a1dbb33172c4acc933a2a39..a34e6912c15e2a027c55b3ef1802d22792cac5f9 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c @@ -78,21 +78,3 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb) /* EEH PE for PHB */ eeh_phb_pe_create(phb); } - -/** - * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs - * - * Scan all the existing PHBs and create EEH devices for their OF - * nodes and their children OF 
nodes - */ -static int __init eeh_dev_phb_init(void) -{ - struct pci_controller *phb, *tmp; - - list_for_each_entry_safe(phb, tmp, &hose_list, list_node) - eeh_dev_phb_init_dynamic(phb); - - return 0; -} - -core_initcall(eeh_dev_phb_init); diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 6f8273f5e988b2cca6f320299ca6c67445ebca66..91e037ab20a197de398161c057d86bffabc3a68a 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -104,8 +104,10 @@ static unsigned long can_optimize(struct kprobe *p) * and that can be emulated. */ if (!is_conditional_branch(*p->ainsn.insn) && - analyse_instr(&op, ®s, *p->ainsn.insn)) + analyse_instr(&op, ®s, *p->ainsn.insn) == 1) { + emulate_update_regs(®s, &op); nip = regs.nip; + } return nip; } diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 07cd22e354053597d990bf57457164bbb6f02b9c..f52ad5bb710960906b8ae61400688845e2811dd5 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk) * in the appropriate thread structures from live. */ - if (tsk != current) + if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current)) return; if (MSR_TM_SUSPENDED(mfmsr())) { diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index b8a4987f58cfdc6ccd66a94d68eea419bc68a37c..1643e9e5365573659bbdf79d6e5dfddb5ee4eafc 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -914,7 +914,7 @@ int rtas_online_cpus_mask(cpumask_var_t cpus) if (ret) { cpumask_var_t tmp_mask; - if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) + if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) return ret; /* Use tmp_mask to preserve cpus mask from first failure */ @@ -962,7 +962,7 @@ int rtas_ibm_suspend_me(u64 handle) return -EIO; } - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) + if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) return -ENOMEM; atomic_set(&data.working, 0); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index ec74e203ee04e5b8e7e307df28c133b6226cd487..13c9dcdcba6922e32d4c00267201f47e9498f805 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -437,6 +437,7 @@ static inline int check_io_access(struct pt_regs *regs) int machine_check_e500mc(struct pt_regs *regs) { unsigned long mcsr = mfspr(SPRN_MCSR); + unsigned long pvr = mfspr(SPRN_PVR); unsigned long reason = mcsr; int recoverable = 1; @@ -478,8 +479,15 @@ int machine_check_e500mc(struct pt_regs *regs) * may still get logged and cause a machine check. We should * only treat the non-write shadow case as non-recoverable. */ - if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) - recoverable = 0; + /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit + * is not implemented but L1 data cache always runs in write + * shadow mode. Hence on data cache parity errors HW will + * automatically invalidate the L1 Data Cache. 
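+ * The DCWS check below is therefore skipped on e6500 and such errors
+ * remain marked recoverable.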
+ */ + if (PVR_VER(pvr) != PVR_VER_E6500) { + if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) + recoverable = 0; + } } if (reason & MCSR_L2MMU_MHIT) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 18e974a34fced58328ecb062fee539279f3df7cd..73bf1ebfa78fcc7ef74dd51713d800ea12e9e745 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -181,7 +181,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) struct swait_queue_head *wqp; wqp = kvm_arch_vcpu_wq(vcpu); - if (swait_active(wqp)) { + if (swq_has_sleeper(wqp)) { swake_up(wqp); ++vcpu->stat.halt_wakeup; } @@ -4212,11 +4212,13 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) if ((cfg->process_table & PRTS_MASK) > 24) return -EINVAL; + mutex_lock(&kvm->lock); kvm->arch.process_table = cfg->process_table; kvmppc_setup_partition_table(kvm); lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); + mutex_unlock(&kvm->lock); return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c index abf5f01b6eb1f04b05d0643ddc46bb8ded237d8f..5b81a807d742c8b11018ead176b60590064f5d7f 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xive.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c @@ -38,7 +38,6 @@ static inline void __iomem *get_tima_phys(void) #define __x_tima get_tima_phys() #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page)) #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page)) -#define __x_readb __raw_rm_readb #define __x_writeb __raw_rm_writeb #define __x_readw __raw_rm_readw #define __x_readq __raw_rm_readq diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 663a4a861e7f3600903c61df65240fe32e58e5d3..ec69fa45d5a2f249322d32218a722f4d54390e97 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -771,6 +771,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ bl kvmppc_restore_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif @@ -1118,6 +1121,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) BEGIN_FTR_SECTION mtspr SPRN_PPR, r0 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + +/* Move canary into DSISR to check for later */ +BEGIN_FTR_SECTION + li r0, 0x7fff + mtspr SPRN_HDSISR, r0 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) @@ -1630,6 +1640,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ bl kvmppc_save_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif @@ -1749,7 +1762,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* * Are we running hash or radix ? */ - beq cr2,3f + ld r5, VCPU_KVM(r9) + lbz r0, KVM_RADIX(r5) + cmpwi cr2, r0, 0 + beq cr2, 3f /* Radix: Handle the case where the guest used an illegal PID */ LOAD_REG_ADDR(r4, mmu_base_pid) @@ -1947,9 +1963,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) kvmppc_hdsi: ld r3, VCPU_KVM(r9) lbz r0, KVM_RADIX(r3) - cmpwi r0, 0 mfspr r4, SPRN_HDAR mfspr r6, SPRN_HDSISR +BEGIN_FTR_SECTION + /* Look for DSISR canary. If we find it, retry instruction */ + cmpdi r6, 0x7fff + beq 6f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + cmpwi r0, 0 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ /* HPTE not found fault or protection fault? */ andis. 
r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h @@ -2466,6 +2487,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ ld r9, HSTATE_KVM_VCPU(r13) bl kvmppc_save_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) @@ -2578,6 +2602,9 @@ kvm_end_cede: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM BEGIN_FTR_SECTION + /* + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + */ bl kvmppc_restore_tm END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 08b200a0bbcec8c47a3128f4685ffcbfad261552..13304622ab1c78682fa18f06120f5eb0970f57fb 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -48,7 +48,6 @@ #define __x_tima xive_tima #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) -#define __x_readb __raw_readb #define __x_writeb __raw_writeb #define __x_readw __raw_readw #define __x_readq __raw_readq diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index d1ed2c41b5d246dde6d53c34530bc30dc63656b9..c7a5deadd1cc782ddd45667c9fc29f96166e85e7 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -28,7 +28,8 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) * bit. */ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { - u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS); + u8 pipr = be64_to_cpu(qw1) & 0xff; if (pipr >= xc->hw_cppr) return; } @@ -336,7 +337,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; u8 pending = xc->pending; u32 hirq; - u8 pipr; pr_devel("H_IPOLL(server=%ld)\n", server); @@ -353,7 +353,8 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long pending = 0xff; } else { /* Grab pending interrupt if any */ - pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS); + u8 pipr = be64_to_cpu(qw1) & 0xff; if (pipr < 8) pending |= 1 << pipr; } diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index fb9f58b868e76ef0692e45fe874b6546cf74de37..5e8418c28bd884bd854818755b61b06fcff776e6 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -944,9 +944,9 @@ NOKPROBE_SYMBOL(emulate_dcbz); : "r" (addr), "i" (-EFAULT), "0" (err)) static nokprobe_inline void set_cr0(const struct pt_regs *regs, - struct instruction_op *op, int rd) + struct instruction_op *op) { - long val = regs->gpr[rd]; + long val = op->val; op->type |= SETCC; op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); @@ -1326,7 +1326,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 13: /* addic. */ imm = (short) instr; add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); - set_cr0(regs, op, rd); + set_cr0(regs, op); return 1; case 14: /* addi */ @@ -1397,13 +1397,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 28: /* andi. */ op->val = regs->gpr[rd] & (unsigned short) instr; - set_cr0(regs, op, ra); + set_cr0(regs, op); goto logical_done_nocc; case 29: /* andis. 
*/ imm = (unsigned short) instr; op->val = regs->gpr[rd] & (imm << 16); - set_cr0(regs, op, ra); + set_cr0(regs, op); goto logical_done_nocc; #ifdef __powerpc64__ @@ -1513,10 +1513,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->type = COMPUTE + SETCC; imm = 0xf0000000UL; val = regs->gpr[rd]; - op->val = regs->ccr; + op->ccval = regs->ccr; for (sh = 0; sh < 8; ++sh) { if (instr & (0x80000 >> sh)) - op->val = (op->val & ~imm) | + op->ccval = (op->ccval & ~imm) | (val & imm); imm >>= 4; } @@ -1651,8 +1651,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, goto arith_done; case 235: /* mullw */ - op->val = (unsigned int) regs->gpr[ra] * - (unsigned int) regs->gpr[rb]; + op->val = (long)(int) regs->gpr[ra] * + (int) regs->gpr[rb]; + goto arith_done; case 266: /* add */ @@ -2526,7 +2527,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, logical_done: if (instr & 1) - set_cr0(regs, op, ra); + set_cr0(regs, op); logical_done_nocc: op->reg = ra; op->type |= SETREG; @@ -2534,7 +2535,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, arith_done: if (instr & 1) - set_cr0(regs, op, rd); + set_cr0(regs, op); compute_done: op->reg = rd; op->type |= SETREG; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 2e3eb7431571603fc9cd2452df25a32c6cd4c6da..9e3da168d54cdcd36e3911ff040ff2ad187c92b7 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -793,6 +793,11 @@ void perf_event_print_debug(void) u32 pmcs[MAX_HWEVENTS]; int i; + if (!ppmu) { + pr_info("Performance monitor hardware not registered.\n"); + return; + } + if (!ppmu->n_counter) return; diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 9f59041a172bbfdea270ccb4139e66948bc3354f..443d5ca719958e5170374edbcaaf7d2cc52b8104 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -393,7 +393,13 @@ static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) u64 pir = get_hard_smp_processor_id(cpu); mtspr(SPRN_LPCR, lpcr_val); - opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); + + /* + * Program the LPCR via stop-api only if the deepest stop state + * can lose hypervisor context. 
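+ * Otherwise the mtspr above is sufficient, since shallower stop states
+ * preserve the register.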
+ */ + if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) + opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); } /* diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 783f3636469093034ae26a23856fef6aaf1922a6..e45b5f10645ae54b6610298a0a03c07f7e5d0966 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -266,7 +266,6 @@ int dlpar_attach_node(struct device_node *dn, struct device_node *parent) return rc; } - of_node_put(dn->parent); return 0; } diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fc0d8f97c03a2f243a51c9ba27c7043626bd95cc..fadb95efbb9e1856ed8a7182cf870dcf3a5f5331 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -462,15 +462,19 @@ static ssize_t dlpar_cpu_add(u32 drc_index) } dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); - of_node_put(parent); if (!dn) { pr_warn("Failed call to configure-connector, drc index: %x\n", drc_index); dlpar_release_drc(drc_index); + of_node_put(parent); return -EINVAL; } rc = dlpar_attach_node(dn, parent); + + /* Regardless we are done with parent now */ + of_node_put(parent); + if (rc) { saved_rc = rc; pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 210ce632d63e1e47c2796acd767e3bf610da7439..f7042ad492bafba5ac21e3db9ce24977fd527169 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -226,8 +226,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) return -ENOENT; dn = dlpar_configure_connector(drc_index, parent_dn); - if (!dn) + if (!dn) { + of_node_put(parent_dn); return -ENOENT; + } rc = dlpar_attach_node(dn, parent_dn); if (rc) diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index e76aefae2aa2b1020e90180b6f4e930acece7294..89726f07d2492fd17beb05eea48c476fd2652550 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -151,7 +151,7 @@ static ssize_t store_hibernate(struct device *dev, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) + if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) return -ENOMEM; stream_id = simple_strtoul(buf, NULL, 16); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index dce708e061eac79a56b655eb71bb6bb63f3ce978..20e75a2ca93a283f6413ca6a8ee40ff280e3c74d 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1507,7 +1507,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, static inline void pmdp_invalidate(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); + pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); + + pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); } #define __HAVE_ARCH_PMDP_SET_WRPROTECT diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index ca8cd80e8feb7c6f01be173a76471420020e2bb6..60181caf8e8ad332029212e82b07c561cb44395a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -404,18 +404,6 @@ static inline void save_vector_registers(void) #endif } -static int __init topology_setup(char *str) -{ - bool enabled; - int rc; - - rc = kstrtobool(str, 
&enabled); - if (!rc && !enabled) - S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY; - return rc; -} -early_param("topology", topology_setup); - static int __init disable_vector_extension(char *str) { S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c1bf75ffb8756549b1c6a5c97908ff49dd2e79aa..7e1e40323b78e1bb7a3910e1191dfbe40027b748 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event) } /* Check online status of the CPU to which the event is pinned */ - if ((unsigned int)event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) - return -ENODEV; + if (event->cpu >= 0) { + if ((unsigned int)event->cpu >= nr_cpumask_bits) + return -ENODEV; + if (!cpu_online(event->cpu)) + return -ENODEV; + } /* Force reset of idle/hv excludes regardless of what the * user requested. diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index bb47c92476f0d852ded0b9b50f9756e46480e9b9..ed0bdd220e1a643788655fbefaef328dd38c7951 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -8,6 +8,8 @@ #include #include +#include +#include #include #include #include @@ -29,12 +31,20 @@ #define PTF_VERTICAL (1UL) #define PTF_CHECK (2UL) +enum { + TOPOLOGY_MODE_HW, + TOPOLOGY_MODE_SINGLE, + TOPOLOGY_MODE_PACKAGE, + TOPOLOGY_MODE_UNINITIALIZED +}; + struct mask_info { struct mask_info *next; unsigned char id; cpumask_t mask; }; +static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED; static void set_topology_timer(void); static void topology_work_fn(struct work_struct *work); static struct sysinfo_15_1_x *tl_info; @@ -59,11 +69,26 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) cpumask_t mask; cpumask_copy(&mask, cpumask_of(cpu)); - if (!MACHINE_HAS_TOPOLOGY) - return mask; - for (; info; info = info->next) { - if (cpumask_test_cpu(cpu, &info->mask)) - return info->mask; + switch (topology_mode) { + case TOPOLOGY_MODE_HW: + while (info) { + if (cpumask_test_cpu(cpu, &info->mask)) { + mask = info->mask; + break; + } + info = info->next; + } + if (cpumask_empty(&mask)) + cpumask_copy(&mask, cpumask_of(cpu)); + break; + case TOPOLOGY_MODE_PACKAGE: + cpumask_copy(&mask, cpu_present_mask); + break; + default: + /* fallthrough */ + case TOPOLOGY_MODE_SINGLE: + cpumask_copy(&mask, cpumask_of(cpu)); + break; } return mask; } @@ -74,7 +99,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu) int i; cpumask_copy(&mask, cpumask_of(cpu)); - if (!MACHINE_HAS_TOPOLOGY) + if (topology_mode != TOPOLOGY_MODE_HW) return mask; cpu -= cpu % (smp_cpu_mtid + 1); for (i = 0; i <= smp_cpu_mtid; i++) @@ -184,10 +209,8 @@ static void topology_update_polarization_simple(void) { int cpu; - mutex_lock(&smp_cpu_state_mutex); for_each_possible_cpu(cpu) smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); - mutex_unlock(&smp_cpu_state_mutex); } static int ptf(unsigned long fc) @@ -223,7 +246,7 @@ int topology_set_cpu_management(int fc) static void update_cpu_masks(void) { struct cpu_topology_s390 *topo; - int cpu; + int cpu, id; for_each_possible_cpu(cpu) { topo = &cpu_topology[cpu]; @@ -231,12 +254,13 @@ static void update_cpu_masks(void) topo->core_mask = cpu_group_map(&socket_info, cpu); topo->book_mask = cpu_group_map(&book_info, cpu); topo->drawer_mask = cpu_group_map(&drawer_info, cpu); - if (!MACHINE_HAS_TOPOLOGY) { + if (topology_mode != TOPOLOGY_MODE_HW) { + id = 
topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu; topo->thread_id = cpu; topo->core_id = cpu; - topo->socket_id = cpu; - topo->book_id = cpu; - topo->drawer_id = cpu; + topo->socket_id = id; + topo->book_id = id; + topo->drawer_id = id; if (cpu_present(cpu)) cpumask_set_cpu(cpu, &cpus_with_topology); } @@ -254,6 +278,7 @@ static int __arch_update_cpu_topology(void) struct sysinfo_15_1_x *info = tl_info; int rc = 0; + mutex_lock(&smp_cpu_state_mutex); cpumask_clear(&cpus_with_topology); if (MACHINE_HAS_TOPOLOGY) { rc = 1; @@ -263,6 +288,7 @@ static int __arch_update_cpu_topology(void) update_cpu_masks(); if (!MACHINE_HAS_TOPOLOGY) topology_update_polarization_simple(); + mutex_unlock(&smp_cpu_state_mutex); return rc; } @@ -289,6 +315,11 @@ void topology_schedule_update(void) schedule_work(&topology_work); } +static void topology_flush_work(void) +{ + flush_work(&topology_work); +} + static void topology_timer_fn(unsigned long ignored) { if (ptf(PTF_CHECK)) @@ -459,6 +490,12 @@ void __init topology_init_early(void) struct sysinfo_15_1_x *info; set_sched_topology(s390_topology); + if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) { + if (MACHINE_HAS_TOPOLOGY) + topology_mode = TOPOLOGY_MODE_HW; + else + topology_mode = TOPOLOGY_MODE_SINGLE; + } if (!MACHINE_HAS_TOPOLOGY) goto out; tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE); @@ -474,12 +511,97 @@ void __init topology_init_early(void) __arch_update_cpu_topology(); } +static inline int topology_get_mode(int enabled) +{ + if (!enabled) + return TOPOLOGY_MODE_SINGLE; + return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; +} + +static inline int topology_is_enabled(void) +{ + return topology_mode != TOPOLOGY_MODE_SINGLE; +} + +static int __init topology_setup(char *str) +{ + bool enabled; + int rc; + + rc = kstrtobool(str, &enabled); + if (rc) + return rc; + topology_mode = topology_get_mode(enabled); + return 0; +} +early_param("topology", topology_setup); + +static int topology_ctl_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + unsigned int len; + int new_mode; + char buf[2]; + + if (!*lenp || *ppos) { + *lenp = 0; + return 0; + } + if (!write) { + strncpy(buf, topology_is_enabled() ? "1\n" : "0\n", + ARRAY_SIZE(buf)); + len = strnlen(buf, ARRAY_SIZE(buf)); + if (len > *lenp) + len = *lenp; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + goto out; + } + len = *lenp; + if (copy_from_user(buf, buffer, len > sizeof(buf) ? 
sizeof(buf) : len)) + return -EFAULT; + if (buf[0] != '0' && buf[0] != '1') + return -EINVAL; + mutex_lock(&smp_cpu_state_mutex); + new_mode = topology_get_mode(buf[0] == '1'); + if (topology_mode != new_mode) { + topology_mode = new_mode; + topology_schedule_update(); + } + mutex_unlock(&smp_cpu_state_mutex); + topology_flush_work(); +out: + *lenp = len; + *ppos += len; + return 0; +} + +static struct ctl_table topology_ctl_table[] = { + { + .procname = "topology", + .mode = 0644, + .proc_handler = topology_ctl_handler, + }, + { }, +}; + +static struct ctl_table topology_dir_table[] = { + { + .procname = "s390", + .maxlen = 0, + .mode = 0555, + .child = topology_ctl_table, + }, + { }, +}; + static int __init topology_init(void) { if (MACHINE_HAS_TOPOLOGY) set_topology_timer(); else topology_update_polarization_simple(); + register_sysctl_table(topology_dir_table); return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 8ecc25e760fa6dfba0f082cfd8dff38b8c33d9e9..98ffe3ee9411ad23798c7ef896783ebd4d6edcfc 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { - unsigned long mask, result; struct page *head, *page; + unsigned long mask; int refs; - result = write ? 0 : _SEGMENT_ENTRY_PROTECT; - mask = result | _SEGMENT_ENTRY_INVALID; - if ((pmd_val(pmd) & mask) != result) + mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID; + if ((pmd_val(pmd) & mask) != 0) return 0; VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 18e0377f72bbb9ad8f559b00fc2452532d0ad312..88ce1e22237b2b9a22f100e753d00456699ece47 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -136,10 +136,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned lo /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while(0) -#define release_segments(mm) do { } while(0) - /* * FPU lazy state save handling. */ diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index eedd4f625d07650e1f392a37b45e22d765e51602..777a16318aff1fd41df535ccc91453d7d91d318f 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -170,10 +170,6 @@ struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while (0) -#define release_segments(mm) do { } while (0) -#define forget_segments() do { } while (0) /* * FPU lazy state save handling. 
*/ diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 0d925fa0f0c1f270c7bb6ffcacd8f548ede225ec..9f94435cc44f455597fad4afda30fca3e5238675 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -409,5 +409,4 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index 149d8e8eacb83c223d186f86d342de9db21f6063..1c5bd4f8ffca34110fe68fd3ffd96e98743aef2f 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -189,7 +189,6 @@ CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=y CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m @@ -521,7 +520,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_LZO=m CONFIG_CRC_CCITT=m CONFIG_CRC7=m diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6becb96c60a03c5515cb443b5cdc7c683e259433..ad83c1e66dbd23c5ba46e20c8e5deefdf6f3655e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -140,7 +140,7 @@ static int __init setup_maxnodemem(char *str) { char *endp; unsigned long long maxnodemem; - long node; + unsigned long node; node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; if (node >= MAX_NUMNODES || *endp != ':') diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um index 4b2ed5858b2ee0797215279c8d6decadb91298d4..e26376ab5452e2068ce9b3a71ec903ffe444a935 100644 --- a/arch/um/Kconfig.um +++ b/arch/um/Kconfig.um @@ -20,6 +20,7 @@ config LD_SCRIPT_DYN bool default y depends on !LD_SCRIPT_STATIC + select MODULE_REL_CRCS if MODVERSIONS source "fs/Kconfig.binfmt" diff --git a/arch/um/Makefile b/arch/um/Makefile index 6ca4f66085c145fe07c106d1320c2541166ba8c9..b76fcce397a11c44891658ceab02cedf244d44fa 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -121,7 +121,7 @@ archheaders: archprepare: include/generated/user_constants.h LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static -LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib +LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie) CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ $(call cc-option, -fno-stack-protector,) \ diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig index 5636221b8785b0678e2b471b8de8744a3fe2ad30..8f114e3b0a7a36143f79e9af98553ab09784f0bd 100644 --- a/arch/um/configs/i386_defconfig +++ b/arch/um/configs/i386_defconfig @@ -53,7 +53,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UML_NET=y CONFIG_UML_NET_ETHERTAP=y diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig index 7a67b7ac1a7e83cfdfd396db153c3da4aa0179af..5d0875fc0db25b6f8b0c1e1c457bc54703ebfff7 100644 --- a/arch/um/configs/x86_64_defconfig +++ b/arch/um/configs/x86_64_defconfig @@ -51,7 +51,6 @@ CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_INET=y -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UML_NET=y CONFIG_UML_NET_ETHERTAP=y diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index af326fb6510dbf993b2b548537731919330b9e9b..c4d162a94be9d612d6ec834c243b4626a6b15779 100644 --- a/arch/um/drivers/mconsole_kern.c +++ 
b/arch/um/drivers/mconsole_kern.c @@ -148,12 +148,7 @@ void mconsole_proc(struct mc_request *req) } do { - loff_t pos = file->f_pos; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); - len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); - set_fs(old_fs); - file->f_pos = pos; + len = kernel_read(file, buf, PAGE_SIZE - 1, &file->f_pos); if (len < 0) { mconsole_reply(req, "Read of file failed", 1, 0); goto out_free; diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h index f6d1a3f747a9b58b3f33ce0ee25c3bf889985c0b..86942a492454c296bcc5cfc7f5fdd23544a46dc9 100644 --- a/arch/um/include/asm/processor-generic.h +++ b/arch/um/include/asm/processor-generic.h @@ -58,11 +58,6 @@ static inline void release_thread(struct task_struct *task) { } -static inline void mm_copy_segments(struct mm_struct *from_mm, - struct mm_struct *new_mm) -{ -} - #define init_stack (init_thread_union.stack) /* diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 053baff036746f8360a6ff81034a6e3bcc332156..9300f7630d2ad8f0bfa6e3360bd5af5c04e98181 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -11,6 +11,7 @@ #include #include #include +#include struct thread_info { struct task_struct *task; /* main task structure */ @@ -22,6 +23,8 @@ struct thread_info { 0-0xBFFFFFFF for user 0-0xFFFFFFFF for kernel */ struct thread_info *real_thread; /* Points to non-IRQ stack */ + unsigned long aux_fp_regs[FP_SIZE]; /* auxiliary fp_regs to save/restore + them out-of-band */ }; #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 574e03fc7ba256d74e074c4762ced1f77d3cc0b1..d8ddaf9790d2bd858db7f4e41f45d43d1a49d0d0 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -278,7 +278,7 @@ extern int protect(struct mm_id * mm_idp, unsigned long addr, extern int is_skas_winch(int pid, int fd, void *data); extern int start_userspace(unsigned long stub_stack); extern int copy_context_skas0(unsigned long stack, int pid); -extern void userspace(struct uml_pt_regs *regs); +extern void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs); extern int map_stub_pages(int fd, unsigned long code, unsigned long data, unsigned long stack); extern void new_thread(void *stack, jmp_buf *buf, void (*handler)(void)); diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c index 1bf61266da8e1e355ef7c1377dcff22f21385ce9..f138a4a0db990fc681bec3554b2790578bff9358 100644 --- a/arch/um/kernel/gmon_syms.c +++ b/arch/um/kernel/gmon_syms.c @@ -7,3 +7,10 @@ extern void __bb_init_func(void *) __attribute__((weak)); EXPORT_SYMBOL(__bb_init_func); + +extern void __gcov_init(void *) __attribute__((weak)); +EXPORT_SYMBOL(__gcov_init); +extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak)); +EXPORT_SYMBOL(__gcov_merge_add); +extern void __gcov_exit(void) __attribute__((weak)); +EXPORT_SYMBOL(__gcov_exit); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 2c7f721eccbcb2b45799a380cfddb9327c0d509c..691b83b10649c04e267c763f04776da70b2f6f03 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -131,7 +131,7 @@ void new_thread_handler(void) * callback returns only if the kernel thread execs a process */ n = fn(arg); - userspace(¤t->thread.regs.regs); + userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } /* Called magically, see new_thread_handler above */ @@ -150,7 +150,7 @@ void 
fork_handler(void) current->thread.prev_sched = NULL; - userspace(¤t->thread.regs.regs); + userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } int copy_thread(unsigned long clone_flags, unsigned long sp, diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 0b034ebbda2a1c2dd0125a6d4c2135dd886eb440..7f69d17de3540ca8491270408946a00654493cd2 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -98,7 +98,7 @@ static struct clocksource timer_clocksource = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void __init timer_setup(void) +static void __init um_timer_setup(void) { int err; @@ -132,5 +132,5 @@ void read_persistent_clock(struct timespec *ts) void __init time_init(void) { timer_set_signal_handler(); - late_time_init = timer_setup; + late_time_init = um_timer_setup; } diff --git a/arch/um/os-Linux/drivers/tuntap_user.c b/arch/um/os-Linux/drivers/tuntap_user.c index c2e6e1dad8763b2547269fc242e12ecf72cd08ab..db24ce0d09a6443fc18e7d1a69acc7971ecd11d9 100644 --- a/arch/um/os-Linux/drivers/tuntap_user.c +++ b/arch/um/os-Linux/drivers/tuntap_user.c @@ -80,7 +80,7 @@ static int tuntap_open_tramp(char *gate, int *fd_out, int me, int remote, pid = run_helper(tuntap_pre_exec, &data, argv); if (pid < 0) - return -pid; + return pid; close(remote); diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 819d68656673c53f14ee38513a73f63f19f1ac64..c94c3bd70ccd797d03a53f7f9b7b5773ff274506 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -88,12 +88,11 @@ void wait_stub_done(int pid) extern unsigned long current_stub_stack(void); -static void get_skas_faultinfo(int pid, struct faultinfo *fi) +static void get_skas_faultinfo(int pid, struct faultinfo *fi, unsigned long *aux_fp_regs) { int err; - unsigned long fpregs[FP_SIZE]; - err = get_fp_registers(pid, fpregs); + err = get_fp_registers(pid, aux_fp_regs); if (err < 0) { printk(UM_KERN_ERR "save_fp_registers returned %d\n", err); @@ -113,7 +112,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi) */ memcpy(fi, (void *)current_stub_stack(), sizeof(*fi)); - err = put_fp_registers(pid, fpregs); + err = put_fp_registers(pid, aux_fp_regs); if (err < 0) { printk(UM_KERN_ERR "put_fp_registers returned %d\n", err); @@ -121,9 +120,9 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi) } } -static void handle_segv(int pid, struct uml_pt_regs * regs) +static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp_regs) { - get_skas_faultinfo(pid, ®s->faultinfo); + get_skas_faultinfo(pid, ®s->faultinfo, aux_fp_regs); segv(regs->faultinfo, 0, 1, NULL); } @@ -332,7 +331,7 @@ int start_userspace(unsigned long stub_stack) return err; } -void userspace(struct uml_pt_regs *regs) +void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs) { int err, status, op, pid = userspace_pid[0]; /* To prevent races if using_sysemu changes under us.*/ @@ -407,11 +406,11 @@ void userspace(struct uml_pt_regs *regs) case SIGSEGV: if (PTRACE_FULL_FAULTINFO) { get_skas_faultinfo(pid, - ®s->faultinfo); + ®s->faultinfo, aux_fp_regs); (*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si, regs); } - else handle_segv(pid, regs); + else handle_segv(pid, regs, aux_fp_regs); break; case SIGTRAP + 0x80: handle_trap(pid, regs, local_using_sysemu); diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index b1b6b75c5b173092dc986307780586b54c1d02f2..82bf5f8442ba4ec963f27a2ab2ad310682c87032 100644 --- 
a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -154,10 +154,10 @@ static int __init nosysemu_cmd_param(char *str, int* add) __uml_setup("nosysemu", nosysemu_cmd_param, "nosysemu\n" -" Turns off syscall emulation patch for ptrace (SYSEMU) on.\n" +" Turns off syscall emulation patch for ptrace (SYSEMU).\n" " SYSEMU is a performance-patch introduced by Laurent Vivier. It changes\n" -" behaviour of ptrace() and helps reducing host context switch rate.\n" -" To make it working, you need a kernel patch for your host, too.\n" +" behaviour of ptrace() and helps reduce host context switch rates.\n" +" To make it work, you need a kernel patch for your host, too.\n" " See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n" " information.\n\n"); diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 246c67006ed06ad84516b3a56ceac06e05734a1c..8c1fcb6bad21f91604f0b61357db834f560b7fdb 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -33,7 +33,7 @@ #define s3 ((16 + 2 + (3 * 256)) * 4) /* register macros */ -#define CTX %rdi +#define CTX %r12 #define RIO %rsi #define RX0 %rax @@ -56,12 +56,12 @@ #define RX2bh %ch #define RX3bh %dh -#define RT0 %rbp +#define RT0 %rdi #define RT1 %rsi #define RT2 %r8 #define RT3 %r9 -#define RT0d %ebp +#define RT0d %edi #define RT1d %esi #define RT2d %r8d #define RT3d %r9d @@ -120,13 +120,14 @@ ENTRY(__blowfish_enc_blk) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ - movq %rbp, %r11; + movq %r12, %r11; + movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; @@ -142,7 +143,7 @@ ENTRY(__blowfish_enc_blk) round_enc(14); add_roundkey_enc(16); - movq %r11, %rbp; + movq %r11, %r12; movq %r10, RIO; test %cl, %cl; @@ -157,12 +158,13 @@ ENDPROC(__blowfish_enc_blk) ENTRY(blowfish_dec_blk) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ - movq %rbp, %r11; + movq %r12, %r11; + movq %rdi, CTX; movq %rsi, %r10; movq %rdx, RIO; @@ -181,7 +183,7 @@ ENTRY(blowfish_dec_blk) movq %r10, RIO; write_block(); - movq %r11, %rbp; + movq %r11, %r12; ret; ENDPROC(blowfish_dec_blk) @@ -298,20 +300,21 @@ ENDPROC(blowfish_dec_blk) ENTRY(__blowfish_enc_blk_4way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: bool, if true: xor output */ - pushq %rbp; + pushq %r12; pushq %rbx; pushq %rcx; - preload_roundkey_enc(0); - + movq %rdi, CTX movq %rsi, %r11; movq %rdx, RIO; + preload_roundkey_enc(0); + read_block4(); round_enc4(0); @@ -324,39 +327,40 @@ ENTRY(__blowfish_enc_blk_4way) round_enc4(14); add_preloaded_roundkey4(); - popq %rbp; + popq %r12; movq %r11, RIO; - test %bpl, %bpl; + test %r12b, %r12b; jnz .L__enc_xor4; write_block4(); popq %rbx; - popq %rbp; + popq %r12; ret; .L__enc_xor4: xor_block4(); popq %rbx; - popq %rbp; + popq %r12; ret; ENDPROC(__blowfish_enc_blk_4way) ENTRY(blowfish_dec_blk_4way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ - pushq %rbp; + pushq %r12; pushq %rbx; - preload_roundkey_dec(17); - movq %rsi, %r11; + movq %rdi, CTX; + movq %rsi, %r11 movq %rdx, RIO; + preload_roundkey_dec(17); read_block4(); round_dec4(17); @@ -373,7 +377,7 @@ ENTRY(blowfish_dec_blk_4way) write_block4(); popq %rbx; - popq %rbp; + popq %r12; ret; ENDPROC(blowfish_dec_blk_4way) diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 
310319c601ede2884b2d031ca4603593f304e4b3..95ba6956a7f6a1fc0b01e2938f94cee0ff6a2ac3 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S @@ -75,17 +75,17 @@ #define RCD1bh %dh #define RT0 %rsi -#define RT1 %rbp +#define RT1 %r12 #define RT2 %r8 #define RT0d %esi -#define RT1d %ebp +#define RT1d %r12d #define RT2d %r8d #define RT2bl %r8b #define RXOR %r9 -#define RRBP %r10 +#define RR12 %r10 #define RDST %r11 #define RXORd %r9d @@ -197,7 +197,7 @@ ENTRY(__camellia_enc_blk) * %rdx: src * %rcx: bool xor */ - movq %rbp, RRBP; + movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; @@ -227,13 +227,13 @@ ENTRY(__camellia_enc_blk) enc_outunpack(mov, RT1); - movq RRBP, %rbp; + movq RR12, %r12; ret; .L__enc_xor: enc_outunpack(xor, RT1); - movq RRBP, %rbp; + movq RR12, %r12; ret; ENDPROC(__camellia_enc_blk) @@ -248,7 +248,7 @@ ENTRY(camellia_dec_blk) movl $24, RXORd; cmovel RXORd, RT2d; /* max */ - movq %rbp, RRBP; + movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; @@ -271,7 +271,7 @@ ENTRY(camellia_dec_blk) dec_outunpack(); - movq RRBP, %rbp; + movq RR12, %r12; ret; ENDPROC(camellia_dec_blk) @@ -433,7 +433,7 @@ ENTRY(__camellia_enc_blk_2way) */ pushq %rbx; - movq %rbp, RRBP; + movq %r12, RR12; movq %rcx, RXOR; movq %rsi, RDST; movq %rdx, RIO; @@ -461,14 +461,14 @@ ENTRY(__camellia_enc_blk_2way) enc_outunpack2(mov, RT2); - movq RRBP, %rbp; + movq RR12, %r12; popq %rbx; ret; .L__enc2_xor: enc_outunpack2(xor, RT2); - movq RRBP, %rbp; + movq RR12, %r12; popq %rbx; ret; ENDPROC(__camellia_enc_blk_2way) @@ -485,7 +485,7 @@ ENTRY(camellia_dec_blk_2way) cmovel RXORd, RT2d; /* max */ movq %rbx, RXOR; - movq %rbp, RRBP; + movq %r12, RR12; movq %rsi, RDST; movq %rdx, RIO; @@ -508,7 +508,7 @@ ENTRY(camellia_dec_blk_2way) dec_outunpack2(); - movq RRBP, %rbp; + movq RR12, %r12; movq RXOR, %rbx; ret; ENDPROC(camellia_dec_blk_2way) diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index b4a8806234ea1c5f61f46898900a84035af72763..86107c961bb4627f08f6c7eac252589218c6c7e3 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -47,7 +47,7 @@ /********************************************************************** 16-way AVX cast5 **********************************************************************/ -#define CTX %rdi +#define CTX %r15 #define RL1 %xmm0 #define RR1 %xmm1 @@ -70,8 +70,8 @@ #define RTMP %xmm15 -#define RID1 %rbp -#define RID1d %ebp +#define RID1 %rdi +#define RID1d %edi #define RID2 %rsi #define RID2d %esi @@ -226,7 +226,7 @@ .align 16 __cast5_enc_blk16: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RL1: blocks 1 and 2 * RR1: blocks 3 and 4 * RL2: blocks 5 and 6 @@ -246,9 +246,11 @@ __cast5_enc_blk16: * RR4: encrypted blocks 15 and 16 */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -283,7 +285,7 @@ __cast5_enc_blk16: .L__skip_enc: popq %rbx; - popq %rbp; + popq %r15; vmovdqa .Lbswap_mask, RKM; @@ -298,7 +300,7 @@ ENDPROC(__cast5_enc_blk16) .align 16 __cast5_dec_blk16: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RL1: encrypted blocks 1 and 2 * RR1: encrypted blocks 3 and 4 * RL2: encrypted blocks 5 and 6 @@ -318,9 +320,11 @@ __cast5_dec_blk16: * RR4: decrypted blocks 15 and 16 */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -356,7 +360,7 @@ __cast5_dec_blk16: vmovdqa .Lbswap_mask, 
RKM; popq %rbx; - popq %rbp; + popq %r15; outunpack_blocks(RR1, RL1, RTMP, RX, RKM); outunpack_blocks(RR2, RL2, RTMP, RX, RKM); @@ -372,12 +376,14 @@ ENDPROC(__cast5_dec_blk16) ENTRY(cast5_ecb_enc_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; @@ -400,18 +406,22 @@ ENTRY(cast5_ecb_enc_16way) vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); + popq %r15; FRAME_END ret; ENDPROC(cast5_ecb_enc_16way) ENTRY(cast5_ecb_dec_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + + movq %rdi, CTX; movq %rsi, %r11; vmovdqu (0*4*4)(%rdx), RL1; @@ -434,20 +444,22 @@ ENTRY(cast5_ecb_dec_16way) vmovdqu RR4, (6*4*4)(%r11); vmovdqu RL4, (7*4*4)(%r11); + popq %r15; FRAME_END ret; ENDPROC(cast5_ecb_dec_16way) ENTRY(cast5_cbc_dec_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN - pushq %r12; + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -483,23 +495,24 @@ ENTRY(cast5_cbc_dec_16way) vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast5_cbc_dec_16way) ENTRY(cast5_ctr_16way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src * %rcx: iv (big endian, 64bit) */ FRAME_BEGIN - pushq %r12; + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -558,8 +571,8 @@ ENTRY(cast5_ctr_16way) vmovdqu RR4, (6*16)(%r11); vmovdqu RL4, (7*16)(%r11); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast5_ctr_16way) diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 952d3156a93312ff8c5a5c2c193132a89f7413da..7f30b6f0d72c15f402a69d621ecde1c283780266 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -47,7 +47,7 @@ /********************************************************************** 8-way AVX cast6 **********************************************************************/ -#define CTX %rdi +#define CTX %r15 #define RA1 %xmm0 #define RB1 %xmm1 @@ -70,8 +70,8 @@ #define RTMP %xmm15 -#define RID1 %rbp -#define RID1d %ebp +#define RID1 %rdi +#define RID1d %edi #define RID2 %rsi #define RID2d %esi @@ -264,15 +264,17 @@ .align 8 __cast6_enc_blk8: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -297,7 +299,7 @@ __cast6_enc_blk8: QBAR(11); popq %rbx; - popq %rbp; + popq %r15; vmovdqa .Lbswap_mask, RKM; @@ -310,15 +312,17 @@ ENDPROC(__cast6_enc_blk8) .align 8 __cast6_dec_blk8: /* input: - * %rdi: ctx, CTX + * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks * output: * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks */ - pushq %rbp; + pushq %r15; pushq %rbx; + movq %rdi, CTX; + vmovdqa .Lbswap_mask, RKM; vmovd .Lfirst_mask, R1ST; vmovd .L32_mask, R32; @@ -343,7 +347,7 @@ __cast6_dec_blk8: QBAR(0); popq %rbx; - popq %rbp; + popq %r15; vmovdqa .Lbswap_mask, RKM; outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); @@ -354,12 +358,14 @@ ENDPROC(__cast6_dec_blk8) ENTRY(cast6_ecb_enc_8way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; load_8way(%rdx, RA1, RB1, 
RC1, RD1, RA2, RB2, RC2, RD2); @@ -368,18 +374,21 @@ ENTRY(cast6_ecb_enc_8way) store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_ecb_enc_8way) ENTRY(cast6_ecb_dec_8way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); @@ -388,20 +397,22 @@ ENTRY(cast6_ecb_dec_8way) store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_ecb_dec_8way) ENTRY(cast6_cbc_dec_8way) /* input: - * %rdi: ctx, CTX + * %rdi: ctx * %rsi: dst * %rdx: src */ FRAME_BEGIN - pushq %r12; + pushq %r15; + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -411,8 +422,8 @@ ENTRY(cast6_cbc_dec_8way) store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast6_cbc_dec_8way) @@ -425,9 +436,10 @@ ENTRY(cast6_ctr_8way) * %rcx: iv (little endian, 128bit) */ FRAME_BEGIN - pushq %r12; + pushq %r15 + movq %rdi, CTX; movq %rsi, %r11; movq %rdx, %r12; @@ -438,8 +450,8 @@ ENTRY(cast6_ctr_8way) store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; popq %r12; - FRAME_END ret; ENDPROC(cast6_ctr_8way) @@ -452,7 +464,9 @@ ENTRY(cast6_xts_enc_8way) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX movq %rsi, %r11; /* regs <= src, dst <= IVs, regs <= regs xor IVs */ @@ -464,6 +478,7 @@ ENTRY(cast6_xts_enc_8way) /* dst <= regs xor IVs(in dst) */ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_xts_enc_8way) @@ -476,7 +491,9 @@ ENTRY(cast6_xts_dec_8way) * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) */ FRAME_BEGIN + pushq %r15; + movq %rdi, CTX movq %rsi, %r11; /* regs <= src, dst <= IVs, regs <= regs xor IVs */ @@ -488,6 +505,7 @@ ENTRY(cast6_xts_dec_8way) /* dst <= regs xor IVs(in dst) */ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + popq %r15; FRAME_END ret; ENDPROC(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index f3e91647ca274a4b20d4b18f0d7232a3c8ca4567..8e49ce1174947ea612fca65a6d143cdc2386534e 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S @@ -64,12 +64,12 @@ #define RW2bh %ch #define RT0 %r15 -#define RT1 %rbp +#define RT1 %rsi #define RT2 %r14 #define RT3 %rdx #define RT0d %r15d -#define RT1d %ebp +#define RT1d %esi #define RT2d %r14d #define RT3d %edx @@ -177,13 +177,14 @@ ENTRY(des3_ede_x86_64_crypt_blk) * %rsi: dst * %rdx: src */ - pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; + pushq %rsi; /* dst */ + read_block(%rdx, RL0, RR0); initial_permutation(RL0, RR0); @@ -241,6 +242,8 @@ ENTRY(des3_ede_x86_64_crypt_blk) round1(32+15, RL0, RR0, dummy2); final_permutation(RR0, RL0); + + popq %rsi /* dst */ write_block(%rsi, RR0, RL0); popq %r15; @@ -248,7 +251,6 @@ ENTRY(des3_ede_x86_64_crypt_blk) popq %r13; popq %r12; popq %rbx; - popq %rbp; ret; ENDPROC(des3_ede_x86_64_crypt_blk) @@ -432,13 +434,14 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) * %rdx: src (3 blocks) */ - pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; + pushq %rsi /* dst */ + /* load input */ movl 0 * 4(%rdx), RL0d; movl 1 * 4(%rdx), RR0d; @@ -520,6 +523,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) bswapl RR2d; bswapl RL2d; + popq %rsi /* dst */ movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); @@ -532,7 +536,6 @@ 
ENTRY(des3_ede_x86_64_crypt_blk_3way) popq %r13; popq %r12; popq %rbx; - popq %rbp; ret; ENDPROC(des3_ede_x86_64_crypt_blk_3way) diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1eab79c9ac484172a63d9cc0c4a409b7fffe7e8b..9f712a7dfd797cc499e1ef52ba7999078530494a 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -89,7 +89,7 @@ #define REG_RE %rdx #define REG_RTA %r12 #define REG_RTB %rbx -#define REG_T1 %ebp +#define REG_T1 %r11d #define xmm_mov vmovups #define avx2_zeroupper vzeroupper #define RND_F1 1 @@ -637,7 +637,6 @@ _loop3: ENTRY(\name) push %rbx - push %rbp push %r12 push %r13 push %r14 @@ -673,7 +672,6 @@ _loop3: pop %r14 pop %r13 pop %r12 - pop %rbp pop %rbx ret diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index a4109506a5e8884ba54562aed09bc36e5e800d12..6204bd53528c65c0d4a70f05e76a60bcc476499e 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -37,7 +37,7 @@ #define REG_A %ecx #define REG_B %esi #define REG_C %edi -#define REG_D %ebp +#define REG_D %r12d #define REG_E %edx #define REG_T1 %eax @@ -74,10 +74,10 @@ ENTRY(\name) push %rbx - push %rbp push %r12 + push %rbp + mov %rsp, %rbp - mov %rsp, %r12 sub $64, %rsp # allocate workspace and $~15, %rsp # align stack @@ -99,10 +99,9 @@ xor %rax, %rax rep stosq - mov %r12, %rsp # deallocate workspace - - pop %r12 + mov %rbp, %rsp # deallocate workspace pop %rbp + pop %r12 pop %rbx ret diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index e08888a1a5f2cf5f8a6809d86d497664cece52a1..001bbcf93c79ba08628abcf30dd49ad77158420d 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -103,7 +103,7 @@ SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx -TBL = %rbp +TBL = %r12 a = %eax b = %ebx @@ -350,13 +350,13 @@ a = TMP_ ENTRY(sha256_transform_avx) .align 32 pushq %rbx - pushq %rbp + pushq %r12 pushq %r13 pushq %r14 pushq %r15 - pushq %r12 + pushq %rbp + movq %rsp, %rbp - mov %rsp, %r12 subq $STACK_SIZE, %rsp # allocate stack space and $~15, %rsp # align stack pointer @@ -452,13 +452,12 @@ loop2: done_hash: - mov %r12, %rsp - - popq %r12 + mov %rbp, %rsp + popq %rbp popq %r15 popq %r14 popq %r13 - popq %rbp + popq %r12 popq %rbx ret ENDPROC(sha256_transform_avx) diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 89c8f09787d20552c12230ded345282bb2f2bf42..1420db15dcddc8505b57faad5e6e6701f972b517 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -98,8 +98,6 @@ d = %r8d e = %edx # clobbers NUM_BLKS y3 = %esi # clobbers INP - -TBL = %rbp SRND = CTX # SRND is same register as CTX a = %eax @@ -531,7 +529,6 @@ STACK_SIZE = _RSP + _RSP_SIZE ENTRY(sha256_transform_rorx) .align 32 pushq %rbx - pushq %rbp pushq %r12 pushq %r13 pushq %r14 @@ -568,8 +565,6 @@ ENTRY(sha256_transform_rorx) mov CTX, _CTX(%rsp) loop0: - lea K256(%rip), TBL - ## Load first 16 dwords from two blocks VMOVDQ 0*32(INP),XTMP0 VMOVDQ 1*32(INP),XTMP1 @@ -597,19 +592,19 @@ last_block_enter: .align 16 loop1: - vpaddd 0*32(TBL, SRND), X0, XFER + vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 0*32 - vpaddd 1*32(TBL, SRND), X0, XFER + vpaddd K256+1*32(SRND), X0, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 1*32 - vpaddd 2*32(TBL, SRND), X0, XFER + vpaddd K256+2*32(SRND), X0, XFER vmovdqa XFER, 
2*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 2*32 - vpaddd 3*32(TBL, SRND), X0, XFER + vpaddd K256+3*32(SRND), X0, XFER vmovdqa XFER, 3*32+_XFER(%rsp, SRND) FOUR_ROUNDS_AND_SCHED _XFER + 3*32 @@ -619,10 +614,11 @@ loop1: loop2: ## Do last 16 rounds with no scheduling - vpaddd 0*32(TBL, SRND), X0, XFER + vpaddd K256+0*32(SRND), X0, XFER vmovdqa XFER, 0*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 0*32 - vpaddd 1*32(TBL, SRND), X1, XFER + + vpaddd K256+1*32(SRND), X1, XFER vmovdqa XFER, 1*32+_XFER(%rsp, SRND) DO_4ROUNDS _XFER + 1*32 add $2*32, SRND @@ -676,9 +672,6 @@ loop3: ja done_hash do_last_block: - #### do last block - lea K256(%rip), TBL - VMOVDQ 0*16(INP),XWORD0 VMOVDQ 1*16(INP),XWORD1 VMOVDQ 2*16(INP),XWORD2 @@ -718,7 +711,6 @@ done_hash: popq %r14 popq %r13 popq %r12 - popq %rbp popq %rbx ret ENDPROC(sha256_transform_rorx) diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index 39b83c93e7fd6cdbc658a35ef4698756cccef612..c6c05ed2c16a593390ddf7617070f45e58a40ad4 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -95,7 +95,7 @@ SRND = %rsi # clobbers INP c = %ecx d = %r8d e = %edx -TBL = %rbp +TBL = %r12 a = %eax b = %ebx @@ -356,13 +356,13 @@ a = TMP_ ENTRY(sha256_transform_ssse3) .align 32 pushq %rbx - pushq %rbp + pushq %r12 pushq %r13 pushq %r14 pushq %r15 - pushq %r12 + pushq %rbp + mov %rsp, %rbp - mov %rsp, %r12 subq $STACK_SIZE, %rsp and $~15, %rsp @@ -462,13 +462,12 @@ loop2: done_hash: - mov %r12, %rsp - - popq %r12 + mov %rbp, %rsp + popq %rbp popq %r15 popq %r14 popq %r13 - popq %rbp + popq %r12 popq %rbx ret diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 7f5f6c6ec72e9cb0a47c2b5babbbedff487fbbf3..b16d560051629a1c6d518321b93be2672b861c9e 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -69,8 +69,9 @@ XFER = YTMP0 BYTE_FLIP_MASK = %ymm9 -# 1st arg -CTX = %rdi +# 1st arg is %rdi, which is saved to the stack and accessed later via %r12 +CTX1 = %rdi +CTX2 = %r12 # 2nd arg INP = %rsi # 3rd arg @@ -81,7 +82,7 @@ d = %r8 e = %rdx y3 = %rsi -TBL = %rbp +TBL = %rdi # clobbers CTX1 a = %rax b = %rbx @@ -91,26 +92,26 @@ g = %r10 h = %r11 old_h = %r11 -T1 = %r12 +T1 = %r12 # clobbers CTX2 y0 = %r13 y1 = %r14 y2 = %r15 -y4 = %r12 - # Local variables (stack frame) XFER_SIZE = 4*8 SRND_SIZE = 1*8 INP_SIZE = 1*8 INPEND_SIZE = 1*8 +CTX_SIZE = 1*8 RSPSAVE_SIZE = 1*8 -GPRSAVE_SIZE = 6*8 +GPRSAVE_SIZE = 5*8 frame_XFER = 0 frame_SRND = frame_XFER + XFER_SIZE frame_INP = frame_SRND + SRND_SIZE frame_INPEND = frame_INP + INP_SIZE -frame_RSPSAVE = frame_INPEND + INPEND_SIZE +frame_CTX = frame_INPEND + INPEND_SIZE +frame_RSPSAVE = frame_CTX + CTX_SIZE frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE frame_size = frame_GPRSAVE + GPRSAVE_SIZE @@ -576,12 +577,11 @@ ENTRY(sha512_transform_rorx) mov %rax, frame_RSPSAVE(%rsp) # Save GPRs - mov %rbp, frame_GPRSAVE(%rsp) - mov %rbx, 8*1+frame_GPRSAVE(%rsp) - mov %r12, 8*2+frame_GPRSAVE(%rsp) - mov %r13, 8*3+frame_GPRSAVE(%rsp) - mov %r14, 8*4+frame_GPRSAVE(%rsp) - mov %r15, 8*5+frame_GPRSAVE(%rsp) + mov %rbx, 8*0+frame_GPRSAVE(%rsp) + mov %r12, 8*1+frame_GPRSAVE(%rsp) + mov %r13, 8*2+frame_GPRSAVE(%rsp) + mov %r14, 8*3+frame_GPRSAVE(%rsp) + mov %r15, 8*4+frame_GPRSAVE(%rsp) shl $7, NUM_BLKS # convert to bytes jz done_hash @@ -589,14 +589,17 @@ ENTRY(sha512_transform_rorx) mov NUM_BLKS, frame_INPEND(%rsp) ## load initial digest - mov 8*0(CTX),a - mov 8*1(CTX),b - mov 8*2(CTX),c - mov 8*3(CTX),d - mov 
8*4(CTX),e - mov 8*5(CTX),f - mov 8*6(CTX),g - mov 8*7(CTX),h + mov 8*0(CTX1), a + mov 8*1(CTX1), b + mov 8*2(CTX1), c + mov 8*3(CTX1), d + mov 8*4(CTX1), e + mov 8*5(CTX1), f + mov 8*6(CTX1), g + mov 8*7(CTX1), h + + # save %rdi (CTX) before it gets clobbered + mov %rdi, frame_CTX(%rsp) vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK @@ -652,14 +655,15 @@ loop2: subq $1, frame_SRND(%rsp) jne loop2 - addm 8*0(CTX),a - addm 8*1(CTX),b - addm 8*2(CTX),c - addm 8*3(CTX),d - addm 8*4(CTX),e - addm 8*5(CTX),f - addm 8*6(CTX),g - addm 8*7(CTX),h + mov frame_CTX(%rsp), CTX2 + addm 8*0(CTX2), a + addm 8*1(CTX2), b + addm 8*2(CTX2), c + addm 8*3(CTX2), d + addm 8*4(CTX2), e + addm 8*5(CTX2), f + addm 8*6(CTX2), g + addm 8*7(CTX2), h mov frame_INP(%rsp), INP add $128, INP @@ -669,12 +673,11 @@ loop2: done_hash: # Restore GPRs - mov frame_GPRSAVE(%rsp) ,%rbp - mov 8*1+frame_GPRSAVE(%rsp) ,%rbx - mov 8*2+frame_GPRSAVE(%rsp) ,%r12 - mov 8*3+frame_GPRSAVE(%rsp) ,%r13 - mov 8*4+frame_GPRSAVE(%rsp) ,%r14 - mov 8*5+frame_GPRSAVE(%rsp) ,%r15 + mov 8*0+frame_GPRSAVE(%rsp), %rbx + mov 8*1+frame_GPRSAVE(%rsp), %r12 + mov 8*2+frame_GPRSAVE(%rsp), %r13 + mov 8*3+frame_GPRSAVE(%rsp), %r14 + mov 8*4+frame_GPRSAVE(%rsp), %r15 # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index b3f49d2863480334e8b2d12828e4ca20045b8be9..73b471da36226386112a23c0036b7f5da14b3266 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -76,8 +76,8 @@ #define RT %xmm14 #define RR %xmm15 -#define RID1 %rbp -#define RID1d %ebp +#define RID1 %r13 +#define RID1d %r13d #define RID2 %rsi #define RID2d %esi @@ -259,7 +259,7 @@ __twofish_enc_blk8: vmovdqu w(CTX), RK1; - pushq %rbp; + pushq %r13; pushq %rbx; pushq %rcx; @@ -282,7 +282,7 @@ __twofish_enc_blk8: popq %rcx; popq %rbx; - popq %rbp; + popq %r13; outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); @@ -301,7 +301,7 @@ __twofish_dec_blk8: vmovdqu (w+4*4)(CTX), RK1; - pushq %rbp; + pushq %r13; pushq %rbx; inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); @@ -322,7 +322,7 @@ __twofish_dec_blk8: vmovdqu (w)(CTX), RK1; popq %rbx; - popq %rbp; + popq %r13; outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 4cf100ff2a3746f440049dd5c27c254e4dcc1776..72db0664a53dfd6fe64512ebda83caaa97db1abd 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -552,6 +552,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), @@ -560,6 +561,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates), + + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 
8e2457cb6b4a416e1c84d9baa990ea512ec74ba0..005908ee9333f0e87cdd4db7ca14a278d52fa773 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -775,6 +775,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init), + + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init), {}, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index db1fe377e6dd9ddfcfbc006a7346879f263d408a..a7196818416a57381b513254628206a9660ed578 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3462,7 +3462,7 @@ static struct intel_uncore_ops skx_uncore_iio_ops = { static struct intel_uncore_type skx_uncore_iio = { .name = "iio", .num_counters = 4, - .num_boxes = 5, + .num_boxes = 6, .perf_ctr_bits = 48, .event_ctl = SKX_IIO0_MSR_PMON_CTL0, .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, @@ -3492,7 +3492,7 @@ static const struct attribute_group skx_uncore_format_group = { static struct intel_uncore_type skx_uncore_irp = { .name = "irp", .num_counters = 2, - .num_boxes = 5, + .num_boxes = 6, .perf_ctr_bits = 48, .event_ctl = SKX_IRP0_MSR_PMON_CTL0, .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 4bb3ec69e8ea10537c25ef2d792be94f1d7a4127..06723671ae4e91d53b7327b44fe96b588826d838 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -63,6 +63,14 @@ static bool test_intel(int idx) case INTEL_FAM6_ATOM_SILVERMONT1: case INTEL_FAM6_ATOM_SILVERMONT2: case INTEL_FAM6_ATOM_AIRMONT: + + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_ATOM_DENVERTON: + + case INTEL_FAM6_ATOM_GEMINI_LAKE: + + case INTEL_FAM6_XEON_PHI_KNL: + case INTEL_FAM6_XEON_PHI_KNM: if (idx == PERF_MSR_SMI) return true; break; diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 8d0879f1d42cad890057408160e253ca026503b9..8e02b30cf08e16a2ca5b3d0b6aa97bda051a2c85 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -407,10 +407,10 @@ static int load_aout_library(struct file *file) unsigned long bss, start_addr, len, error; int retval; struct exec ex; - + loff_t pos = 0; retval = -ENOEXEC; - error = kernel_read(file, 0, (char *) &ex, sizeof(ex)); + error = kernel_read(file, &ex, sizeof(ex), &pos); if (error != sizeof(ex)) goto out; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index e0bb46c0285752e73c8eb55420e29f97d88a818e..0e2a5edbce00111f6a41f1b3070610b5316d1e71 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, ksig->ka.sa.sa_restorer) sp = (unsigned long) ksig->ka.sa.sa_restorer; - if (fpu->fpstate_active) { + if (fpu->initialized) { unsigned long fx_aligned, math_size; sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size); diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 1b020381ab38965ae2a80d2d86df32c66c92e2e8..c096624137ae831fa4d0f9cebf528453598fdefb 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -218,10 +218,9 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ output, input...) 
\ { \ - register void *__sp asm(_ASM_SP); \ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ "call %P[new2]", feature2) \ - : output, "+r" (__sp) \ + : output, ASM_CALL_CONSTRAINT \ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ [new2] "i" (newfunc2), ## input); \ } diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 676ee5807d864d94538bf16d2ed7535655045128..b0dc91f4bedc680ac69d7ecc481a71c863bade77 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -11,10 +11,12 @@ # define __ASM_FORM_COMMA(x) " " #x "," #endif -#ifdef CONFIG_X86_32 +#ifndef __x86_64__ +/* 32 bit */ # define __ASM_SEL(a,b) __ASM_FORM(a) # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) #else +/* 64 bit */ # define __ASM_SEL(a,b) __ASM_FORM(b) # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) #endif @@ -132,4 +134,15 @@ /* For C file, we already have NOKPROBE_SYMBOL macro */ #endif +#ifndef __ASSEMBLY__ +/* + * This output constraint should be used for any inline asm which has a "call" + * instruction. Otherwise the asm may be inserted before the frame pointer + * gets set up by the containing function. If you forget to do this, objtool + * may print a "call without frame pointer save/setup" warning. + */ +register unsigned long current_stack_pointer asm(_ASM_SP); +#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) +#endif + #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 554cdb205d17586887e6b599c991b2fc090ba38a..e3221ffa304e301858db746a03d6cf8600790004 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -23,11 +23,9 @@ /* * High level FPU state handling functions: */ -extern void fpu__activate_curr(struct fpu *fpu); -extern void fpu__activate_fpstate_read(struct fpu *fpu); -extern void fpu__activate_fpstate_write(struct fpu *fpu); -extern void fpu__current_fpstate_write_begin(void); -extern void fpu__current_fpstate_write_end(void); +extern void fpu__initialize(struct fpu *fpu); +extern void fpu__prepare_read(struct fpu *fpu); +extern void fpu__prepare_write(struct fpu *fpu); extern void fpu__save(struct fpu *fpu); extern void fpu__restore(struct fpu *fpu); extern int fpu__restore_sig(void __user *buf, int ia32_frame); @@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); err; \ }) -#define check_insn(insn, output, input...) \ -({ \ - int err; \ +#define kernel_insn(insn, output, input...) \ asm volatile("1:" #insn "\n\t" \ "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl $-1,%[err]\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(1b, 3b) \ - : [err] "=r" (err), output \ - : "0"(0), input); \ - err; \ -}) + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \ + : output : input) static inline int copy_fregs_to_user(struct fregs_state __user *fx) { @@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) { - int err; - if (IS_ENABLED(CONFIG_X86_32)) { - err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); + kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); } else { if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { - err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); + kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); } else { /* See comment in copy_fxregs_to_kernel() below. 
*/ - err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); + kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); } } - /* Copying from a kernel buffer to FPU registers should never fail: */ - WARN_ON_FPU(err); } static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) @@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) static inline void copy_kernel_to_fregs(struct fregs_state *fx) { - int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); - - WARN_ON_FPU(err); + kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); } static inline int copy_user_to_fregs(struct fregs_state __user *fx) @@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact * XSAVE area format. */ -#define XSTATE_XRESTORE(st, lmask, hmask, err) \ +#define XSTATE_XRESTORE(st, lmask, hmask) \ asm volatile(ALTERNATIVE(XRSTOR, \ XRSTORS, X86_FEATURE_XSAVES) \ "\n" \ - "xor %[err], %[err]\n" \ "3:\n" \ - ".pushsection .fixup,\"ax\"\n" \ - "4: movl $-2, %[err]\n" \ - "jmp 3b\n" \ - ".popsection\n" \ - _ASM_EXTABLE(661b, 4b) \ - : [err] "=r" (err) \ + _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\ + : \ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ : "memory") @@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate) else XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); - /* We should never fault when copying from a kernel buffer: */ + /* + * We should never fault when copying from a kernel buffer, and the FPU + * state we set at boot time should be valid. + */ WARN_ON_FPU(err); } @@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate) u32 hmask = mask >> 32; int err; - WARN_ON(!alternatives_patched); + WARN_ON_FPU(!alternatives_patched); XSTATE_XSAVE(xstate, lmask, hmask, err); @@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask) { u32 lmask = mask; u32 hmask = mask >> 32; - int err; - - XSTATE_XRESTORE(xstate, lmask, hmask, err); - /* We should never fault when copying from a kernel buffer: */ - WARN_ON_FPU(err); + XSTATE_XRESTORE(xstate, lmask, hmask); } /* @@ -526,37 +503,16 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) */ static inline void fpregs_deactivate(struct fpu *fpu) { - WARN_ON_FPU(!fpu->fpregs_active); - - fpu->fpregs_active = 0; this_cpu_write(fpu_fpregs_owner_ctx, NULL); trace_x86_fpu_regs_deactivated(fpu); } static inline void fpregs_activate(struct fpu *fpu) { - WARN_ON_FPU(fpu->fpregs_active); - - fpu->fpregs_active = 1; this_cpu_write(fpu_fpregs_owner_ctx, fpu); trace_x86_fpu_regs_activated(fpu); } -/* - * The question "does this thread have fpu access?" - * is slightly racy, since preemption could come in - * and revoke it immediately after the test. - * - * However, even in that very unlikely scenario, - * we can just assume we have FPU access - typically - * to save the FP state - we'll just take a #NM - * fault and get the FPU access back. - */ -static inline int fpregs_active(void) -{ - return current->thread.fpu.fpregs_active; -} - /* * FPU state switching for scheduling. 
* @@ -571,14 +527,13 @@ static inline int fpregs_active(void) static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) { - if (old_fpu->fpregs_active) { + if (old_fpu->initialized) { if (!copy_fpregs_to_fpstate(old_fpu)) old_fpu->last_cpu = -1; else old_fpu->last_cpu = cpu; /* But leave fpu_fpregs_owner_ctx! */ - old_fpu->fpregs_active = 0; trace_x86_fpu_regs_deactivated(old_fpu); } else old_fpu->last_cpu = -1; @@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu) static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu) { bool preload = static_cpu_has(X86_FEATURE_FPU) && - new_fpu->fpstate_active; + new_fpu->initialized; if (preload) { if (!fpregs_state_valid(new_fpu, cpu)) @@ -617,8 +572,7 @@ static inline void user_fpu_begin(void) struct fpu *fpu = ¤t->thread.fpu; preempt_disable(); - if (!fpregs_active()) - fpregs_activate(fpu); + fpregs_activate(fpu); preempt_enable(); } diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 3c80f5b9c09d8cf5407378a39ba27cfa380bafbf..a1520575d86b81fd2521d7e4013856b7e70d4db2 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -68,6 +68,9 @@ struct fxregs_state { /* Default value for fxregs_state.mxcsr: */ #define MXCSR_DEFAULT 0x1f80 +/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */ +#define MXCSR_AND_FLAGS_SIZE sizeof(u64) + /* * Software based FPU emulation state. This is arbitrary really, * it matches the x87 format to make it easier to understand: @@ -290,36 +293,13 @@ struct fpu { unsigned int last_cpu; /* - * @fpstate_active: + * @initialized: * - * This flag indicates whether this context is active: if the task + * This flag indicates whether this context is initialized: if the task * is not running then we can restore from this context, if the task * is running then we should save into this context. */ - unsigned char fpstate_active; - - /* - * @fpregs_active: - * - * This flag determines whether a given context is actively - * loaded into the FPU's registers and that those registers - * represent the task's current FPU state. - * - * Note the interaction with fpstate_active: - * - * # task does not use the FPU: - * fpstate_active == 0 - * - * # task uses the FPU and regs are active: - * fpstate_active == 1 && fpregs_active == 1 - * - * # the regs are inactive but still match fpstate: - * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu - * - * The third state is what we use for the lazy restore optimization - * on lazy-switching CPUs. 
- */ - unsigned char fpregs_active; + unsigned char initialized; /* * @state: diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 1b2799e0699a4d47a3ef191f6177f0a4abc13ceb..83fee2469eb76079771c668545db95e929d1446c 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void); void *get_xsave_addr(struct xregs_state *xsave, int xstate); const void *get_xsave_field_ptr(int xstate_field); int using_compacted_format(void); -int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, - void __user *ubuf, struct xregs_state *xsave); -int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, - struct xregs_state *xsave); +int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); +int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); +int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf); +int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf); + +/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ +extern int validate_xstate_header(const struct xstate_header *hdr); + #endif diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8844eee290b268b11a8404b1d3305551c6b98203..c73e493adf0748108c31389ca62437267ea877e2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -951,7 +951,6 @@ struct kvm_x86_ops { void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); - u32 (*get_pkru)(struct kvm_vcpu *vcpu); void (*tlb_flush)(struct kvm_vcpu *vcpu); @@ -973,7 +972,7 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); - bool (*get_enable_apicv)(void); + bool (*get_enable_apicv)(struct kvm_vcpu *vcpu); void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 7ae318c340d9b5d0fca2697dbb7e977b0bda3892..c120b5db178aa415e1bbb97957dd7d73687575d3 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -286,6 +286,32 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, return __pkru_allows_pkey(vma_pkey(vma), write); } +/* + * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID + * bits. This serves two purposes. It prevents a nasty situation in + * which PCID-unaware code saves CR3, loads some other value (with PCID + * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if + * the saved ASID was nonzero. It also means that any bugs involving + * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger + * deterministically. 
+ */ + +static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) +{ + if (static_cpu_has(X86_FEATURE_PCID)) { + VM_WARN_ON_ONCE(asid > 4094); + return __sme_pa(mm->pgd) | (asid + 1); + } else { + VM_WARN_ON_ONCE(asid != 0); + return __sme_pa(mm->pgd); + } +} + +static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) +{ + VM_WARN_ON_ONCE(asid > 4094); + return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; +} /* * This can be used from process context to figure out what the value of @@ -296,10 +322,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, */ static inline unsigned long __get_current_cr3_fast(void) { - unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd); - - if (static_cpu_has(X86_FEATURE_PCID)) - cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid); + unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), + this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* For now, be very restrictive about when this can be called. */ VM_WARN_ON(in_nmi() || preemptible()); diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 63cc96f064dc4987813352eb8ceba869d3738856..738503e1f80c14f1b5720275dd57161509927ef8 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -179,7 +179,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) u64 input_address = input ? virt_to_phys(input) : 0; u64 output_address = output ? virt_to_phys(output) : 0; u64 hv_status; - register void *__sp asm(_ASM_SP); #ifdef CONFIG_X86_64 if (!hv_hypercall_pg) @@ -187,7 +186,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) __asm__ __volatile__("mov %4, %%r8\n" "call *%5" - : "=a" (hv_status), "+r" (__sp), + : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input_address) : "r" (output_address), "m" (hv_hypercall_pg) : "cc", "memory", "r8", "r9", "r10", "r11"); @@ -202,7 +201,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) __asm__ __volatile__("call *%7" : "=A" (hv_status), - "+c" (input_address_lo), "+r" (__sp) + "+c" (input_address_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input_address_hi), "D"(output_address_hi), "S"(output_address_lo), @@ -224,12 +223,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) { u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT; - register void *__sp asm(_ASM_SP); #ifdef CONFIG_X86_64 { __asm__ __volatile__("call *%4" - : "=a" (hv_status), "+r" (__sp), + : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input1) : "m" (hv_hypercall_pg) : "cc", "r8", "r9", "r10", "r11"); @@ -242,7 +240,7 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) __asm__ __volatile__ ("call *%5" : "=A"(hv_status), "+c"(input1_lo), - "+r"(__sp) + ASM_CALL_CONSTRAINT : "A" (control), "b" (input1_hi), "m" (hv_hypercall_pg) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 42873edd9f9d20cda2d1cfcb92b3798c4a75b3aa..280d94c36dada184fce42cd03d5d05fe7ebc8698 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -459,8 +459,8 @@ int paravirt_disable_iospace(void); */ #ifdef CONFIG_X86_32 #define PVOP_VCALL_ARGS \ - unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \ - register void *__sp asm("esp") + unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; + #define 
PVOP_CALL_ARGS PVOP_VCALL_ARGS #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) @@ -480,8 +480,8 @@ int paravirt_disable_iospace(void); /* [re]ax isn't an arg, but the return val */ #define PVOP_VCALL_ARGS \ unsigned long __edi = __edi, __esi = __esi, \ - __edx = __edx, __ecx = __ecx, __eax = __eax; \ - register void *__sp asm("rsp") + __edx = __edx, __ecx = __ecx, __eax = __eax; + #define PVOP_CALL_ARGS PVOP_VCALL_ARGS #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) @@ -532,7 +532,7 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : call_clbr, "+r" (__sp) \ + : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ @@ -542,7 +542,7 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : call_clbr, "+r" (__sp) \ + : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ @@ -569,7 +569,7 @@ int paravirt_disable_iospace(void); asm volatile(pre \ paravirt_alt(PARAVIRT_CALL) \ post \ - : call_clbr, "+r" (__sp) \ + : call_clbr, ASM_CALL_CONSTRAINT \ : paravirt_type(op), \ paravirt_clobber(clbr), \ ##__VA_ARGS__ \ diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index ec1f3c6511506ee1f0ff5240a9ff95d0e6fa68c1..4f44505dbf870e79d4689af9db2f537fe764b831 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -100,19 +100,14 @@ static __always_inline bool should_resched(int preempt_offset) #ifdef CONFIG_PREEMPT extern asmlinkage void ___preempt_schedule(void); -# define __preempt_schedule() \ -({ \ - register void *__sp asm(_ASM_SP); \ - asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \ -}) +# define __preempt_schedule() \ + asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT) extern asmlinkage void preempt_schedule(void); extern asmlinkage void ___preempt_schedule_notrace(void); -# define __preempt_schedule_notrace() \ -({ \ - register void *__sp asm(_ASM_SP); \ - asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \ -}) +# define __preempt_schedule_notrace() \ + asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT) + extern asmlinkage void preempt_schedule_notrace(void); #endif diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3fa26a61eabcef0dc95b19221ca5e76a6ba2a01a..b390ff76e58fd9f28c62ba5e3a8ab169ac18ad62 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -677,8 +677,6 @@ static inline void sync_core(void) * Like all of Linux's memory ordering operations, this is a * compiler barrier as well. 
*/ - register void *__sp asm(_ASM_SP); - #ifdef CONFIG_X86_32 asm volatile ( "pushfl\n\t" @@ -686,7 +684,7 @@ static inline void sync_core(void) "pushl $1f\n\t" "iret\n\t" "1:" - : "+r" (__sp) : : "memory"); + : ASM_CALL_CONSTRAINT : : "memory"); #else unsigned int tmp; @@ -703,7 +701,7 @@ static inline void sync_core(void) "iretq\n\t" UNWIND_HINT_RESTORE "1:" - : "=&r" (tmp), "+r" (__sp) : : "cc", "memory"); + : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory"); #endif } diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index a34e0d4b957d639afb5978863e57fefb74a173f2..7116b7931c7b807766fef6d820ec929da43940c7 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h @@ -103,7 +103,6 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) ({ \ long tmp; \ struct rw_semaphore* ret; \ - register void *__sp asm(_ASM_SP); \ \ asm volatile("# beginning down_write\n\t" \ LOCK_PREFIX " xadd %1,(%4)\n\t" \ @@ -114,7 +113,8 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) " call " slow_path "\n" \ "1:\n" \ "# ending down_write" \ - : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \ + : "+m" (sem->count), "=d" (tmp), \ + "=a" (ret), ASM_CALL_CONSTRAINT \ : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ : "memory", "cc"); \ ret; \ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 5161da1a0fa0a522c80b93f758a84914a88fad46..89e7eeb5cec1dfa6804d5bba2dfe010e6ad8f455 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -158,17 +158,6 @@ struct thread_info { */ #ifndef __ASSEMBLY__ -static inline unsigned long current_stack_pointer(void) -{ - unsigned long sp; -#ifdef CONFIG_X86_64 - asm("mov %%rsp,%0" : "=g" (sp)); -#else - asm("mov %%esp,%0" : "=g" (sp)); -#endif - return sp; -} - /* * Walks up the stack frames to make sure that the specified object is * entirely contained by a single stack frame. 
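A minimal user-space sketch of the ASM_CALL_CONSTRAINT idiom that the hunks above convert to, for readers following the series. The helper symbol, function names and build command below are hypothetical illustrations only; the real definitions are the ones added to arch/x86/include/asm/asm.h earlier in this series.

/* asm_call_demo.c (hypothetical): gcc -O2 -o asm_call_demo asm_call_demo.c */

/* Tie a global register variable to the stack pointer, as asm.h now does. */
register unsigned long current_stack_pointer asm("rsp");
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)

/* Hypothetical assembly helper so the sketch links and runs. */
void helper(void);
asm(".globl helper\n"
    "helper:\n"
    "        ret\n");

static void do_call(void)
{
	/*
	 * The "call" lives inside inline asm, so the stack pointer is listed
	 * as an in/out operand; this keeps the compiler from scheduling the
	 * asm before the function's frame pointer is set up, which is what
	 * the removed "register void *__sp asm(_ASM_SP)" locals used to
	 * guarantee at each call site.
	 */
	asm volatile("call helper" : ASM_CALL_CONSTRAINT : : "memory");
}

int main(void)
{
	do_call();
	return 0;
}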
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index 342e59789fcdc2e62ce1fd94df32f99f12359bd0..39f7a27bef130fbb6c83a881951e440091ed9168 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu, TP_STRUCT__entry( __field(struct fpu *, fpu) - __field(bool, fpregs_active) - __field(bool, fpstate_active) + __field(bool, initialized) __field(u64, xfeatures) __field(u64, xcomp_bv) ), TP_fast_assign( __entry->fpu = fpu; - __entry->fpregs_active = fpu->fpregs_active; - __entry->fpstate_active = fpu->fpstate_active; + __entry->initialized = fpu->initialized; if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { __entry->xfeatures = fpu->state.xsave.header.xfeatures; __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv; } ), - TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx", + TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx", __entry->fpu, - __entry->fpregs_active, - __entry->fpstate_active, + __entry->initialized, __entry->xfeatures, __entry->xcomp_bv ) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 184eb9894dae3f2cca10bfe1813a348614830305..4b892917edeb787ce6380fd78a0d1c82f2299982 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -166,11 +166,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) ({ \ int __ret_gu; \ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ - register void *__sp asm(_ASM_SP); \ __chk_user_ptr(ptr); \ might_fault(); \ asm volatile("call __get_user_%P4" \ - : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \ + : "=a" (__ret_gu), "=r" (__val_gu), \ + ASM_CALL_CONSTRAINT \ : "0" (ptr), "i" (sizeof(*(ptr)))); \ (x) = (__force __typeof__(*(ptr))) __val_gu; \ __builtin_expect(__ret_gu, 0); \ @@ -337,7 +337,7 @@ do { \ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=r" (retval), "=&A"(x) \ - : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ + : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \ "i" (errret), "0" (retval)); \ }) diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 9606688caa4bea8db0175fe71fe61d9706108aba..7cb282e9e58777aeb2ffd86b344d39a5189c5f84 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -113,10 +113,9 @@ extern struct { char _entry[32]; } hypercall_page[]; register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \ register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \ register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \ - register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \ - register void *__sp asm(_ASM_SP); + register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; -#define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp) +#define __HYPERCALL_0PARAM "=r" (__res), ASM_CALL_CONSTRAINT #define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1) #define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2) #define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3) @@ -552,13 +551,13 @@ static inline void MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, struct desc_struct desc) { - u32 *p = (u32 *) &desc; - mcl->op = __HYPERVISOR_update_descriptor; if (sizeof(maddr) == sizeof(long)) { mcl->args[0] = maddr; mcl->args[1] = *(unsigned long *)&desc; } else { + u32 *p = (u32 *)&desc; + mcl->args[0] = maddr; mcl->args[1] = maddr >> 32; mcl->args[2] 
= *p++; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index f8ae286c15026908b60b49ee72fef6b827b7b515..079535e53e2a6435b3b4bd0fc3fa5c932cc2cd09 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1373,7 +1373,7 @@ static void __init acpi_reduced_hw_init(void) * If your system is blacklisted here, but you find that acpi=force * works for you, please contact linux-acpi@vger.kernel.org */ -static struct dmi_system_id __initdata acpi_dmi_table[] = { +static const struct dmi_system_id acpi_dmi_table[] __initconst = { /* * Boxes that need ACPI disabled */ @@ -1448,7 +1448,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { }; /* second table for DMI checks that should run after early-quirks */ -static struct dmi_system_id __initdata acpi_dmi_table_late[] = { +static const struct dmi_system_id acpi_dmi_table_late[] __initconst = { /* * HP laptops which use a DSDT reporting as HP/SB400/10000, * which includes some code which overrides all temperature diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 446b0d3d4932fbe30f6d067cfd09bf65a5ac6709..e4b0d92b3ae067cec7f68acc76d7ac4615fe43b1 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -2043,7 +2043,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata apm_dmi_table[] = { +static const struct dmi_system_id apm_dmi_table[] __initconst = { { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 9862e2cd6d93da331052f664f49fd4ce1edb8d6b..d58184b7cd4438144e2d0ac3f4744d19ff4ffb31 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -763,6 +763,16 @@ static void init_amd_bd(struct cpuinfo_x86 *c) } } +static void init_amd_zn(struct cpuinfo_x86 *c) +{ + /* + * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects + * all up to and including B1. + */ + if (c->x86_model <= 1 && c->x86_mask <= 1) + set_cpu_cap(c, X86_FEATURE_CPB); +} + static void init_amd(struct cpuinfo_x86 *c) { early_init_amd(c); @@ -791,6 +801,7 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x10: init_amd_gh(c); break; case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; + case 0x17: init_amd_zn(c); break; } /* Enable workaround for FXSAVE leak */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index db684880d74ae47fbff37888ff31dd13b9a4653b..0af86d9242da0f6882f1f5252dfa659038c627ac 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -21,14 +21,6 @@ void __init check_bugs(void) { -#ifdef CONFIG_X86_32 - /* - * Regardless of whether PCID is enumerated, the SDM says - * that it can't be enabled in 32-bit mode. - */ - setup_clear_cpu_cap(X86_FEATURE_PCID); -#endif - identify_boot_cpu(); if (!IS_ENABLED(CONFIG_SMP)) { diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 775f10100d7febac27a84bb9c8deab24e119a8c8..c9176bae7fd8cdb0e85f6d60766a809b9b36aa42 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -904,6 +904,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); fpu__init_system(c); + +#ifdef CONFIG_X86_32 + /* + * Regardless of whether PCID is enumerated, the SDM says + * that it can't be enabled in 32-bit mode. 
+ */ + setup_clear_cpu_cap(X86_FEATURE_PCID); +#endif } void __init early_cpu_init(void) diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index e1114f070c2dfdedf9911cd587afb7cca1769785..f92a6593de1ec651d244d43d16acbd8d49959e4b 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -100,7 +100,7 @@ void __kernel_fpu_begin(void) kernel_fpu_disable(); - if (fpu->fpregs_active) { + if (fpu->initialized) { /* * Ignore return value -- we don't care if reg state * is clobbered. @@ -116,7 +116,7 @@ void __kernel_fpu_end(void) { struct fpu *fpu = ¤t->thread.fpu; - if (fpu->fpregs_active) + if (fpu->initialized) copy_kernel_to_fpregs(&fpu->state); kernel_fpu_enable(); @@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu) preempt_disable(); trace_x86_fpu_before_save(fpu); - if (fpu->fpregs_active) { + if (fpu->initialized) { if (!copy_fpregs_to_fpstate(fpu)) { copy_kernel_to_fpregs(&fpu->state); } @@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init); int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) { - dst_fpu->fpregs_active = 0; dst_fpu->last_cpu = -1; - if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)) + if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU)) return 0; WARN_ON_FPU(src_fpu != ¤t->thread.fpu); @@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) /* * Save current FPU registers directly into the child * FPU context, without any memory-to-memory copying. - * In lazy mode, if the FPU context isn't loaded into - * fpregs, CR0.TS will be set and do_device_not_available - * will load the FPU context. * - * We have to do all this with preemption disabled, - * mostly because of the FNSAVE case, because in that - * case we must not allow preemption in the window - * between the FNSAVE and us marking the context lazy. - * - * It shouldn't be an issue as even FNSAVE is plenty - * fast in terms of critical section length. + * ( The function 'fails' in the FNSAVE case, which destroys + * register contents so we have to copy them back. ) */ - preempt_disable(); if (!copy_fpregs_to_fpstate(dst_fpu)) { - memcpy(&src_fpu->state, &dst_fpu->state, - fpu_kernel_xstate_size); - + memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size); copy_kernel_to_fpregs(&src_fpu->state); } - preempt_enable(); trace_x86_fpu_copy_src(src_fpu); trace_x86_fpu_copy_dst(dst_fpu); @@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) * Activate the current task's in-memory FPU context, * if it has not been used before: */ -void fpu__activate_curr(struct fpu *fpu) +void fpu__initialize(struct fpu *fpu) { WARN_ON_FPU(fpu != ¤t->thread.fpu); - if (!fpu->fpstate_active) { + if (!fpu->initialized) { fpstate_init(&fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); /* Safe to do for the current task: */ - fpu->fpstate_active = 1; + fpu->initialized = 1; } } -EXPORT_SYMBOL_GPL(fpu__activate_curr); +EXPORT_SYMBOL_GPL(fpu__initialize); /* * This function must be called before we read a task's fpstate. * - * If the task has not used the FPU before then initialize its - * fpstate. 
+ * There's two cases where this gets called: + * + * - for the current task (when coredumping), in which case we have + * to save the latest FPU registers into the fpstate, + * + * - or it's called for stopped tasks (ptrace), in which case the + * registers were already saved by the context-switch code when + * the task scheduled out - we only have to initialize the registers + * if they've never been initialized. * * If the task has used the FPU before then save it. */ -void fpu__activate_fpstate_read(struct fpu *fpu) +void fpu__prepare_read(struct fpu *fpu) { - /* - * If fpregs are active (in the current CPU), then - * copy them to the fpstate: - */ - if (fpu->fpregs_active) { + if (fpu == ¤t->thread.fpu) { fpu__save(fpu); } else { - if (!fpu->fpstate_active) { + if (!fpu->initialized) { fpstate_init(&fpu->state); trace_x86_fpu_init_state(fpu); trace_x86_fpu_activate_state(fpu); /* Safe to do for current and for stopped child tasks: */ - fpu->fpstate_active = 1; + fpu->initialized = 1; } } } @@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu) /* * This function must be called before we write a task's fpstate. * - * If the task has used the FPU before then unlazy it. + * If the task has used the FPU before then invalidate any cached FPU registers. * If the task has not used the FPU before then initialize its fpstate. * * After this function call, after registers in the fpstate are * modified and the child task has woken up, the child task will * restore the modified FPU state from the modified context. If we - * didn't clear its lazy status here then the lazy in-registers + * didn't clear its cached status here then the cached in-registers * state pending on its former CPU could be restored, corrupting * the modifications. */ -void fpu__activate_fpstate_write(struct fpu *fpu) +void fpu__prepare_write(struct fpu *fpu) { /* * Only stopped child tasks can be used to modify the FPU @@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu) */ WARN_ON_FPU(fpu == ¤t->thread.fpu); - if (fpu->fpstate_active) { - /* Invalidate any lazy state: */ + if (fpu->initialized) { + /* Invalidate any cached state: */ __fpu_invalidate_fpregs_state(fpu); } else { fpstate_init(&fpu->state); @@ -310,73 +300,10 @@ void fpu__activate_fpstate_write(struct fpu *fpu) trace_x86_fpu_activate_state(fpu); /* Safe to do for stopped child tasks: */ - fpu->fpstate_active = 1; + fpu->initialized = 1; } } -/* - * This function must be called before we write the current - * task's fpstate. - * - * This call gets the current FPU register state and moves - * it in to the 'fpstate'. Preemption is disabled so that - * no writes to the 'fpstate' can occur from context - * swiches. - * - * Must be followed by a fpu__current_fpstate_write_end(). - */ -void fpu__current_fpstate_write_begin(void) -{ - struct fpu *fpu = ¤t->thread.fpu; - - /* - * Ensure that the context-switching code does not write - * over the fpstate while we are doing our update. - */ - preempt_disable(); - - /* - * Move the fpregs in to the fpu's 'fpstate'. - */ - fpu__activate_fpstate_read(fpu); - - /* - * The caller is about to write to 'fpu'. Ensure that no - * CPU thinks that its fpregs match the fpstate. This - * ensures we will not be lazy and skip a XRSTOR in the - * future. - */ - __fpu_invalidate_fpregs_state(fpu); -} - -/* - * This function must be paired with fpu__current_fpstate_write_begin() - * - * This will ensure that the modified fpstate gets placed back in - * the fpregs if necessary. 
- * - * Note: This function may be called whether or not an _actual_ - * write to the fpstate occurred. - */ -void fpu__current_fpstate_write_end(void) -{ - struct fpu *fpu = ¤t->thread.fpu; - - /* - * 'fpu' now has an updated copy of the state, but the - * registers may still be out of date. Update them with - * an XRSTOR if they are active. - */ - if (fpregs_active()) - copy_kernel_to_fpregs(&fpu->state); - - /* - * Our update is done and the fpregs/fpstate are in sync - * if necessary. Context switches can happen again. - */ - preempt_enable(); -} - /* * 'fpu__restore()' is called to copy FPU registers from * the FPU fpstate to the live hw registers and to activate @@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void) */ void fpu__restore(struct fpu *fpu) { - fpu__activate_curr(fpu); + fpu__initialize(fpu); /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ kernel_fpu_disable(); @@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu) { preempt_disable(); - if (fpu->fpregs_active) { - /* Ignore delayed exceptions from user space */ - asm volatile("1: fwait\n" - "2:\n" - _ASM_EXTABLE(1b, 2b)); - fpregs_deactivate(fpu); + if (fpu == ¤t->thread.fpu) { + if (fpu->initialized) { + /* Ignore delayed exceptions from user space */ + asm volatile("1: fwait\n" + "2:\n" + _ASM_EXTABLE(1b, 2b)); + fpregs_deactivate(fpu); + } } - fpu->fpstate_active = 0; + fpu->initialized = 0; trace_x86_fpu_dropped(fpu); @@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu) * Make sure fpstate is cleared and initialized. */ if (static_cpu_has(X86_FEATURE_FPU)) { - fpu__activate_curr(fpu); + preempt_disable(); + fpu__initialize(fpu); user_fpu_begin(); copy_init_fpstate_to_fpregs(); + preempt_enable(); } } diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index d5d44c452624c88e3abb5c75e68d00b55fee6019..7affb7e3d9a5b94326b51528119787f4f956640b 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void) WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; - WARN_ON_FPU(current->thread.fpu.fpstate_active); + WARN_ON_FPU(current->thread.fpu.initialized); } /* diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index b188b16841e376574c5f39e55b7b687ffdd09495..3ea15137238964cc6f972b2d11c83cb619de55bd 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r { struct fpu *target_fpu = &target->thread.fpu; - return target_fpu->fpstate_active ? regset->n : 0; + return target_fpu->initialized ? 
regset->n : 0; } int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) { struct fpu *target_fpu = &target->thread.fpu; - if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active) + if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized) return regset->n; else return 0; @@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_FXSR)) return -ENODEV; - fpu__activate_fpstate_read(fpu); + fpu__prepare_read(fpu); fpstate_sanitize_xstate(fpu); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, @@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, if (!boot_cpu_has(X86_FEATURE_FXSR)) return -ENODEV; - fpu__activate_fpstate_write(fpu); + fpu__prepare_write(fpu); fpstate_sanitize_xstate(fpu); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, @@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, xsave = &fpu->state.xsave; - fpu__activate_fpstate_read(fpu); + fpu__prepare_read(fpu); if (using_compacted_format()) { - ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave); + if (kbuf) + ret = copy_xstate_to_kernel(kbuf, xsave, pos, count); + else + ret = copy_xstate_to_user(ubuf, xsave, pos, count); } else { fpstate_sanitize_xstate(fpu); /* @@ -129,28 +132,29 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, xsave = &fpu->state.xsave; - fpu__activate_fpstate_write(fpu); + fpu__prepare_write(fpu); - if (boot_cpu_has(X86_FEATURE_XSAVES)) - ret = copyin_to_xsaves(kbuf, ubuf, xsave); - else + if (using_compacted_format()) { + if (kbuf) + ret = copy_kernel_to_xstate(xsave, kbuf); + else + ret = copy_user_to_xstate(xsave, ubuf); + } else { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); - - /* - * In case of failure, mark all states as init: - */ - if (ret) - fpstate_init(&fpu->state); + if (!ret) + ret = validate_xstate_header(&xsave->header); + } /* * mxcsr reserved bits must be masked to zero for security reasons. */ xsave->i387.mxcsr &= mxcsr_feature_mask; - xsave->header.xfeatures &= xfeatures_mask; + /* - * These bits must be zero. 
+ * In case of failure, mark all states as init: */ - memset(&xsave->header.reserved, 0, 48); + if (ret) + fpstate_init(&fpu->state); return ret; } @@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, struct fpu *fpu = &target->thread.fpu; struct user_i387_ia32_struct env; - fpu__activate_fpstate_read(fpu); + fpu__prepare_read(fpu); if (!boot_cpu_has(X86_FEATURE_FPU)) return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); @@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, struct user_i387_ia32_struct env; int ret; - fpu__activate_fpstate_write(fpu); + fpu__prepare_write(fpu); fpstate_sanitize_xstate(fpu); if (!boot_cpu_has(X86_FEATURE_FPU)) @@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu) struct fpu *fpu = &tsk->thread.fpu; int fpvalid; - fpvalid = fpu->fpstate_active; + fpvalid = fpu->initialized; if (fpvalid) fpvalid = !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct), diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 83c23c230b4c4fc78664ba08663a04c684c2105b..fb639e70048f58dfaef5576ab73e6998fa8ebbd1 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) */ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) { - struct xregs_state *xsave = ¤t->thread.fpu.state.xsave; + struct fpu *fpu = ¤t->thread.fpu; + struct xregs_state *xsave = &fpu->state.xsave; struct task_struct *tsk = current; int ia32_fxstate = (buf != buf_fx); @@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) sizeof(struct user_i387_ia32_struct), NULL, (struct _fpstate_32 __user *) buf) ? -1 : 1; - if (fpregs_active() || using_compacted_format()) { + if (fpu->initialized || using_compacted_format()) { /* Save the live register state to the user directly. */ if (copy_fpregs_to_sigframe(buf_fx)) return -1; /* Update the thread's fxstate to save the fsave header. */ if (ia32_fxstate) - copy_fxregs_to_kernel(&tsk->thread.fpu); + copy_fxregs_to_kernel(fpu); } else { /* * It is a *bug* if kernel uses compacted-format for xsave @@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) return -1; } - fpstate_sanitize_xstate(&tsk->thread.fpu); + fpstate_sanitize_xstate(fpu); if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size)) return -1; } @@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk, struct xstate_header *header = &xsave->header; if (use_xsave()) { - /* These bits must be zero. */ - memset(header->reserved, 0, 48); + /* + * Note: we don't need to zero the reserved bits in the + * xstate_header here because we either didn't copy them at all, + * or we checked earlier that they aren't set. 
+ */ /* * Init the state that is not present in the memory @@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk, if (fx_only) header->xfeatures = XFEATURE_MASK_FPSSE; else - header->xfeatures &= (xfeatures_mask & xfeatures); + header->xfeatures &= xfeatures; } if (use_fxsr()) { @@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) if (!access_ok(VERIFY_READ, buf, size)) return -EACCES; - fpu__activate_curr(fpu); + fpu__initialize(fpu); if (!static_cpu_has(X86_FEATURE_FPU)) return fpregs_soft_set(current, NULL, @@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) /* * For 32-bit frames with fxstate, copy the user state to the * thread's fpu state, reconstruct fxstate from the fsave - * header. Sanitize the copied state etc. + * header. Validate and sanitize the copied state. */ struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; int err = 0; /* - * Drop the current fpu which clears fpu->fpstate_active. This ensures + * Drop the current fpu which clears fpu->initialized. This ensures * that any context-switch during the copy of the new state, * avoids the intermediate state from getting restored/saved. * Thus avoiding the new restored state from getting corrupted. * We will be ready to restore/save the state only after - * fpu->fpstate_active is again set. + * fpu->initialized is again set. */ fpu__drop(fpu); if (using_compacted_format()) { - err = copyin_to_xsaves(NULL, buf_fx, - &fpu->state.xsave); + err = copy_user_to_xstate(&fpu->state.xsave, buf_fx); } else { - err = __copy_from_user(&fpu->state.xsave, - buf_fx, state_size); + err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size); + + if (!err && state_size > offsetof(struct xregs_state, header)) + err = validate_xstate_header(&fpu->state.xsave.header); } if (err || __copy_from_user(&env, buf, sizeof(env))) { @@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); } - fpu->fpstate_active = 1; + fpu->initialized = 1; preempt_disable(); fpu__restore(fpu); preempt_enable(); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c24ac1efb12d7a1574450a1359699309148afcd6..f1d5476c902209eebeae83d3cd30fe5b6226921c 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -483,6 +483,30 @@ int using_compacted_format(void) return boot_cpu_has(X86_FEATURE_XSAVES); } +/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ +int validate_xstate_header(const struct xstate_header *hdr) +{ + /* No unknown or supervisor features may be set */ + if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR)) + return -EINVAL; + + /* Userspace must use the uncompacted format */ + if (hdr->xcomp_bv) + return -EINVAL; + + /* + * If 'reserved' is shrunken to add a new field, make sure to validate + * that new field here! 
+ */ + BUILD_BUG_ON(sizeof(hdr->reserved) != 48); + + /* No reserved bits may be set */ + if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved))) + return -EINVAL; + + return 0; +} + static void __xstate_dump_leaves(void) { int i; @@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state) { struct fpu *fpu = ¤t->thread.fpu; - if (!fpu->fpstate_active) + if (!fpu->initialized) return NULL; /* * fpu__save() takes the CPU's xstate registers @@ -920,39 +944,130 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, } #endif /* ! CONFIG_ARCH_HAS_PKEYS */ +/* + * Weird legacy quirk: SSE and YMM states store information in the + * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP + * area is marked as unused in the xfeatures header, we need to copy + * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use. + */ +static inline bool xfeatures_mxcsr_quirk(u64 xfeatures) +{ + if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM))) + return false; + + if (xfeatures & XFEATURE_MASK_FP) + return false; + + return true; +} + /* * This is similar to user_regset_copyout(), but will not add offset to * the source data pointer or increment pos, count, kbuf, and ubuf. */ -static inline int xstate_copyout(unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf, - const void *data, const int start_pos, - const int end_pos) +static inline void +__copy_xstate_to_kernel(void *kbuf, const void *data, + unsigned int offset, unsigned int size, unsigned int size_total) { - if ((count == 0) || (pos < start_pos)) - return 0; + if (offset < size_total) { + unsigned int copy = min(size, size_total - offset); - if (end_pos < 0 || pos < end_pos) { - unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos)); + memcpy(kbuf + offset, data, copy); + } +} - if (kbuf) { - memcpy(kbuf + pos, data, copy); - } else { - if (__copy_to_user(ubuf + pos, data, copy)) - return -EFAULT; +/* + * Convert from kernel XSAVES compacted format to standard format and copy + * to a kernel-space ptrace buffer. + * + * It supports partial copy but pos always starts from zero. This is called + * from xstateregs_get() and there we check the CPU has XSAVES. 
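validate_xstate_header(), added above, reduces to three checks on a header that arrived from ptrace or sigreturn: no unknown or supervisor features, the standard (non-compacted) layout only, and all reserved bytes zero. A hedged standalone model of those rules; the two masks are assumed constants here, whereas the kernel computes xfeatures_mask at boot:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct xstate_header_model {
        uint64_t xfeatures;
        uint64_t xcomp_bv;
        uint8_t  reserved[48];
};

/* Assumed values for the sketch only. */
#define ASSUMED_XFEATURES_MASK  0x00000000000000ffULL
#define ASSUMED_SUPERVISOR_MASK 0x0000000000000100ULL

static int validate_header_model(const struct xstate_header_model *hdr)
{
        size_t i;

        /* Reject unknown features and anything supervisor-only. */
        if (hdr->xfeatures & (~ASSUMED_XFEATURES_MASK | ASSUMED_SUPERVISOR_MASK))
                return -EINVAL;

        /* User buffers must use the standard, uncompacted layout. */
        if (hdr->xcomp_bv)
                return -EINVAL;

        /* Reserved bytes must all be zero (memchr_inv() in the kernel). */
        for (i = 0; i < sizeof(hdr->reserved); i++)
                if (hdr->reserved[i])
                        return -EINVAL;

        return 0;
}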
+ */ +int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) +{ + unsigned int offset, size; + struct xstate_header header; + int i; + + /* + * Currently copy_regset_to_user() starts from pos 0: + */ + if (unlikely(offset_start != 0)) + return -EFAULT; + + /* + * The destination is a ptrace buffer; we put in only user xstates: + */ + memset(&header, 0, sizeof(header)); + header.xfeatures = xsave->header.xfeatures; + header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; + + /* + * Copy xregs_state->header: + */ + offset = offsetof(struct xregs_state, header); + size = sizeof(header); + + __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); + + for (i = 0; i < XFEATURE_MAX; i++) { + /* + * Copy only in-use xstates: + */ + if ((header.xfeatures >> i) & 1) { + void *src = __raw_xsave_addr(xsave, 1 << i); + + offset = xstate_offsets[i]; + size = xstate_sizes[i]; + + /* The next component has to fit fully into the output buffer: */ + if (offset + size > size_total) + break; + + __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); } + + } + + if (xfeatures_mxcsr_quirk(header.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); + } + + /* + * Fill xsave->i387.sw_reserved value for ptrace frame: + */ + offset = offsetof(struct fxregs_state, sw_reserved); + size = sizeof(xstate_fx_sw_bytes); + + __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total); + + return 0; +} + +static inline int +__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total) +{ + if (!size) + return 0; + + if (offset < size_total) { + unsigned int copy = min(size, size_total - offset); + + if (__copy_to_user(ubuf + offset, data, copy)) + return -EFAULT; } return 0; } /* * Convert from kernel XSAVES compacted format to standard format and copy - * to a ptrace buffer. It supports partial copy but pos always starts from + * to a user-space buffer. It supports partial copy but pos always starts from * zero. This is called from xstateregs_get() and there we check the CPU * has XSAVES. 
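copy_xstate_to_kernel() above (and copy_xstate_to_user() right after it) walks the in-use xfeatures bits and copies each component to its standard-format offset, stopping as soon as a component would no longer fit completely in the destination. A simplified sketch of that walk; the offset and size tables are invented stand-ins for xstate_offsets[] and xstate_sizes[], which the kernel reads from CPUID:

#include <stdint.h>
#include <string.h>

#define NCOMPONENTS 4

/* Hypothetical standard-format layout, for illustration only. */
static const unsigned int comp_offset[NCOMPONENTS] = {   0, 160, 576, 832 };
static const unsigned int comp_size[NCOMPONENTS]   = { 160, 416, 256, 192 };

void copy_components(void *dst, const void *src[NCOMPONENTS],
                     uint64_t xfeatures, unsigned int size_total)
{
        unsigned int i;

        for (i = 0; i < NCOMPONENTS; i++) {
                if (!((xfeatures >> i) & 1))
                        continue;       /* component not in use, skip it */

                /* The next component has to fit fully into the buffer. */
                if (comp_offset[i] + comp_size[i] > size_total)
                        break;

                memcpy((char *)dst + comp_offset[i], src[i], comp_size[i]);
        }
}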
*/ -int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, - void __user *ubuf, struct xregs_state *xsave) +int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) { unsigned int offset, size; int ret, i; @@ -961,7 +1076,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, /* * Currently copy_regset_to_user() starts from pos 0: */ - if (unlikely(pos != 0)) + if (unlikely(offset_start != 0)) return -EFAULT; /* @@ -977,8 +1092,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, offset = offsetof(struct xregs_state, header); size = sizeof(header); - ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count); - + ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total); if (ret) return ret; @@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, offset = xstate_offsets[i]; size = xstate_sizes[i]; - ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count); + /* The next component has to fit fully into the output buffer: */ + if (offset + size > size_total) + break; + ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total); if (ret) return ret; - - if (offset + size >= count) - break; } } + if (xfeatures_mxcsr_quirk(header.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total); + } + /* * Fill xsave->i387.sw_reserved value for ptrace frame: */ offset = offsetof(struct fxregs_state, sw_reserved); size = sizeof(xstate_fx_sw_bytes); - ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count); - + ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total); if (ret) return ret; @@ -1018,55 +1137,98 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, } /* - * Convert from a ptrace standard-format buffer to kernel XSAVES format - * and copy to the target thread. This is called from xstateregs_set() and - * there we check the CPU has XSAVES and a whole standard-sized buffer - * exists. + * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format + * and copy to the target thread. This is called from xstateregs_set(). */ -int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, - struct xregs_state *xsave) +int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf) { unsigned int offset, size; int i; - u64 xfeatures; - u64 allowed_features; + struct xstate_header hdr; offset = offsetof(struct xregs_state, header); - size = sizeof(xfeatures); + size = sizeof(hdr); - if (kbuf) { - memcpy(&xfeatures, kbuf + offset, size); - } else { - if (__copy_from_user(&xfeatures, ubuf + offset, size)) - return -EFAULT; + memcpy(&hdr, kbuf + offset, size); + + if (validate_xstate_header(&hdr)) + return -EINVAL; + + for (i = 0; i < XFEATURE_MAX; i++) { + u64 mask = ((u64)1 << i); + + if (hdr.xfeatures & mask) { + void *dst = __raw_xsave_addr(xsave, 1 << i); + + offset = xstate_offsets[i]; + size = xstate_sizes[i]; + + memcpy(dst, kbuf + offset, size); + } + } + + if (xfeatures_mxcsr_quirk(hdr.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + memcpy(&xsave->i387.mxcsr, kbuf + offset, size); } /* - * Reject if the user sets any disabled or supervisor features: + * The state that came in from userspace was user-state only. 
+ * Mask all the user states out of 'xfeatures': + */ + xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR; + + /* + * Add back in the features that came in from userspace: */ - allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR; + xsave->header.xfeatures |= hdr.xfeatures; - if (xfeatures & ~allowed_features) + return 0; +} + +/* + * Convert from a ptrace or sigreturn standard-format user-space buffer to + * kernel XSAVES format and copy to the target thread. This is called from + * xstateregs_set(), as well as potentially from the sigreturn() and + * rt_sigreturn() system calls. + */ +int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf) +{ + unsigned int offset, size; + int i; + struct xstate_header hdr; + + offset = offsetof(struct xregs_state, header); + size = sizeof(hdr); + + if (__copy_from_user(&hdr, ubuf + offset, size)) + return -EFAULT; + + if (validate_xstate_header(&hdr)) return -EINVAL; for (i = 0; i < XFEATURE_MAX; i++) { u64 mask = ((u64)1 << i); - if (xfeatures & mask) { + if (hdr.xfeatures & mask) { void *dst = __raw_xsave_addr(xsave, 1 << i); offset = xstate_offsets[i]; size = xstate_sizes[i]; - if (kbuf) { - memcpy(dst, kbuf + offset, size); - } else { - if (__copy_from_user(dst, ubuf + offset, size)) - return -EFAULT; - } + if (__copy_from_user(dst, ubuf + offset, size)) + return -EFAULT; } } + if (xfeatures_mxcsr_quirk(hdr.xfeatures)) { + offset = offsetof(struct fxregs_state, mxcsr); + size = MXCSR_AND_FLAGS_SIZE; + if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size)) + return -EFAULT; + } + /* * The state that came in from userspace was user-state only. * Mask all the user states out of 'xfeatures': @@ -1076,7 +1238,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, /* * Add back in the features that came in from userspace: */ - xsave->header.xfeatures |= xfeatures; + xsave->header.xfeatures |= hdr.xfeatures; return 0; } diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c index 50c89e8a95f2f49ecb1d9fd3321d7c86a2deb7eb..7ebcc4a744389d192df8d9c2078922a2ffbca74a 100644 --- a/arch/x86/kernel/io_delay.c +++ b/arch/x86/kernel/io_delay.c @@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id) * Quirk table for systems that misbehave (lock up, etc.) 
if port * 0x80 is used: */ -static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = { +static const struct dmi_system_id io_delay_0xed_port_dmi_table[] __initconst = { { .callback = dmi_io_delay_0xed_port, .ident = "Compaq Presario V6000", diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1f38d9a4d9deaf707af2b7e658bd9e9022ba8d75..d4eb450144fdb42c4f667b9f5b1a69d91aa78a2e 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -64,7 +64,7 @@ static void call_on_stack(void *func, void *stack) static inline void *current_stack(void) { - return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); + return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); } static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) @@ -88,7 +88,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) /* Save the next esp at the bottom of the stack */ prev_esp = (u32 *)irqstk; - *prev_esp = current_stack_pointer(); + *prev_esp = current_stack_pointer; if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); @@ -139,7 +139,7 @@ void do_softirq_own_stack(void) /* Push the previous esp onto the stack */ prev_esp = (u32 *)irqstk; - *prev_esp = current_stack_pointer(); + *prev_esp = current_stack_pointer; call_on_stack(__do_softirq, isp); } diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index 4b0592ca9e47b332d0ce67f8bcf5f555653587b2..8c1cc08f514f4362bdaefa933fda3cd3769b04f9 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -299,7 +299,7 @@ static int __init create_setup_data_nodes(struct kobject *parent) return 0; out_clean_nodes: - for (j = i - 1; j > 0; j--) + for (j = i - 1; j >= 0; j--) cleanup_setup_data_node(*(kobjp + j)); kfree(kobjp); out_setup_data_kobj: diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 874827b0d7ca2e3a0f79d40ee7c2b9e84e9bb583..e675704fa6f7d89ed919631284156717e342979f 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -140,7 +140,8 @@ void kvm_async_pf_task_wait(u32 token) n.token = token; n.cpu = smp_processor_id(); - n.halted = is_idle_task(current) || preempt_count() > 1; + n.halted = is_idle_task(current) || preempt_count() > 1 || + rcu_preempt_depth(); init_swait_queue_head(&n.wq); hlist_add_head(&n.link, &b->list); raw_spin_unlock(&b->lock); @@ -180,7 +181,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n) hlist_del_init(&n->link); if (n->halted) smp_send_reschedule(n->cpu); - else if (swait_active(&n->wq)) + else if (swq_has_sleeper(&n->wq)) swake_up(&n->wq); } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 54984b1426413879a82489c6f91b20501419e807..54180fa6f66fa8fe04c7528f054d642db522b617 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -150,7 +150,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d) /* * This is a single dmi_table handling all reboot quirks. 
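The ksysfs.c change buried in the hunk above is an off-by-one fix: when creating setup_data nodes fails at index i, the unwind loop now runs j down to 0 instead of stopping at 1, so the first node is freed as well. The general pattern, sketched with hypothetical create_node()/destroy_node() helpers:

/* Hypothetical helpers, declared only so the sketch is a complete unit. */
int create_node(int idx);
void destroy_node(int idx);

int create_all(int n)
{
        int i, j;

        for (i = 0; i < n; i++) {
                if (create_node(i) < 0)
                        goto out_clean_nodes;
        }
        return 0;

out_clean_nodes:
        /* Unwind everything created so far, including node 0. */
        for (j = i - 1; j >= 0; j--)
                destroy_node(j);
        return -1;
}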
*/ -static struct dmi_system_id __initdata reboot_dmi_table[] = { +static const struct dmi_system_id reboot_dmi_table[] __initconst = { /* Acer */ { /* Handle reboot issue on Acer Aspire one */ diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index e04442345fc0977cf73f2573f77b3df71310f0a8..4e188fda59612ed70b98342e8580cd1f311ed141 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, sp = (unsigned long) ka->sa.sa_restorer; } - if (fpu->fpstate_active) { + if (fpu->initialized) { sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32), &buf_fx, &math_size); *fpstate = (void __user *)sp; @@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, return (void __user *)-1L; /* save i387 and extended state */ - if (fpu->fpstate_active && + if (fpu->initialized && copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0) return (void __user *)-1L; @@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) /* * Ensure the signal handler starts with the new fpu state. */ - if (fpu->fpstate_active) + if (fpu->initialized) fpu__clear(fpu); } signal_setup_done(failed, ksig, stepping); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0854ff1692745adf4831e2deea0cb203977847d9..ad59edd84de70cfb978b8c0bc2ac38b892418b71 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -232,12 +232,6 @@ static void notrace start_secondary(void *unused) */ if (boot_cpu_has(X86_FEATURE_PCID)) __write_cr4(__read_cr4() | X86_CR4_PCIDE); - cpu_init(); - x86_cpuinit.early_percpu_clock_init(); - preempt_disable(); - smp_callin(); - - enable_start_cpu0 = 0; #ifdef CONFIG_X86_32 /* switch away from the initial page table */ @@ -245,6 +239,13 @@ static void notrace start_secondary(void *unused) __flush_tlb_all(); #endif + cpu_init(); + x86_cpuinit.early_percpu_clock_init(); + preempt_disable(); + smp_callin(); + + enable_start_cpu0 = 0; + /* otherwise gcc will move up smp_processor_id before the cpu_init */ barrier(); /* diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 34ea3651362ef8383fb456f24125ec4881f42668..67db4f43309ecadc86f4d7e95c6a0db0650a0d18 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -142,7 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) * from double_fault. 
*/ BUG_ON((unsigned long)(current_top_of_stack() - - current_stack_pointer()) >= THREAD_SIZE); + current_stack_pointer) >= THREAD_SIZE); preempt_enable_no_resched(); } diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 1ea3c0e1e3a9a3df6d6061e1e367abf632c50b79..0bc5c1315708e6aad8b409d377be41e2291efb40 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -59,7 +59,6 @@ static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature) { unsigned x86_leaf = x86_feature / 32; - BUILD_BUG_ON(!__builtin_constant_p(x86_leaf)); BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid)); BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 16bf6655aa858e2815135ada11a462329a0df386..a36254cbf7765aabf3e77c45e7cc82c3e3ff0fbb 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4102,10 +4102,12 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) { u64 maxphyaddr; - u32 eax = 0x80000008; + u32 eax, ebx, ecx, edx; - if (ctxt->ops->get_cpuid(ctxt, &eax, NULL, NULL, - NULL, false)) + eax = 0x80000008; + ecx = 0; + if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, + &edx, false)) maxphyaddr = eax & 0xff; else maxphyaddr = 36; @@ -5296,7 +5298,6 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { - register void *__sp asm(_ASM_SP); ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) @@ -5304,7 +5305,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), - [fastop]"+S"(fop), "+r"(__sp) + [fastop]"+S"(fop), ASM_CALL_CONSTRAINT : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index aaf10b6f5380d6d41531b25365b433eac9d42c48..69c5612be786fbc7909b6f81a8d0a29f2cd324a6 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1324,6 +1324,10 @@ static void apic_timer_expired(struct kvm_lapic *apic) atomic_inc(&apic->lapic_timer.pending); kvm_set_pending_timer(vcpu); + /* + * For x86, the atomic_inc() is serialized, thus + * using swait_active() is safe. 
+ */ if (swait_active(q)) swake_up(q); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 2c1cfe68a9af1e11761f1938deea2d6d1fe51ab5..0e68f0b3cbf72064f36bfedcb997305fde124911 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1200,7 +1200,6 @@ static void avic_init_vmcb(struct vcpu_svm *svm) vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK; vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT; vmcb->control.int_ctl |= AVIC_ENABLE_MASK; - svm->vcpu.arch.apicv_active = true; } static void init_vmcb(struct vcpu_svm *svm) @@ -1316,7 +1315,7 @@ static void init_vmcb(struct vcpu_svm *svm) set_intercept(svm, INTERCEPT_PAUSE); } - if (avic) + if (kvm_vcpu_apicv_active(&svm->vcpu)) avic_init_vmcb(svm); /* @@ -1600,6 +1599,23 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); } +static int avic_init_vcpu(struct vcpu_svm *svm) +{ + int ret; + + if (!kvm_vcpu_apicv_active(&svm->vcpu)) + return 0; + + ret = avic_init_backing_page(&svm->vcpu); + if (ret) + return ret; + + INIT_LIST_HEAD(&svm->ir_list); + spin_lock_init(&svm->ir_list_lock); + + return ret; +} + static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; @@ -1636,14 +1652,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) if (!hsave_page) goto free_page3; - if (avic) { - err = avic_init_backing_page(&svm->vcpu); - if (err) - goto free_page4; - - INIT_LIST_HEAD(&svm->ir_list); - spin_lock_init(&svm->ir_list_lock); - } + err = avic_init_vcpu(svm); + if (err) + goto free_page4; /* We initialize this flag to true to make sure that the is_running * bit would be set the first time the vcpu is loaded. @@ -4395,9 +4406,9 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) return; } -static bool svm_get_enable_apicv(void) +static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu) { - return avic; + return avic && irqchip_split(vcpu->kvm); } static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) @@ -4414,7 +4425,7 @@ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct vmcb *vmcb = svm->vmcb; - if (!avic) + if (!kvm_vcpu_apicv_active(&svm->vcpu)) return; vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK; @@ -5302,6 +5313,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, */ if (info->rep_prefix != REPE_PREFIX) goto out; + break; case SVM_EXIT_IOIO: { u64 exit_info; u32 bytes; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 699704d4bc9e7716e04da7298ec7f4491b436af1..a2b804e10c95d71ac19aebc83c6f89316cea658d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -200,6 +200,8 @@ struct loaded_vmcs { int cpu; bool launched; bool nmi_known_unmasked; + unsigned long vmcs_host_cr3; /* May not match real cr3 */ + unsigned long vmcs_host_cr4; /* May not match real cr4 */ struct list_head loaded_vmcss_on_cpu_link; }; @@ -600,8 +602,6 @@ struct vcpu_vmx { int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; - unsigned long vmcs_host_cr3; /* May not match real cr3 */ - unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; @@ -2202,46 +2202,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) struct pi_desc old, new; unsigned int dest; - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + /* + * In case of hot-plug or hot-unplug, we may have to undo + 
* vmx_vcpu_pi_put even if there is no assigned device. And we + * always keep PI.NDST up to date for simplicity: it makes the + * code easier, and CPU migration is not a fast path. + */ + if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) return; + /* + * First handle the simple case where no cmpxchg is necessary; just + * allow posting non-urgent interrupts. + * + * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change + * PI.NDST: pi_post_block will do it for us and the wakeup_handler + * expects the VCPU to be on the blocked_vcpu_list that matches + * PI.NDST. + */ + if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || + vcpu->cpu == cpu) { + pi_clear_sn(pi_desc); + return; + } + + /* The full case. */ do { old.control = new.control = pi_desc->control; - /* - * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there - * are two possible cases: - * 1. After running 'pre_block', context switch - * happened. For this case, 'sn' was set in - * vmx_vcpu_put(), so we need to clear it here. - * 2. After running 'pre_block', we were blocked, - * and woken up by some other guy. For this case, - * we don't need to do anything, 'pi_post_block' - * will do everything for us. However, we cannot - * check whether it is case #1 or case #2 here - * (maybe, not needed), so we also clear sn here, - * I think it is not a big deal. - */ - if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) { - if (vcpu->cpu != cpu) { - dest = cpu_physical_id(cpu); - - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - } + dest = cpu_physical_id(cpu); - /* set 'NV' to 'notification vector' */ - new.nv = POSTED_INTR_VECTOR; - } + if (x2apic_enabled()) + new.ndst = dest; + else + new.ndst = (dest << 8) & 0xFF00; - /* Allow posting non-urgent interrupts */ new.sn = 0; - } while (cmpxchg(&pi_desc->control, old.control, - new.control) != old.control); + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); } static void decache_tsc_multiplier(struct vcpu_vmx *vmx) @@ -5012,7 +5010,7 @@ static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_activ } } -static bool vmx_get_enable_apicv(void) +static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) { return enable_apicv; } @@ -5077,21 +5075,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; if (vcpu->mode == IN_GUEST_MODE) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - /* - * Currently, we don't support urgent interrupt, - * all interrupts are recognized as non-urgent - * interrupt, so we cannot post interrupts when - * 'SN' is set. + * The vector of interrupt to be delivered to vcpu had + * been set in PIR before this function. * - * If the vcpu is in guest mode, it means it is - * running instead of being scheduled out and - * waiting in the run queue, and that's the only - * case when 'SN' is set currently, warning if - * 'SN' is set. + * Following cases will be reached in this block, and + * we always send a notification event in all cases as + * explained below. + * + * Case 1: vcpu keeps in non-root mode. Sending a + * notification event posts the interrupt to vcpu. + * + * Case 2: vcpu exits to root mode and is still + * runnable. PIR will be synced to vIRR before the + * next vcpu entry. Sending a notification event in + * this case has no effect, as vcpu is not in root + * mode. + * + * Case 3: vcpu exits to root mode and is blocked. 
+ * vcpu_block() has already synced PIR to vIRR and + * never blocks vcpu if vIRR is not cleared. Therefore, + * a blocked vcpu here does not wait for any requested + * interrupts in PIR, and sending a notification event + * which has no effect is safe here. */ - WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); return true; @@ -5169,12 +5176,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) */ cr3 = __read_cr3(); vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ - vmx->host_state.vmcs_host_cr3 = cr3; + vmx->loaded_vmcs->vmcs_host_cr3 = cr3; /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = cr4_read_shadow(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ - vmx->host_state.vmcs_host_cr4 = cr4; + vmx->loaded_vmcs->vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 @@ -8344,12 +8351,14 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, - vmcs_readl(EXIT_QUALIFICATION), - vmx->idt_vectoring_info, - intr_info, - vmcs_read32(VM_EXIT_INTR_ERROR_CODE), - KVM_ISA_VMX); + if (vmx->nested.nested_run_pending) + return false; + + if (unlikely(vmx->fail)) { + pr_info_ratelimited("%s failed vm entry %x\n", __func__, + vmcs_read32(VM_INSTRUCTION_ERROR)); + return true; + } /* * The host physical addresses of some pages of guest memory @@ -8363,14 +8372,12 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) */ nested_mark_vmcs12_pages_dirty(vcpu); - if (vmx->nested.nested_run_pending) - return false; - - if (unlikely(vmx->fail)) { - pr_info_ratelimited("%s failed vm entry %x\n", __func__, - vmcs_read32(VM_INSTRUCTION_ERROR)); - return true; - } + trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, + vmcs_readl(EXIT_QUALIFICATION), + vmx->idt_vectoring_info, + intr_info, + vmcs_read32(VM_EXIT_INTR_ERROR_CODE), + KVM_ISA_VMX); switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: @@ -9036,7 +9043,6 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - register void *__sp asm(_ASM_SP); if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { @@ -9065,7 +9071,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif - "+r"(__sp) + ASM_CALL_CONSTRAINT : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), @@ -9265,15 +9271,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) { + if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) { vmcs_writel(HOST_CR3, cr3); - vmx->host_state.vmcs_host_cr3 = cr3; + vmx->loaded_vmcs->vmcs_host_cr3 = cr3; } cr4 = cr4_read_shadow(); - if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { + if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); - vmx->host_state.vmcs_host_cr4 = cr4; + vmx->loaded_vmcs->vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the @@ -9424,12 +9430,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | (1 << VCPU_EXREG_CR3)); vcpu->arch.regs_dirty = 0; - 
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); - - vmx->loaded_vmcs->launched = 1; - - vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); - /* * eager fpu is enabled if PKEY is supported and CR4 is switched * back on host, so it is safe to read guest PKRU from current @@ -9451,6 +9451,14 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) kvm_make_request(KVM_REQ_EVENT, vcpu); vmx->nested.nested_run_pending = 0; + vmx->idt_vectoring_info = 0; + + vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); + if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + return; + + vmx->loaded_vmcs->launched = 1; + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); vmx_complete_atomic_exit(vmx); vmx_recover_nmi_blocking(vmx); @@ -9581,6 +9589,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; + /* + * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR + * or POSTED_INTR_WAKEUP_VECTOR. + */ + vmx->pi_desc.nv = POSTED_INTR_VECTOR; + vmx->pi_desc.sn = 1; + return &vmx->vcpu; free_vmcs: @@ -9829,7 +9844,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, WARN_ON(!is_guest_mode(vcpu)); - if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { + if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && + !to_vmx(vcpu)->nested.nested_run_pending) { vmcs12->vm_exit_intr_error_code = fault->error_code; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | @@ -10525,6 +10541,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); + } else { +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING; +#endif } /* @@ -11388,46 +11409,30 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - u32 vm_inst_error = 0; /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); + /* + * The only expected VM-instruction error is "VM entry with + * invalid control field(s)." Anything else indicates a + * problem with L0. + */ + WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD)); + leave_guest_mode(vcpu); - prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, - exit_qualification); - if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, - vmcs12->vm_exit_msr_store_count)) - nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); + if (likely(!vmx->fail)) { + prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, + exit_qualification); - if (unlikely(vmx->fail)) - vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR); + if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, + vmcs12->vm_exit_msr_store_count)) + nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); + } vmx_switch_vmcs(vcpu, &vmx->vmcs01); - - /* - * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of - * the VM-exit interrupt information (valid interrupt) is always set to - * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need - * kvm_cpu_has_interrupt(). See the commit message for details. 
- */ - if (nested_exit_intr_ack_set(vcpu) && - exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && - kvm_cpu_has_interrupt(vcpu)) { - int irq = kvm_cpu_get_interrupt(vcpu); - WARN_ON(irq < 0); - vmcs12->vm_exit_intr_info = irq | - INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; - } - - trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, - vmcs12->exit_qualification, - vmcs12->idt_vectoring_info_field, - vmcs12->vm_exit_intr_info, - vmcs12->vm_exit_intr_error_code, - KVM_ISA_VMX); - vm_entry_controls_reset_shadow(vmx); vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); @@ -11436,8 +11441,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); - load_vmcs12_host_state(vcpu, vmcs12); - /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); @@ -11486,21 +11489,57 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - /* - * Exiting from L2 to L1, we're now back to L1 which thinks it just - * finished a VMLAUNCH or VMRESUME instruction, so we need to set the - * success or failure flag accordingly. - */ - if (unlikely(vmx->fail)) { - vmx->fail = 0; - nested_vmx_failValid(vcpu, vm_inst_error); - } else - nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; + + if (likely(!vmx->fail)) { + /* + * TODO: SDM says that with acknowledge interrupt on + * exit, bit 31 of the VM-exit interrupt information + * (valid interrupt) is always set to 1 on + * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't + * need kvm_cpu_has_interrupt(). See the commit + * message for details. + */ + if (nested_exit_intr_ack_set(vcpu) && + exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && + kvm_cpu_has_interrupt(vcpu)) { + int irq = kvm_cpu_get_interrupt(vcpu); + WARN_ON(irq < 0); + vmcs12->vm_exit_intr_info = irq | + INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; + } + + trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, + vmcs12->exit_qualification, + vmcs12->idt_vectoring_info_field, + vmcs12->vm_exit_intr_info, + vmcs12->vm_exit_intr_error_code, + KVM_ISA_VMX); + + load_vmcs12_host_state(vcpu, vmcs12); + + return; + } + + /* + * After an early L2 VM-entry failure, we're now back + * in L1 which thinks it just finished a VMLAUNCH or + * VMRESUME instruction, so we need to set the failure + * flag and the VM-instruction error field of the VMCS + * accordingly. + */ + nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + /* + * The emulated instruction was already skipped in + * nested_vmx_run, but the updated RIP was never + * written back to the vmcs01. 
+ */ + skip_emulated_instruction(vcpu); + vmx->fail = 0; } /* @@ -11671,6 +11710,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); } +static void __pi_post_block(struct kvm_vcpu *vcpu) +{ + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); + struct pi_desc old, new; + unsigned int dest; + + do { + old.control = new.control = pi_desc->control; + WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, + "Wakeup handler not enabled while the VCPU is blocked\n"); + + dest = cpu_physical_id(vcpu->cpu); + + if (x2apic_enabled()) + new.ndst = dest; + else + new.ndst = (dest << 8) & 0xFF00; + + /* set 'NV' to 'notification vector' */ + new.nv = POSTED_INTR_VECTOR; + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); + + if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_del(&vcpu->blocked_vcpu_list); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + vcpu->pre_pcpu = -1; + } +} + /* * This routine does the following things for vCPU which is going * to be blocked if VT-d PI is enabled. @@ -11686,7 +11756,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, */ static int pi_pre_block(struct kvm_vcpu *vcpu) { - unsigned long flags; unsigned int dest; struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); @@ -11696,34 +11765,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu) !kvm_vcpu_apicv_active(vcpu)) return 0; - vcpu->pre_pcpu = vcpu->cpu; - spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - list_add_tail(&vcpu->blocked_vcpu_list, - &per_cpu(blocked_vcpu_on_cpu, - vcpu->pre_pcpu)); - spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); + WARN_ON(irqs_disabled()); + local_irq_disable(); + if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { + vcpu->pre_pcpu = vcpu->cpu; + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_add_tail(&vcpu->blocked_vcpu_list, + &per_cpu(blocked_vcpu_on_cpu, + vcpu->pre_pcpu)); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + } do { old.control = new.control = pi_desc->control; - /* - * We should not block the vCPU if - * an interrupt is posted for it. - */ - if (pi_test_on(pi_desc) == 1) { - spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock_irqrestore( - &per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - vcpu->pre_pcpu = -1; - - return 1; - } - WARN((pi_desc->sn == 1), "Warning: SN field of posted-interrupts " "is set before blocking\n"); @@ -11745,10 +11800,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu) /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; - } while (cmpxchg(&pi_desc->control, old.control, - new.control) != old.control); + } while (cmpxchg64(&pi_desc->control, old.control, + new.control) != old.control); - return 0; + /* We should not block the vCPU if an interrupt is posted for it. 
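pi_pre_block() and the new __pi_post_block() above both republish the 64-bit posted-interrupt control word through a cmpxchg64() retry loop, so ndst, nv and sn always change together rather than through separate stores. A hedged userspace model of that publish-with-CAS pattern using C11 atomics; the field layout here is invented for the sketch, not the hardware descriptor format:

#include <stdatomic.h>
#include <stdint.h>

struct pi_control_model {
        _Atomic uint64_t control;
};

#define PI_NV_MASK      0xffull
#define PI_NDST_SHIFT   32

void set_dest_and_vector(struct pi_control_model *pi,
                         uint32_t dest, uint8_t vector)
{
        uint64_t old, new;

        old = atomic_load(&pi->control);
        do {
                /* Rebuild the whole word from the latest observed value. */
                new = old;
                new &= ~((0xffffffffull << PI_NDST_SHIFT) | PI_NV_MASK);
                new |= ((uint64_t)dest << PI_NDST_SHIFT) | vector;
                /* On failure 'old' is refreshed and the loop retries. */
        } while (!atomic_compare_exchange_weak(&pi->control, &old, new));
}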
*/ + if (pi_test_on(pi_desc) == 1) + __pi_post_block(vcpu); + + local_irq_enable(); + return (vcpu->pre_pcpu == -1); } static int vmx_pre_block(struct kvm_vcpu *vcpu) @@ -11764,44 +11824,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu) static void pi_post_block(struct kvm_vcpu *vcpu) { - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - struct pi_desc old, new; - unsigned int dest; - unsigned long flags; - - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + if (vcpu->pre_pcpu == -1) return; - do { - old.control = new.control = pi_desc->control; - - dest = cpu_physical_id(vcpu->cpu); - - if (x2apic_enabled()) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - - /* Allow posting non-urgent interrupts */ - new.sn = 0; - - /* set 'NV' to 'notification vector' */ - new.nv = POSTED_INTR_VECTOR; - } while (cmpxchg(&pi_desc->control, old.control, - new.control) != old.control); - - if(vcpu->pre_pcpu != -1) { - spin_lock_irqsave( - &per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock_irqrestore( - &per_cpu(blocked_vcpu_on_cpu_lock, - vcpu->pre_pcpu), flags); - vcpu->pre_pcpu = -1; - } + WARN_ON(irqs_disabled()); + local_irq_disable(); + __pi_post_block(vcpu); + local_irq_enable(); } static void vmx_post_block(struct kvm_vcpu *vcpu) @@ -11829,7 +11858,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; - int idx, ret = -EINVAL; + int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || @@ -11838,7 +11867,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); - BUG_ON(guest_irq >= irq_rt->nr_rt_entries); + if (guest_irq >= irq_rt->nr_rt_entries || + hlist_empty(&irq_rt->map[guest_irq])) { + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", + guest_irq, irq_rt->nr_rt_entries); + goto out; + } hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { if (e->type != KVM_IRQ_ROUTING_MSI) @@ -11881,12 +11915,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, if (set) ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); - else { - /* suppress notification event before unposting */ - pi_set_sn(vcpu_to_pi_desc(vcpu)); + else ret = irq_set_vcpu_affinity(host_irq, NULL); - pi_clear_sn(vcpu_to_pi_desc(vcpu)); - } if (ret < 0) { printk(KERN_INFO "%s: failed to update PI IRTE\n", diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6069af86da3b95884861f1e3a9f7d209ad729b3c..03869eb7fcd67b64e54dcb67acd559a70fd61139 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7225,16 +7225,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int r; sigset_t sigsaved; - fpu__activate_curr(fpu); + fpu__initialize(fpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { + if (kvm_run->immediate_exit) { + r = -EINTR; + goto out; + } kvm_vcpu_block(vcpu); kvm_apic_accept_events(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); r = -EAGAIN; + if (signal_pending(current)) { + r = -EINTR; + vcpu->run->exit_reason = KVM_EXIT_INTR; + ++vcpu->stat.signal_exits; + } goto out; } @@ -7971,7 +7980,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 
BUG_ON(vcpu->kvm == NULL); kvm = vcpu->kvm; - vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(); + vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); vcpu->arch.pv.pv_unhalted = false; vcpu->arch.emulate_ctxt.ops = &emulate_ops; if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) @@ -8452,6 +8461,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (vcpu->arch.pv.pv_unhalted) return true; + if (vcpu->arch.exception.pending) + return true; + if (kvm_test_request(KVM_REQ_NMI, vcpu) || (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu))) @@ -8619,6 +8631,13 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) sizeof(val)); } +static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val) +{ + + return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val, + sizeof(u32)); +} + void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { @@ -8646,6 +8665,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { struct x86_exception fault; + u32 val; if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ @@ -8653,15 +8673,26 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, kvm_del_async_pf_gfn(vcpu, work->arch.gfn); trace_kvm_async_pf_ready(work->arch.token, work->gva); - if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && - !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { - fault.vector = PF_VECTOR; - fault.error_code_valid = true; - fault.error_code = 0; - fault.nested_page_fault = false; - fault.address = work->arch.token; - fault.async_page_fault = true; - kvm_inject_page_fault(vcpu, &fault); + if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED && + !apf_get_user(vcpu, &val)) { + if (val == KVM_PV_REASON_PAGE_NOT_PRESENT && + vcpu->arch.exception.pending && + vcpu->arch.exception.nr == PF_VECTOR && + !apf_put_user(vcpu, 0)) { + vcpu->arch.exception.injected = false; + vcpu->arch.exception.pending = false; + vcpu->arch.exception.nr = 0; + vcpu->arch.exception.has_error_code = false; + vcpu->arch.exception.error_code = 0; + } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { + fault.vector = PF_VECTOR; + fault.error_code_valid = true; + fault.error_code = 0; + fault.nested_page_fault = false; + fault.address = work->arch.token; + fault.async_page_fault = true; + kvm_inject_page_fault(vcpu, &fault); + } } vcpu->arch.apf.halted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index d4a7df2205b8de2576bc7cdbf8455f2fa9987b66..220638a4cb94ea3065e1aaca9caaad7d447c8560 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c @@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info) struct desc_struct code_descriptor; struct fpu *fpu = ¤t->thread.fpu; - fpu__activate_curr(fpu); + fpu__initialize(fpu); #ifdef RE_ENTRANT_CHECKING if (emulating) { diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c076f710de4cb4e283125f706226fc059fbebb5b..c3521e2be39610c3d932c34126fadd5203f28958 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_refcount); +/* + * Handler for when we fail to restore a task's FPU state. We should never get + * here because the FPU state of a task using the FPU (task->thread.fpu.state) + * should always be valid. 
However, past bugs have allowed userspace to set + * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn(). + * These caused XRSTOR to fail when switching to the task, leaking the FPU + * registers of the task previously executing on the CPU. Mitigate this class + * of vulnerability by restoring from the initial state (essentially, zeroing + * out all the FPU registers) if we can't restore from the task's FPU state. + */ +bool ex_handler_fprestore(const struct exception_table_entry *fixup, + struct pt_regs *regs, int trapnr) +{ + regs->ip = ex_fixup_addr(fixup); + + WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.", + (void *)instruction_pointer(regs)); + + __copy_kernel_to_fpregs(&init_fpstate, -1); + return true; +} +EXPORT_SYMBOL_GPL(ex_handler_fprestore); + bool ex_handler_ext(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr) { diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b836a7274e123af88edd7b4f261da88609778be9..e2baeaa053a5b9feb76a1587fb4756786ffd76db 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really * faulted on a pte with its pkey=4. */ -static void fill_sig_info_pkey(int si_code, siginfo_t *info, - struct vm_area_struct *vma) +static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) { /* This is effectively an #ifdef */ if (!boot_cpu_has(X86_FEATURE_OSPKE)) @@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, * valid VMA, so we should never reach this without a * valid VMA. */ - if (!vma) { + if (!pkey) { WARN_ONCE(1, "PKU fault with no VMA passed in"); info->si_pkey = 0; return; @@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, * absolutely guranteed to be 100% accurate because of * the race explained above. */ - info->si_pkey = vma_pkey(vma); + info->si_pkey = *pkey; } static void force_sig_info_fault(int si_signo, int si_code, unsigned long address, - struct task_struct *tsk, struct vm_area_struct *vma, - int fault) + struct task_struct *tsk, u32 *pkey, int fault) { unsigned lsb = 0; siginfo_t info; @@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; - fill_sig_info_pkey(si_code, &info, vma); + fill_sig_info_pkey(si_code, &info, pkey); force_sig_info(si_signo, &info, tsk); } @@ -762,8 +760,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; unsigned long flags; int sig; - /* No context means no VMA to pass down */ - struct vm_area_struct *vma = NULL; /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs, X86_TRAP_PF)) { @@ -788,7 +784,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, /* XXX: hwpoison faults will set the wrong code. 
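ex_handler_fprestore(), introduced above, is a last-resort path: if restoring a task's saved FPU image faults, the handler warns and loads the init state instead, so stale registers from another task cannot leak. A hedged sketch of that fall-back shape; try_restore() is a hypothetical stand-in for the XRSTOR attempt:

#include <stdio.h>

struct fpstate_model { unsigned char image[64]; };

/* Hypothetical: returns 0 on success, nonzero if the image is rejected
 * (the analogue of XRSTOR faulting on bad reserved bits). */
int try_restore(const struct fpstate_model *img);

static const struct fpstate_model init_state;   /* all zeroes, known good */

void restore_or_reinit(const struct fpstate_model *saved)
{
        if (try_restore(saved) == 0)
                return;

        /* Saved image rejected: complain and fall back to the init state. */
        fprintf(stderr, "bad FPU state image, reinitializing\n");
        try_restore(&init_state);
}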
*/ force_sig_info_fault(signal, si_code, address, - tsk, vma, 0); + tsk, NULL, 0); } /* @@ -806,7 +802,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, if (is_vmalloc_addr((void *)address) && (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { - register void *__sp asm("rsp"); unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); /* * We're likely to be running with very little stack space @@ -821,7 +816,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, asm volatile ("movq %[stack], %%rsp\n\t" "call handle_stack_overflow\n\t" "1: jmp 1b" - : "+r" (__sp) + : ASM_CALL_CONSTRAINT : "D" ("kernel stack overflow (page fault)"), "S" (regs), "d" (address), [stack] "rm" (stack)); @@ -897,8 +892,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma, - int si_code) + unsigned long address, u32 *pkey, int si_code) { struct task_struct *tsk = current; @@ -946,7 +940,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; - force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); + force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0); return; } @@ -959,9 +953,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static noinline void bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma) + unsigned long address, u32 *pkey) { - __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); + __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR); } static void @@ -969,6 +963,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address, struct vm_area_struct *vma, int si_code) { struct mm_struct *mm = current->mm; + u32 pkey; + + if (vma) + pkey = vma_pkey(vma); /* * Something tried to access memory that isn't in our memory map.. @@ -976,7 +974,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, */ up_read(&mm->mmap_sem); - __bad_area_nosemaphore(regs, error_code, address, vma, si_code); + __bad_area_nosemaphore(regs, error_code, address, + (vma) ? 
&pkey : NULL, si_code); } static noinline void @@ -1019,7 +1018,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - struct vm_area_struct *vma, unsigned int fault) + u32 *pkey, unsigned int fault) { struct task_struct *tsk = current; int code = BUS_ADRERR; @@ -1046,13 +1045,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, code = BUS_MCEERR_AR; } #endif - force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); + force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault); } static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma, - unsigned int fault) + unsigned long address, u32 *pkey, unsigned int fault) { if (fatal_signal_pending(current) && !(error_code & PF_USER)) { no_context(regs, error_code, address, 0, 0); @@ -1076,9 +1074,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) - do_sigbus(regs, error_code, address, vma, fault); + do_sigbus(regs, error_code, address, pkey, fault); else if (fault & VM_FAULT_SIGSEGV) - bad_area_nosemaphore(regs, error_code, address, vma); + bad_area_nosemaphore(regs, error_code, address, pkey); else BUG(); } @@ -1268,6 +1266,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, struct mm_struct *mm; int fault, major = 0; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + u32 pkey; tsk = current; mm = tsk->mm; @@ -1468,9 +1467,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, return; } + pkey = vma_pkey(vma); up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { - mm_fault_error(regs, error_code, address, vma, fault); + mm_fault_error(regs, error_code, address, &pkey, fault); return; } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 3fcc8e01683bef96b219d65dbdd0315db1f60605..16c5f37933a2ae2d120402f1871af93872aebb07 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -10,6 +10,8 @@ * published by the Free Software Foundation. */ +#define DISABLE_BRANCH_PROFILING + #include #include #include diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index 2dab69a706ec062affe14eb7d9f10fd8538a9018..d7bc0eea20a5ed2fc8ec43ebc06429517cbb362b 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -18,7 +18,6 @@ #include /* boot_cpu_has, ... */ #include /* vma_pkey() */ -#include /* fpregs_active() */ int __execute_only_pkey(struct mm_struct *mm) { @@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm) */ preempt_disable(); if (!need_to_set_mm_pkey && - fpregs_active() && + current->thread.fpu.initialized && !__pkru_allows_read(read_pkru(), execute_only_pkey)) { preempt_enable(); return execute_only_pkey; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 1ab3821f9e2629df571544077d63be950361bc20..49d9778376d774c3d6f17eb47118176efb99b718 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -126,8 +126,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * isn't free. */ #ifdef CONFIG_DEBUG_VM - if (WARN_ON_ONCE(__read_cr3() != - (__sme_pa(real_prev->pgd) | prev_asid))) { + if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { /* * If we were to BUG here, we'd be very likely to kill * the system so hard that we don't see the call trace. 
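The tlb.c hunks above replace the open-coded '__sme_pa(pgd) | asid' expressions with build_cr3()/build_cr3_noflush(). A small model of what those helpers compose, assuming pgd_pa already includes any SME encryption mask; bit 63 is the PCID no-flush request and the ASID sits in the low 12 bits:

#include <stdint.h>

#define CR3_PCID_MASK   0xfffull
#define CR3_NOFLUSH_BIT (1ull << 63)    /* do not flush this PCID on load */

static inline uint64_t build_cr3_model(uint64_t pgd_pa, uint16_t asid)
{
        return pgd_pa | ((uint64_t)asid & CR3_PCID_MASK);
}

static inline uint64_t build_cr3_noflush_model(uint64_t pgd_pa, uint16_t asid)
{
        return build_cr3_model(pgd_pa, asid) | CR3_NOFLUSH_BIT;
}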
@@ -172,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, */ this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen, next_tlb_gen); - write_cr3(__sme_pa(next->pgd) | prev_asid); + write_cr3(build_cr3(next, prev_asid)); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } @@ -192,7 +191,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * mapped in the new pgd, we'll double-fault. Forcibly * map it. */ - unsigned int index = pgd_index(current_stack_pointer()); + unsigned int index = pgd_index(current_stack_pointer); pgd_t *pgd = next->pgd + index; if (unlikely(pgd_none(*pgd))) @@ -216,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - write_cr3(__sme_pa(next->pgd) | new_asid); + write_cr3(build_cr3(next, new_asid)); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH); + write_cr3(build_cr3_noflush(next, new_asid)); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); } @@ -265,7 +264,7 @@ void initialize_tlbstate_and_flush(void) !(cr4_read_shadow() & X86_CR4_PCIDE)); /* Force ASID 0 and force a TLB flush. */ - write_cr3(cr3 & ~CR3_PCID_MASK); + write_cr3(build_cr3(mm, 0)); /* Reinitialize tlbstate. */ this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 9bd115484745703791c6515b289938546d2478ab..0f5f60b14f4895a7fc1550e788a540bc20a111d1 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1092,7 +1092,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pciirq_dmi_table[] = { +static const struct dmi_system_id pciirq_dmi_table[] __initconst = { { .callback = fix_broken_hp_bios_irq9, .ident = "HP Pavilion N5400 Series Laptop", diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 4d68d59f457d5aa0a2913cc1c31b4272efc50aa0..84fcfde53f8f3f5bb4b85efc20ab106c419dcc11 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -428,7 +428,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d) return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); } -static struct dmi_system_id msr_save_dmi_table[] = { +static const struct dmi_system_id msr_save_dmi_table[] = { { .callback = msr_initialize_bdw, .ident = "BROADWELL BDX_EP", diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c index 28775f55bde240fc29fcc2030748e2078c0747c5..3c423dfcd78bf0d7f96883266f15b50fd8ffa55c 100644 --- a/arch/x86/um/os-Linux/registers.c +++ b/arch/x86/um/os-Linux/registers.c @@ -5,6 +5,7 @@ */ #include +#include #include #ifdef __i386__ #include @@ -31,7 +32,7 @@ int save_fp_registers(int pid, unsigned long *fp_regs) if (have_xstate_support) { iov.iov_base = fp_regs; - iov.iov_len = sizeof(struct _xstate); + iov.iov_len = FP_SIZE * sizeof(unsigned long); if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0) return -errno; return 0; @@ -51,10 +52,9 @@ int restore_fp_registers(int pid, unsigned long *fp_regs) { #ifdef PTRACE_SETREGSET struct iovec iov; - if (have_xstate_support) { iov.iov_base = fp_regs; - iov.iov_len = sizeof(struct _xstate); + iov.iov_len = FP_SIZE * sizeof(unsigned long); if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0) return -errno; 
return 0; @@ -125,13 +125,19 @@ int put_fp_registers(int pid, unsigned long *regs) void arch_init_registers(int pid) { #ifdef PTRACE_GETREGSET - struct _xstate fp_regs; + void * fp_regs; struct iovec iov; - iov.iov_base = &fp_regs; - iov.iov_len = sizeof(struct _xstate); + fp_regs = malloc(FP_SIZE * sizeof(unsigned long)); + if(fp_regs == NULL) + return; + + iov.iov_base = fp_regs; + iov.iov_len = FP_SIZE * sizeof(unsigned long); if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0) have_xstate_support = 1; + + free(fp_regs); #endif } #endif diff --git a/arch/x86/um/os-Linux/tls.c b/arch/x86/um/os-Linux/tls.c index 9d94b3b76c746ac6ca5f5e5eb39f0caf48096693..ed8ea90967dcc355ad51d5ae2d86b6c0a2341c67 100644 --- a/arch/x86/um/os-Linux/tls.c +++ b/arch/x86/um/os-Linux/tls.c @@ -37,7 +37,7 @@ void check_host_supports_tls(int *supports_tls, int *tls_min) continue; else if (errno == ENOSYS) *supports_tls = 0; - return; + return; } } diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c index 02250b2633b839c7fa56704d804dd4953f244121..3099c209546f8722c03f84c25c9615adee9c4095 100644 --- a/arch/x86/um/user-offsets.c +++ b/arch/x86/um/user-offsets.c @@ -51,7 +51,7 @@ void foo(void) DEFINE(HOST_ORIG_AX, ORIG_EAX); #else #ifdef FP_XSTATE_MAGIC1 - DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long)); + DEFINE_LONGS(HOST_FP_SIZE, 2696); #else DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long)); #endif diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 509f560bd0c6d4731cac96fc64296184e6818b9c..71495f1a86d72f7df1464ce3ffe805d880948bbd 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1238,21 +1238,16 @@ static void __init xen_pagetable_cleanhighmap(void) * from _brk_limit way up to the max_pfn_mapped (which is the end of * the ramdisk). We continue on, erasing PMD entries that point to page * tables - do note that they are accessible at this stage via __va. - * For good measure we also round up to the PMD - which means that if + * As Xen is aligning the memory end to a 4MB boundary, for good + * measure we also round up to PMD_SIZE * 2 - which means that if * anybody is using __ka address to the initial boot-stack - and try * to use it - they are going to crash. The xen_start_info has been * taken care of already in xen_setup_kernel_pagetable. */ addr = xen_start_info->pt_base; - size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE); + size = xen_start_info->nr_pt_frames * PAGE_SIZE; - xen_cleanhighmap(addr, addr + size); + xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2)); xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); -#ifdef DEBUG - /* This is superfluous and is not necessary, but you know what - * lets do it. The MODULES_VADDR -> MODULES_END should be clear of - * anything at this stage. */ - xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); -#endif } #endif @@ -2220,7 +2215,7 @@ static void __init xen_write_cr3_init(unsigned long cr3) * not the first page table in the page table pool. * Iterate through the initial page tables to find the real page table base. 
*/ -static phys_addr_t xen_find_pt_base(pmd_t *pmd) +static phys_addr_t __init xen_find_pt_base(pmd_t *pmd) { phys_addr_t pt_base, paddr; unsigned pmdidx; diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 30ee8c608853d4fb4b238a01319589d38ce018b7..5b0027d4ecc05cff651baec243a612d8deea20bc 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -208,11 +208,6 @@ struct mm_struct; /* Free all resources held by a thread. */ #define release_thread(thread) do { } while(0) -/* Copy and release all segment info associated with a VM */ -#define copy_segments(p, mm) do { } while(0) -#define release_segments(mm) do { } while(0) -#define forget_segments() do { } while (0) - extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) diff --git a/block/blk-core.c b/block/blk-core.c index aebe676225e6fdf360f39760cacf7326cf78cd9f..048be4aa602446f93e3e981f1ece5659e3d7287b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) kobject_init(&q->kobj, &blk_queue_ktype); +#ifdef CONFIG_BLK_DEV_IO_TRACE + mutex_init(&q->blk_trace_mutex); +#endif mutex_init(&q->sysfs_lock); spin_lock_init(&q->__queue_lock); diff --git a/block/bsg-lib.c b/block/bsg-lib.c index c82408c7cc3c91181f91d32c0147e18781506691..dbddff8174e57a92d09f5bdc9cc83ee6f7217eb9 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req) failjob_rls_rqst_payload: kfree(job->request_payload.sg_list); failjob_rls_job: - kfree(job); return -ENOMEM; } diff --git a/block/partition-generic.c b/block/partition-generic.c index 86e8fe1adcdb7f1b10fd5acd8f5c36f712736971..88c555db4e5deee60d852b78b70a20589cc2813f 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -112,7 +112,7 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - struct request_queue *q = dev_to_disk(dev)->queue; + struct request_queue *q = part_to_disk(p)->queue; unsigned int inflight[2]; int cpu; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index ffa9f4ccd9b455ef36c48d3ba57d4256574cd450..337cf382718ee3cd81ffe3cdc480052a6f634798 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -619,14 +619,14 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, struct af_alg_ctx *ctx = ask->private; struct af_alg_tsgl *sgl; struct scatterlist *sg; - unsigned int i, j; + unsigned int i, j = 0; while (!list_empty(&ctx->tsgl_list)) { sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); sg = sgl->sg; - for (i = 0, j = 0; i < sgl->cur; i++) { + for (i = 0; i < sgl->cur; i++) { size_t plen = min_t(size_t, used, sg[i].length); struct page *page = sg_page(sg + i); diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e93ab0c421b7b01a041726ad9563ce2d49..70018397e59abf10d125864f5770c41cd13f2021 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; - kzfree(drbg->V); - drbg->Vbuf = NULL; - kzfree(drbg->C); - drbg->Cbuf = NULL; + kzfree(drbg->Vbuf); + drbg->V = NULL; + kzfree(drbg->Cbuf); + drbg->C = NULL; kzfree(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 
e88fe3632dd6467e034ee6d8cd0aa319e37bccac..0972ec0e2eb8cb867868401dcf1af2446a7f9696 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -418,7 +418,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id video_dmi_table[] = { +static const struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 */ diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index bf22c29d25179e937523c026c8028f644f3379f6..11b113f8e36741aeb00e921ee64a5ae871f8d55f 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void) for (i = 0; i < wdat->entries; i++) { const struct acpi_generic_address *gas; struct resource_entry *rentry; - struct resource res; + struct resource res = {}; bool found; gas = &entries[i].register_region; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 077f9bad6f44a57bd01e930f9dd740001eadcd76..3c3a37b8503bd43db4200230309ddeff96eb4e00 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes) } ghes_do_proc(ghes, ghes->estatus); +out: + ghes_clear_estatus(ghes); + + if (rc == -ENOENT) + return rc; + /* * GHESv2 type HEST entries introduce support for error acknowledgment, * so only acknowledge the error if this support is present. */ - if (is_hest_type_generic_v2(ghes)) { - rc = ghes_ack_error(ghes->generic_v2); - if (rc) - return rc; - } -out: - ghes_clear_estatus(ghes); + if (is_hest_type_generic_v2(ghes)) + return ghes_ack_error(ghes->generic_v2); + return rc; } diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 037fd537bbf613a6b1a8108e962f3de0feafe340..995c4d8922b12eef963a9cc1cab591ee7b404b1d 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -30,7 +30,7 @@ #include "internal.h" -static struct dmi_system_id acpi_rev_dmi_table[] __initdata; +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst; /* * POLICY: If *anything* doesn't work, put it on the blacklist. @@ -89,7 +89,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d) } #endif -static struct dmi_system_id acpi_rev_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE /* * DELL XPS 13 (2015) switches sound between HDA and I2S diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 59f2f96fdb7e69fdee58c80a02ddb0f4edfba5de..4d0979e02a287d638e6f2b1765ad5fda8374f264 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id) } #endif -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { /* * Invoke DSDT corruption work-around on all Toshiba Satellite. 
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 @@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = { {} }; #else -static struct dmi_system_id dsdt_dmi_table[] __initdata = { +static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #endif diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index fdfae6f3c0b1db36c8413fd14831fdfef751da30..236b14324780a1c1c416c852957cfa732144b8fc 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1809,7 +1809,7 @@ static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id ec_dmi_table[] __initdata = { +static const struct dmi_system_id ec_dmi_table[] __initconst = { { ec_correct_ecdt, "MSI MS-171F", { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index 19cdd8a783a93218117349a6f1a553db23bf02ee..76998a51bf997bd6c5a3e42415a400cd5ce5582a 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -312,7 +312,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d) * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden * by acpi_osi=!Linux/acpi_osi=!Darwin command line options. */ -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = { { .callback = dmi_disable_osi_vista, .ident = "Fujitsu Siemens", diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c index f62c68e24317e500e45db7642e92d5ac8f401a54..e90b61f7d2db133e51b24a46ea4ef72eab2e88dd 100644 --- a/drivers/acpi/pci_slot.c +++ b/drivers/acpi/pci_slot.c @@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { +static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = { /* * Fujitsu Primequest machines will return 1023 to indicate an * error if the _SUN method is evaluated on SxFy objects that diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index 7cfbda4d7c512d19801c0813ce67ac083f445097..74f738cb6073646ac075bb033403b14e70cb6773 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id processor_idle_dmi_table[] __initdata = { +static const struct dmi_system_id processor_idle_dmi_table[] __initconst = { { set_no_mwait, "Extensa 5220", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index c1c216163de3f0ab5b044c52527ed10e3090a9e5..3fb8ff5134613fb044b68ab8f9e8b3c7a234f1b5 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -908,11 +908,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { const struct acpi_device *adev = to_acpi_device_node(fwnode); - struct acpi_device *child_adev = NULL; const struct list_head *head; struct list_head *next; if (!child || is_acpi_device_node(child)) { + struct acpi_device *child_adev; + if (adev) head = &adev->children; else @@ -922,8 +923,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, goto nondev; if (child) { - child_adev = to_acpi_device_node(child); - next = child_adev->node.next; + adev = to_acpi_device_node(child); + next = adev->node.next; if (next == head) { child = NULL; goto nondev; @@ -941,8 +942,8 @@ struct fwnode_handle 
*acpi_get_next_subnode(const struct fwnode_handle *fwnode, const struct acpi_data_node *data = to_acpi_data_node(fwnode); struct acpi_data_node *dn; - if (child_adev) - head = &child_adev->data.subnodes; + if (adev) + head = &adev->data.subnodes; else if (data) head = &data->data.subnodes; else @@ -1293,3 +1294,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops); DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops); const struct fwnode_operations acpi_static_fwnode_ops; + +bool is_acpi_device_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && + fwnode->ops == &acpi_device_fwnode_ops; +} +EXPORT_SYMBOL(is_acpi_device_node); + +bool is_acpi_data_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops; +} +EXPORT_SYMBOL(is_acpi_data_node); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 9fdd014759f8dc4f00a19d1d633cbc747bca67b5..6804ddab3052962d28e5e4954ab0c4e830675de4 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -160,7 +160,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id acpisleep_dmi_table[] __initdata = { +static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 1d0417b87cb7ccd50c5b748cfa8e6edba70447d5..551b71a24b8577317d0306df73fc0765577854e8 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) { return 0; } -static struct dmi_system_id thermal_dmi_table[] __initdata = { +static const struct dmi_system_id thermal_dmi_table[] __initconst = { /* * Award BIOS on this AOpen makes thermal control almost worthless. * http://bugzilla.kernel.org/show_bug.cgi?id=8842 diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index f046d21de57dd956819ad4227535667a0b74cbe9..1a5f6a157a57d7bf13b85ff63d5ed33e2198aeba 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -140,13 +140,10 @@ config EXTRA_FIRMWARE config EXTRA_FIRMWARE_DIR string "Firmware blobs root directory" depends on EXTRA_FIRMWARE != "" - default "firmware" + default "/lib/firmware" help This option controls the directory in which the kernel build system looks for the firmware files listed in the EXTRA_FIRMWARE option. - The default is firmware/ in the kernel source tree, but by changing - this option you can point it elsewhere, such as /lib/firmware/ or - some other directory containing the firmware files. 
config FW_LOADER_USER_HELPER bool diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index a39b2166b145616d2a6261da8da7252aa7e28b4d..744f64f43454314c35acc4341def004968aeff87 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) struct dma_coherent_mem *mem = rmem->priv; int ret; - if (!mem) - return -ENODEV; - - ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, - DMA_MEMORY_EXCLUSIVE, &mem); - - if (ret) { - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return ret; + if (!mem) { + ret = dma_init_coherent_memory(rmem->base, rmem->base, + rmem->size, + DMA_MEMORY_EXCLUSIVE, &mem); + if (ret) { + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return ret; + } } mem->use_dev_dma_pfn_offset = true; rmem->priv = mem; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index ea1732ed7a9d9071db73784c7e37d505ed8dea0a..770b1539a083d111ba1a65b569d3a75eaa81dd38 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.no_pm_callbacks = - (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && - (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && + !dev->bus->suspend && !dev->bus->resume)) && + (!dev->class || (pm_ops_is_empty(dev->class->pm) && + !dev->class->suspend && !dev->class->resume)) && (!dev->type || pm_ops_is_empty(dev->type->pm)) && (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && - (!dev->driver || pm_ops_is_empty(dev->driver->pm)); + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && + !dev->driver->suspend && !dev->driver->resume)); spin_unlock_irq(&dev->power.lock); } diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index a8cc14fd8ae49ff92cb2fc3dd593273aefd6e10d..a6de325306933b57928b8d16d741247e93274102 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, opp->available = availability_req; + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + /* Notify the change of the OPP availability */ if (availability_req) blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, @@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); + dev_pm_opp_put(opp); + goto put_table; + unlock: mutex_unlock(&opp_table->lock); +put_table: dev_pm_opp_put_opp_table(opp_table); return r; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index f850daeffba4417ae6b2cc69a0f4aa5bf065987e..277d43a83f53e8dc011184ebaf6c783c02f7395b 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } -static bool dev_pm_qos_invalid_request(struct device *dev, - struct dev_pm_qos_request *req) +static bool dev_pm_qos_invalid_req_type(struct device *dev, + enum dev_pm_qos_req_type type) { - return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE - && !dev->power.set_latency_tolerance); + return 
type == DEV_PM_QOS_LATENCY_TOLERANCE && + !dev->power.set_latency_tolerance; } static int __dev_pm_qos_add_request(struct device *dev, @@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev, { int ret = 0; - if (!dev || dev_pm_qos_invalid_request(dev, req)) + if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) return -EINVAL; if (WARN(dev_pm_qos_request_active(req), diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bbd0d186cfc00ff89f9378b5816ff15ee90b1005..2d7178f7754edddf06278e46e87cc9aefc6d4427 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff, if (!brd) return -ENODEV; - page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512); + page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT); if (!page) return -ENOSPC; *kaddr = page_address(page); diff --git a/drivers/block/loop.h b/drivers/block/loop.h index f68c1d50802fddb3289480124fc1459c9b66ea9e..1f3956702993883859070b2ef39a9b164978a213 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -67,10 +67,8 @@ struct loop_device { struct loop_cmd { struct kthread_work work; struct request *rq; - union { - bool use_aio; /* use AIO interface to handle I/O */ - atomic_t ref; /* only for aio */ - }; + bool use_aio; /* use AIO interface to handle I/O */ + atomic_t ref; /* only for aio */ long ret; struct kiocb iocb; struct bio_vec *bvec; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2aa87cbdede0ed19b77c8e4aaffea2c2d8146758..3684e21d543f23e95bda09fb895e8933b9bce186 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1194,6 +1194,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, if (!capable(CAP_SYS_ADMIN)) return -EPERM; + /* The block layer will pass back some non-nbd ioctls in case we have + * special handling for them, but we don't so just return an error. 
+ */ + if (_IOC_TYPE(cmd) != 0xab) + return -EINVAL; + mutex_lock(&nbd->config_lock); /* Don't allow ioctl operations on a nbd device that was created with diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index f4f866ee54bcb1ca05b8ac5c401e993e36c54b6a..d3a979e25724f6db32d939bb7a6f85ea95e85fca 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -1491,7 +1491,7 @@ static struct platform_driver sonypi_driver = { static struct platform_device *sonypi_platform_device; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id sonypi_dmi_table[] __initconst = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index fe597e6c55c40ca34f070f83a2af53e9b42a66de..1d6729be4cd6376727b9b210319bd77f03ccfb02 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, goto out; } - msleep(TPM_TIMEOUT); /* CHECK */ + tpm_msleep(TPM_TIMEOUT); rmb(); } while (time_before(jiffies, stop)); @@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip) dev_info( &chip->dev, HW_ERR "TPM command timed out during continue self test"); - msleep(delay_msec); + tpm_msleep(delay_msec); continue; } @@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip) } if (rc != TPM_WARN_DOING_SELFTEST) return rc; - msleep(delay_msec); + tpm_msleep(delay_msec); } while (--loops > 0); return rc; @@ -1085,7 +1085,7 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, } } else { do { - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; @@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev) */ if (rc != TPM_WARN_RETRY) break; - msleep(TPM_TIMEOUT_RETRY); + tpm_msleep(TPM_TIMEOUT_RETRY); } if (rc) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 04fbff2edbf38ce19845e5c1fdd1cd619293419d..2d5466a72e40f82b3272b857b74a1f822f82b966 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -50,7 +50,8 @@ enum tpm_const { enum tpm_timeout { TPM_TIMEOUT = 5, /* msecs */ - TPM_TIMEOUT_RETRY = 100 /* msecs */ + TPM_TIMEOUT_RETRY = 100, /* msecs */ + TPM_TIMEOUT_RANGE_US = 300 /* usecs */ }; /* TPM addresses */ @@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev); int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, wait_queue_head_t *queue, bool check_cancel); +static inline void tpm_msleep(unsigned int delay_msec) +{ + usleep_range(delay_msec * 1000, + (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US); +}; + struct tpm_chip *tpm_chip_find_get(int chip_num); __must_check int tpm_try_get_ops(struct tpm_chip *chip); void tpm_put_ops(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index f7f34b2aa98190c7474b3dd6c0f02b2abc032ec9..e1a41b788f081c829ad0d0353ac7d3622752c82f 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip) if (rc != TPM2_RC_TESTING) break; - msleep(delay_msec); + tpm_msleep(delay_msec); } return rc; diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a4ac63a21d8a051da3c336f5343b9dc72574b719..8f0a98dea327a5bc90d7fcf3529c3701d17abd6b 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = { 
SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) }; -static struct acpi_device_id crb_device_ids[] = { +static const struct acpi_device_id crb_device_ids[] = { {"MSFT0101", 0}, {"", 0}, }; diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index f01d083eced2fc056d6d74e4423a62f28470e302..25f6e2665385d063d47c50ad9f21e25591f828c9 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -32,26 +32,70 @@ static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; -static struct vio_device_id tpm_ibmvtpm_device_table[] = { +static const struct vio_device_id tpm_ibmvtpm_device_table[] = { { "IBM,vtpm", "IBM,vtpm"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); /** + * + * ibmvtpm_send_crq_word - Send a CRQ request + * @vdev: vio device struct + * @w1: pre-constructed first word of tpm crq (second word is reserved) + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1) +{ + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0); +} + +/** + * * ibmvtpm_send_crq - Send a CRQ request * * @vdev: vio device struct - * @w1: first word - * @w2: second word + * @valid: Valid field + * @msg: Type field + * @len: Length field + * @data: Data field + * + * The ibmvtpm crq is defined as follows: + * + * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * ----------------------------------------------------------------------- + * Word0 | Valid | Type | Length | Data + * ----------------------------------------------------------------------- + * Word1 | Reserved + * ----------------------------------------------------------------------- + * + * Which matches the following structure (on bigendian host): + * + * struct ibmvtpm_crq { + * u8 valid; + * u8 msg; + * __be16 len; + * __be32 data; + * __be64 reserved; + * } __attribute__((packed, aligned(8))); + * + * However, the value is passed in a register so just compute the numeric value + * to load into the register avoiding byteswap altogether. Endian only affects + * memory loads and stores - registers are internally represented the same. 
* * Return: - * 0 -Sucess + * 0 (H_SUCCESS) - Success * Non-zero - Failure */ -static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) +static int ibmvtpm_send_crq(struct vio_dev *vdev, + u8 valid, u8 msg, u16 len, u32 data) { - return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2); + u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) | + (u64)data; + return ibmvtpm_send_crq_word(vdev, w1); } /** @@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - struct ibmvtpm_crq crq; - __be64 *word = (__be64 *)&crq; int rc, sig; if (!ibmvtpm->rtce_buf) { @@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) spin_lock(&ibmvtpm->rtce_lock); ibmvtpm->res_len = 0; memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_TPM_COMMAND; - crq.len = cpu_to_be16(count); - crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); /* * set the processing flag before the Hcall, since we may get the @@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) */ ibmvtpm->tpm_processing_cmd = true; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), - be64_to_cpu(word[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, + count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); rc = 0; @@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip) */ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) { - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); @@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) */ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) { - struct ibmvtpm_crq crq; - u64 *buf = (u64 *) &crq; int rc; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_GET_VERSION; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_version failed rc=%d\n", rc); @@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) { int rc; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0); + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); @@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) { int rc; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_send_init failed rc=%d\n", rc); @@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - struct ibmvtpm_crq crq; - 
u64 *buf = (u64 *) &crq; int rc = 0; - crq.valid = (u8)IBMVTPM_VALID_CMD; - crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; - - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), - cpu_to_be64(buf[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, + IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "tpm_ibmvtpm_suspend failed rc=%d\n", rc); diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 3b1b9f9322d5547120f35f490b7ec7b802d2b085..d8f10047fbbaf1f2f965a795358e60be859fb759 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) /* check the status-register if wait_for_bit is set */ if (status & 1 << wait_for_bit) break; - msleep(TPM_MSLEEP_TIME); + tpm_msleep(TPM_MSLEEP_TIME); } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) @@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip) wait_and_send(chip, TPM_CTRL_WTX); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); - msleep(TPM_WTX_MSLEEP_TIME); + tpm_msleep(TPM_WTX_MSLEEP_TIME); } static void tpm_wtx_abort(struct tpm_chip *chip) @@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip) wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); number_of_wtx = 0; - msleep(TPM_WTX_MSLEEP_TIME); + tpm_msleep(TPM_WTX_MSLEEP_TIME); } static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index b617b2eeb080ad41034b7b7cbd1b21aaed524a3a..63bc6c3b949e51406527a484d356e24959ed9615 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l) if (access & TPM_ACCESS_VALID) return 0; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -1; } @@ -117,7 +117,7 @@ static int request_locality(struct tpm_chip *chip, int l) do { if (check_locality(chip, l)) return l; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); } return -1; @@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip) burstcnt = (value >> 8) & 0xFFFF; if (burstcnt) return burstcnt; - msleep(TPM_TIMEOUT); + tpm_msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -EBUSY; } @@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) priv->irq = irq; chip->flags |= TPM_CHIP_FLAG_IRQ; if (!priv->irq_tested) - msleep(1); + tpm_msleep(1); if (!priv->irq_tested) disable_interrupts(chip); priv->irq_tested = true; diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 17b861ea2626cb2d20320deae6db4818107fd71b..ae3167c28b129b6c3d3cc0fbc5c4a0da1a9abacd 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -10,25 +10,45 @@ #include #include #include -#include #include #include #include #include #include +#include static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); static int gic_timer_irq; static unsigned int gic_frequency; +static u64 notrace gic_read_count(void) +{ + unsigned int hi, hi2, lo; + + if (mips_cm_is64) + return read_gic_counter(); + + do { + hi = read_gic_counter_32h(); + lo = read_gic_counter_32l(); + hi2 = read_gic_counter_32h(); + } while (hi2 != hi); + + return (((u64) hi) << 32) + lo; +} + static int gic_next_event(unsigned long delta, struct 
clock_event_device *evt) { + unsigned long flags; u64 cnt; int res; cnt = gic_read_count(); cnt += (u64)delta; - gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); + local_irq_save(flags); + write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask))); + write_gic_vo_compare(cnt); + local_irq_restore(flags); res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; return res; } @@ -37,7 +57,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id) { struct clock_event_device *cd = dev_id; - gic_write_compare(gic_read_compare()); + write_gic_vl_compare(read_gic_vl_compare()); cd->event_handler(cd); return IRQ_HANDLED; } @@ -139,10 +159,15 @@ static struct clocksource gic_clocksource = { static int __init __gic_clocksource_init(void) { + unsigned int count_width; int ret; /* Set clocksource mask. */ - gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width()); + count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; + count_width >>= __fls(GIC_CONFIG_COUNTBITS); + count_width *= 4; + count_width += 32; + gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); /* Calculate a somewhat reasonable rating value. */ gic_clocksource.rating = 200 + gic_frequency / 10000000; @@ -159,7 +184,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) struct clk *clk; int ret; - if (!gic_present || !node->parent || + if (!mips_gic_present() || !node->parent || !of_device_is_compatible(node->parent, "mti,gic")) { pr_warn("No DT definition for the mips gic driver\n"); return -ENXIO; @@ -197,7 +222,7 @@ static int __init gic_clocksource_of_init(struct device_node *node) } /* And finally start the counter */ - gic_start_count(); + clear_gic_config(GIC_CONFIG_COUNTSTOP); return 0; } diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c index 6a20dc8b253f4c66981e39bec82f1e2634c50f41..9a7d7f0f23feae971f1f8d959b66578e9c59833e 100644 --- a/drivers/clocksource/numachip.c +++ b/drivers/clocksource/numachip.c @@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi return 0; } -static struct clock_event_device numachip2_clockevent = { +static const struct clock_event_device numachip2_clockevent __initconst = { .name = "numachip2", .rating = 400, .set_next_event = numachip2_set_next_event, diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index 2ff64d9d4fb31a3a10a85361d8f5b3b33039f76d..62d24690ba0205df997f588207bfe3e9b649c325 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void) return -readl(sched_clk_base + TIMER_VALUE); } -static int integrator_clocksource_init(unsigned long inrate, - void __iomem *base) +static int __init integrator_clocksource_init(unsigned long inrate, + void __iomem *base) { u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; unsigned long rate = inrate; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a020da7940d6395a326d5c2ddb6dd01c8f5835e3..a753c50e9e412ea96cb69fa454e81e084a3016be 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -106,6 +106,22 @@ static const struct of_device_id whitelist[] __initconst = { * platforms using "operating-points-v2" property. 
*/ static const struct of_device_id blacklist[] __initconst = { + { .compatible = "calxeda,highbank", }, + { .compatible = "calxeda,ecx-2000", }, + + { .compatible = "marvell,armadaxp", }, + + { .compatible = "nvidia,tegra124", }, + + { .compatible = "st,stih407", }, + { .compatible = "st,stih410", }, + + { .compatible = "sigma,tango4", }, + + { .compatible = "ti,am33xx", }, + { .compatible = "ti,am43", }, + { .compatible = "ti,dra7", }, + { } }; diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 9f013ed42977ef17d5606cebb38240a4aed03b15..80ac313e6c59c13fa28e9bf62cd616e5d25f9448 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -578,7 +578,7 @@ static int acer_cpufreq_pst(const struct dmi_system_id *d) * A BIOS update is all that can save them. * Mention this, and disable cpufreq. */ -static struct dmi_system_id powernow_dmi_table[] = { +static const struct dmi_system_id powernow_dmi_table[] = { { .callback = acer_cpufreq_pst, .ident = "Acer Aspire", diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index b29cd339846302402dcee8333765ad670555a2b3..4bf47de6101faca3896966cd4a3214d190568b5c 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) static const struct of_device_id ti_cpufreq_of_match[] = { { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, - { .compatible = "ti,am4372", .data = &am4x_soc_data, }, + { .compatible = "ti,am43", .data = &am4x_soc_data, }, { .compatible = "ti,dra7", .data = &dra7_soc_data }, {}, }; diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 7080c384ad5de656e8c345a336167624b236a2bb..52a75053ee0312146e0ab879bc351b46153faee7 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -104,13 +104,13 @@ static int __init arm_idle_init(void) ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); if (ret <= 0) { ret = ret ? : -ENODEV; - goto out_fail; + goto init_fail; } ret = cpuidle_register_driver(drv); if (ret) { pr_err("Failed to register cpuidle driver\n"); - goto out_fail; + goto init_fail; } /* @@ -149,6 +149,8 @@ static int __init arm_idle_init(void) } return 0; +init_fail: + kfree(drv); out_fail: while (--cpu >= 0) { dev = per_cpu(cpuidle_devices, cpu); diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c index 12b9145913de77ea0e37949b678a525793e7b98a..72b5e47286b4b7bb3747fcbf91623259febba0ab 100644 --- a/drivers/cpuidle/cpuidle-cps.c +++ b/drivers/cpuidle/cpuidle-cps.c @@ -37,7 +37,7 @@ static int cps_nc_enter(struct cpuidle_device *dev, * TODO: don't treat core 0 specially, just prevent the final core * TODO: remap interrupt affinity temporarily */ - if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT)) + if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT)) index = STATE_NC_WAIT; /* Select the appropriate cps_pm_state */ diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e36aeacd763596e76ce9eac46e3a1edd5f5443c8..1eb852765469e6633a0af44e3d6ed26ad52234ec 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -1,6 +1,7 @@ config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore driver backend" depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + select SOC_BUS help Enables the driver module for Freescale's Cryptographic Accelerator and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 
@@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API To compile this as a module, choose M here: the module will be called caamrng. -config CRYPTO_DEV_FSL_CAAM_IMX - def_bool SOC_IMX6 || SOC_IMX7D - depends on CRYPTO_DEV_FSL_CAAM - config CRYPTO_DEV_FSL_CAAM_DEBUG bool "Enable debug output in CAAM driver" depends on CRYPTO_DEV_FSL_CAAM diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index dacb53fb690e9583323500333de5d74be3af20f5..027e121c6f70aa9214a29692e8a3ef1e6d886496 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "compat.h" #include "regs.h" @@ -19,6 +20,8 @@ bool caam_little_end; EXPORT_SYMBOL(caam_little_end); bool caam_dpaa2; EXPORT_SYMBOL(caam_dpaa2); +bool caam_imx; +EXPORT_SYMBOL(caam_imx); #ifdef CONFIG_CAAM_QI #include "qi.h" @@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2); * i.MX targets tend to have clock control subsystems that can * enable/disable clocking to our device. */ -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX static inline struct clk *caam_drv_identify_clk(struct device *dev, char *clk_name) { - return devm_clk_get(dev, clk_name); + return caam_imx ? devm_clk_get(dev, clk_name) : NULL; } -#else -static inline struct clk *caam_drv_identify_clk(struct device *dev, - char *clk_name) -{ - return NULL; -} -#endif /* * Descriptor to instantiate RNG State Handle 0 in normal mode and @@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev) { int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; + static const struct soc_device_attribute imx_soc[] = { + {.family = "Freescale i.MX"}, + {}, + }; struct device *dev; struct device_node *nprop, *np; struct caam_ctrl __iomem *ctrl; @@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev) dev_set_drvdata(dev, ctrlpriv); nprop = pdev->dev.of_node; + caam_imx = (bool)soc_device_match(imx_soc); + /* Enable clocking */ clk = caam_drv_identify_clk(&pdev->dev, "ipg"); if (IS_ERR(clk)) { diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 2b5efff9ec3caa69a49bd48b1121aa680e08a2c1..17cfd23a38faa3fbe3152d5925821082f2c2dfb3 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -67,6 +67,7 @@ */ extern bool caam_little_end; +extern bool caam_imx; #define caam_to_cpu(len) \ static inline u##len caam##len ## _to_cpu(u##len val) \ @@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg) #else /* CONFIG_64BIT */ static inline void wr_reg64(void __iomem *reg, u64 data) { -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX - if (caam_little_end) { + if (!caam_imx && caam_little_end) { wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); wr_reg32((u32 __iomem *)(reg), data); - } else -#endif - { + } else { wr_reg32((u32 __iomem *)(reg), data >> 32); wr_reg32((u32 __iomem *)(reg) + 1, data); } @@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data) static inline u64 rd_reg64(void __iomem *reg) { -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX - if (caam_little_end) + if (!caam_imx && caam_little_end) return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | (u64)rd_reg32((u32 __iomem *)(reg))); - else -#endif - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | - (u64)rd_reg32((u32 __iomem *)(reg) + 1)); + + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | + (u64)rd_reg32((u32 __iomem *)(reg) + 1)); } #endif /* CONFIG_64BIT */ +static inline u64 cpu_to_caam_dma64(dma_addr_t value) +{ + if (caam_imx) + return 
(((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | + (u64)cpu_to_caam32(upper_32_bits(value))); + + return cpu_to_caam64(value); +} + +static inline u64 caam_dma64_to_cpu(u64 value) +{ + if (caam_imx) + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | + (u64)caam32_to_cpu(upper_32_bits(value))); + + return caam64_to_cpu(value); +} + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT -#ifdef CONFIG_SOC_IMX7D -#define cpu_to_caam_dma(value) \ - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ - (u64)cpu_to_caam32(upper_32_bits(value))) -#define caam_dma_to_cpu(value) \ - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \ - (u64)caam32_to_cpu(upper_32_bits(value))) -#else -#define cpu_to_caam_dma(value) cpu_to_caam64(value) -#define caam_dma_to_cpu(value) caam64_to_cpu(value) -#endif /* CONFIG_SOC_IMX7D */ +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) #else #define cpu_to_caam_dma(value) cpu_to_caam32(value) #define caam_dma_to_cpu(value) caam32_to_cpu(value) -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ - -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX -#define cpu_to_caam_dma64(value) \ - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ - (u64)cpu_to_caam32(upper_32_bits(value))) -#else -#define cpu_to_caam_dma64(value) cpu_to_caam64(value) -#endif +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ /* * jr_outentry diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d2207ac5ba19cdd357ab5bbae3a128d1fa92183e..5438552bc6d783b57a23763e64c59afb90c340ae 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct skcipher_request req; - struct safexcel_inv_result result = { 0 }; + struct safexcel_inv_result result = {}; int ring = ctx->base.ring; memset(&req, 0, sizeof(struct skcipher_request)); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3f819399cd95519a9956ed1d3ecba76fa2aa62b4..3980f946874fa08b64288b5d8b04bcac9134a0ad 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct ahash_request req; - struct safexcel_inv_result result = { 0 }; + struct safexcel_inv_result result = {}; int ring = ctx->base.ring; memset(&req, 0, sizeof(struct ahash_request)); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 79791c690858fb1cea4aa057fb726efe46160df9..dff88838dce762c33ca7e898420f051c3a2f80cb 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, req_ctx->swinit = 0; } else { desc->ptr[1] = zero_entry; - /* Indicate next op is not the first. */ - req_ctx->first = 0; } + /* Indicate next op is not the first. 
*/ + req_ctx->first = 0; /* HMAC key */ if (ctx->keylen) @@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, sg_count = edesc->src_nents ?: 1; if (is_sec1 && sg_count > 1) - sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length); + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); else sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, DMA_TO_DEVICE); @@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.hash.final = ahash_final; t_alg->algt.alg.hash.finup = ahash_finup; t_alg->algt.alg.hash.digest = ahash_digest; - t_alg->algt.alg.hash.setkey = ahash_setkey; + if (!strncmp(alg->cra_name, "hmac", 4)) + t_alg->algt.alg.hash.setkey = ahash_setkey; t_alg->algt.alg.hash.import = ahash_import; t_alg->algt.alg.hash.export = ahash_export; diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 3600ff7866462254d07a87741269231688b9262b..557b937035328e9376e8f4eee71beeaecefa0dc2 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -201,8 +201,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n) if (!dax_dev) return 0; - if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush) +#ifndef CONFIG_ARCH_HAS_PMEM_API + if (a == &dev_attr_write_cache.attr) return 0; +#endif return a->mode; } @@ -267,18 +269,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, } EXPORT_SYMBOL_GPL(dax_copy_from_iter); -void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t size) +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size); +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { - if (!dax_alive(dax_dev)) + if (unlikely(!dax_alive(dax_dev))) return; - if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)) + if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))) return; - if (dax_dev->ops->flush) - dax_dev->ops->flush(dax_dev, pgoff, addr, size); + arch_wb_cache_pmem(addr, size); } +#else +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) +{ +} +#endif EXPORT_SYMBOL_GPL(dax_flush); void dax_write_cache(struct dax_device *dax_dev, bool wc) diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index c463871609764d276f80be43e6bf587da5ea8862..c8f169bf2e27dc0264785f402759a3d15f307ef0 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8]) return local_hash_64(input, 32); } -static struct dmi_system_id gsmi_dmi_table[] __initdata = { +static const struct dmi_system_id gsmi_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c index 8c1bf6dbdaa643dc89636dbccf3300c6f6a63828..19bcbd10855bcadd8f7291f8048da55685c1ef0b 100644 --- a/drivers/firmware/google/memconsole-x86-legacy.c +++ b/drivers/firmware/google/memconsole-x86-legacy.c @@ -126,7 +126,7 @@ static bool memconsole_ebda_init(void) return false; } -static struct dmi_system_id memconsole_dmi_table[] __initdata = { +static const struct dmi_system_id memconsole_dmi_table[] __initconst = { { .ident = "Google Board", .matches = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 12e71bbfd2228ebeaccd9188142c276653edb938..103635ab784c989945dc1dbcba616b0fe011c8da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -76,7 +76,7 @@ extern int amdgpu_modeset; extern int amdgpu_vram_limit; extern int amdgpu_vis_vram_limit; -extern unsigned amdgpu_gart_size; +extern int amdgpu_gart_size; extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index fb6e5dbd5a03558d4ad06049b7fe581f18a801ec..309f2419c6d8aaed250f1fd2235cc45c3cdef1d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -155,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = { struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; - return (struct kfd2kgd_calls *)&kfd2kgd; } static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 269b835571eb07f31093574fb86f662e93eb4c2d..60d8bedb694db733a707405a0b50da4fe0c0265c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1079,6 +1079,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, GFP_KERNEL); p->num_post_dep_syncobjs = 0; + if (!p->post_dep_syncobjs) + return -ENOMEM; + for (i = 0; i < num_deps; ++i) { p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); if (!p->post_dep_syncobjs[i]) @@ -1150,7 +1153,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job); - amdgpu_cs_parser_fini(p, 0, true); trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); @@ -1208,10 +1210,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; r = amdgpu_cs_submit(&parser, cs); - if (r) - goto out; - return 0; out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1a459ac63df453b2cab796b8ed28fd4afff59522..e630d918fefc0993e540ac4ecb1734eeaaf3aec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1062,11 +1062,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size < 32) { + if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { /* gart size must be greater or equal to 32M */ dev_warn(adev->dev, "gart size (%d) too small\n", amdgpu_gart_size); - amdgpu_gart_size = 32; + amdgpu_gart_size = -1; } if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { @@ -2622,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, goto err; } - r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem); - if (r) { - DRM_ERROR("%p bind failed\n", bo->shadow); - goto err; - } - r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, fence, true); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e39ec981b11c85d11ca6250caf2c4ab7de35fd97..0f16986ec5bc44356c19e9a7988a57e4b35f05bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -76,7 +76,7 @@ int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; -unsigned amdgpu_gart_size = 256; +int 
amdgpu_gart_size = -1; /* auto */ int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; @@ -128,7 +128,7 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); +MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)"); module_param_named(gartsize, amdgpu_gart_size, uint, 0600); MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 94c1e2e8e34ca659717af06aa944c8e2675ba2a7..f4370081f6e60bafc3003e100e2eb41911d59951 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -56,18 +56,6 @@ * Common GART table functions. */ -/** - * amdgpu_gart_set_defaults - set the default gart_size - * - * @adev: amdgpu_device pointer - * - * Set the default gart_size based on parameters and available VRAM. - */ -void amdgpu_gart_set_defaults(struct amdgpu_device *adev) -{ - adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; -} - /** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index d4cce69362003241455a6d60bc57c3d648700fac..afbe803b1a13a93bce4d342d4c7f6bccce03c3aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -56,7 +56,6 @@ struct amdgpu_gart { const struct amdgpu_gart_funcs *gart_funcs; }; -void amdgpu_gart_set_defaults(struct amdgpu_device *adev); int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 9e05e257729f2e20dd91c4e81f6299e0800cc0e7..0d15eb7d31d7d0471b9253155c211bf5d3a641bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -108,10 +108,10 @@ bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) * * Allocate the address space for a node. 
*/ -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) +static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; @@ -143,12 +143,8 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, fpfn, lpfn, mode); spin_unlock(&mgr->lock); - if (!r) { + if (!r) mem->start = node->start; - if (&tbo->mem == mem) - tbo->offset = (tbo->mem.start << PAGE_SHIFT) + - tbo->bdev->man[tbo->mem.mem_type].gpu_offset; - } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 4bdd851f56d081310614f27dce5093255ecf7dfd..538e5f27d1205809ec293dee0aae8da5b5711e8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -221,8 +221,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) spin_lock_init(&adev->irq.lock); - /* Disable vblank irqs aggressively for power-saving */ - adev->ddev->vblank_disable_immediate = true; + if (!adev->enable_virtual_display) + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e7e899190befb9f74edd45a82eeefbed7529fde5..9e495da0bb03c8b4946aa84f1966a951c5639300 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -91,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; - places[c].lpfn = 0; + if (flags & AMDGPU_GEM_CREATE_SHADOW) + places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; + else + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_TT; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | @@ -446,17 +449,16 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, if (bo->shadow) return 0; - bo->flags |= AMDGPU_GEM_CREATE_SHADOW; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); - - amdgpu_ttm_placement_init(adev, &placement, - placements, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW); r = amdgpu_bo_create_restricted(adev, size, byte_align, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW, NULL, &placement, bo->tbo.resv, 0, @@ -484,30 +486,28 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; int r; - memset(&placements, 0, - (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + memset(&placements, 0, sizeof(placements)); + amdgpu_ttm_placement_init(adev, &placement, placements, + domain, parent_flags); - amdgpu_ttm_placement_init(adev, &placement, - placements, domain, flags); - - r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, init_value, bo_ptr); + r = 
amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, + parent_flags, sg, &placement, resv, + init_value, bo_ptr); if (r) return r; - if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { - if (!resv) { - r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL); - WARN_ON(r != 0); - } + if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { + if (!resv) + WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, + NULL)); r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); if (!resv) - ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock); + reservation_object_unlock((*bo_ptr)->tbo.resv); if (r) amdgpu_bo_unref(bo_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 6c5646b48d1a5fce145df441ce66098ea62e95d8..5ce65280b3960fabc9a774184ba20d39481c3d98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -170,6 +170,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned irq_type) { int r; + int sched_hw_submission = amdgpu_sched_hw_submission; + + /* Set the hw submission limit higher for KIQ because + * it's used for a number of gfx/compute tasks by both + * KFD and KGD which may have outstanding fences and + * it doesn't really use the gpu scheduler anyway; + * KIQ tasks get submitted directly to the ring. + */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + sched_hw_submission = max(sched_hw_submission, 256); if (ring->adev == NULL) { if (adev->num_rings >= AMDGPU_MAX_RINGS) @@ -178,8 +188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - r = amdgpu_fence_driver_init_ring(ring, - amdgpu_sched_hw_submission); + r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); if (r) return r; } @@ -218,8 +227,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - ring->ring_size = roundup_pow_of_two(max_dw * 4 * - amdgpu_sched_hw_submission); + ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission); ring->buf_mask = (ring->ring_size / 4) - 1; ring->ptr_mask = ring->funcs->support_64bit_ptrs ? 
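A minimal standalone sketch (not part of the patch; it assumes the module default of 2 for amdgpu_sched_hw_submission and a hypothetical max_dw of 1024) showing how the KIQ override in the amdgpu_ring.c hunk above changes the computed ring size:

/* Illustrative only: mirrors the sizing math, not the driver code itself. */
#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long r = 1;

	while (r < v)	/* smallest power of two >= v */
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long sched_hw_submission = 2;	/* assumed module default */
	unsigned long max_dw = 1024;		/* hypothetical ring depth */
	unsigned long kiq = sched_hw_submission > 256 ? sched_hw_submission : 256;

	/* Regular ring: 1024 * 4 * 2 -> 8192 bytes. */
	printf("normal ring_size: %lu bytes\n",
	       roundup_pow_of_two_ul(max_dw * 4 * sched_hw_submission));
	/* KIQ ring with the raised limit: 1024 * 4 * 256 -> 1048576 bytes. */
	printf("KIQ ring_size:    %lu bytes\n",
	       roundup_pow_of_two_ul(max_dw * 4 * kiq));
	return 0;
}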
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8b2c294f6f7999371a045aa985dd409ef12dd2d5..7ef6c28a34d991a2bba6f20224284072330c22d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -761,35 +761,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } -static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - uint64_t flags; - int r; - - spin_lock(&gtt->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); - gtt->offset = (u64)mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(&gtt->list, &gtt->adev->gtt_list); -error_gart_bind: - spin_unlock(&gtt->adev->gtt_list_lock); - return r; - -} - static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; + uint64_t flags; int r = 0; if (gtt->userptr) { @@ -809,9 +785,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - if (amdgpu_gtt_mgr_is_allocated(bo_mem)) - r = amdgpu_ttm_do_bind(ttm, bo_mem); + if (!amdgpu_gtt_mgr_is_allocated(bo_mem)) + return 0; + spin_lock(&gtt->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + + list_add_tail(&gtt->list, &gtt->adev->gtt_list); +error_gart_bind: + spin_unlock(&gtt->adev->gtt_list_lock); return r; } @@ -824,20 +815,39 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_tt *ttm = bo->ttm; + struct ttm_mem_reg tmp; + + struct ttm_placement placement; + struct ttm_place placements; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) return 0; - r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, - NULL, bo_mem); - if (r) { - DRM_ERROR("Failed to allocate GTT address space (%d)\n", r); + tmp = bo->mem; + tmp.mm_node = NULL; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + + r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); + if (unlikely(r)) return r; - } - return amdgpu_ttm_do_bind(ttm, bo_mem); + r = ttm_bo_move_ttm(bo, true, false, &tmp); + if (unlikely(r)) + ttm_bo_mem_put(bo, &tmp); + else + bo->offset = (bo->mem.start << PAGE_SHIFT) + + bo->bdev->man[bo->mem.mem_type].gpu_offset; + + return r; } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f22a4758719da1121bd9368b857cb14fa42bb958..43093bffa2cfa8f2d3a81592acb138908089f53b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -62,10 +62,6 @@ extern const
struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); -int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b9a5a77eedaf00c339ef15faac6e1c4302053f2f..bd20ff018512271b79d9dac6c40b21693ea54f6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -165,14 +165,6 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, unsigned i; int r; - if (parent->bo->shadow) { - struct amdgpu_bo *shadow = parent->bo->shadow; - - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); - if (r) - return r; - } - if (use_cpu_for_update) { r = amdgpu_bo_kmap(parent->bo, NULL); if (r) @@ -1277,7 +1269,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, /* In the case of a mixed PT the PDE must point to it*/ if (p->adev->asic_type < CHIP_VEGA10 || nptes != AMDGPU_VM_PTE_COUNT(p->adev) || - p->func == amdgpu_vm_do_copy_ptes || + p->src || !(flags & AMDGPU_PTE_VALID)) { dst = amdgpu_bo_gpu_offset(entry->bo); @@ -1294,9 +1286,23 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, entry->addr = (dst | flags); if (use_cpu_update) { + /* In case a huge page is replaced with a system + * memory mapping, p->pages_addr != NULL and + * amdgpu_vm_cpu_set_ptes would try to translate dst + * through amdgpu_vm_map_gart. But dst is already a + * GPU address (of the page table). Disable + * amdgpu_vm_map_gart temporarily. 
+ */ + dma_addr_t *tmp; + + tmp = p->pages_addr; + p->pages_addr = NULL; + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); pde = pd_addr + (entry - parent->entries) * 8; amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + + p->pages_addr = tmp; } else { if (parent->bo->shadow) { pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); @@ -1610,7 +1616,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to - * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update @@ -1624,7 +1629,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, - uint64_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, @@ -1679,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } if (pages_addr) { - if (flags == gtt_flags) - src = adev->gart.table_addr + - (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; - else - max_entries = min(max_entries, 16ull * 1024ull); + max_entries = min(max_entries, 16ull * 1024ull); addr = 0; } else if (flags & AMDGPU_PTE_VALID) { addr += adev->vm_manager.vram_base_offset; @@ -1728,10 +1728,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - uint64_t gtt_flags, flags; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; struct dma_fence *exclusive; + uint64_t flags; int r; if (clear || !bo_va->base.bo) { @@ -1751,15 +1751,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, exclusive = reservation_object_get_excl(bo->tbo.resv); } - if (bo) { + if (bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? 
- flags : 0; - } else { + else flags = 0x0; - gtt_flags = ~0x0; - } spin_lock(&vm->status_lock); if (!list_empty(&bo_va->base.vm_status)) @@ -1767,8 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, exclusive, - gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, flags, nodes, &bo_va->last_pt_update); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d228f5a990449f0b2c28314c972db2664c6bc1cd..dbbe986f90f29ffb6c77662866e336d8130d1af2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) NUM_BANKS(ADDR_SURF_2_BANK); for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); - } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) { + } else if (adev->asic_type == CHIP_OLAND) { + tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[7] = 
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + 
NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); + tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); + tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + NUM_BANKS(ADDR_SURF_8_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1); + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) + WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); + } else if (adev->asic_type == CHIP_HAINAN) { tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P2) | diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 832e592fcd0725b0f32406aec57d2fc466fe1179..fc260c13b1da4938443a972aecea21c2250d7e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4579,9 +4579,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_misc_reserved = 0x00000003; if (!(adev->flags & AMD_IS_APU)) { mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr - + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); } eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; @@ -4768,8 +4768,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); } else { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); 
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); @@ -4792,8 +4792,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); - ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; - ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 4f2788b61a08bd4db43d52d14ec9ab6e771b0e37..6c8040e616c4ed69f4f1addbf8282f2aea3ec81b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); @@ -143,9 +143,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 12b0c4cd7a5af801b32dd29067cbba3ee88b2e08..5be9c83dfcf7d6b9ff169e5cb7b5fcde192b49cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -332,7 +332,24 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_HAINAN: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_VERDE: /* UVD, VCE do not support GPUVM */ + case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */ + case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */ + case CHIP_OLAND: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index e42c1ad3af5e06c33a59939bc9abab48b2d9c406..eace9e7182c8a832d2b29984463f78fe05ca002c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -386,7 +386,27 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_TOPAZ: /* no MM engines */ + default: + adev->mc.gart_size = 256ULL << 20; + break; +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ 
+ case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ + case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ + case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ + case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ + adev->mc.gart_size = 1024ULL << 20; + break; +#endif + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 7ca2dae8237a0952da81414d5e14b53fa06e566e..3b3326daf32b9b09b25914a58d023b391ca8e52a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -562,7 +562,26 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_POLARIS11: /* all engines support GPUVM */ + case CHIP_POLARIS10: /* all engines support GPUVM */ + case CHIP_POLARIS12: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_TONGA: /* UVD, VCE do not support GPUVM */ + case CHIP_FIJI: /* UVD, VCE do not support GPUVM */ + case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */ + case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2769c2b3b56e8f5ff8e8bbfa0f2cefe86569fe1f..d04d0b123212035a15f02c964a93271f4a8c64a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -499,7 +499,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - amdgpu_gart_set_defaults(adev); + /* set the gart size */ + if (amdgpu_gart_size == -1) { + switch (adev->asic_type) { + case CHIP_VEGA10: /* all engines support GPUVM */ + default: + adev->mc.gart_size = 256ULL << 20; + break; + case CHIP_RAVEN: /* DCE SG support */ + adev->mc.gart_size = 1024ULL << 20; + break; + } + } else { + adev->mc.gart_size = (u64)amdgpu_gart_size << 20; + } + gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 4395a4f12149c37db23404fa89077d783aa88c06..74cb647da30e08c28260937a2df7d02f13aeae9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp, field; + uint32_t tmp; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); @@ -157,9 +157,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); - field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); 
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index e4a8c2e52cb2c14ab566d015b178a2e6bd243cac..660b3fbade4194f796ebe0be8e4fc7f7e9c46109 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep, int err = 0; dev = kfd_device_by_id(args->gpu_id); + if (!dev) + return -EINVAL; dev->kfd2kgd->get_tile_config(dev->kgd, &config); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 5979158c3f7b93e96627af2c03cd814228b0fe01..944abfad39c1f67447ca720d5e47c4b086336a82 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd, struct kfd_event *ev) { if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { - pr_warn("Signal event wasn't created because limit was reached\n"); + if (!p->signal_event_limit_reached) { + pr_warn("Signal event wasn't created because limit was reached\n"); + p->signal_event_limit_reached = true; + } return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 681b639f51330b78af84854880cae988fd90b252..ed71ad40e8f797ca3c7b7d4f129f5e9fda382d27 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq) { if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) kq->mqd->destroy_mqd(kq->mqd, - NULL, - false, + kq->queue->mqd, + KFD_PREEMPT_TYPE_WAVEFRONT_RESET, QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, kq->queue->pipe, kq->queue->queue); @@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq, uint32_t wptr, rptr; unsigned int *queue_address; + /* When rptr == wptr, the buffer is empty. + * When rptr == wptr + 1, the buffer is full. + * It is always rptr that advances to the position of wptr, rather than + * the opposite. So we can only use up to queue_size_dwords - 1 dwords. + */ rptr = *kq->rptr_kernel; wptr = *kq->wptr_kernel; queue_address = (unsigned int *)kq->pq_kernel_addr; @@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq, pr_debug("wptr: %d\n", wptr); pr_debug("queue_address 0x%p\n", queue_address); - available_size = (rptr - 1 - wptr + queue_size_dwords) % + available_size = (rptr + queue_size_dwords - 1 - wptr) % queue_size_dwords; - if (packet_size_in_dwords >= queue_size_dwords || - packet_size_in_dwords >= available_size) { + if (packet_size_in_dwords > available_size) { /* * make sure calling functions know * acquire_packet_buffer() failed @@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq, } if (wptr + packet_size_in_dwords >= queue_size_dwords) { + /* make sure after rolling back to position 0, there is + * still enough space. 
+ */ + if (packet_size_in_dwords >= rptr) { + *buffer_ptr = NULL; + return -ENOMEM; + } + /* fill nops, roll back and start at position 0 */ while (wptr > 0) { queue_address[wptr] = kq->nop_packet; wptr = (wptr + 1) % queue_size_dwords; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b397ec726400c2a52533356363b36bfd275dfb39..b87e96cee5facfea112a874f4e69bfad52f3b70b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -521,6 +521,7 @@ struct kfd_process { struct list_head signal_event_pages; u32 next_nonsignal_event_id; size_t signal_event_count; + bool signal_event_limit_reached; }; /** diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 1cae95e2b13adedfb19760279861fae827d057a7..03bec765b03d949de8bdac3cd8db62f8c684c92a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, int num_queues = 0; struct queue *cur; - memset(&q_properties, 0, sizeof(struct queue_properties)); memcpy(&q_properties, properties, sizeof(struct queue_properties)); q = NULL; kq = NULL; diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h index ca93b5160ba6cfb34b5cc4cc4f409bda66ba0a35..3e606a761d0e6466be9e40002e4fab09f2ca4a5c 100644 --- a/drivers/gpu/drm/amd/include/vi_structs.h +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -419,8 +419,8 @@ struct vi_mqd_allocation { struct vi_mqd mqd; uint32_t wptr_poll_mem; uint32_t rptr_report_mem; - uint32_t dyamic_cu_mask; - uint32_t dyamic_rb_mask; + uint32_t dynamic_cu_mask; + uint32_t dynamic_rb_mask; }; struct cz_mqd { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 9d71a259d97d46fc8008e288f71605f3cb5e23ff..f8f02e70b8bc06974b52dda715755de77aaaeae6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1558,7 +1558,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) */ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, - uint32_t gfx_clock, PllSetting_t *current_gfxclk_level) + uint32_t gfx_clock, PllSetting_t *current_gfxclk_level, + uint32_t *acg_freq) { struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); @@ -1609,6 +1610,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, cpu_to_le16(dividers.usPll_ss_slew_frac); current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); + *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */ + return 0; } @@ -1689,7 +1692,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) for (i = 0; i < dpm_table->count; i++) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[i].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; } @@ -1698,7 +1702,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) while (i < NUM_GFXCLK_DPM_LEVELS) { result = vega10_populate_single_gfx_level(hwmgr, dpm_table->dpm_levels[j].value, - &(pp_table->GfxclkLevel[i])); + &(pp_table->GfxclkLevel[i]), + &(pp_table->AcgFreqTable[i])); if (result) return result; i++; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index f6d6c61f796a463abfd8dfb2be0129c6c2579740..2818c98ff5ca907e317ec03c05e91fc4635554b4 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -315,10 +315,12 @@ typedef struct { uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; GbVdroopTable_t AcgBtcGbVdroopTable; QuadraticInt_t AcgAvfsGb; - uint32_t Reserved[4]; + + /* ACG Frequency Table, in Mhz */ + uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS]; /* Padding - ignore */ - uint32_t MmHubPadding[7]; /* SMU internal use */ + uint32_t MmHubPadding[3]; /* SMU internal use */ } PPTable_t; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 76347ff6d6554b447e8e066c7ac81ef85562d4bc..c49a6f22002f77c9205d6e1973ee405e236f6008 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -380,7 +380,8 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->num_register_entries = 0; } - if (fw_type == UCODE_ID_RLC_G) + if ((fw_type == UCODE_ID_RLC_G) + || (fw_type == UCODE_ID_CP_MEC)) entry->flags = 1; else entry->flags = 0; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8b9221d64b43da04c4268a2c986b8..97c94f9683fa047392ba62f128586e0b7e4492bc 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -205,17 +205,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity) { struct amd_sched_rq *rq = entity->rq; + int r; if (!amd_sched_entity_is_initialized(sched, entity)) return; - /** * The client will not queue more IBs during this fini, consume existing - * queued IBs + * queued IBs or discard them on SIGKILL */ - wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); - + if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) + r = -ERESTARTSYS; + else + r = wait_event_killable(sched->job_scheduled, + amd_sched_entity_is_idle(entity)); amd_sched_rq_remove_entity(rq, entity); + if (r) { + struct amd_sched_job *job; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. 
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + while (kfifo_out(&entity->job_queue, &job, sizeof(job))) + sched->ops->free_job(job); + + } kfifo_free(&entity->job_queue); } diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index db6aeec50b821eb119fc0e0b6d5885a26350c56f..2e5e089dd9123fa324e1a7d343d8bb47a200739b 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -319,7 +319,7 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", crtc->base.id, crtc->name); - states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY); + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); if (!states) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 80e62f6693215f1deaf9d40af232f56d800d9ac3..0ef9011a1856358b2bc6104973888a69f76db8e7 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -111,7 +111,7 @@ ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, void *data; int ret; - data = kmalloc(msg.len, GFP_TEMPORARY); + data = kmalloc(msg.len, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c index 7d1b0f011d33e1d796890bb71c88458b0ae6d054..935653eb3616c997064fb166cb0d1ce8e839e4b7 100644 --- a/drivers/gpu/drm/drm_scdc_helper.c +++ b/drivers/gpu/drm/drm_scdc_helper.c @@ -102,7 +102,7 @@ ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, void *data; int err; - data = kmalloc(1 + size, GFP_TEMPORARY); + data = kmalloc(1 + size, GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5a634594a6cea2d2be20e19a0e359229a860775c..57881167ccd22c9c16df18e1d93401edef2e1f82 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { void etnaviv_gem_free_object(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_drm_private *priv = obj->dev->dev_private; struct etnaviv_vram_mapping *mapping, *tmp; /* object should not be active */ WARN_ON(is_active(etnaviv_obj)); + mutex_lock(&priv->gem_lock); list_del(&etnaviv_obj->gem_node); + mutex_unlock(&priv->gem_lock); list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, obj_node) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index a7ff2e4c00d26b35ca7e80bccf8d24317b642399..46dfe0737f438d37e4e65dec320e9a7e773e3856 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -37,7 +37,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, struct etnaviv_gem_submit *submit; size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (submit) { submit->dev = dev; submit->gpu = gpu; @@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, cmdbuf->user_size = ALIGN(args->stream_size, 8); ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); - if (ret == 0) - cmdbuf = NULL; + if (ret) + goto out; + + cmdbuf = NULL; if (args->flags & 
ETNA_SUBMIT_FENCE_FD_OUT) { /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 730b8d9db18704c42346b9553c8d7ab737493392..6be5b53c3b279f42ee6dbcc445173fa539721ce5 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index b1f7299600f040947240618cf2cfc262d1847b70..e651a58c18cf2d5b743439776dd62d50b64f8a7a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -168,23 +168,19 @@ static struct drm_driver exynos_drm_driver = { static int exynos_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct exynos_drm_private *private = drm_dev->dev_private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - drm_connector_list_iter_begin(drm_dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - int old_dpms = connector->dpms; - - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); - - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; + drm_kms_helper_poll_disable(drm_dev); + exynos_drm_fbdev_suspend(drm_dev); + private->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(private->suspend_state)) { + exynos_drm_fbdev_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(private->suspend_state); } - drm_connector_list_iter_end(&conn_iter); return 0; } @@ -192,22 +188,14 @@ static int exynos_drm_suspend(struct device *dev) static int exynos_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct exynos_drm_private *private = drm_dev->dev_private; if (pm_runtime_suspended(dev) || !drm_dev) return 0; - drm_connector_list_iter_begin(drm_dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->funcs->dpms) { - int dpms = connector->dpms; - - connector->dpms = DRM_MODE_DPMS_OFF; - connector->funcs->dpms(connector, dpms); - } - } - drm_connector_list_iter_end(&conn_iter); + drm_atomic_helper_resume(drm_dev, private->suspend_state); + exynos_drm_fbdev_resume(drm_dev); + drm_kms_helper_poll_enable(drm_dev); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cf131c2aa23e431f13a44fe1985bad58c32da250..f8bae4cb4823653562de92a20bec51f87747d07f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -202,6 +202,7 @@ struct drm_exynos_file_private { */ struct exynos_drm_private { struct drm_fb_helper *fb_helper; + struct drm_atomic_state *suspend_state; struct device *dma_dev; void *mapping; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index c3a068409b484fda04bbf837626f011beb875c1f..dfb66ecf417b5c8ba9bd26b8966a52a5f14b4aaa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -18,6 +18,8 @@ #include #include +#include + #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_fbdev.h" @@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev) 
drm_fb_helper_hotplug_event(fb_helper); } + +void exynos_drm_fbdev_suspend(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + + console_lock(); + drm_fb_helper_set_suspend(private->fb_helper, 1); + console_unlock(); +} + +void exynos_drm_fbdev_resume(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + + console_lock(); + drm_fb_helper_set_suspend(private->fb_helper, 0); + console_unlock(); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h index 330eef87f7180b1b726ff41933874a6568b0474d..645d1bb7f665faed0dfa30bf3ab36c090a2c1c1f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h @@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev); void exynos_drm_fbdev_fini(struct drm_device *dev); void exynos_drm_fbdev_restore_mode(struct drm_device *dev); void exynos_drm_output_poll_changed(struct drm_device *dev); +void exynos_drm_fbdev_suspend(struct drm_device *drm); +void exynos_drm_fbdev_resume(struct drm_device *drm); #else @@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev) #define exynos_drm_output_poll_changed (NULL) +static inline void exynos_drm_fbdev_suspend(struct drm_device *drm) +{ +} + +static inline void exynos_drm_fbdev_resume(struct drm_device *drm) +{ +} + #endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 214fa5e51963898ee5288f98d536681cdbebdcd6..0109ff40b1db2a95da6997e982fe0adaf4496196 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct drm_display_mode *m; + struct drm_connector_list_iter conn_iter; int mode_ok; drm_mode_set_crtcinfo(adjusted_mode, 0); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder == encoder) break; } + if (connector) + drm_connector_get(connector); + drm_connector_list_iter_end(&conn_iter); - if (connector->encoder != encoder) + if (!connector) return true; mode_ok = hdmi_mode_valid(connector, adjusted_mode); if (mode_ok == MODE_OK) - return true; + goto cleanup; /* * Find the most suitable mode and copy it to adjusted_mode. 
@@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, } } +cleanup: + drm_connector_put(connector); + return true; } diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 40af17ec6312533d4080cc1581faa4683a0405b9..ff3154fe6588b6aa48d4c2618d65f1f2d912b137 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - unsigned int bar_index = - (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8; u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; int ret = 0; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; + struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; - if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX)) - return -EINVAL; - + /* + * Power-up software can determine how much address + * space the device requires by writing a value of + * all 1's to the register and then reading the value + * back. The device will return 0's in all don't-care + * address bits. + */ if (new == 0xffffffff) { - /* - * Power-up software can determine how much address - * space the device requires by writing a value of - * all 1's to the register and then reading the value - * back. The device will return 0's in all don't-care - * address bits. - */ - size = vgpu->cfg_space.bar[bar_index].size; - if (lo) { - new = rounddown(new, size); - } else { - u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)]; - /* for 32bit mode bar it returns all-0 in upper 32 - * bit, for 64bit mode bar it will calculate the - * size with lower 32bit and return the corresponding - * value + switch (offset) { + case PCI_BASE_ADDRESS_0: + case PCI_BASE_ADDRESS_1: + size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1); + intel_vgpu_write_pci_bar(vgpu, offset, + size >> (lo ? 0 : 32), lo); + /* + * Untrap the BAR, since guest hasn't configured a + * valid GPA */ - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - new &= (~(size-1)) >> 32; - else - new = 0; - } - /* - * Unmapp & untrap the BAR, since guest hasn't configured a - * valid GPA - */ - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: ret = trap_gttmmio(vgpu, false); break; - case INTEL_GVT_PCI_BAR_APERTURE: + case PCI_BASE_ADDRESS_2: + case PCI_BASE_ADDRESS_3: + size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); + intel_vgpu_write_pci_bar(vgpu, offset, + size >> (lo ? 
0 : 32), lo); ret = map_aperture(vgpu, false); break; + default: + /* Unimplemented BARs */ + intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false); } - intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } else { - /* - * Unmapp & untrap the old BAR first, since guest has - * re-configured the BAR - */ - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: - ret = trap_gttmmio(vgpu, false); + switch (offset) { + case PCI_BASE_ADDRESS_0: + case PCI_BASE_ADDRESS_1: + /* + * Untrap the old BAR first, since guest has + * re-configured the BAR + */ + trap_gttmmio(vgpu, false); + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); + ret = trap_gttmmio(vgpu, mmio_enabled); break; - case INTEL_GVT_PCI_BAR_APERTURE: - ret = map_aperture(vgpu, false); + case PCI_BASE_ADDRESS_2: + case PCI_BASE_ADDRESS_3: + map_aperture(vgpu, false); + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); + ret = map_aperture(vgpu, mmio_enabled); break; - } - intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - /* Track the new BAR */ - if (mmio_enabled) { - switch (bar_index) { - case INTEL_GVT_PCI_BAR_GTTMMIO: - ret = trap_gttmmio(vgpu, true); - break; - case INTEL_GVT_PCI_BAR_APERTURE: - ret = map_aperture(vgpu, true); - break; - } + default: + intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } return ret; @@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } switch (rounddown(offset, 4)) { - case PCI_BASE_ADDRESS_0: - case PCI_BASE_ADDRESS_1: - case PCI_BASE_ADDRESS_2: - case PCI_BASE_ADDRESS_3: + case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; return emulate_pci_bar_write(vgpu, offset, p_data, bytes); @@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; - int i; memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, info->cfg_space_size); @@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, */ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8); memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = + pci_resource_len(gvt->dev_priv->drm.pdev, 0); + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = + pci_resource_len(gvt->dev_priv->drm.pdev, 2); } /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 57317715977f940039ace12cf79c4a7f67c2056d..19404c96eeb10670da75ce5426c96c10f6a5d7e1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2540,7 +2540,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, if (n_pages > ARRAY_SIZE(stack_pages)) { /* Too big for stack -- allocate temporary array instead */ - pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY); + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 50d5e24f91a9daa861ba2156ccdfe7588d48e899..92437f455b43ca1a23c28935337cb7eba124aa58 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -293,7 +293,7 @@ static int eb_create(struct i915_execbuffer *eb) * as possible to perform the allocation and warn * if it fails. */ - flags = GFP_TEMPORARY; + flags = GFP_KERNEL; if (size > 1) flags |= __GFP_NORETRY | __GFP_NOWARN; @@ -1515,7 +1515,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); size = nreloc * sizeof(*relocs); - relocs = kvmalloc_array(size, 1, GFP_TEMPORARY); + relocs = kvmalloc_array(size, 1, GFP_KERNEL); if (!relocs) { kvfree(relocs); err = -ENOMEM; @@ -2077,7 +2077,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args, return ERR_PTR(-EFAULT); fences = kvmalloc_array(args->num_cliprects, sizeof(*fences), - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (!fences) return ERR_PTR(-ENOMEM); @@ -2463,9 +2463,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, /* Copy in the exec list from userland */ exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); exec2_list = kvmalloc_array(args->buffer_count + 1, sz, - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (exec_list == NULL || exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); @@ -2543,7 +2543,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, /* Allocate an extra slot for use by the command parser */ exec2_list = kvmalloc_array(args->buffer_count + 1, sz, - __GFP_NOWARN | GFP_TEMPORARY); + __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0d5a988b3867792a1f8bf069475a1a57042b11f8..e2410eb5d96e01ce208f565fee182da9a99ee604 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3231,7 +3231,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, /* Allocate a temporary list of source pages for random access. 
*/ page_addr_list = kvmalloc_array(n_pages, sizeof(dma_addr_t), - GFP_TEMPORARY); + GFP_KERNEL); if (!page_addr_list) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 23fd18bd1b56b7f54c7d16cc3d5f1b72a03aaf71..709efe2357eac9948ba71a76e11c02ace9f6ae4c 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ret = -ENOMEM; pinned = 0; - pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY); + pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (pvec != NULL) { struct mm_struct *mm = obj->userptr.mm->mm; unsigned int flags = 0; @@ -643,7 +643,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) if (mm == current->mm) { pvec = kvmalloc_array(num_pages, sizeof(struct page *), - GFP_TEMPORARY | + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (pvec) /* defer to worker if malloc fails */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index ed5a1eb839ad57951bf56b3ae0e886cbfbf88428..0c779671fe2df976eceba035b45df01e9dc82f64 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -787,16 +787,16 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, */ ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; ebuf->buf = kmalloc(ebuf->size, - GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (ebuf->buf == NULL) { ebuf->size = PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); } if (ebuf->buf == NULL) { ebuf->size = 128; - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); } if (ebuf->buf == NULL) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e21ce9c18b6eee10ad8556d5fb310cd544987c88..b63893eeca73ddf78070bbecf055f6560f8636b7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, pipe); int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; - bool in_vbl = true; unsigned long irqflags; if (WARN_ON(!mode->crtc_clock)) { @@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - in_vbl = position >= vbl_start && position < vbl_end; - /* * While in vblank, position will be negative * counting up towards 0 at vbl_end. 
And outside diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f17275519484bd8f598401269959c038fe139171..00cd17c76fdcc3bc5427f53bf45e416d66400655 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14030,7 +14030,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (mode_cmd->handles[i] != mode_cmd->handles[0]) { DRM_DEBUG_KMS("bad plane %d handle\n", i); - return -EINVAL; + goto err; } stride_alignment = intel_fb_stride_alignment(fb, i); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index f0c11aec5ea5c99933f4243d9ce408692cd66cda..7442891762be4e486f8e1bd4c4f9a365819d7c88 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *old_conn_state) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -902,15 +900,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); intel_panel_disable_backlight(old_conn_state); - /* - * Disable Device ready before the port shutdown in order - * to avoid split screen - */ - if (IS_BROXTON(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) - I915_WRITE(MIPI_DEVICE_READY(port), 0); - } - /* * According to the spec we should send SHUTDOWN before * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a17b1de7d7e0edf9ec2f605a2d257227587599ba..3b1c5d783ee7ccf12fc9d0cda009ab695fef4e57 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = bxt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, @@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = bxt_get_backlight(connector); val = intel_panel_compute_brightness(connector, val); panel->backlight.level = clamp(val, panel->backlight.min, diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index d044bf9a6feb37e6036b44e83a0a70dc1b7f03cb..222c511bea494585b08d9a6c41701065517526bd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -62,7 +62,7 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); if (!order) return order; diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index 7276194c04f7fab3330e032963e47f04c1bd3e78..828904b7d46831f4dc6040cdf7085ffff515275b 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c @@ 
-117,12 +117,12 @@ static int igt_random_insert_remove(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto out_waiters; @@ -187,12 +187,12 @@ static int igt_insert_complete(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto out_waiters; @@ -368,7 +368,7 @@ static int igt_wakeup(void *arg) mock_engine_reset(engine); - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); if (!waiters) goto out_engines; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 2d0fef2cfca6342b25736f2e4d9a33c8b7623c8f..3cac22eb47ce10efaac456b25d0a3f210b6b6ff5 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -127,7 +127,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri return 0; valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), - GFP_TEMPORARY); + GFP_KERNEL); if (!valid) return -ENOMEM; diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c index 7b12a68c3b54bc4b670c2f7024e661760a1fe5d7..a78c4b483e8dd613228f41e695155c850b00366d 100644 --- a/drivers/gpu/drm/lib/drm_random.c +++ b/drivers/gpu/drm/lib/drm_random.c @@ -28,7 +28,7 @@ unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) { unsigned int *order, i; - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); if (!order) return order; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 8a75c0bd8a78b1481e30fdab63f2d14bfc64536d..5d0a75d4b249c439ff9ff072f89c87c5e95bd776 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -40,7 +40,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (sz > SIZE_MAX) return NULL; - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!submit) return NULL; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index 4a57defc99b38ae2de923ba830b0c70de9dff24a..1399d923d44623940c000dc0a54be40865e75624 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c @@ -171,7 +171,7 @@ nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static struct dmi_system_id gpio_reset_ids[] = { +static const struct dmi_system_id gpio_reset_ids[] = { { .ident = "Apple Macbook 10,1", .matches = { diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 14c5613b4388a839461915fc448d4772422861a3..afbf50d0c08fa1c89c49120717b7a51cb05d5b98 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .y2 = 
qfb->base.height }; - if (!old_state->fb) { - qxl_io_log(qdev, - "create primary fb: %dx%d,%d,%d\n", - bo->surf.width, bo->surf.height, - bo->surf.stride, bo->surf.format); + if (old_state->fb) { + qfb_old = to_qxl_framebuffer(old_state->fb); + bo_old = gem_to_qxl_bo(qfb_old->obj); + } else { + bo_old = NULL; + } - qxl_io_create_primary(qdev, 0, bo); - bo->is_primary = true; + if (bo == bo_old) return; - } else { - qfb_old = to_qxl_framebuffer(old_state->fb); - bo_old = gem_to_qxl_bo(qfb_old->obj); + if (bo_old && bo_old->is_primary) { + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; } - bo->is_primary = true; + if (!bo->is_primary) { + qxl_io_create_primary(qdev, 0, bo); + bo->is_primary = true; + } qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, { struct qxl_device *qdev = plane->dev->dev_private; - if (old_state->fb) - { struct qxl_framebuffer *qfb = + if (old_state->fb) { + struct qxl_framebuffer *qfb = to_qxl_framebuffer(old_state->fb); struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); - qxl_io_destroy_primary(qdev); - bo->is_primary = false; + if (bo->is_primary) { + qxl_io_destroy_primary(qdev); + bo->is_primary = false; + } } } @@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, struct drm_gem_object *obj; struct qxl_bo *user_bo; - if (!plane->state->fb) { - /* we never executed prepare_fb, so there's nothing to + if (!old_state->fb) { + /* + * we never executed prepare_fb, so there's nothing to * unpin. */ return; } - obj = to_qxl_framebuffer(plane->state->fb)->obj; + obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 997131d58c7f639f8dbb765e51433a35a7da27ba..ffc10cadcf34ccb79a7d09ae253b4a2dffda7cfe 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, radeon_agp_suspend(rdev); pci_save_state(dev->pdev); - if (freeze && rdev->family >= CHIP_CEDAR) { + if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { rdev->asic->asic_reset(rdev, true); pci_restore_state(dev->pdev); } else if (suspend) { diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index dfdd858eda0a0640addf1511948d78c0e04678dd..86eb4c185a28547589fb4b8a53433351a6738536 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -1627,7 +1627,7 @@ static int igt_topdown(void *ignored) goto err; bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1741,7 +1741,7 @@ static int igt_bottomup(void *ignored) goto err; bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), - GFP_TEMPORARY); + GFP_KERNEL); if (!bitmap) goto err_nodes; diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 06f05302ee75e33b2a05f9c468b1a3bbd60619be..882d85db90539ae1b00e194a0cae43bf8fb4918d 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC bool "Allwinner A10 HDMI CEC Support" depends on DRM_SUN4I_HDMI select CEC_CORE - depends on CEC_PIN + select CEC_PIN help Choose this option if you have an Allwinner SoC with an HDMI controller and want to use CEC. 
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 1457750988da4930792eceb3e543f211d66f84f7..a1f8cba251a245af7227d853da784518d19d405d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -15,7 +15,7 @@ #include #include -#include +#include #define SUN4I_HDMI_CTRL_REG 0x004 #define SUN4I_HDMI_CTRL_ENABLE BIT(31) diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h index e9b7cdad5c4c1beac73bca274321cc57280006bd..5a1ab4046e926fefe4cb6d73d2c3ebe031eb45bb 100644 --- a/drivers/gpu/drm/tegra/trace.h +++ b/drivers/gpu/drm/tegra/trace.h @@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra #define TRACE_INCLUDE_FILE trace #include diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cba11f13d994fbb9b51ee99d8beac08cac26d8fe..180ce629641618adb2ec001ce0c25a563502da0e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,8 +109,8 @@ static ssize_t ttm_bo_global_show(struct kobject *kobj, struct ttm_bo_global *glob = container_of(kobj, struct ttm_bo_global, kobj); - return snprintf(buffer, PAGE_SIZE, "%lu\n", - (unsigned long) atomic_read(&glob->bo_count)); + return snprintf(buffer, PAGE_SIZE, "%d\n", + atomic_read(&glob->bo_count)); } static struct attribute *ttm_bo_global_attrs[] = { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d0459b392e5eb07324b2ad9e9f77f4a7a6f278cb..c934ad5b39036c6de16265e8c2e9cbf61873d6fe 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -469,6 +469,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ + atomic_inc(&bo->glob->bo_count); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 579bdf93be433b54a23fa8827badd7ace8dbb762..14a94d90c028a0f77c22e6b0a3f9c3e48da070a4 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pm_dmi_table[] = { +static const struct dmi_system_id pm_dmi_table[] __initconst = { { enable_cap_knobs, "IBM Active Energy Manager", { diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 76c34f4fde132cef26d2a76900401dce82a2e23f..5c677ba44014307c7651928641393da6e90f5eae 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -1247,7 +1247,7 @@ static int applesmc_dmi_match(const struct dmi_system_id *id) * Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". * So we need to put "Apple MacBook Pro" before "Apple MacBook". 
*/ -static __initdata struct dmi_system_id applesmc_whitelist[] = { +static const struct dmi_system_id applesmc_whitelist[] __initconst = { { applesmc_dmi_match, "Apple MacBook Air", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 3189246302a6b33940072df8fb1e7d05a611024d..c7c9e95e58a83298e57b38c80cccb639ab9e17b3 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -890,7 +890,7 @@ static const struct i8k_config_data i8k_config_data[] = { }, }; -static struct dmi_system_id i8k_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_dmi_table[] __initconst = { { .ident = "Dell Inspiron", .matches = { @@ -1013,7 +1013,7 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call. * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 */ -static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { +static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst = { { .ident = "Dell Studio XPS 8000", .matches = { diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index edc0cfb6fc1ad550750eaf4f0058ca884828f4fb..c06dce2c1da745fab9727715f5ffaa01a40d4d04 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -336,6 +336,16 @@ config I2C_POWERMAC comment "I2C system bus drivers (mostly embedded / system-on-chip)" +config I2C_ALTERA + tristate "Altera Soft IP I2C" + depends on (ARCH_SOCFPGA || NIOS2) && OF + help + If you say yes to this option, support will be included for the + Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures. + + This driver can also be built as a module. If so, the module + will be called i2c-altera. + config I2C_ASPEED tristate "Aspeed I2C Controller" depends on ARCH_ASPEED || COMPILE_TEST @@ -935,6 +945,16 @@ config I2C_STM32F4 This driver can also be built as module. If so, the module will be called i2c-stm32f4. +config I2C_STM32F7 + tristate "STMicroelectronics STM32F7 I2C support" + depends on ARCH_STM32 || COMPILE_TEST + help + Enable this option to add support for STM32 I2C controller embedded + in STM32F7 SoCs. + + This driver can also be built as module. If so, the module + will be called i2c-stm32f7. 
+ config I2C_STU300 tristate "ST Microelectronics DDC I2C interface" depends on MACH_U300 diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 562daf738048610c57585514f3c2646a5a5f3619..47f3ac9a695ad21066fc5ab47df40a564913e024 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o # Embedded system I2C/SMBus host controller drivers +obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o @@ -93,6 +94,7 @@ obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o obj-$(CONFIG_I2C_ST) += i2c-st.o obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o +obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c new file mode 100644 index 0000000000000000000000000000000000000000..f5e1941e65b5a2dd6cebfd8a2f60798e258b986b --- /dev/null +++ b/drivers/i2c/busses/i2c-altera.c @@ -0,0 +1,511 @@ +/* + * Copyright Intel Corporation (C) 2017. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + * Based on the i2c-axxia.c driver. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ALTR_I2C_TFR_CMD 0x00 /* Transfer Command register */ +#define ALTR_I2C_TFR_CMD_STA BIT(9) /* send START before byte */ +#define ALTR_I2C_TFR_CMD_STO BIT(8) /* send STOP after byte */ +#define ALTR_I2C_TFR_CMD_RW_D BIT(0) /* Direction of transfer */ +#define ALTR_I2C_RX_DATA 0x04 /* RX data FIFO register */ +#define ALTR_I2C_CTRL 0x08 /* Control register */ +#define ALTR_I2C_CTRL_RXT_SHFT 4 /* RX FIFO Threshold */ +#define ALTR_I2C_CTRL_TCT_SHFT 2 /* TFER CMD FIFO Threshold */ +#define ALTR_I2C_CTRL_BSPEED BIT(1) /* Bus Speed (1=Fast) */ +#define ALTR_I2C_CTRL_EN BIT(0) /* Enable Core (1=Enable) */ +#define ALTR_I2C_ISER 0x0C /* Interrupt Status Enable register */ +#define ALTR_I2C_ISER_RXOF_EN BIT(4) /* Enable RX OVERFLOW IRQ */ +#define ALTR_I2C_ISER_ARB_EN BIT(3) /* Enable ARB LOST IRQ */ +#define ALTR_I2C_ISER_NACK_EN BIT(2) /* Enable NACK DET IRQ */ +#define ALTR_I2C_ISER_RXRDY_EN BIT(1) /* Enable RX Ready IRQ */ +#define ALTR_I2C_ISER_TXRDY_EN BIT(0) /* Enable TX Ready IRQ */ +#define ALTR_I2C_ISR 0x10 /* Interrupt Status register */ +#define ALTR_I2C_ISR_RXOF BIT(4) /* RX OVERFLOW IRQ */ +#define ALTR_I2C_ISR_ARB BIT(3) /* ARB LOST IRQ */ +#define ALTR_I2C_ISR_NACK BIT(2) /* NACK DET IRQ */ +#define ALTR_I2C_ISR_RXRDY BIT(1) /* RX Ready IRQ */ +#define ALTR_I2C_ISR_TXRDY BIT(0) /* TX Ready IRQ */ +#define ALTR_I2C_STATUS 0x14 /* Status register */ +#define ALTR_I2C_STAT_CORE BIT(0) /* Core Status (0=idle) */ +#define ALTR_I2C_TC_FIFO_LVL 0x18 /* Transfer FIFO LVL register */ +#define ALTR_I2C_RX_FIFO_LVL 0x1C /* Receive FIFO LVL register */ +#define ALTR_I2C_SCL_LOW 0x20 /* SCL low count register */ +#define ALTR_I2C_SCL_HIGH 0x24 /* SCL high count register */ +#define ALTR_I2C_SDA_HOLD 0x28 /* SDA hold count register */ + +#define ALTR_I2C_ALL_IRQ (ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | \ + ALTR_I2C_ISR_NACK | ALTR_I2C_ISR_RXRDY | \ + ALTR_I2C_ISR_TXRDY) + +#define ALTR_I2C_THRESHOLD 0 /* IRQ Threshold at 1 element */ +#define ALTR_I2C_DFLT_FIFO_SZ 4 +#define ALTR_I2C_TIMEOUT 100000 /* 100ms */ +#define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250)) + +/** + * altr_i2c_dev - I2C device context + * @base: pointer to register struct + * @msg: pointer to current message + * @msg_len: number of bytes transferred in msg + * @msg_err: error code for completed message + * @msg_complete: xfer completion object + * @dev: device reference + * @adapter: core i2c abstraction + * @i2c_clk: clock reference for i2c input clock + * @bus_clk_rate: current i2c bus clock rate + * @buf: ptr to msg buffer for easier use. + * @fifo_size: size of the FIFO passed in. + * @isr_mask: cached copy of local ISR enables. + * @isr_status: cached copy of local ISR status. + * @lock: spinlock for IRQ synchronization. 
+ */ +struct altr_i2c_dev { + void __iomem *base; + struct i2c_msg *msg; + size_t msg_len; + int msg_err; + struct completion msg_complete; + struct device *dev; + struct i2c_adapter adapter; + struct clk *i2c_clk; + u32 bus_clk_rate; + u8 *buf; + u32 fifo_size; + u32 isr_mask; + u32 isr_status; + spinlock_t lock; /* IRQ synchronization */ +}; + +static void +altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable) +{ + unsigned long flags; + u32 int_en; + + spin_lock_irqsave(&idev->lock, flags); + + int_en = readl(idev->base + ALTR_I2C_ISER); + if (enable) + idev->isr_mask = int_en | mask; + else + idev->isr_mask = int_en & ~mask; + + writel(idev->isr_mask, idev->base + ALTR_I2C_ISER); + + spin_unlock_irqrestore(&idev->lock, flags); +} + +static void altr_i2c_int_clear(struct altr_i2c_dev *idev, u32 mask) +{ + u32 int_en = readl(idev->base + ALTR_I2C_ISR); + + writel(int_en | mask, idev->base + ALTR_I2C_ISR); +} + +static void altr_i2c_core_disable(struct altr_i2c_dev *idev) +{ + u32 tmp = readl(idev->base + ALTR_I2C_CTRL); + + writel(tmp & ~ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); +} + +static void altr_i2c_core_enable(struct altr_i2c_dev *idev) +{ + u32 tmp = readl(idev->base + ALTR_I2C_CTRL); + + writel(tmp | ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL); +} + +static void altr_i2c_reset(struct altr_i2c_dev *idev) +{ + altr_i2c_core_disable(idev); + altr_i2c_core_enable(idev); +} + +static inline void altr_i2c_stop(struct altr_i2c_dev *idev) +{ + writel(ALTR_I2C_TFR_CMD_STO, idev->base + ALTR_I2C_TFR_CMD); +} + +static void altr_i2c_init(struct altr_i2c_dev *idev) +{ + u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate; + u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000; + u32 tmp = (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_RXT_SHFT) | + (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT); + u32 t_high, t_low; + + if (idev->bus_clk_rate <= 100000) { + tmp &= ~ALTR_I2C_CTRL_BSPEED; + /* Standard mode SCL 50/50 */ + t_high = divisor * 1 / 2; + t_low = divisor * 1 / 2; + } else { + tmp |= ALTR_I2C_CTRL_BSPEED; + /* Fast mode SCL 33/66 */ + t_high = divisor * 1 / 3; + t_low = divisor * 2 / 3; + } + writel(tmp, idev->base + ALTR_I2C_CTRL); + + dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n", + idev->bus_clk_rate, clk_mhz, divisor); + + /* Reset controller */ + altr_i2c_reset(idev); + + /* SCL High Time */ + writel(t_high, idev->base + ALTR_I2C_SCL_HIGH); + /* SCL Low Time */ + writel(t_low, idev->base + ALTR_I2C_SCL_LOW); + /* SDA Hold Time, 300ns */ + writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD); + + /* Mask all master interrupt bits */ + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); +} + +/** + * altr_i2c_transfer - On the last byte to be transmitted, send + * a Stop bit on the last byte. + */ +static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data) +{ + /* On the last byte to be transmitted, send STOP */ + if (idev->msg_len == 1) + data |= ALTR_I2C_TFR_CMD_STO; + if (idev->msg_len > 0) + writel(data, idev->base + ALTR_I2C_TFR_CMD); +} + +/** + * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of + * transfer. Send a Stop bit on the last byte. 
+ */ +static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev) +{ + size_t rx_fifo_avail = readl(idev->base + ALTR_I2C_RX_FIFO_LVL); + int bytes_to_transfer = min(rx_fifo_avail, idev->msg_len); + + while (bytes_to_transfer-- > 0) { + *idev->buf++ = readl(idev->base + ALTR_I2C_RX_DATA); + idev->msg_len--; + altr_i2c_transfer(idev, 0); + } +} + +/** + * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer. + * @return: Number of bytes left to transfer. + */ +static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev) +{ + size_t tx_fifo_avail = idev->fifo_size - readl(idev->base + + ALTR_I2C_TC_FIFO_LVL); + int bytes_to_transfer = min(tx_fifo_avail, idev->msg_len); + int ret = idev->msg_len - bytes_to_transfer; + + while (bytes_to_transfer-- > 0) { + altr_i2c_transfer(idev, *idev->buf++); + idev->msg_len--; + } + + return ret; +} + +static irqreturn_t altr_i2c_isr_quick(int irq, void *_dev) +{ + struct altr_i2c_dev *idev = _dev; + irqreturn_t ret = IRQ_HANDLED; + + /* Read IRQ status but only interested in Enabled IRQs. */ + idev->isr_status = readl(idev->base + ALTR_I2C_ISR) & idev->isr_mask; + if (idev->isr_status) + ret = IRQ_WAKE_THREAD; + + return ret; +} + +static irqreturn_t altr_i2c_isr(int irq, void *_dev) +{ + int ret; + bool read, finish = false; + struct altr_i2c_dev *idev = _dev; + u32 status = idev->isr_status; + + if (!idev->msg) { + dev_warn(idev->dev, "unexpected interrupt\n"); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + return IRQ_HANDLED; + } + read = (idev->msg->flags & I2C_M_RD) != 0; + + /* handle Lost Arbitration */ + if (unlikely(status & ALTR_I2C_ISR_ARB)) { + altr_i2c_int_clear(idev, ALTR_I2C_ISR_ARB); + idev->msg_err = -EAGAIN; + finish = true; + } else if (unlikely(status & ALTR_I2C_ISR_NACK)) { + dev_dbg(idev->dev, "Could not get ACK\n"); + idev->msg_err = -ENXIO; + altr_i2c_int_clear(idev, ALTR_I2C_ISR_NACK); + altr_i2c_stop(idev); + finish = true; + } else if (read && unlikely(status & ALTR_I2C_ISR_RXOF)) { + /* handle RX FIFO Overflow */ + altr_i2c_empty_rx_fifo(idev); + altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); + altr_i2c_stop(idev); + dev_err(idev->dev, "RX FIFO Overflow\n"); + finish = true; + } else if (read && (status & ALTR_I2C_ISR_RXRDY)) { + /* RX FIFO needs service? */ + altr_i2c_empty_rx_fifo(idev); + altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY); + if (!idev->msg_len) + finish = true; + } else if (!read && (status & ALTR_I2C_ISR_TXRDY)) { + /* TX FIFO needs service? 
*/ + altr_i2c_int_clear(idev, ALTR_I2C_ISR_TXRDY); + if (idev->msg_len > 0) + altr_i2c_fill_tx_fifo(idev); + else + finish = true; + } else { + dev_warn(idev->dev, "Unexpected interrupt: 0x%x\n", status); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + } + + if (finish) { + /* Wait for the Core to finish */ + ret = readl_poll_timeout_atomic(idev->base + ALTR_I2C_STATUS, + status, + !(status & ALTR_I2C_STAT_CORE), + 1, ALTR_I2C_TIMEOUT); + if (ret) + dev_err(idev->dev, "message timeout\n"); + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false); + altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); + complete(&idev->msg_complete); + dev_dbg(idev->dev, "Message Complete\n"); + } + + return IRQ_HANDLED; +} + +static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) +{ + u32 imask = ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | ALTR_I2C_ISR_NACK; + unsigned long time_left; + u32 value; + u8 addr = i2c_8bit_addr_from_msg(msg); + + idev->msg = msg; + idev->msg_len = msg->len; + idev->buf = msg->buf; + idev->msg_err = 0; + reinit_completion(&idev->msg_complete); + altr_i2c_core_enable(idev); + + /* Make sure RX FIFO is empty */ + do { + readl(idev->base + ALTR_I2C_RX_DATA); + } while (readl(idev->base + ALTR_I2C_RX_FIFO_LVL)); + + writel(ALTR_I2C_TFR_CMD_STA | addr, idev->base + ALTR_I2C_TFR_CMD); + + if ((msg->flags & I2C_M_RD) != 0) { + imask |= ALTR_I2C_ISER_RXOF_EN | ALTR_I2C_ISER_RXRDY_EN; + altr_i2c_int_enable(idev, imask, true); + /* write the first byte to start the RX */ + altr_i2c_transfer(idev, 0); + } else { + imask |= ALTR_I2C_ISR_TXRDY; + altr_i2c_int_enable(idev, imask, true); + altr_i2c_fill_tx_fifo(idev); + } + + time_left = wait_for_completion_timeout(&idev->msg_complete, + ALTR_I2C_XFER_TIMEOUT); + altr_i2c_int_enable(idev, imask, false); + + value = readl(idev->base + ALTR_I2C_STATUS) & ALTR_I2C_STAT_CORE; + if (value) + dev_err(idev->dev, "Core Status not IDLE...\n"); + + if (time_left == 0) { + idev->msg_err = -ETIMEDOUT; + dev_dbg(idev->dev, "Transaction timed out.\n"); + } + + altr_i2c_core_disable(idev); + + return idev->msg_err; +} + +static int +altr_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct altr_i2c_dev *idev = i2c_get_adapdata(adap); + int i, ret; + + for (i = 0; i < num; i++) { + ret = altr_i2c_xfer_msg(idev, msgs++); + if (ret) + return ret; + } + return num; +} + +static u32 altr_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm altr_i2c_algo = { + .master_xfer = altr_i2c_xfer, + .functionality = altr_i2c_func, +}; + +static int altr_i2c_probe(struct platform_device *pdev) +{ + struct altr_i2c_dev *idev = NULL; + struct resource *res; + int irq, ret; + u32 val; + + idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); + if (!idev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + idev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(idev->base)) + return PTR_ERR(idev->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "missing interrupt resource\n"); + return irq; + } + + idev->i2c_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(idev->i2c_clk)) { + dev_err(&pdev->dev, "missing clock\n"); + return PTR_ERR(idev->i2c_clk); + } + + idev->dev = &pdev->dev; + init_completion(&idev->msg_complete); + spin_lock_init(&idev->lock); + + val = device_property_read_u32(idev->dev, "fifo-size", + &idev->fifo_size); + if (val) { + dev_err(&pdev->dev, "FIFO size set to default of %d\n", + 
ALTR_I2C_DFLT_FIFO_SZ); + idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; + } + + val = device_property_read_u32(idev->dev, "clock-frequency", + &idev->bus_clk_rate); + if (val) { + dev_err(&pdev->dev, "Default to 100kHz\n"); + idev->bus_clk_rate = 100000; /* default clock rate */ + } + + if (idev->bus_clk_rate > 400000) { + dev_err(&pdev->dev, "invalid clock-frequency %d\n", + idev->bus_clk_rate); + return -EINVAL; + } + + ret = devm_request_threaded_irq(&pdev->dev, irq, altr_i2c_isr_quick, + altr_i2c_isr, IRQF_ONESHOT, + pdev->name, idev); + if (ret) { + dev_err(&pdev->dev, "failed to claim IRQ %d\n", irq); + return ret; + } + + ret = clk_prepare_enable(idev->i2c_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock\n"); + return ret; + } + + altr_i2c_init(idev); + + i2c_set_adapdata(&idev->adapter, idev); + strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name)); + idev->adapter.owner = THIS_MODULE; + idev->adapter.algo = &altr_i2c_algo; + idev->adapter.dev.parent = &pdev->dev; + idev->adapter.dev.of_node = pdev->dev.of_node; + + platform_set_drvdata(pdev, idev); + + ret = i2c_add_adapter(&idev->adapter); + if (ret) { + clk_disable_unprepare(idev->i2c_clk); + return ret; + } + dev_info(&pdev->dev, "Altera SoftIP I2C Probe Complete\n"); + + return 0; +} + +static int altr_i2c_remove(struct platform_device *pdev) +{ + struct altr_i2c_dev *idev = platform_get_drvdata(pdev); + + clk_disable_unprepare(idev->i2c_clk); + i2c_del_adapter(&idev->adapter); + + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id altr_i2c_of_match[] = { + { .compatible = "altr,softip-i2c-v1.0" }, + {}, +}; +MODULE_DEVICE_TABLE(of, altr_i2c_of_match); + +static struct platform_driver altr_i2c_driver = { + .probe = altr_i2c_probe, + .remove = altr_i2c_remove, + .driver = { + .name = "altera-i2c", + .of_match_table = altr_i2c_of_match, + }, +}; + +module_platform_driver(altr_i2c_driver); + +MODULE_DESCRIPTION("Altera Soft IP I2C bus driver"); +MODULE_AUTHOR("Thor Thayer "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h new file mode 100644 index 0000000000000000000000000000000000000000..dab51761f8c52b0aab12e4b49334aa1607d0a7a0 --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32.h @@ -0,0 +1,20 @@ +/* + * i2c-stm32.h + * + * Copyright (C) M'boumba Cedric Madianga 2017 + * Author: M'boumba Cedric Madianga + * + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _I2C_STM32_H +#define _I2C_STM32_H + +enum stm32_i2c_speed { + STM32_I2C_SPEED_STANDARD, /* 100 kHz */ + STM32_I2C_SPEED_FAST, /* 400 kHz */ + STM32_I2C_SPEED_FAST_PLUS, /* 1 MHz */ + STM32_I2C_SPEED_END, +}; + +#endif /* _I2C_STM32_H */ diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index aceb6f7885641f254ee09f23af32db627089c0d7..4ec108496f15cdf5dcedd3bad1b368cac5482bd8 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -27,6 +27,8 @@ #include #include +#include "i2c-stm32.h" + /* STM32F4 I2C offset registers */ #define STM32F4_I2C_CR1 0x00 #define STM32F4_I2C_CR2 0x04 @@ -90,12 +92,6 @@ #define STM32F4_I2C_MAX_FREQ 46U #define HZ_TO_MHZ 1000000 -enum stm32f4_i2c_speed { - STM32F4_I2C_SPEED_STANDARD, /* 100 kHz */ - STM32F4_I2C_SPEED_FAST, /* 400 kHz */ - STM32F4_I2C_SPEED_END, -}; - /** * struct stm32f4_i2c_msg - client specific data * @addr: 8-bit slave addr, including r/w bit @@ -159,7 +155,7 @@ static int stm32f4_i2c_set_periph_clk_freq(struct 
stm32f4_i2c_dev *i2c_dev) i2c_dev->parent_rate = clk_get_rate(i2c_dev->clk); freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ); - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * To reach 100 kHz, the parent clk frequency should be between * a minimum value of 2 MHz and a maximum value of 46 MHz due @@ -216,7 +212,7 @@ static void stm32f4_i2c_set_rise_time(struct stm32f4_i2c_dev *i2c_dev) * is not higher than 46 MHz . As a result trise is at most 4 bits wide * and so fits into the TRISE bits [5:0]. */ - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) trise = freq + 1; else trise = freq * 3 / 10 + 1; @@ -230,7 +226,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev) u32 val; u32 ccr = 0; - if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) { + if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) { /* * In standard mode: * t_scl_high = t_scl_low = CCR * I2C parent clk period @@ -808,10 +804,10 @@ static int stm32f4_i2c_probe(struct platform_device *pdev) udelay(2); reset_control_deassert(rst); - i2c_dev->speed = STM32F4_I2C_SPEED_STANDARD; + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; ret = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!ret && clk_rate >= 400000) - i2c_dev->speed = STM32F4_I2C_SPEED_FAST; + i2c_dev->speed = STM32_I2C_SPEED_FAST; i2c_dev->dev = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c new file mode 100644 index 0000000000000000000000000000000000000000..47c67b0ca8960a2436ef899f9e1cecd5d35ab19c --- /dev/null +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -0,0 +1,972 @@ +/* + * Driver for STMicroelectronics STM32F7 I2C controller + * + * This I2C controller is described in the STM32F75xxx and STM32F74xxx Soc + * reference manual. 
+ * Please see below a link to the documentation: + * http://www.st.com/resource/en/reference_manual/dm00124865.pdf + * + * Copyright (C) M'boumba Cedric Madianga 2017 + * Author: M'boumba Cedric Madianga + * + * This driver is based on i2c-stm32f4.c + * + * License terms: GNU General Public License (GPL), version 2 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-stm32.h" + +/* STM32F7 I2C registers */ +#define STM32F7_I2C_CR1 0x00 +#define STM32F7_I2C_CR2 0x04 +#define STM32F7_I2C_TIMINGR 0x10 +#define STM32F7_I2C_ISR 0x18 +#define STM32F7_I2C_ICR 0x1C +#define STM32F7_I2C_RXDR 0x24 +#define STM32F7_I2C_TXDR 0x28 + +/* STM32F7 I2C control 1 */ +#define STM32F7_I2C_CR1_ANFOFF BIT(12) +#define STM32F7_I2C_CR1_ERRIE BIT(7) +#define STM32F7_I2C_CR1_TCIE BIT(6) +#define STM32F7_I2C_CR1_STOPIE BIT(5) +#define STM32F7_I2C_CR1_NACKIE BIT(4) +#define STM32F7_I2C_CR1_ADDRIE BIT(3) +#define STM32F7_I2C_CR1_RXIE BIT(2) +#define STM32F7_I2C_CR1_TXIE BIT(1) +#define STM32F7_I2C_CR1_PE BIT(0) +#define STM32F7_I2C_ALL_IRQ_MASK (STM32F7_I2C_CR1_ERRIE \ + | STM32F7_I2C_CR1_TCIE \ + | STM32F7_I2C_CR1_STOPIE \ + | STM32F7_I2C_CR1_NACKIE \ + | STM32F7_I2C_CR1_RXIE \ + | STM32F7_I2C_CR1_TXIE) + +/* STM32F7 I2C control 2 */ +#define STM32F7_I2C_CR2_RELOAD BIT(24) +#define STM32F7_I2C_CR2_NBYTES_MASK GENMASK(23, 16) +#define STM32F7_I2C_CR2_NBYTES(n) (((n) & 0xff) << 16) +#define STM32F7_I2C_CR2_NACK BIT(15) +#define STM32F7_I2C_CR2_STOP BIT(14) +#define STM32F7_I2C_CR2_START BIT(13) +#define STM32F7_I2C_CR2_RD_WRN BIT(10) +#define STM32F7_I2C_CR2_SADD7_MASK GENMASK(7, 1) +#define STM32F7_I2C_CR2_SADD7(n) (((n) & 0x7f) << 1) + +/* STM32F7 I2C Interrupt Status */ +#define STM32F7_I2C_ISR_BUSY BIT(15) +#define STM32F7_I2C_ISR_ARLO BIT(9) +#define STM32F7_I2C_ISR_BERR BIT(8) +#define STM32F7_I2C_ISR_TCR BIT(7) +#define STM32F7_I2C_ISR_TC BIT(6) +#define STM32F7_I2C_ISR_STOPF BIT(5) +#define STM32F7_I2C_ISR_NACKF BIT(4) +#define STM32F7_I2C_ISR_RXNE BIT(2) +#define STM32F7_I2C_ISR_TXIS BIT(1) + +/* STM32F7 I2C Interrupt Clear */ +#define STM32F7_I2C_ICR_ARLOCF BIT(9) +#define STM32F7_I2C_ICR_BERRCF BIT(8) +#define STM32F7_I2C_ICR_STOPCF BIT(5) +#define STM32F7_I2C_ICR_NACKCF BIT(4) + +/* STM32F7 I2C Timing */ +#define STM32F7_I2C_TIMINGR_PRESC(n) (((n) & 0xf) << 28) +#define STM32F7_I2C_TIMINGR_SCLDEL(n) (((n) & 0xf) << 20) +#define STM32F7_I2C_TIMINGR_SDADEL(n) (((n) & 0xf) << 16) +#define STM32F7_I2C_TIMINGR_SCLH(n) (((n) & 0xff) << 8) +#define STM32F7_I2C_TIMINGR_SCLL(n) ((n) & 0xff) + +#define STM32F7_I2C_MAX_LEN 0xff + +#define STM32F7_I2C_DNF_DEFAULT 0 +#define STM32F7_I2C_DNF_MAX 16 + +#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1 +#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */ +#define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */ + +#define STM32F7_I2C_RISE_TIME_DEFAULT 25 /* ns */ +#define STM32F7_I2C_FALL_TIME_DEFAULT 10 /* ns */ + +#define STM32F7_PRESC_MAX BIT(4) +#define STM32F7_SCLDEL_MAX BIT(4) +#define STM32F7_SDADEL_MAX BIT(4) +#define STM32F7_SCLH_MAX BIT(8) +#define STM32F7_SCLL_MAX BIT(8) + +/** + * struct stm32f7_i2c_spec - private i2c specification timing + * @rate: I2C bus speed (Hz) + * @rate_min: 80% of I2C bus speed (Hz) + * @rate_max: 100% of I2C bus speed (Hz) + * @fall_max: Max fall time of both SDA and SCL signals (ns) + * @rise_max: Max rise time of both SDA and SCL signals (ns) + * @hddat_min: Min data hold time (ns) + * @vddat_max: Max data valid time 
(ns) + * @sudat_min: Min data setup time (ns) + * @l_min: Min low period of the SCL clock (ns) + * @h_min: Min high period of the SCL clock (ns) + */ +struct stm32f7_i2c_spec { + u32 rate; + u32 rate_min; + u32 rate_max; + u32 fall_max; + u32 rise_max; + u32 hddat_min; + u32 vddat_max; + u32 sudat_min; + u32 l_min; + u32 h_min; +}; + +/** + * struct stm32f7_i2c_setup - private I2C timing setup parameters + * @speed: I2C speed mode (Standard, Fast or Fast Plus) + * @speed_freq: I2C speed frequency (Hz) + * @clock_src: I2C clock source frequency (Hz) + * @rise_time: Rise time (ns) + * @fall_time: Fall time (ns) + * @dnf: Digital filter coefficient (0-16) + * @analog_filter: Analog filter delay (On/Off) + */ +struct stm32f7_i2c_setup { + enum stm32_i2c_speed speed; + u32 speed_freq; + u32 clock_src; + u32 rise_time; + u32 fall_time; + u8 dnf; + bool analog_filter; +}; + +/** + * struct stm32f7_i2c_timings - private I2C output parameters + * @presc: Prescaler value + * @scldel: Data setup time + * @sdadel: Data hold time + * @sclh: SCL high period (master mode) + * @scll: SCL low period (master mode) + */ +struct stm32f7_i2c_timings { + struct list_head node; + u8 presc; + u8 scldel; + u8 sdadel; + u8 sclh; + u8 scll; +}; + +/** + * struct stm32f7_i2c_msg - client specific data + * @addr: 8-bit slave addr, including r/w bit + * @count: number of bytes to be transferred + * @buf: data buffer + * @result: result of the transfer + * @stop: last I2C msg to be sent, i.e. STOP to be generated + */ +struct stm32f7_i2c_msg { + u8 addr; + u32 count; + u8 *buf; + int result; + bool stop; +}; + +/** + * struct stm32f7_i2c_dev - private data of the controller + * @adap: I2C adapter for this controller + * @dev: device for this controller + * @base: virtual memory area + * @complete: completion of I2C message + * @clk: hw i2c clock + * @speed: I2C clock frequency of the controller. Standard, Fast or Fast+ + * @msg: Pointer to data to be written + * @msg_num: number of I2C messages to be executed + * @msg_id: message identifier + * @f7_msg: customized i2c msg for driver usage + * @setup: I2C timing input setup + * @timing: I2C computed timings + */ +struct stm32f7_i2c_dev { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + struct completion complete; + struct clk *clk; + int speed; + struct i2c_msg *msg; + unsigned int msg_num; + unsigned int msg_id; + struct stm32f7_i2c_msg f7_msg; + struct stm32f7_i2c_setup *setup; + struct stm32f7_i2c_timings timing; +}; + +/** + * All these values are coming from I2C Specification, Version 6.0, 4th of + * April 2014. + * + * Table 10. 
Characteristics of the SDA and SCL bus lines for Standard, Fast, + * and Fast-mode Plus I2C-bus devices + */ +static struct stm32f7_i2c_spec i2c_specs[] = { + [STM32_I2C_SPEED_STANDARD] = { + .rate = 100000, + .rate_min = 80000, + .rate_max = 100000, + .fall_max = 300, + .rise_max = 1000, + .hddat_min = 0, + .vddat_max = 3450, + .sudat_min = 250, + .l_min = 4700, + .h_min = 4000, + }, + [STM32_I2C_SPEED_FAST] = { + .rate = 400000, + .rate_min = 320000, + .rate_max = 400000, + .fall_max = 300, + .rise_max = 300, + .hddat_min = 0, + .vddat_max = 900, + .sudat_min = 100, + .l_min = 1300, + .h_min = 600, + }, + [STM32_I2C_SPEED_FAST_PLUS] = { + .rate = 1000000, + .rate_min = 800000, + .rate_max = 1000000, + .fall_max = 100, + .rise_max = 120, + .hddat_min = 0, + .vddat_max = 450, + .sudat_min = 50, + .l_min = 500, + .h_min = 260, + }, +}; + +struct stm32f7_i2c_setup stm32f7_setup = { + .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, + .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, + .dnf = STM32F7_I2C_DNF_DEFAULT, + .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE, +}; + +static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) | mask, reg); +} + +static inline void stm32f7_i2c_clr_bits(void __iomem *reg, u32 mask) +{ + writel_relaxed(readl_relaxed(reg) & ~mask, reg); +} + +static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev, + struct stm32f7_i2c_setup *setup, + struct stm32f7_i2c_timings *output) +{ + u32 p_prev = STM32F7_PRESC_MAX; + u32 i2cclk = DIV_ROUND_CLOSEST(NSEC_PER_SEC, + setup->clock_src); + u32 i2cbus = DIV_ROUND_CLOSEST(NSEC_PER_SEC, + setup->speed_freq); + u32 clk_error_prev = i2cbus; + u32 tsync; + u32 af_delay_min, af_delay_max; + u32 dnf_delay; + u32 clk_min, clk_max; + int sdadel_min, sdadel_max; + int scldel_min; + struct stm32f7_i2c_timings *v, *_v, *s; + struct list_head solutions; + u16 p, l, a, h; + int ret = 0; + + if (setup->speed >= STM32_I2C_SPEED_END) { + dev_err(i2c_dev->dev, "speed out of bound {%d/%d}\n", + setup->speed, STM32_I2C_SPEED_END - 1); + return -EINVAL; + } + + if ((setup->rise_time > i2c_specs[setup->speed].rise_max) || + (setup->fall_time > i2c_specs[setup->speed].fall_max)) { + dev_err(i2c_dev->dev, + "timings out of bound Rise{%d>%d}/Fall{%d>%d}\n", + setup->rise_time, i2c_specs[setup->speed].rise_max, + setup->fall_time, i2c_specs[setup->speed].fall_max); + return -EINVAL; + } + + if (setup->dnf > STM32F7_I2C_DNF_MAX) { + dev_err(i2c_dev->dev, + "DNF out of bound %d/%d\n", + setup->dnf, STM32F7_I2C_DNF_MAX); + return -EINVAL; + } + + if (setup->speed_freq > i2c_specs[setup->speed].rate) { + dev_err(i2c_dev->dev, "ERROR: Freq {%d/%d}\n", + setup->speed_freq, i2c_specs[setup->speed].rate); + return -EINVAL; + } + + /* Analog and Digital Filters */ + af_delay_min = + (setup->analog_filter ? + STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0); + af_delay_max = + (setup->analog_filter ? 
+ STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); + dnf_delay = setup->dnf * i2cclk; + + sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min - + af_delay_min - (setup->dnf + 3) * i2cclk; + + sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - + af_delay_max - (setup->dnf + 4) * i2cclk; + + scldel_min = setup->rise_time + i2c_specs[setup->speed].sudat_min; + + if (sdadel_min < 0) + sdadel_min = 0; + if (sdadel_max < 0) + sdadel_max = 0; + + dev_dbg(i2c_dev->dev, "SDADEL(min/max): %i/%i, SCLDEL(Min): %i\n", + sdadel_min, sdadel_max, scldel_min); + + INIT_LIST_HEAD(&solutions); + /* Compute possible values for PRESC, SCLDEL and SDADEL */ + for (p = 0; p < STM32F7_PRESC_MAX; p++) { + for (l = 0; l < STM32F7_SCLDEL_MAX; l++) { + u32 scldel = (l + 1) * (p + 1) * i2cclk; + + if (scldel < scldel_min) + continue; + + for (a = 0; a < STM32F7_SDADEL_MAX; a++) { + u32 sdadel = (a * (p + 1) + 1) * i2cclk; + + if (((sdadel >= sdadel_min) && + (sdadel <= sdadel_max)) && + (p != p_prev)) { + v = kmalloc(sizeof(*v), GFP_KERNEL); + if (!v) { + ret = -ENOMEM; + goto exit; + } + + v->presc = p; + v->scldel = l; + v->sdadel = a; + p_prev = p; + + list_add_tail(&v->node, + &solutions); + } + } + } + } + + if (list_empty(&solutions)) { + dev_err(i2c_dev->dev, "no Prescaler solution\n"); + ret = -EPERM; + goto exit; + } + + tsync = af_delay_min + dnf_delay + (2 * i2cclk); + s = NULL; + clk_max = NSEC_PER_SEC / i2c_specs[setup->speed].rate_min; + clk_min = NSEC_PER_SEC / i2c_specs[setup->speed].rate_max; + + /* + * Among Prescaler possibilities discovered above figures out SCL Low + * and High Period. Provided: + * - SCL Low Period has to be higher than SCL Clock Low Period + * defined by I2C Specification. I2C Clock has to be lower than + * (SCL Low Period - Analog/Digital filters) / 4. 
+ * - SCL High Period has to be lower than SCL Clock High Period + * defined by I2C Specification + * - I2C Clock has to be lower than SCL High Period + */ + list_for_each_entry(v, &solutions, node) { + u32 prescaler = (v->presc + 1) * i2cclk; + + for (l = 0; l < STM32F7_SCLL_MAX; l++) { + u32 tscl_l = (l + 1) * prescaler + tsync; + + if ((tscl_l < i2c_specs[setup->speed].l_min) || + (i2cclk >= + ((tscl_l - af_delay_min - dnf_delay) / 4))) { + continue; + } + + for (h = 0; h < STM32F7_SCLH_MAX; h++) { + u32 tscl_h = (h + 1) * prescaler + tsync; + u32 tscl = tscl_l + tscl_h + + setup->rise_time + setup->fall_time; + + if ((tscl >= clk_min) && (tscl <= clk_max) && + (tscl_h >= i2c_specs[setup->speed].h_min) && + (i2cclk < tscl_h)) { + int clk_error = tscl - i2cbus; + + if (clk_error < 0) + clk_error = -clk_error; + + if (clk_error < clk_error_prev) { + clk_error_prev = clk_error; + v->scll = l; + v->sclh = h; + s = v; + } + } + } + } + } + + if (!s) { + dev_err(i2c_dev->dev, "no solution at all\n"); + ret = -EPERM; + goto exit; + } + + output->presc = s->presc; + output->scldel = s->scldel; + output->sdadel = s->sdadel; + output->scll = s->scll; + output->sclh = s->sclh; + + dev_dbg(i2c_dev->dev, + "Presc: %i, scldel: %i, sdadel: %i, scll: %i, sclh: %i\n", + output->presc, + output->scldel, output->sdadel, + output->scll, output->sclh); + +exit: + /* Release list and memory */ + list_for_each_entry_safe(v, _v, &solutions, node) { + list_del(&v->node); + kfree(v); + } + + return ret; +} + +static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev, + struct stm32f7_i2c_setup *setup) +{ + int ret = 0; + + setup->speed = i2c_dev->speed; + setup->speed_freq = i2c_specs[setup->speed].rate; + setup->clock_src = clk_get_rate(i2c_dev->clk); + + if (!setup->clock_src) { + dev_err(i2c_dev->dev, "clock rate is 0\n"); + return -EINVAL; + } + + do { + ret = stm32f7_i2c_compute_timing(i2c_dev, setup, + &i2c_dev->timing); + if (ret) { + dev_err(i2c_dev->dev, + "failed to compute I2C timings.\n"); + if (i2c_dev->speed > STM32_I2C_SPEED_STANDARD) { + i2c_dev->speed--; + setup->speed = i2c_dev->speed; + setup->speed_freq = + i2c_specs[setup->speed].rate; + dev_warn(i2c_dev->dev, + "downgrade I2C Speed Freq to (%i)\n", + i2c_specs[setup->speed].rate); + } else { + break; + } + } + } while (ret); + + if (ret) { + dev_err(i2c_dev->dev, "Impossible to compute I2C timings.\n"); + return ret; + } + + dev_dbg(i2c_dev->dev, "I2C Speed(%i), Freq(%i), Clk Source(%i)\n", + setup->speed, setup->speed_freq, setup->clock_src); + dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n", + setup->rise_time, setup->fall_time); + dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n", + (setup->analog_filter ? 
"On" : "Off"), setup->dnf); + + return 0; +} + +static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_timings *t = &i2c_dev->timing; + u32 timing = 0; + + /* Timing settings */ + timing |= STM32F7_I2C_TIMINGR_PRESC(t->presc); + timing |= STM32F7_I2C_TIMINGR_SCLDEL(t->scldel); + timing |= STM32F7_I2C_TIMINGR_SDADEL(t->sdadel); + timing |= STM32F7_I2C_TIMINGR_SCLH(t->sclh); + timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll); + writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); + + /* Enable I2C */ + if (i2c_dev->setup->analog_filter) + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_ANFOFF); + else + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_ANFOFF); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_PE); +} + +static void stm32f7_i2c_write_tx_data(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + + if (f7_msg->count) { + writeb_relaxed(*f7_msg->buf++, base + STM32F7_I2C_TXDR); + f7_msg->count--; + } +} + +static void stm32f7_i2c_read_rx_data(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + + if (f7_msg->count) { + *f7_msg->buf++ = readb_relaxed(base + STM32F7_I2C_RXDR); + f7_msg->count--; + } +} + +static void stm32f7_i2c_reload(struct stm32f7_i2c_dev *i2c_dev) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + u32 cr2; + + cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2); + + cr2 &= ~STM32F7_I2C_CR2_NBYTES_MASK; + if (f7_msg->count > STM32F7_I2C_MAX_LEN) { + cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); + } else { + cr2 &= ~STM32F7_I2C_CR2_RELOAD; + cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); + } + + writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2); +} + +static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev) +{ + u32 status; + int ret; + + ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F7_I2C_ISR, + status, + !(status & STM32F7_I2C_ISR_BUSY), + 10, 1000); + if (ret) { + dev_dbg(i2c_dev->dev, "bus busy\n"); + ret = -EBUSY; + } + + return ret; +} + +static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, + struct i2c_msg *msg) +{ + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + u32 cr1, cr2; + + f7_msg->addr = msg->addr; + f7_msg->buf = msg->buf; + f7_msg->count = msg->len; + f7_msg->result = 0; + f7_msg->stop = (i2c_dev->msg_id >= i2c_dev->msg_num - 1); + + reinit_completion(&i2c_dev->complete); + + cr1 = readl_relaxed(base + STM32F7_I2C_CR1); + cr2 = readl_relaxed(base + STM32F7_I2C_CR2); + + /* Set transfer direction */ + cr2 &= ~STM32F7_I2C_CR2_RD_WRN; + if (msg->flags & I2C_M_RD) + cr2 |= STM32F7_I2C_CR2_RD_WRN; + + /* Set slave address */ + cr2 &= ~STM32F7_I2C_CR2_SADD7_MASK; + cr2 |= STM32F7_I2C_CR2_SADD7(f7_msg->addr); + + /* Set nb bytes to transfer and reload if needed */ + cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD); + if (f7_msg->count > STM32F7_I2C_MAX_LEN) { + cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN); + cr2 |= STM32F7_I2C_CR2_RELOAD; + } else { + cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count); + } + + /* Enable NACK, STOP, error and transfer complete interrupts */ + cr1 |= STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_TCIE | + STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE; + + /* Clear TX/RX interrupt */ + cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE); + + /* Enable RX/TX 
interrupt according to msg direction */ + if (msg->flags & I2C_M_RD) + cr1 |= STM32F7_I2C_CR1_RXIE; + else + cr1 |= STM32F7_I2C_CR1_TXIE; + + /* Configure Start/Repeated Start */ + cr2 |= STM32F7_I2C_CR2_START; + + /* Write configurations registers */ + writel_relaxed(cr1, base + STM32F7_I2C_CR1); + writel_relaxed(cr2, base + STM32F7_I2C_CR2); +} + +static void stm32f7_i2c_disable_irq(struct stm32f7_i2c_dev *i2c_dev, u32 mask) +{ + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, mask); +} + +static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) +{ + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + u32 status, mask; + + status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); + + /* Tx empty */ + if (status & STM32F7_I2C_ISR_TXIS) + stm32f7_i2c_write_tx_data(i2c_dev); + + /* RX not empty */ + if (status & STM32F7_I2C_ISR_RXNE) + stm32f7_i2c_read_rx_data(i2c_dev); + + /* NACK received */ + if (status & STM32F7_I2C_ISR_NACKF) { + dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); + f7_msg->result = -ENXIO; + } + + /* STOP detection flag */ + if (status & STM32F7_I2C_ISR_STOPF) { + /* Disable interrupts */ + stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); + + /* Clear STOP flag */ + writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); + + complete(&i2c_dev->complete); + } + + /* Transfer complete */ + if (status & STM32F7_I2C_ISR_TC) { + if (f7_msg->stop) { + mask = STM32F7_I2C_CR2_STOP; + stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); + } else { + i2c_dev->msg_id++; + i2c_dev->msg++; + stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg); + } + } + + /* + * Transfer Complete Reload: 255 data bytes have been transferred + * We have to prepare the I2C controller to transfer the remaining + * data. 
+ */ + if (status & STM32F7_I2C_ISR_TCR) + stm32f7_i2c_reload(i2c_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) +{ + struct stm32f7_i2c_dev *i2c_dev = data; + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + void __iomem *base = i2c_dev->base; + struct device *dev = i2c_dev->dev; + u32 status; + + status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); + + /* Bus error */ + if (status & STM32F7_I2C_ISR_BERR) { + dev_err(dev, "<%s>: Bus error\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR); + f7_msg->result = -EIO; + } + + /* Arbitration loss */ + if (status & STM32F7_I2C_ISR_ARLO) { + dev_dbg(dev, "<%s>: Arbitration loss\n", __func__); + writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR); + f7_msg->result = -EAGAIN; + } + + stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK); + + complete(&i2c_dev->complete); + + return IRQ_HANDLED; +} + +static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap); + struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + unsigned long time_left; + int ret; + + i2c_dev->msg = msgs; + i2c_dev->msg_num = num; + i2c_dev->msg_id = 0; + + ret = clk_enable(i2c_dev->clk); + if (ret) { + dev_err(i2c_dev->dev, "Failed to enable clock\n"); + return ret; + } + + ret = stm32f7_i2c_wait_free_bus(i2c_dev); + if (ret) + goto clk_free; + + stm32f7_i2c_xfer_msg(i2c_dev, msgs); + + time_left = wait_for_completion_timeout(&i2c_dev->complete, + i2c_dev->adap.timeout); + ret = f7_msg->result; + + if (!time_left) { + dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n", + i2c_dev->msg->addr); + ret = -ETIMEDOUT; + } + +clk_free: + clk_disable(i2c_dev->clk); + + return (ret < 0) ? 
ret : num; +} + +static u32 stm32f7_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static struct i2c_algorithm stm32f7_i2c_algo = { + .master_xfer = stm32f7_i2c_xfer, + .functionality = stm32f7_i2c_func, +}; + +static int stm32f7_i2c_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct stm32f7_i2c_dev *i2c_dev; + const struct stm32f7_i2c_setup *setup; + struct resource *res; + u32 irq_error, irq_event, clk_rate, rise_time, fall_time; + struct i2c_adapter *adap; + struct reset_control *rst; + int ret; + + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); + if (!i2c_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c_dev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c_dev->base)) + return PTR_ERR(i2c_dev->base); + + irq_event = irq_of_parse_and_map(np, 0); + if (!irq_event) { + dev_err(&pdev->dev, "IRQ event missing or invalid\n"); + return -EINVAL; + } + + irq_error = irq_of_parse_and_map(np, 1); + if (!irq_error) { + dev_err(&pdev->dev, "IRQ error missing or invalid\n"); + return -EINVAL; + } + + i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c_dev->clk)) { + dev_err(&pdev->dev, "Error: Missing controller clock\n"); + return PTR_ERR(i2c_dev->clk); + } + ret = clk_prepare_enable(i2c_dev->clk); + if (ret) { + dev_err(&pdev->dev, "Failed to prepare_enable clock\n"); + return ret; + } + + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + ret = device_property_read_u32(&pdev->dev, "clock-frequency", + &clk_rate); + if (!ret && clk_rate >= 1000000) + i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS; + else if (!ret && clk_rate >= 400000) + i2c_dev->speed = STM32_I2C_SPEED_FAST; + else if (!ret && clk_rate >= 100000) + i2c_dev->speed = STM32_I2C_SPEED_STANDARD; + + rst = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(rst)) { + dev_err(&pdev->dev, "Error: Missing controller reset\n"); + ret = PTR_ERR(rst); + goto clk_free; + } + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + + i2c_dev->dev = &pdev->dev; + + ret = devm_request_irq(&pdev->dev, irq_event, stm32f7_i2c_isr_event, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq event %i\n", + irq_event); + goto clk_free; + } + + ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0, + pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq error %i\n", + irq_error); + goto clk_free; + } + + setup = of_device_get_match_data(&pdev->dev); + i2c_dev->setup->rise_time = setup->rise_time; + i2c_dev->setup->fall_time = setup->fall_time; + i2c_dev->setup->dnf = setup->dnf; + i2c_dev->setup->analog_filter = setup->analog_filter; + + ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", + &rise_time); + if (!ret) + i2c_dev->setup->rise_time = rise_time; + + ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", + &fall_time); + if (!ret) + i2c_dev->setup->fall_time = fall_time; + + ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup); + if (ret) + goto clk_free; + + stm32f7_i2c_hw_config(i2c_dev); + + adap = &i2c_dev->adap; + i2c_set_adapdata(adap, i2c_dev); + snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)", + &res->start); + adap->owner = THIS_MODULE; + adap->timeout = 2 * HZ; + adap->retries = 3; + adap->algo = &stm32f7_i2c_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + init_completion(&i2c_dev->complete); + + ret 
= i2c_add_adapter(adap); + if (ret) + goto clk_free; + + platform_set_drvdata(pdev, i2c_dev); + + clk_disable(i2c_dev->clk); + + dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr); + + return 0; + +clk_free: + clk_disable_unprepare(i2c_dev->clk); + + return ret; +} + +static int stm32f7_i2c_remove(struct platform_device *pdev) +{ + struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c_dev->adap); + + clk_unprepare(i2c_dev->clk); + + return 0; +} + +static const struct of_device_id stm32f7_i2c_match[] = { + { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup}, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32f7_i2c_match); + +static struct platform_driver stm32f7_i2c_driver = { + .driver = { + .name = "stm32f7-i2c", + .of_match_table = stm32f7_i2c_match, + }, + .probe = stm32f7_i2c_probe, + .remove = stm32f7_i2c_remove, +}; + +module_platform_driver(stm32f7_i2c_driver); + +MODULE_AUTHOR("M'boumba Cedric Madianga "); +MODULE_DESCRIPTION("STMicroelectronics STM32F7 I2C driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 70ad19c4c73e77da961ffccd9958887dcead8027..88bdafb297f5fe9f722c7a8edee27554ea3b6862 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) atomic_set(&qp->qp_sec->error_list_count, 0); init_completion(&qp->qp_sec->error_complete); ret = security_ib_alloc_security(&qp->qp_sec->security); - if (ret) + if (ret) { kfree(qp->qp_sec); + qp->qp_sec = NULL; + } return ret; } diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4ab30d832ac5b8a5ac4994b9c4f0b3f3802e019c..52a2cf2d83aaf483944ad9720e55121f2adb6484 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, resp.raw_packet_caps = attr.raw_packet_caps; resp.response_length += sizeof(resp.raw_packet_caps); - if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps)) + if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps)) goto end; - resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size; - resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags; - resp.xrq_caps.max_ops = attr.xrq_caps.max_ops; - resp.xrq_caps.max_sge = attr.xrq_caps.max_sge; - resp.xrq_caps.flags = attr.xrq_caps.flags; - resp.response_length += sizeof(resp.xrq_caps); + resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; + resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; + resp.tm_caps.max_ops = attr.tm_caps.max_ops; + resp.tm_caps.max_sge = attr.tm_caps.max_sge; + resp.tm_caps.flags = attr.tm_caps.flags; + resp.response_length += sizeof(resp.tm_caps); end: err = ib_copy_to_udata(ucore, &resp, resp.response_length); return err; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ee9e27dc799b50ead4663cd5fd13c94a3ac020d4..de57d6c11a25428d96e8bcb4868fc19794cc374d 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) */ if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { if (attr.qp_state >= IB_QPS_INIT) { - if (qp->device->get_link_layer(qp->device, attr.port_num) != + if (rdma_port_get_link_layer(qp->device, attr.port_num) != IB_LINK_LAYER_INFINIBAND) return true; goto 
lid_check; @@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) /* Can't get a quick answer, iterate over all ports */ for (port = 0; port < qp->device->phys_port_cnt; port++) - if (qp->device->get_link_layer(qp->device, port) != + if (rdma_port_get_link_layer(qp->device, port) != IB_LINK_LAYER_INFINIBAND) num_eth_ports++; diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index b3ad37fec578be497cc55e3dc48552e62d71c595..ecbac91b2e1441acf4529d6d40fa4256892349c0 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -93,11 +93,13 @@ struct bnxt_re_dev { struct ib_device ibdev; struct list_head list; unsigned long flags; -#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 -#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 -#define BNXT_RE_FLAG_GOT_MSIX 2 -#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 -#define BNXT_RE_FLAG_QOS_WORK_REG 16 +#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 +#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 +#define BNXT_RE_FLAG_GOT_MSIX 2 +#define BNXT_RE_FLAG_HAVE_L2_REF 3 +#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 +#define BNXT_RE_FLAG_QOS_WORK_REG 5 +#define BNXT_RE_FLAG_TASK_IN_PROG 6 struct net_device *netdev; unsigned int version, major, minor; struct bnxt_en_dev *en_dev; @@ -108,6 +110,8 @@ struct bnxt_re_dev { struct delayed_work worker; u8 cur_prio_map; + u8 active_speed; + u8 active_width; /* FP Notification Queue (CQ & SRQ) */ struct tasklet_struct nq_task; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 01eee15bbd6598fa6ae103aa1b86a81023e67f1d..0d89621d9fe8ed81b0d28c031d48ffcacb31fdfd 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, port_attr->sm_sl = 0; port_attr->subnet_timeout = 0; port_attr->init_type_reply = 0; - /* call the underlying netdev's ethtool hooks to query speed settings - * for which we acquire rtnl_lock _only_ if it's registered with - * IB stack to avoid race in the NETDEV_UNREG path - */ - if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) - if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed, - &port_attr->active_width)) - return -EINVAL; + port_attr->active_speed = rdev->active_speed; + port_attr->active_width = rdev->active_width; + return 0; } @@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, struct bnxt_re_gid_ctx *ctx, **ctx_tbl; struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; + struct bnxt_qplib_gid *gid_to_del; /* Delete the entry from the hardware */ ctx = *context; @@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, if (sgid_tbl && sgid_tbl->active) { if (ctx->idx >= sgid_tbl->max) return -EINVAL; + gid_to_del = &sgid_tbl->tbl[ctx->idx]; + /* DEL_GID is called in WQ context(netdevice_event_work_handler) + * or via the ib_unregister_device path. In the former case QP1 + * may not be destroyed yet, in which case just return as FW + * needs that entry to be present and will fail it's deletion. 
+ * We could get invoked again after QP1 is destroyed OR get an + * ADD_GID call with a different GID value for the same index + * where we issue MODIFY_GID cmd to update the GID entry -- TBD + */ + if (ctx->idx == 0 && + rdma_link_local_addr((struct in6_addr *)gid_to_del) && + ctx->refcnt == 1 && rdev->qp1_sqp) { + dev_dbg(rdev_to_dev(rdev), + "Trying to delete GID0 while QP1 is alive\n"); + return -EFAULT; + } ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid(sgid_tbl, - &sgid_tbl->tbl[ctx->idx], - true); + rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); @@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) kfree(rdev->sqp_ah); kfree(rdev->qp1_sqp); + rdev->qp1_sqp = NULL; + rdev->sqp_ah = NULL; } if (!IS_ERR_OR_NULL(qp->rumem)) @@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); + qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); } else if (qp_attr->qp_state == IB_QPS_RTR) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; qp->qplib_qp.path_mtu = __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); + qp->qplib_qp.mtu = + ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); } if (qp_attr_mask & IB_QP_TIMEOUT) { @@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_dev *rdev = qp->rdev; - struct bnxt_qplib_qp qplib_qp; + struct bnxt_qplib_qp *qplib_qp; int rc; - memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); - qplib_qp.id = qp->qplib_qp.id; - qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; + qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); + if (!qplib_qp) + return -ENOMEM; + + qplib_qp->id = qp->qplib_qp.id; + qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; - rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); + rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); - return rc; + goto out; } - qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); - qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; - qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); - qp_attr->pkey_index = qplib_qp.pkey_index; - qp_attr->qkey = qplib_qp.qkey; + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 
1 : 0; + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); + qp_attr->pkey_index = qplib_qp->pkey_index; + qp_attr->qkey = qplib_qp->qkey; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, - qplib_qp.ah.host_sgid_index, - qplib_qp.ah.hop_limit, - qplib_qp.ah.traffic_class); - rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); - rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); - ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); - qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); - qp_attr->timeout = qplib_qp.timeout; - qp_attr->retry_cnt = qplib_qp.retry_cnt; - qp_attr->rnr_retry = qplib_qp.rnr_retry; - qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; - qp_attr->rq_psn = qplib_qp.rq.psn; - qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; - qp_attr->sq_psn = qplib_qp.sq.psn; - qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; - qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : - IB_SIGNAL_REQ_WR; - qp_attr->dest_qp_num = qplib_qp.dest_qpn; + rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, + qplib_qp->ah.host_sgid_index, + qplib_qp->ah.hop_limit, + qplib_qp->ah.traffic_class); + rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); + rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); + ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); + qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); + qp_attr->timeout = qplib_qp->timeout; + qp_attr->retry_cnt = qplib_qp->retry_cnt; + qp_attr->rnr_retry = qplib_qp->rnr_retry; + qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; + qp_attr->rq_psn = qplib_qp->rq.psn; + qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; + qp_attr->sq_psn = qplib_qp->sq.psn; + qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; + qp_init_attr->sq_sig_type = qplib_qp->sig_type ? 
IB_SIGNAL_ALL_WR : + IB_SIGNAL_REQ_WR; + qp_attr->dest_qp_num = qplib_qp->dest_qpn; qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; @@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; qp_init_attr->cap = qp_attr->cap; - return 0; +out: + kfree(qplib_qp); + return rc; } /* Routine for sending QP1 packets for RoCE V1 an V2 @@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; + wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; wqe->atomic.swap_data = atomic_wr(wr)->swap; break; case IB_WR_ATOMIC_FETCH_AND_ADD: @@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) return rc; } - if (mr->npages && mr->pages) { + if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); kfree(mr->pages); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 82d1cbc27aeec80af4ac927227af347eba6f9714..e7450ea92aa9e11ba0d28792f54fcf373e470a66 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) } } set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); @@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work) else if (netif_carrier_ok(rdev->netdev)) bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); + ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, + &rdev->active_width); break; default: break; } + smp_mb__before_atomic(); + clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); kfree(re_work); } @@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, break; case NETDEV_UNREGISTER: + /* netdev notifier will call NETDEV_UNREGISTER again later since + * we are still holding the reference to the netdev + */ + if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) + goto exit; bnxt_re_ib_unreg(rdev, false); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); @@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, re_work->vlan_dev = (real_dev == netdev ? 
NULL : netdev); INIT_WORK(&re_work->work, bnxt_re_task); + set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); queue_work(bnxt_re_wq, &re_work->work); } } @@ -1375,6 +1387,22 @@ static int __init bnxt_re_mod_init(void) static void __exit bnxt_re_mod_exit(void) { + struct bnxt_re_dev *rdev; + LIST_HEAD(to_be_deleted); + + mutex_lock(&bnxt_re_dev_lock); + /* Free all adapter allocated resources */ + if (!list_empty(&bnxt_re_dev_list)) + list_splice_init(&bnxt_re_dev_list, &to_be_deleted); + mutex_unlock(&bnxt_re_dev_lock); + + list_for_each_entry(rdev, &to_be_deleted, list) { + dev_info(rdev_to_dev(rdev), "Unregistering Device"); + bnxt_re_dev_stop(rdev); + bnxt_re_ib_unreg(rdev, true); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } unregister_netdevice_notifier(&bnxt_re_netdev_notifier); if (bnxt_re_wq) destroy_workqueue(bnxt_re_wq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 391bb7006e8ffb5fe42175e662c3b80c87d74f35..2bdb1562bd2197e850f14bcc353d6ee12c3271c4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, return -EINVAL; } + if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) + return -ETIMEDOUT; + /* Cmdq are in 16-byte units, each request can consume 1 or more * cmdqe */ @@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, /* timed out */ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 0ed312f17c8de8025feea00fdd47feb063d65aa5..85b16da287f99edfee396fcdaa11ccef1c7c8d3b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw { unsigned long *cmdq_bitmap; u32 bmap_size; unsigned long flags; -#define FIRMWARE_INITIALIZED_FLAG 1 +#define FIRMWARE_INITIALIZED_FLAG BIT(0) #define FIRMWARE_FIRST_FLAG BIT(31) +#define FIRMWARE_TIMED_OUT BIT(3) wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ceaa2fa54d322d2c9368576602a8d47e448527d0..daf7a56e5d7ebe4a7b6dffd93245d5a1655b3805 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); + if (!ep) { + pr_debug("%s stid %d lookup failure!\n", __func__, stid); + goto out; + } pr_debug("%s ep %p\n", __func__, ep); c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); c4iw_put_ep(&ep->com); +out: return 0; } @@ -2594,9 +2599,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) c4iw_put_ep(&child_ep->com); reject: reject_cr(dev, hwtid, skb); +out: if (parent_ep) c4iw_put_ep(&parent_ep->com); -out: return 0; } @@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) cm_id->provider_data = ep; goto out; } - + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, ep->com.local_addr.ss_family); fail2: diff --git a/drivers/infiniband/hw/hfi1/chip.c 
b/drivers/infiniband/hw/hfi1/chip.c index b2ed4b9cda6eef62f81b5ac22b71b4cc3a5a2d10..0be42787759fa78c73d0e0d9776209c2362a3ddc 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data); static int thermal_init(struct hfi1_devdata *dd); static void update_statusp(struct hfi1_pportdata *ppd, u32 state); +static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, + int msecs); static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); @@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data) u64 regs[CCE_NUM_INT_CSRS]; u32 bit; int i; + irqreturn_t handled = IRQ_NONE; this_cpu_inc(*dd->int_counter); @@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data) for_each_set_bit(bit, (unsigned long *)®s[0], CCE_NUM_INT_CSRS * 64) { is_interrupt(dd, bit); + handled = IRQ_HANDLED; } - return IRQ_HANDLED; + return handled; } static irqreturn_t sdma_interrupt(int irq, void *data) @@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); } -void reset_qsfp(struct hfi1_pportdata *ppd) +int reset_qsfp(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 mask, qsfp_mask; @@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd) * for alarms and warnings */ set_qsfp_int_n(ppd, 1); + + /* + * After the reset, AOC transmitters are enabled by default. They need + * to be turned off to complete the QSFP setup before they can be + * enabled again. + */ + return set_qsfp_tx(ppd, 0); } static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, @@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) { struct hfi1_devdata *dd = ppd->dd; u32 previous_state; + int offline_state_ret; int ret; update_lcb_cache(dd); @@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); - /* - * Wait for offline transition. It can take a while for - * the link to go down. - */ - ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000); - if (ret < 0) - return ret; - - /* - * Now in charge of LCB - must be after the physical state is - * offline.quiet and before host_link_state is changed. - */ - set_host_lcb_access(dd); - write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ - - /* make sure the logical state is also down */ - ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); - if (ret) - force_logical_link_state_down(ppd); - - ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); + if (offline_state_ret < 0) + return offline_state_ret; + /* Disabling AOC transmitters */ if (ppd->port_type == PORT_TYPE_QSFP && ppd->qsfp_info.limiting_active && qsfp_mod_present(ppd)) { @@ -10364,6 +10359,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) } } + /* + * Wait for the offline.Quiet transition if it hasn't happened yet. It + * can take a while for the link to go down. 
+ */ + if (offline_state_ret != PLS_OFFLINE_QUIET) { + ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); + if (ret < 0) + return ret; + } + + /* + * Now in charge of LCB - must be after the physical state is + * offline.quiet and before host_link_state is changed. + */ + set_host_lcb_access(dd); + write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ + + /* make sure the logical state is also down */ + ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); + if (ret) + force_logical_link_state_down(ppd); + + ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + /* * The LNI has a mandatory wait time after the physical state * moves to Offline.Quiet. The wait time may be different @@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { /* went down while attempting link up */ check_lni_states(ppd); + + /* The QSFP doesn't need to be reset on LNI failure */ + ppd->qsfp_info.reset_needed = 0; } /* the active link width (downgrade) is 0 on link down */ @@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, return 0; } +/* + * wait_phys_link_offline_quiet_substates - wait for any offline substate + * @ppd: port device + * @msecs: the number of milliseconds to wait + * + * Wait up to msecs milliseconds for any offline physical link + * state change to occur. + * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. + */ +static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, + int msecs) +{ + u32 read_state; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(msecs); + while (1) { + read_state = read_physical_state(ppd->dd); + if ((read_state & 0xF0) == PLS_OFFLINE) + break; + if (time_after(jiffies, timeout)) { + dd_dev_err(ppd->dd, + "timeout waiting for phy link offline.quiet substates. 
Read state 0x%x, %dms\n", + read_state, msecs); + return -ETIMEDOUT; + } + usleep_range(1950, 2050); /* sleep 2ms-ish */ + } + + log_state_transition(ppd, read_state); + return read_state; +} + #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b8345a60a0fbc452215dba3c679daa6785fb4e4a..50b8645d0b876dbf6d58f56d677e3cfcc7984e55 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -204,6 +204,7 @@ #define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 #define PLS_OFFLINE_REPORT_FAILURE 0x93 #define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 +#define PLS_OFFLINE_QUIET_DURATION 0x95 #define PLS_POLLING 0x20 #define PLS_POLLING_QUIET 0x20 #define PLS_POLLING_ACTIVE 0x21 @@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work); void handle_link_bounce(struct work_struct *work); void handle_start_link(struct work_struct *work); void handle_sma_message(struct work_struct *work); -void reset_qsfp(struct hfi1_pportdata *ppd); +int reset_qsfp(struct hfi1_pportdata *ppd); void qsfp_event(struct work_struct *work); void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); int send_idle_sma(struct hfi1_devdata *dd, u64 message); diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c index d46b171079010d2cf7e42dd7557f0abc2bef1348..1613af1c58d9ddaafca831b1282a6462fdc10b66 100644 --- a/drivers/infiniband/hw/hfi1/eprom.c +++ b/drivers/infiniband/hw/hfi1/eprom.c @@ -204,7 +204,10 @@ int eprom_init(struct hfi1_devdata *dd) return ret; } -/* magic character sequence that trails an image */ +/* magic character sequence that begins an image */ +#define IMAGE_START_MAGIC "APO=" + +/* magic character sequence that might trail an image */ #define IMAGE_TRAIL_MAGIC "egamiAPO" /* EPROM file types */ @@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, { void *buffer; void *p; + u32 length; int ret; buffer = kmalloc(P1_SIZE, GFP_KERNEL); @@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, return ret; } - /* scan for image magic that may trail the actual data */ - p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); - if (!p) { + /* config partition is valid only if it starts with IMAGE_START_MAGIC */ + if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) { kfree(buffer); return -ENOENT; } + /* scan for image magic that may trail the actual data */ + p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); + if (p) + length = p - buffer; + else + length = P1_SIZE; + *data = buffer; - *size = p - buffer; + *size = length; return 0; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 2bc89260235a1db1c744931cde7ec07fd3202495..d9a1e989313641b06f32ffdd4677ce8aa7e32802 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo) switch (ret) { case 0: ret = setup_base_ctxt(fd, uctxt); - if (uctxt->subctxt_cnt) { - /* - * Base context is done (successfully or not), notify - * anybody using a sub-context that is waiting for - * this completion. 
- */ - clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); - wake_up(&uctxt->wait); - } + if (ret) + deallocate_ctxt(uctxt); break; case 1: ret = complete_subctxt(fd); @@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, /* Now allocate the RcvHdr queue and eager buffers. */ ret = hfi1_create_rcvhdrq(dd, uctxt); if (ret) - return ret; + goto done; ret = hfi1_setup_eagerbufs(uctxt); if (ret) - goto setup_failed; + goto done; /* If sub-contexts are enabled, do the appropriate setup */ if (uctxt->subctxt_cnt) ret = setup_subctxt(uctxt); if (ret) - goto setup_failed; + goto done; ret = hfi1_alloc_ctxt_rcv_groups(uctxt); if (ret) - goto setup_failed; + goto done; ret = init_user_ctxt(fd, uctxt); if (ret) - goto setup_failed; + goto done; user_init(uctxt); @@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, fd->uctxt = uctxt; hfi1_rcd_get(uctxt); - return 0; +done: + if (uctxt->subctxt_cnt) { + /* + * On error, set the failed bit so sub-contexts will clean up + * correctly. + */ + if (ret) + set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); -setup_failed: - /* Set the failed bit so sub-context init can do the right thing */ - set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); - deallocate_ctxt(uctxt); + /* + * Base context is done (successfully or not), notify anybody + * using a sub-context that is waiting for this completion. + */ + clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); + wake_up(&uctxt->wait); + } return ret; } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 82447b7cdda1e958d7e7c42c3447ec4a72c1fb4a..09e50fd2a08f07bf7b2d42d3d4b4a1a00644b2ce 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -68,7 +68,7 @@ /* * Code to adjust PCIe capabilities. */ -static int tune_pcie_caps(struct hfi1_devdata *); +static void tune_pcie_caps(struct hfi1_devdata *); /* * Do all the common PCIe setup and initialization. 
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd) */ int request_msix(struct hfi1_devdata *dd, u32 msireq) { - int nvec, ret; + int nvec; nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, PCI_IRQ_MSIX | PCI_IRQ_LEGACY); @@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq) return nvec; } - ret = tune_pcie_caps(dd); - if (ret) { - dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret); - pci_free_irq_vectors(dd->pcidev); - return ret; - } + tune_pcie_caps(dd); /* check for legacy IRQ */ if (nvec == 1 && !dd->pcidev->msix_enabled) @@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED; module_param_named(aspm, aspm_mode, uint, S_IRUGO); MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); -static int tune_pcie_caps(struct hfi1_devdata *dd) +static void tune_pcie_caps(struct hfi1_devdata *dd) { struct pci_dev *parent; u16 rc_mpss, rc_mps, ep_mpss, ep_mps; @@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) * Turn on extended tags in DevCtl in case the BIOS has turned it off * to improve WFR SDMA bandwidth */ - ret = pcie_capability_read_word(dd->pcidev, - PCI_EXP_DEVCTL, &ectl); - if (ret) { - dd_dev_err(dd, "Unable to read from PCI config\n"); - return ret; - } - - if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { + ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); + if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { dd_dev_info(dd, "Enabling PCIe extended tags\n"); ectl |= PCI_EXP_DEVCTL_EXT_TAG; ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); - if (ret) { - dd_dev_err(dd, "Unable to write to PCI config\n"); - return ret; - } + if (ret) + dd_dev_info(dd, "Unable to write to PCI config\n"); } /* Find out supported and configured values for parent (root) */ parent = dd->pcidev->bus->self; @@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) * The driver cannot perform the tuning if it does not have * access to the upstream component. */ - if (!parent) - return -EINVAL; + if (!parent) { + dd_dev_info(dd, "Parent not found\n"); + return; + } if (!pci_is_root_bus(parent->bus)) { dd_dev_info(dd, "Parent not root\n"); - return -EINVAL; + return; + } + if (!pci_is_pcie(parent)) { + dd_dev_info(dd, "Parent is not PCI Express capable\n"); + return; + } + if (!pci_is_pcie(dd->pcidev)) { + dd_dev_info(dd, "PCI device is not PCI Express capable\n"); + return; } - - if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) - return -EINVAL; rc_mpss = parent->pcie_mpss; rc_mps = ffs(pcie_get_mps(parent)) - 8; /* Find out supported and configured values for endpoint (us) */ @@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) ep_mrrs = max_mrrs; pcie_set_readrq(dd->pcidev, ep_mrrs); } - - return 0; } /* End of PCIe capability tuning */ diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index a8af96d2b1b0ae0dc5e98ae08e3b5c9b5faa50e9..d486355880cb0da37e23755e8ed49197fed90fc8 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, * reuse of stale settings established in our previous pass through. 
*/ if (ppd->qsfp_info.reset_needed) { - reset_qsfp(ppd); + ret = reset_qsfp(ppd); + if (ret) + return ret; refresh_qsfp_cache(ppd, &ppd->qsfp_info); } else { ppd->qsfp_info.reset_needed = 1; diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 9b1566468744ed81a922188f220e7ef64629273b..a65e4cbdce2f6ad336d58a226e84ac35fc7f7eda 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -201,7 +201,6 @@ enum init_completion_state { CEQ_CREATED, ILQ_CREATED, IEQ_CREATED, - INET_NOTIFIER, IP_ADDR_REGISTERED, RDMA_DEV_REGISTERED }; diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 14f36ba4e5bebf8e35970c9cc5b6030c95ffa213..5230dd3c938c2c506bc590afd8ef054d300ac480 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, } /** - * listen_port_in_use - determine if port is in use - * @port: Listen port number + * i40iw_port_in_use - determine if port is in use + * @port: port number + * @active_side: flag for listener side vs active side */ -static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) +static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side) { struct i40iw_cm_listener *listen_node; + struct i40iw_cm_node *cm_node; unsigned long flags; bool ret = false; - spin_lock_irqsave(&cm_core->listen_list_lock, flags); - list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { - if (listen_node->loc_port == port) { - ret = true; - break; + if (active_side) { + /* search connected node list */ + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_entry(cm_node, &cm_core->connected_nodes, list) { + if (cm_node->loc_port == port) { + ret = true; + break; + } + } + if (!ret) + clear_bit(port, cm_core->active_side_ports); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + } else { + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + if (listen_node->loc_port == port) { + ret = true; + break; + } } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } - spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return ret; } @@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core, spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); if (listener->iwdev) { - if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) + if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false)) i40iw_manage_apbvt(listener->iwdev, listener->loc_port, I40IW_MANAGE_APBVT_DEL); @@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) if (cm_node->listener) { i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); } else { - if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && - cm_node->apbvt_set) { + if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) { i40iw_manage_apbvt(cm_node->iwdev, cm_node->loc_port, I40IW_MANAGE_APBVT_DEL); - i40iw_get_addr_info(cm_node, &nfo); - if (cm_node->qhash_set) { - i40iw_manage_qhash(cm_node->iwdev, - &nfo, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - cm_node->qhash_set = 0; - } + cm_node->apbvt_set = 0; + } + i40iw_get_addr_info(cm_node, &nfo); + if (cm_node->qhash_set) { + i40iw_manage_qhash(cm_node->iwdev, + &nfo, 
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 0; } } @@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); if (cm_node->vlan_id < VLAN_TAG_PRESENT) { tcp_info->insert_vlan_tag = true; - tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); + tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) | + cm_node->vlan_id); } if (cm_node->ipv4) { tcp_info->src_port = cpu_to_le16(cm_node->loc_port); @@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct sockaddr_in *raddr; struct sockaddr_in6 *laddr6; struct sockaddr_in6 *raddr6; - bool qhash_set = false; - int apbvt_set = 0; - int err = 0; - enum i40iw_status_code status; + int ret = 0; + unsigned long flags; ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_info.user_pri = rt_tos2priority(cm_id->tos); i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", __func__, cm_id->tos, cm_info.user_pri); - if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || - (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, - raddr6->sin6_addr.in6_u.u6_addr32, - sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { - status = i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_ADD, - NULL, - true); - if (status) - return -EINVAL; - qhash_set = true; - } - status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); - if (status) { - i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - return -EINVAL; - } - - apbvt_set = 1; cm_id->add_ref(cm_id); cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, conn_param->private_data_len, @@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) &cm_info); if (IS_ERR(cm_node)) { - err = PTR_ERR(cm_node); - goto err_out; + ret = PTR_ERR(cm_node); + cm_id->rem_ref(cm_id); + return ret; + } + + if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || + (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, + raddr6->sin6_addr.in6_u.u6_addr32, + sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { + if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) { + ret = -EINVAL; + goto err; + } + cm_node->qhash_set = true; } + spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); + if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) { + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); + if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) { + ret = -EINVAL; + goto err; + } + } else { + spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); + } + + cm_node->apbvt_set = true; i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && !cm_node->ord_size) cm_node->ord_size = 1; - cm_node->apbvt_set = apbvt_set; - cm_node->qhash_set = qhash_set; iwqp->cm_node = cm_node; cm_node->iwqp = iwqp; iwqp->cm_id = cm_id; @@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { cm_node->state 
= I40IW_CM_STATE_SYN_SENT; - err = i40iw_send_syn(cm_node, 0); - if (err) { - i40iw_rem_ref_cm_node(cm_node); - goto err_out; - } + ret = i40iw_send_syn(cm_node, 0); + if (ret) + goto err; } i40iw_debug(cm_node->dev, @@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_node->rem_port, cm_node, cm_node->cm_id); + return 0; -err_out: +err: if (cm_info.ipv4) i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, @@ -3867,22 +3879,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) "Api - connect() FAILED: dest addr=%pI6", cm_info.rem_addr); - if (qhash_set) - i40iw_manage_qhash(iwdev, - &cm_info, - I40IW_QHASH_TYPE_TCP_ESTABLISHED, - I40IW_QHASH_MANAGE_TYPE_DELETE, - NULL, - false); - - if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, - cm_info.loc_port)) - i40iw_manage_apbvt(iwdev, - cm_info.loc_port, - I40IW_MANAGE_APBVT_DEL); + i40iw_rem_ref_cm_node(cm_node); cm_id->rem_ref(cm_id); iwdev->cm_core.stats_connect_errs++; - return err; + return ret; } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h index 2e52e38ffcf37cf5673868f873536907dddc2cf1..45abef76295b24429accdce4e7a49d91dbc33cce 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.h +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h @@ -71,6 +71,9 @@ #define I40IW_HW_IRD_SETTING_32 32 #define I40IW_HW_IRD_SETTING_64 64 +#define MAX_PORTS 65536 +#define I40IW_VLAN_PRIO_SHIFT 13 + enum ietf_mpa_flags { IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ @@ -411,6 +414,8 @@ struct i40iw_cm_core { spinlock_t ht_lock; /* manage hash table */ spinlock_t listen_list_lock; /* listen list */ + unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)]; + u64 stats_nodes_created; u64 stats_nodes_destroyed; u64 stats_listen_created; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index cc742c3132c6f49afa38282a9f694e2fa68385f7..27590ae21881e91508bc0eb88308e97a7693f094 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = { .notifier_call = i40iw_net_event }; -static atomic_t i40iw_notifiers_registered; - /** * i40iw_find_i40e_handler - find a handler given a client info * @ldev: pointer to a client info @@ -1376,11 +1374,20 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, */ static void i40iw_register_notifiers(void) { - if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { - register_inetaddr_notifier(&i40iw_inetaddr_notifier); - register_inet6addr_notifier(&i40iw_inetaddr6_notifier); - register_netevent_notifier(&i40iw_net_notifier); - } + register_inetaddr_notifier(&i40iw_inetaddr_notifier); + register_inet6addr_notifier(&i40iw_inetaddr6_notifier); + register_netevent_notifier(&i40iw_net_notifier); +} + +/** + * i40iw_unregister_notifiers - unregister tcp ip notifiers + */ + +static void i40iw_unregister_notifiers(void) +{ + unregister_netevent_notifier(&i40iw_net_notifier); + unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); + unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); } /** @@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, u32 i; u32 size; + if (!ldev->msix_count) { + i40iw_pr_err("No MSI-X vectors\n"); + return I40IW_ERR_CONFIG; + } + iwdev->msix_count = ldev->msix_count; size = sizeof(struct 
i40iw_msix_vector) * iwdev->msix_count; @@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); /* fallthrough */ - case INET_NOTIFIER: - if (!atomic_dec_return(&i40iw_notifiers_registered)) { - unregister_netevent_notifier(&i40iw_net_notifier); - unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); - unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); - } /* fallthrough */ case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); @@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, status = i40iw_save_msix_info(iwdev, ldev); if (status) - goto exit; + return status; iwdev->hw.dev_context = (void *)ldev->pcidev; iwdev->hw.hw_addr = ldev->hw_addr; status = i40iw_allocate_dma_mem(&iwdev->hw, @@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) break; iwdev->init_state = PBLE_CHUNK_MEM; iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); - i40iw_register_notifiers(); - iwdev->init_state = INET_NOTIFIER; status = i40iw_add_mac_ip(iwdev); if (status) break; @@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void) i40iw_client.type = I40E_CLIENT_IWARP; spin_lock_init(&i40iw_handler_lock); ret = i40e_register_client(&i40iw_client); + i40iw_register_notifiers(); + return ret; } @@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void) */ static void __exit i40iw_exit_module(void) { + i40iw_unregister_notifiers(); i40e_unregister_client(&i40iw_client); } diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 62f1f45b8737a4716d04d8a340ab5612ef4f1047..e52dbbb4165ec555d94eb5f55f5310c71562b387 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; netdev = iwdev->ldev->netdev; @@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, return NOTIFY_DONE; iwdev = &hdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; netdev = iwdev->ldev->netdev; @@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * if (!iwhdl) return NOTIFY_DONE; iwdev = &iwhdl->device; - if (iwdev->init_state < INET_NOTIFIER) + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) return NOTIFY_DONE; p = (__be32 *)neigh->primary_key; i40iw_copy_ip_ntohl(local_ipaddr, p); diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 1aa411034a272457ba9a0fda16761d808b38f5f8..28b3d02d511b35f60c44437c47ed88bb4a04d424 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1027,7 +1027,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; iwqp->last_aeq = I40IW_AE_RESET_SENT; spin_unlock_irqrestore(&iwqp->lock, flags); + i40iw_cm_disconn(iwqp); } + } else { + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->cm_id) { + if (atomic_inc_return(&iwqp->close_timer_started) == 1) { + iwqp->cm_id->add_ref(iwqp->cm_id); + 
i40iw_schedule_cm_timer(iwqp->cm_node, + (struct i40iw_puda_buf *)iwqp, + I40IW_TIMER_TYPE_CLOSE, 1, 0); + } + } + spin_unlock_irqrestore(&iwqp->lock, flags); } } return 0; diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 0ba5ba7540c87e32bbcf795a7b581f1b4b7a49aa..e219093d2764536a58c7c362a08167361ed0e93a 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, static int add_port_entries(struct mlx4_ib_dev *device, int port_num) { int i; - char buff[10]; + char buff[11]; struct mlx4_ib_iov_port *port = NULL; int ret = 0 ; struct ib_port_attr attr; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ab3c562d5ba7f7fbfa95cb730576a64a2d038e49..d6fbad8f34aa71848c7291122ad847b6944a27a6 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } if (MLX5_CAP_GEN(mdev, tag_matching)) { - props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; - props->xrq_caps.max_num_tags = + props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; + props->tm_caps.max_num_tags = (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; - props->xrq_caps.flags = IB_TM_CAP_RC; - props->xrq_caps.max_ops = + props->tm_caps.flags = IB_TM_CAP_RC; + props->tm_caps.max_ops = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); - props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; + props->tm_caps.max_sge = MLX5_TM_MAX_SGE; } if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { @@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) if (!dbg) return -ENOMEM; + dev->delay_drop.dbg = dbg; + dbg->dir_debugfs = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); if (!dbg->dir_debugfs) - return -ENOMEM; + goto out_debugfs; dbg->events_cnt_debugfs = debugfs_create_atomic_t("num_timeout_events", 0400, @@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) if (!dbg->timeout_debugfs) goto out_debugfs; - dev->delay_drop.dbg = dbg; - return 0; out_debugfs: diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 914f212e7ef60652a655682a2d668c9cec19b64a..f3dbd75a0a968eade1c316e0fec9179e0012730a 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, { unsigned long tmp; unsigned long m; - int i, k; - u64 base = 0; - int p = 0; - int skip; - int mask; - u64 len; - u64 pfn; + u64 base = ~0, p = 0; + u64 len, pfn; + int i = 0; struct scatterlist *sg; int entry; unsigned long page_shift = umem->page_shift; @@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, m = find_first_bit(&tmp, BITS_PER_LONG); if (max_page_shift) m = min_t(unsigned long, max_page_shift - page_shift, m); - skip = 1 << m; - mask = skip - 1; - i = 0; + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { len = sg_dma_len(sg) >> page_shift; pfn = sg_dma_address(sg) >> page_shift; - for (k = 0; k < len; k++) { - if (!(i & mask)) { - tmp = (unsigned long)pfn; - m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); - skip = 1 << m; - mask = skip - 1; - base = pfn; - p = 0; - } else { - if (base + p != pfn) { - tmp = (unsigned long)p; - m = find_first_bit(&tmp, BITS_PER_LONG); - skip = 1 << m; - mask = skip - 1; - base = pfn; - p = 0; - } - } 
- p++; - i++; + if (base + p != pfn) { + /* If either the offset or the new + * base are unaligned update m + */ + tmp = (unsigned long)(pfn | p); + if (!IS_ALIGNED(tmp, 1 << m)) + m = find_first_bit(&tmp, BITS_PER_LONG); + + base = pfn; + p = 0; } + + p += len; + i += len; } if (i) { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 0e2789d9bb4d0575561aab569e227125cbc38882..37bbc543847a528f73f613df066fdbb4e36484c1 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -47,7 +47,8 @@ enum { #define MLX5_UMR_ALIGN 2048 -static int clean_mr(struct mlx5_ib_mr *mr); +static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); +static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); @@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, update_xlt_flags); + if (err) { - mlx5_ib_dereg_mr(&mr->ibmr); + dereg_mr(dev, mr); return ERR_PTR(err); } } @@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err < 0) { - clean_mr(mr); + clean_mr(dev, mr); return err; } } @@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); ib_umem_release(mr->umem); - clean_mr(mr); + clean_mr(dev, mr); return err; } } @@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) } } -static int clean_mr(struct mlx5_ib_mr *mr) +static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); int allocated_from_cache = mr->allocated_from_cache; int err; @@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr) return 0; } -int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(ibmr->device); - struct mlx5_ib_mr *mr = to_mmr(ibmr); int npages = mr->npages; struct ib_umem *umem = mr->umem; @@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) } #endif - clean_mr(mr); + clean_mr(dev, mr); if (umem) { ib_umem_release(umem); @@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) return 0; } +int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibmr->device); + struct mlx5_ib_mr *mr = to_mmr(ibmr); + + return dereg_mr(dev, mr); +} + struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f0dc5f4aa177e26d622574ec638368fcb7d988cf..442b9bdc0f03bc70d955700e51703b14676b1661 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, mr->ibmr.iova); set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, - mr->ibmr.length); + lower_32_bits(mr->ibmr.length)); set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); set_wqe_32bit_value(wqe->wqe_words, @@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, mr->npages * 8); nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " 
- "length: %d, rkey: %0x, pgl_paddr: %llx, " + "length: %lld, rkey: %0x, pgl_paddr: %llx, " "page_list_len: %u, wqe_misc: %x\n", (unsigned long long) mr->ibmr.iova, mr->ibmr.length, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index dcb5942f9fb5a830f37e1757dd64cc0d836523b9..65b166cc743748fde5f89153dd96ed1950abb762 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status) case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: err_num = -EAGAIN; break; + default: + err_num = -EFAULT; } + break; default: err_num = -EFAULT; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 663a0c301c4382d942fc594d1529b53a0936811b..984aa3484928d691db083c2c694aa90632d803d7 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib( return (enum ib_wc_status)status; } -static inline int pvrdma_wc_opcode_to_ib(int opcode) -{ - return opcode; +static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode) +{ + switch (opcode) { + case PVRDMA_WC_SEND: + return IB_WC_SEND; + case PVRDMA_WC_RDMA_WRITE: + return IB_WC_RDMA_WRITE; + case PVRDMA_WC_RDMA_READ: + return IB_WC_RDMA_READ; + case PVRDMA_WC_COMP_SWAP: + return IB_WC_COMP_SWAP; + case PVRDMA_WC_FETCH_ADD: + return IB_WC_FETCH_ADD; + case PVRDMA_WC_LOCAL_INV: + return IB_WC_LOCAL_INV; + case PVRDMA_WC_FAST_REG_MR: + return IB_WC_REG_MR; + case PVRDMA_WC_MASKED_COMP_SWAP: + return IB_WC_MASKED_COMP_SWAP; + case PVRDMA_WC_MASKED_FETCH_ADD: + return IB_WC_MASKED_FETCH_ADD; + case PVRDMA_WC_RECV: + return IB_WC_RECV; + case PVRDMA_WC_RECV_RDMA_WITH_IMM: + return IB_WC_RECV_RDMA_WITH_IMM; + default: + return IB_WC_SEND; + } } static inline int pvrdma_wc_flags_to_ib(int flags) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 14b62f7472b4f6632f344d7d4ac2eb81518cf3c8..7774654c2ccbce600f99e751c7b37e15e7f1dd8a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) wc->status != IB_WC_WR_FLUSH_ERR) { struct ipoib_neigh *neigh; - if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) - ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); + /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle, + * so don't make waves. 
+ */ + if (wc->status == IB_WC_RNR_RETRY_EXC_ERR || + wc->status == IB_WC_RETRY_EXC_ERR) + ipoib_dbg(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", + __func__, wc->status, wr_id, wc->vendor_err); else - ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); + ipoib_warn(priv, + "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", + __func__, wc->status, wr_id, wc->vendor_err); spin_lock_irqsave(&priv->lock, flags); neigh = tx->neigh; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 2e075377242e2baccc54cda5859d5b3ba7e768d0..6cd61638b44142029b85d0f057b8d04c6b2138c6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; - - /* - * Update the broadcast address in the priv->broadcast object, - * in case it already exists, otherwise no one will do that. - */ - if (priv->broadcast) { - spin_lock_irq(&priv->lock); - memcpy(priv->broadcast->mcmember.mgid.raw, - priv->dev->broadcast + 4, - sizeof(union ib_gid)); - spin_unlock_irq(&priv->lock); - } - return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index bac95b509a9b2edc23af7b5e8505fc03acbb80c0..dcc77014018db037cd24540dfe8ddf06d9b70fef 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format, { struct ipoib_dev_priv *priv; struct ib_port_attr attr; + struct rdma_netdev *rn; int result = -ENOMEM; priv = ipoib_intf_alloc(hca, port, format); @@ -2279,7 +2280,8 @@ static struct net_device *ipoib_add_port(const char *format, ipoib_dev_cleanup(priv->dev); device_init_failed: - free_netdev(priv->dev); + rn = netdev_priv(priv->dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); alloc_mem_failed: @@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) return; list_for_each_entry_safe(priv, tmp, dev_list, list) { - struct rdma_netdev *rn = netdev_priv(priv->dev); + struct rdma_netdev *parent_rn = netdev_priv(priv->dev); ib_unregister_event_handler(&priv->event_handler); flush_workqueue(ipoib_workqueue); @@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) unregister_netdev(priv->dev); mutex_unlock(&priv->sysfs_mutex); - rn->free_rdma_netdev(priv->dev); + parent_rn->free_rdma_netdev(priv->dev); + + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { + struct rdma_netdev *child_rn; - list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) + child_rn = netdev_priv(cpriv->dev); + child_rn->free_rdma_netdev(cpriv->dev); kfree(cpriv); + } kfree(priv); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 9927cd6b7082b1dc24cbfa38431e6156646bf15f..55a9b71ed05a7ff8ff0ac9f7eb84af1d4f6476a0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) return restart_syscall(); } - priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); - if (!priv) { + if (!down_write_trylock(&ppriv->vlan_rwsem)) { rtnl_unlock(); 
mutex_unlock(&ppriv->sysfs_mutex); - return -ENOMEM; + return restart_syscall(); } - down_write(&ppriv->vlan_rwsem); + priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); + if (!priv) { + result = -ENOMEM; + goto out; + } /* * First ensure this isn't a duplicate. We check the parent device and @@ -175,8 +178,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) rtnl_unlock(); mutex_unlock(&ppriv->sysfs_mutex); - if (result) { - free_netdev(priv->dev); + if (result && priv) { + struct rdma_netdev *rn; + + rn = netdev_priv(priv->dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); } @@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) return restart_syscall(); } - down_write(&ppriv->vlan_rwsem); + if (!down_write_trylock(&ppriv->vlan_rwsem)) { + rtnl_unlock(); + mutex_unlock(&ppriv->sysfs_mutex); + return restart_syscall(); + } + list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey && priv->child_type == IPOIB_LEGACY_CHILD) { @@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) mutex_unlock(&ppriv->sysfs_mutex); if (dev) { - free_netdev(dev); + struct rdma_netdev *rn; + + rn = netdev_priv(dev); + rn->free_rdma_netdev(priv->dev); kfree(priv); return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9c3e9ab53a415710b0e8bef65647efc4716dc239..322209d5ff5829b6dfd5f4de54f4d11fe141c63d 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) { int i; - iser_err("page vec npages %d data length %d\n", + iser_err("page vec npages %d data length %lld\n", page_vec->npages, page_vec->fake_mr.length); for (i = 0; i < page_vec->npages; i++) iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 8f2042432c85151b25fab1e885fe321e1563d641..66a46c84e28f543bc27f91479d5da669984e0d89 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c @@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) EXPORT_SYMBOL_GPL(input_ff_erase); /* - * flush_effects - erase all effects owned by a file handle + * input_ff_flush - erase all effects owned by a file handle + * @dev: input device to erase effect from + * @file: purported owner of the effects + * + * This function erases all force-feedback effects associated with + * the given owner from specified device. Note that @file may be %NULL, + * in which case all effects will be erased. 
*/ -static int flush_effects(struct input_dev *dev, struct file *file) +int input_ff_flush(struct input_dev *dev, struct file *file) { struct ff_device *ff = dev->ff; int i; @@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file) return 0; } +EXPORT_SYMBOL_GPL(input_ff_flush); /** * input_ff_event() - generic handler for force-feedback events @@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects) mutex_init(&ff->mutex); dev->ff = ff; - dev->flush = flush_effects; + dev->flush = input_ff_flush; dev->event = input_ff_event; __set_bit(EV_FF, dev->evbit); diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c index d09cefa379316a302df754394e342cc6fe4b088c..15a71acb6997265fe7ad261f289b544bc473e03e 100644 --- a/drivers/input/joystick/adi.c +++ b/drivers/input/joystick/adi.c @@ -313,7 +313,7 @@ static void adi_close(struct input_dev *dev) static void adi_init_digital(struct gameport *gameport) { - int seq[] = { 4, -2, -3, 10, -6, -11, -7, -9, 11, 0 }; + static const int seq[] = { 4, -2, -3, 10, -6, -11, -7, -9, 11, 0 }; int i; for (i = 0; seq[i]; i++) { diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index f8e34ef643c73fe6dc1aa8caf3be57a619e31713..d86e59515b9c9eadeab8346e8f97780e3edf1690 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1764,10 +1764,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_endpoint_descriptor *ep = &intf->cur_altsetting->endpoint[i].desc; - if (usb_endpoint_dir_in(ep)) - ep_irq_in = ep; - else - ep_irq_out = ep; + if (usb_endpoint_xfer_int(ep)) { + if (usb_endpoint_dir_in(ep)) + ep_irq_in = ep; + else + ep_irq_out = ep; + } } if (!ep_irq_in || !ep_irq_out) { diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index f47e836eaa0f6de06071a3299be8f91b61687db5..9f082a388388b8aa542905d5c58b126c70b99cfb 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -581,6 +581,18 @@ config INPUT_PWM_BEEPER To compile this driver as a module, choose M here: the module will be called pwm-beeper. +config INPUT_PWM_VIBRA + tristate "PWM vibrator support" + depends on PWM + select INPUT_FF_MEMLESS + help + Say Y here to get support for PWM based vibrator devices. + + If unsure, say N. + + To compile this driver as a module, choose M here: the module will be + called pwm-vibra. + config INPUT_RK805_PWRKEY tristate "Rockchip RK805 PMIC power key support" depends on MFD_RK808 diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 1072e0760c1989f28c0236faaf9e408912e80d88..03fd4262ada9b9cfe64a676f664f0bd99f5ac5c0 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o obj-$(CONFIG_INPUT_POWERMATE) += powermate.o obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o +obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c new file mode 100644 index 0000000000000000000000000000000000000000..55da191ae550743edf6505a01cc3af53404910a9 --- /dev/null +++ b/drivers/input/misc/pwm-vibra.c @@ -0,0 +1,267 @@ +/* + * PWM vibrator driver + * + * Copyright (C) 2017 Collabora Ltd. 
+ * + * Based on previous work from: + * Copyright (C) 2012 Dmitry Torokhov + * + * Based on PWM beeper driver: + * Copyright (C) 2010, Lars-Peter Clausen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct pwm_vibrator { + struct input_dev *input; + struct pwm_device *pwm; + struct pwm_device *pwm_dir; + struct regulator *vcc; + + struct work_struct play_work; + u16 level; + u32 direction_duty_cycle; +}; + +static int pwm_vibrator_start(struct pwm_vibrator *vibrator) +{ + struct device *pdev = vibrator->input->dev.parent; + struct pwm_state state; + int err; + + err = regulator_enable(vibrator->vcc); + if (err) { + dev_err(pdev, "failed to enable regulator: %d", err); + return err; + } + + pwm_get_state(vibrator->pwm, &state); + pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff); + state.enabled = true; + + err = pwm_apply_state(vibrator->pwm, &state); + if (err) { + dev_err(pdev, "failed to apply pwm state: %d", err); + return err; + } + + if (vibrator->pwm_dir) { + pwm_get_state(vibrator->pwm_dir, &state); + state.duty_cycle = vibrator->direction_duty_cycle; + state.enabled = true; + + err = pwm_apply_state(vibrator->pwm_dir, &state); + if (err) { + dev_err(pdev, "failed to apply dir-pwm state: %d", err); + pwm_disable(vibrator->pwm); + return err; + } + } + + return 0; +} + +static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) +{ + regulator_disable(vibrator->vcc); + + if (vibrator->pwm_dir) + pwm_disable(vibrator->pwm_dir); + pwm_disable(vibrator->pwm); +} + +static void pwm_vibrator_play_work(struct work_struct *work) +{ + struct pwm_vibrator *vibrator = container_of(work, + struct pwm_vibrator, play_work); + + if (vibrator->level) + pwm_vibrator_start(vibrator); + else + pwm_vibrator_stop(vibrator); +} + +static int pwm_vibrator_play_effect(struct input_dev *dev, void *data, + struct ff_effect *effect) +{ + struct pwm_vibrator *vibrator = input_get_drvdata(dev); + + vibrator->level = effect->u.rumble.strong_magnitude; + if (!vibrator->level) + vibrator->level = effect->u.rumble.weak_magnitude; + + schedule_work(&vibrator->play_work); + + return 0; +} + +static void pwm_vibrator_close(struct input_dev *input) +{ + struct pwm_vibrator *vibrator = input_get_drvdata(input); + + cancel_work_sync(&vibrator->play_work); + pwm_vibrator_stop(vibrator); +} + +static int pwm_vibrator_probe(struct platform_device *pdev) +{ + struct pwm_vibrator *vibrator; + struct pwm_state state; + int err; + + vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL); + if (!vibrator) + return -ENOMEM; + + vibrator->input = devm_input_allocate_device(&pdev->dev); + if (!vibrator->input) + return -ENOMEM; + + vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc"); + err = PTR_ERR_OR_ZERO(vibrator->vcc); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to request regulator: %d", + err); + return err; + } + + vibrator->pwm = devm_pwm_get(&pdev->dev, "enable"); + err = PTR_ERR_OR_ZERO(vibrator->pwm); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to request main pwm: %d", + err); + return err; + } + + INIT_WORK(&vibrator->play_work, pwm_vibrator_play_work); + + /* Sync up PWM state and ensure it is off. 
*/ + pwm_init_state(vibrator->pwm, &state); + state.enabled = false; + err = pwm_apply_state(vibrator->pwm, &state); + if (err) { + dev_err(&pdev->dev, "failed to apply initial PWM state: %d", + err); + return err; + } + + vibrator->pwm_dir = devm_pwm_get(&pdev->dev, "direction"); + err = PTR_ERR_OR_ZERO(vibrator->pwm_dir); + switch (err) { + case 0: + /* Sync up PWM state and ensure it is off. */ + pwm_init_state(vibrator->pwm_dir, &state); + state.enabled = false; + err = pwm_apply_state(vibrator->pwm_dir, &state); + if (err) { + dev_err(&pdev->dev, "failed to apply initial PWM state: %d", + err); + return err; + } + + vibrator->direction_duty_cycle = + pwm_get_period(vibrator->pwm_dir) / 2; + device_property_read_u32(&pdev->dev, "direction-duty-cycle-ns", + &vibrator->direction_duty_cycle); + break; + + case -ENODATA: + /* Direction PWM is optional */ + vibrator->pwm_dir = NULL; + break; + + default: + dev_err(&pdev->dev, "Failed to request direction pwm: %d", err); + /* Fall through */ + + case -EPROBE_DEFER: + return err; + } + + vibrator->input->name = "pwm-vibrator"; + vibrator->input->id.bustype = BUS_HOST; + vibrator->input->dev.parent = &pdev->dev; + vibrator->input->close = pwm_vibrator_close; + + input_set_drvdata(vibrator->input, vibrator); + input_set_capability(vibrator->input, EV_FF, FF_RUMBLE); + + err = input_ff_create_memless(vibrator->input, NULL, + pwm_vibrator_play_effect); + if (err) { + dev_err(&pdev->dev, "Couldn't create FF dev: %d", err); + return err; + } + + err = input_register_device(vibrator->input); + if (err) { + dev_err(&pdev->dev, "Couldn't register input dev: %d", err); + return err; + } + + platform_set_drvdata(pdev, vibrator); + + return 0; +} + +static int __maybe_unused pwm_vibrator_suspend(struct device *dev) +{ + struct pwm_vibrator *vibrator = dev_get_drvdata(dev); + + cancel_work_sync(&vibrator->play_work); + if (vibrator->level) + pwm_vibrator_stop(vibrator); + + return 0; +} + +static int __maybe_unused pwm_vibrator_resume(struct device *dev) +{ + struct pwm_vibrator *vibrator = dev_get_drvdata(dev); + + if (vibrator->level) + pwm_vibrator_start(vibrator); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(pwm_vibrator_pm_ops, + pwm_vibrator_suspend, pwm_vibrator_resume); + +#ifdef CONFIG_OF +static const struct of_device_id pwm_vibra_dt_match_table[] = { + { .compatible = "pwm-vibrator" }, + {}, +}; +MODULE_DEVICE_TABLE(of, pwm_vibra_dt_match_table); +#endif + +static struct platform_driver pwm_vibrator_driver = { + .probe = pwm_vibrator_probe, + .driver = { + .name = "pwm-vibrator", + .pm = &pwm_vibrator_pm_ops, + .of_match_table = of_match_ptr(pwm_vibra_dt_match_table), + }, +}; +module_platform_driver(pwm_vibrator_driver); + +MODULE_AUTHOR("Sebastian Reichel "); +MODULE_DESCRIPTION("PWM vibrator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pwm-vibrator"); diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 022be0e22eba97b10b95e653f48cee44f1d4ba36..443151de90c6b506f6847b656a68d84577576ad1 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev, uinput_request_alloc_id(udev, request)); } -static void uinput_request_done(struct uinput_device *udev, - struct uinput_request *request) +static void uinput_request_release_slot(struct uinput_device *udev, + unsigned int id) { /* Mark slot as available */ - udev->requests[request->id] = NULL; - wake_up(&udev->requests_waitq); + spin_lock(&udev->requests_lock); + 
udev->requests[id] = NULL; + spin_unlock(&udev->requests_lock); - complete(&request->done); + wake_up(&udev->requests_waitq); } static int uinput_request_send(struct uinput_device *udev, @@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev, static int uinput_request_submit(struct uinput_device *udev, struct uinput_request *request) { - int error; + int retval; - error = uinput_request_reserve_slot(udev, request); - if (error) - return error; + retval = uinput_request_reserve_slot(udev, request); + if (retval) + return retval; - error = uinput_request_send(udev, request); - if (error) { - uinput_request_done(udev, request); - return error; - } + retval = uinput_request_send(udev, request); + if (retval) + goto out; wait_for_completion(&request->done); - return request->retval; + retval = request->retval; + + out: + uinput_request_release_slot(udev, request->id); + return retval; } /* @@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev) request = udev->requests[i]; if (request) { request->retval = -ENODEV; - uinput_request_done(udev, request); + complete(&request->done); } } @@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) return uinput_request_submit(udev, &request); } +static int uinput_dev_flush(struct input_dev *dev, struct file *file) +{ + /* + * If we are called with file == NULL that means we are tearing + * down the device, and therefore we can not handle FF erase + * requests: either we are handling UI_DEV_DESTROY (and holding + * the udev->mutex), or the file descriptor is closed and there is + * nobody on the other side anymore. + */ + return file ? input_ff_flush(dev, file) : 0; +} + static void uinput_destroy_device(struct uinput_device *udev) { const char *name, *phys; @@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev) dev->ff->playback = uinput_dev_playback; dev->ff->set_gain = uinput_dev_set_gain; dev->ff->set_autocenter = uinput_dev_set_autocenter; + /* + * The standard input_ff_flush() implementation does + * not quite work for uinput as we can't reasonably + * handle FF requests during device teardown. + */ + dev->flush = uinput_dev_flush; } error = input_register_device(udev->dev); @@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } req->retval = ff_up.retval; - uinput_request_done(udev, req); + complete(&req->done); goto out; case UI_END_FF_ERASE: @@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } req->retval = ff_erase.retval; - uinput_request_done(udev, req); + complete(&req->done); goto out; } diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 15b1330606c1c3fcfc28cf109f89987d0425f75a..e19eb60b3d2f5adcdf0182673680de25ddd15483 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client, } /* Wait for F/W to update one page ROM data. 
*/ - msleep(20); + msleep(35); error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); if (error) { diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 6428d6f4d568d4e5bfbcbd0bcce2c8b913892489..b84cd978fce2da18df706ee0a6ef72ab0a45cb5b 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -700,7 +700,9 @@ static int elantech_debounce_check_v2(struct psmouse *psmouse) * When we encounter packet that matches this exactly, it means the * hardware is in debounce status. Just ignore the whole packet. */ - const u8 debounce_packet[] = { 0x84, 0xff, 0xff, 0x02, 0xff, 0xff }; + static const u8 debounce_packet[] = { + 0x84, 0xff, 0xff, 0x02, 0xff, 0xff + }; unsigned char *packet = psmouse->packet; return !memcmp(packet, debounce_packet, sizeof(debounce_packet)); @@ -741,7 +743,9 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) static int elantech_packet_check_v3(struct psmouse *psmouse) { struct elantech_data *etd = psmouse->private; - const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; + static const u8 debounce_packet[] = { + 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff + }; unsigned char *packet = psmouse->packet; /* diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index ae81e57e13b9519dbe3b49697a0db5322f2dd0a7..6cbbdc6e968756a495301f340a2085d3fdf74e65 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -839,6 +839,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P34"), }, }, + { + /* Gigabyte P57 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P57"), + }, + }, { /* Schenker XMG C504 - Elantech touchpad */ .matches = { diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index f872817e81e46ba6d962a5edc207302e9bd25f3f..5bf63f76dddac657083bd3ab2ab2b6b0160dd3da 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -593,7 +593,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata) tsdata->gain); edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, tsdata->offset); - if (reg_addr->reg_report_rate) + if (reg_addr->reg_report_rate != NO_REGISTER) edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate, tsdata->report_rate); @@ -874,6 +874,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata) case M09: reg_addr->reg_threshold = M09_REGISTER_THRESHOLD; + reg_addr->reg_report_rate = NO_REGISTER; reg_addr->reg_gain = M09_REGISTER_GAIN; reg_addr->reg_offset = M09_REGISTER_OFFSET; reg_addr->reg_num_x = M09_REGISTER_NUM_X; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 240b16f3ee9797c4961709ef8f9a50b874febb3f..32d2762448aa24168c9dd0ad38c8c9eb1ec13ff3 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -267,6 +267,12 @@ static void goodix_process_events(struct goodix_ts_data *ts) if (touch_num < 0) return; + /* + * Bit 4 of the first byte reports the status of the capacitive + * Windows/Home button. 
+ */ + input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4)); + for (i = 0; i < touch_num; i++) goodix_ts_report_touch(ts, &point_data[1 + GOODIX_CONTACT_SIZE * i]); @@ -612,6 +618,9 @@ static int goodix_request_input_dev(struct goodix_ts_data *ts) ts->input_dev->id.product = ts->id; ts->input_dev->id.version = ts->version; + /* Capacitive Windows/Home button on some devices */ + input_set_capability(ts->input_dev, EV_KEY, KEY_LEFTMETA); + error = input_register_device(ts->input_dev); if (error) { dev_err(&ts->client->dev, diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c index 92e2243fb77d9a3b1b3e13316ea38e0072aed7aa..8fd909285877ef438323e68c9a200b5e88c75ec9 100644 --- a/drivers/input/touchscreen/htcpen.c +++ b/drivers/input/touchscreen/htcpen.c @@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = { } }; -static struct dmi_system_id htcshift_dmi_table[] __initdata = { +static const struct dmi_system_id htcshift_dmi_table[] __initconst = { { .ident = "Shift", .matches = { diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c index e12fb9b63f3113294f4b745d4c8320dfcc1b016c..5db0f1c4ef38404f1b69f50d011b864365f41aff 100644 --- a/drivers/input/touchscreen/surface3_spi.c +++ b/drivers/input/touchscreen/surface3_spi.c @@ -173,7 +173,7 @@ static void surface3_spi_process_pen(struct surface3_ts_data *ts_data, u8 *data) static void surface3_spi_process(struct surface3_ts_data *ts_data) { - const char header[] = { + static const char header[] = { 0xff, 0xff, 0xff, 0xff, 0xa5, 0x5a, 0xe7, 0x7e, 0x01 }; u8 *data = ts_data->rd_buf; diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index c1e23cfc6155eb855b19d115c65714862dcbb3c5..1a86cbd9326faade2043fe9cadacf2a0b0c2e806 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -414,7 +414,7 @@ static int __maybe_unused ucb1400_ts_suspend(struct device *dev) mutex_lock(&idev->mutex); if (idev->users) - ucb1400_ts_start(ucb); + ucb1400_ts_stop(ucb); mutex_unlock(&idev->mutex); return 0; @@ -428,7 +428,7 @@ static int __maybe_unused ucb1400_ts_resume(struct device *dev) mutex_lock(&idev->mutex); if (idev->users) - ucb1400_ts_stop(ucb); + ucb1400_ts_start(ucb); mutex_unlock(&idev->mutex); return 0; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 49bd2ab8c5075859bca8e2e90519e7da6cd6f48f..f3a21343e636a8f26066f206129d45924cdc9588 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARM || IOMMU_DMA - depends on ARCH_RENESAS || COMPILE_TEST + depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64) select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU @@ -373,7 +373,8 @@ config MTK_IOMMU_V1 config QCOM_IOMMU # Note: iommu drivers cannot (yet?) 
be built as modules bool "Qualcomm IOMMU Support" - depends on ARCH_QCOM || COMPILE_TEST + depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on HAS_DMA select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 382de42b835939e167604053060e9944e19131ba..6fe2d03460730cabe0aa78060114844ad1a2a3cf 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -874,7 +874,7 @@ static bool copy_device_table(void) hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); entry = (((u64) hi) << 32) + lo; if (last_entry && last_entry != entry) { - pr_err("IOMMU:%d should use the same dev table as others!/n", + pr_err("IOMMU:%d should use the same dev table as others!\n", iommu->index); return false; } @@ -882,7 +882,7 @@ static bool copy_device_table(void) old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; if (old_devtb_size != dev_table_size) { - pr_err("The device table size of IOMMU:%d is not expected!/n", + pr_err("The device table size of IOMMU:%d is not expected!\n", iommu->index); return false; } @@ -890,7 +890,7 @@ static bool copy_device_table(void) old_devtb_phys = entry & PAGE_MASK; if (old_devtb_phys >= 0x100000000ULL) { - pr_err("The address of old device table is above 4G, not trustworthy!/n"); + pr_err("The address of old device table is above 4G, not trustworthy!\n"); return false; } old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); @@ -901,7 +901,7 @@ static bool copy_device_table(void) old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, get_order(dev_table_size)); if (old_dev_tbl_cpy == NULL) { - pr_err("Failed to allocate memory for copying old device table!/n"); + pr_err("Failed to allocate memory for copying old device table!\n"); return false; } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index ca5ebaeafd6acee8d11033b34ba912eeebc04689..57c920c1372d09f927a7dcdeadc25375bf4164a8 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) #define dmar_parse_one_rhsa dmar_res_noop #endif -static void __init +static void dmar_table_print_dmar_entry(struct acpi_dmar_header *header) { struct acpi_dmar_hardware_unit *drhd; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index d665d0dc16e8f787813a6106d15bd83afacc4f34..6961fc393f0b25828f5981fad35ea744c7768b41 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl, static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { - if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) return; dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index bd515be5b380e32ac224b55d5b9a2dfff26a15ef..16d33ac19db0f77837c30f44de044a3a46b9c558 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&dom->pgtlock, flags); - ret = dom->iop->map(dom->iop, iova, paddr, size, prot); + ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), + size, prot); spin_unlock_irqrestore(&dom->pgtlock, flags); return ret; diff --git a/drivers/iommu/of_iommu.c 
b/drivers/iommu/of_iommu.c index e60e3dba85a0d7c0457d9bdf650de2c350a5d11b..50947ebb6d1700e3f81acb5806c1b1a8b183c145 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) err = of_iommu_xlate(info->dev, &iommu_spec); of_node_put(iommu_spec.np); - if (err) - return err; - - return info->np == pdev->bus->dev.of_node; + return err; } const struct iommu_ops *of_iommu_configure(struct device *dev, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 519149ec905378d59e9abb931515542b91b133c6..b5df99c6f680f940a48223455a377a1ef8e07cb2 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn) { const __be32 *cell; u64 hwid; - int i; + int cpu; cell = of_get_property(dn, "reg", NULL); if (!cell) @@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn) if (hwid & ~MPIDR_HWID_BITMASK) return -1; - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_logical_map(i) == hwid) - return i; + for_each_possible_cpu(cpu) + if (cpu_logical_map(cpu) == hwid) + return cpu; return -1; } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 2370e6d9e603ad17a57016974608b8aade9f85a2..cd0bcc3b7e33709a472c1952c5ee7ccdf1ea0382 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map) { struct its_cmd_info info = { .cmd_type = MAP_VLPI, - .map = map, + { + .map = map, + }, }; /* @@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map) { struct its_cmd_info info = { .cmd_type = GET_VLPI, - .map = map, + { + .map = map, + }, }; return irq_set_vcpu_affinity(irq, &info); @@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) { struct its_cmd_info info = { .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI, - .config = config, + { + .config = config, + }, }; return irq_set_vcpu_affinity(irq, &info); diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 14461cbfab2fa234d365a947cc4e3d68e90378ed..66f97fde13d80053ef6d67cb1fef7677e0d6e8b8 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -101,7 +101,7 @@ static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu) local_irq_save(flags); /* We can only send IPIs to VPEs within the local core */ - WARN_ON(cpu_data[cpu].core != current_cpu_data.core); + WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu)); vpflags = dvpe(); settc(cpu_vpe_id(&cpu_data[cpu])); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b3a60da088db4c7e7c17fc31f51445f091e3a2e9..c90976d7e53ccc596b65a0864ef169f1aa1fafd8 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -12,27 +12,38 @@ #include #include #include -#include #include +#include #include #include -#include +#include #include #include #include -unsigned int gic_present; +#define GIC_MAX_INTRS 256 +#define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS) -struct gic_pcpu_mask { - DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS); -}; +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 + +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +void __iomem *mips_gic_base; -static unsigned long __gic_base_addr; +DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); -static void __iomem *gic_base; -static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static struct irq_domain *gic_ipi_domain; @@ -44,202 +55,13 @@ static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); -static void __gic_irq_dispatch(void); - -static inline u32 gic_read32(unsigned int reg) -{ - return __raw_readl(gic_base + reg); -} - -static inline u64 gic_read64(unsigned int reg) -{ - return __raw_readq(gic_base + reg); -} - -static inline unsigned long gic_read(unsigned int reg) -{ - if (!mips_cm_is64) - return gic_read32(reg); - else - return gic_read64(reg); -} - -static inline void gic_write32(unsigned int reg, u32 val) -{ - return __raw_writel(val, gic_base + reg); -} - -static inline void gic_write64(unsigned int reg, u64 val) -{ - return __raw_writeq(val, gic_base + reg); -} - -static inline void gic_write(unsigned int reg, unsigned long val) -{ - if (!mips_cm_is64) - return gic_write32(reg, (u32)val); - else - return gic_write64(reg, (u64)val); -} - -static inline void gic_update_bits(unsigned int reg, unsigned long mask, - unsigned long val) -{ - unsigned long regval; - - regval = gic_read(reg); - regval &= ~mask; - regval |= val; - gic_write(reg, regval); -} - -static inline void gic_reset_mask(unsigned int intr) -{ - gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_mask(unsigned int intr) -{ - gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_polarity(unsigned int intr, unsigned int pol) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) + - GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr), - (unsigned long)pol << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_trigger(unsigned int intr, unsigned int trig) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) + - GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr), - (unsigned long)trig << GIC_INTR_BIT(intr)); -} - -static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual) -{ - gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr), - 1ul << GIC_INTR_BIT(intr), - (unsigned long)dual << GIC_INTR_BIT(intr)); -} - -static inline void gic_map_to_pin(unsigned int intr, unsigned int pin) -{ - gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) + - GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin); -} - -static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) -{ - gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) + - GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe), - GIC_SH_MAP_TO_VPE_REG_BIT(vpe)); -} - -#ifdef CONFIG_CLKSRC_MIPS_GIC -u64 notrace gic_read_count(void) -{ - unsigned int hi, hi2, lo; - - if (mips_cm_is64) - return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER)); - - do { - hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); - lo = gic_read32(GIC_REG(SHARED, 
GIC_SH_COUNTER_31_00)); - hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32)); - } while (hi2 != hi); - - return (((u64) hi) << 32) + lo; -} - -unsigned int gic_get_count_width(void) +static void gic_clear_pcpu_masks(unsigned int intr) { - unsigned int bits, config; - - config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >> - GIC_SH_CONFIG_COUNTBITS_SHF); - - return bits; -} - -void notrace gic_write_compare(u64 cnt) -{ - if (mips_cm_is64) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); - } else { - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - } -} - -void notrace gic_write_cpu_compare(u64 cnt, int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu)); - - if (mips_cm_is64) { - gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt); - } else { - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), - (int)(cnt >> 32)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), - (int)(cnt & 0xffffffff)); - } - - local_irq_restore(flags); -} - -u64 gic_read_compare(void) -{ - unsigned int hi, lo; - - if (mips_cm_is64) - return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE)); - - hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI)); - lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO)); - - return (((u64) hi) << 32) + lo; -} - -void gic_start_count(void) -{ - u32 gicconfig; - - /* Start the counter */ - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF); - gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); -} - -void gic_stop_count(void) -{ - u32 gicconfig; - - /* Stop the counter */ - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF; - gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); -} - -#endif - -unsigned gic_read_local_vp_id(void) -{ - unsigned long ident; + unsigned int i; - ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT)); - return ident & GIC_VP_IDENT_VCNUM_MSK; + /* Clear the interrupt's bit in all pcpu_masks */ + for_each_possible_cpu(i) + clear_bit(intr, per_cpu_ptr(pcpu_masks, i)); } static bool gic_local_irq_is_routable(int intr) @@ -250,17 +72,17 @@ static bool gic_local_irq_is_routable(int intr) if (cpu_has_veic) return true; - vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL)); + vpe_ctl = read_gic_vl_ctl(); switch (intr) { case GIC_LOCAL_INT_TIMER: - return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE; case GIC_LOCAL_INT_PERFCTR: - return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE; case GIC_LOCAL_INT_FDC: - return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE; case GIC_LOCAL_INT_SWINT0: case GIC_LOCAL_INT_SWINT1: - return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK; + return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE; default: return true; } @@ -272,15 +94,14 @@ static void gic_bind_eic_interrupt(int irq, int set) irq -= GIC_PIN_TO_VEC_OFFSET; /* Set irq to use shadow set */ - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) + - GIC_VPE_EIC_SS(irq), set); + write_gic_vl_eic_shadow_set(irq, set); } static void gic_send_ipi(struct irq_data *d, unsigned int cpu) { irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d)); - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq)); + 
write_gic_wedge(GIC_WEDGE_RW | hwirq); } int gic_get_c0_compare_int(void) @@ -316,47 +137,22 @@ int gic_get_c0_fdc_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -int gic_get_usm_range(struct resource *gic_usm_res) -{ - if (!gic_present) - return -1; - - gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS; - gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1); - - return 0; -} - static void gic_handle_shared_int(bool chained) { - unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4; + unsigned int intr, virq; unsigned long *pcpu_mask; - unsigned long pending_reg, intrmask_reg; DECLARE_BITMAP(pending, GIC_MAX_INTRS); - DECLARE_BITMAP(intrmask, GIC_MAX_INTRS); /* Get per-cpu bitmaps */ - pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask; - - pending_reg = GIC_REG(SHARED, GIC_SH_PEND); - intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK); - - for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) { - pending[i] = gic_read(pending_reg); - intrmask[i] = gic_read(intrmask_reg); - pending_reg += gic_reg_step; - intrmask_reg += gic_reg_step; + pcpu_mask = this_cpu_ptr(pcpu_masks); - if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64) - continue; - - pending[i] |= (u64)gic_read(pending_reg) << 32; - intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; - pending_reg += gic_reg_step; - intrmask_reg += gic_reg_step; - } + if (mips_cm_is64) + __ioread64_copy(pending, addr_gic_pend(), + DIV_ROUND_UP(gic_shared_intrs, 64)); + else + __ioread32_copy(pending, addr_gic_pend(), + DIV_ROUND_UP(gic_shared_intrs, 32)); - bitmap_and(pending, pending, intrmask, gic_shared_intrs); bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); for_each_set_bit(intr, pending, gic_shared_intrs) { @@ -371,19 +167,29 @@ static void gic_handle_shared_int(bool chained) static void gic_mask_irq(struct irq_data *d) { - gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); + unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); + + write_gic_rmask(intr); + gic_clear_pcpu_masks(intr); } static void gic_unmask_irq(struct irq_data *d) { - gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq)); + unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); + unsigned int cpu; + + write_gic_smask(intr); + + gic_clear_pcpu_masks(intr); + cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); + set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); } static void gic_ack_irq(struct irq_data *d) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq)); + write_gic_wedge(irq); } static int gic_set_type(struct irq_data *d, unsigned int type) @@ -395,34 +201,34 @@ static int gic_set_type(struct irq_data *d, unsigned int type) spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: - gic_set_polarity(irq, GIC_POL_NEG); - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_FALLING_EDGE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = true; break; case IRQ_TYPE_EDGE_RISING: - gic_set_polarity(irq, GIC_POL_POS); - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_RISING_EDGE); + change_gic_trig(irq, GIC_TRIG_EDGE); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = true; break; case IRQ_TYPE_EDGE_BOTH: /* polarity is irrelevant in this case */ - gic_set_trigger(irq, GIC_TRIG_EDGE); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE); + change_gic_trig(irq, GIC_TRIG_EDGE); + 
change_gic_dual(irq, GIC_DUAL_DUAL); is_edge = true; break; case IRQ_TYPE_LEVEL_LOW: - gic_set_polarity(irq, GIC_POL_NEG); - gic_set_trigger(irq, GIC_TRIG_LEVEL); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_ACTIVE_LOW); + change_gic_trig(irq, GIC_TRIG_LEVEL); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = false; break; case IRQ_TYPE_LEVEL_HIGH: default: - gic_set_polarity(irq, GIC_POL_POS); - gic_set_trigger(irq, GIC_TRIG_LEVEL); - gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE); + change_gic_pol(irq, GIC_POL_ACTIVE_HIGH); + change_gic_trig(irq, GIC_TRIG_LEVEL); + change_gic_dual(irq, GIC_DUAL_SINGLE); is_edge = false; break; } @@ -443,32 +249,28 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); - cpumask_t tmp = CPU_MASK_NONE; - unsigned long flags; - int i, cpu; + unsigned long flags; + unsigned int cpu; - cpumask_and(&tmp, cpumask, cpu_online_mask); - if (cpumask_empty(&tmp)) + cpu = cpumask_first_and(cpumask, cpu_online_mask); + if (cpu >= NR_CPUS) return -EINVAL; - cpu = cpumask_first(&tmp); - /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - gic_map_to_vpe(irq, mips_cm_vp_id(cpu)); + write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); /* Update the pcpu_masks */ - for (i = 0; i < min(gic_vpes, NR_CPUS); i++) - clear_bit(irq, pcpu_masks[i].pcpu_mask); - set_bit(irq, pcpu_masks[cpu].pcpu_mask); + gic_clear_pcpu_masks(irq); + if (read_gic_mask(irq)) + set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); - cpumask_copy(irq_data_get_affinity_mask(d), cpumask); irq_data_update_effective_affinity(d, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); - return IRQ_SET_MASK_OK_NOCOPY; + return IRQ_SET_MASK_OK; } #endif @@ -499,8 +301,8 @@ static void gic_handle_local_int(bool chained) unsigned long pending, masked; unsigned int intr, virq; - pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); - masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK)); + pending = read_gic_vl_pend(); + masked = read_gic_vl_mask(); bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); @@ -518,14 +320,14 @@ static void gic_mask_local_irq(struct irq_data *d) { int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr); + write_gic_vl_rmask(BIT(intr)); } static void gic_unmask_local_irq(struct irq_data *d) { int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr); + write_gic_vl_smask(BIT(intr)); } static struct irq_chip gic_local_irq_controller = { @@ -542,9 +344,8 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr); + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_rmask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } @@ -557,9 +358,8 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d) spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr); + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_smask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } @@ -582,103 +382,54 @@ static void gic_irq_dispatch(struct irq_desc *desc) 
gic_handle_shared_int(true); } -static void __init gic_basic_init(void) -{ - unsigned int i; - - board_bind_eic_interrupt = &gic_bind_eic_interrupt; - - /* Setup defaults */ - for (i = 0; i < gic_shared_intrs; i++) { - gic_set_polarity(i, GIC_POL_POS); - gic_set_trigger(i, GIC_TRIG_LEVEL); - gic_reset_mask(i); - } - - for (i = 0; i < gic_vpes; i++) { - unsigned int j; - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { - if (!gic_local_irq_is_routable(j)) - continue; - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j); - } - } -} - static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { int intr = GIC_HWIRQ_TO_LOCAL(hw); - int ret = 0; int i; unsigned long flags; + u32 val; if (!gic_local_irq_is_routable(intr)) return -EPERM; + if (intr > GIC_LOCAL_INT_FDC) { + pr_err("Invalid local IRQ %d\n", intr); + return -EINVAL; + } + + if (intr == GIC_LOCAL_INT_TIMER) { + /* CONFIG_MIPS_CMP workaround (see __gic_init) */ + val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; + } else { + val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + } + spin_lock_irqsave(&gic_lock, flags); for (i = 0; i < gic_vpes; i++) { - u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; - - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(i)); - - switch (intr) { - case GIC_LOCAL_INT_WD: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val); - break; - case GIC_LOCAL_INT_COMPARE: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), - val); - break; - case GIC_LOCAL_INT_TIMER: - /* CONFIG_MIPS_CMP workaround (see __gic_init) */ - val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin; - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), - val); - break; - case GIC_LOCAL_INT_PERFCTR: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), - val); - break; - case GIC_LOCAL_INT_SWINT0: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), - val); - break; - case GIC_LOCAL_INT_SWINT1: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), - val); - break; - case GIC_LOCAL_INT_FDC: - gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val); - break; - default: - pr_err("Invalid local IRQ %d\n", intr); - ret = -EINVAL; - break; - } + write_gic_vl_other(mips_cm_vp_id(i)); + write_gic_vo_map(intr, val); } spin_unlock_irqrestore(&gic_lock, flags); - return ret; + return 0; } static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw, unsigned int vpe) + irq_hw_number_t hw, unsigned int cpu) { int intr = GIC_HWIRQ_TO_SHARED(hw); + struct irq_data *data; unsigned long flags; - int i; + + data = irq_get_irq_data(virq); spin_lock_irqsave(&gic_lock, flags); - gic_map_to_pin(intr, gic_cpu_pin); - gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); - for (i = 0; i < min(gic_vpes, NR_CPUS); i++) - clear_bit(intr, pcpu_masks[i].pcpu_mask); - set_bit(intr, pcpu_masks[vpe].pcpu_mask); + write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); + write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); + gic_clear_pcpu_masks(intr); + set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); + irq_data_update_effective_affinity(data, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); return 0; @@ -885,34 +636,69 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; -static void __init __gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - unsigned int cpu_vec, unsigned int irqbase, - struct device_node *node) + +static int __init gic_of_init(struct device_node 
*node, + struct device_node *parent) { - unsigned int gicconfig, cpu; - unsigned int v[2]; + unsigned int cpu_vec, i, j, gicconfig, cpu, v[2]; + unsigned long reserved; + phys_addr_t gic_base; + struct resource res; + size_t gic_len; - __gic_base_addr = gic_base_addr; + /* Find the first available CPU vector. */ + i = 0; + reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); + while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", + i++, &cpu_vec)) + reserved |= BIT(cpu_vec); - gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size); + cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); + if (cpu_vec == hweight_long(ST0_IM)) { + pr_err("No CPU vectors available for GIC\n"); + return -ENODEV; + } - gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); - gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >> - GIC_SH_CONFIG_NUMINTRS_SHF; - gic_shared_intrs = ((gic_shared_intrs + 1) * 8); + if (of_address_to_resource(node, 0, &res)) { + /* + * Probe the CM for the GIC base address if not specified + * in the device-tree. + */ + if (mips_cm_present()) { + gic_base = read_gcr_gic_base() & + ~CM_GCR_GIC_BASE_GICEN; + gic_len = 0x20000; + } else { + pr_err("Failed to get GIC memory range\n"); + return -ENODEV; + } + } else { + gic_base = res.start; + gic_len = resource_size(&res); + } + + if (mips_cm_present()) { + write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN); + /* Ensure GIC region is enabled before trying to access it */ + __sync(); + } + + mips_gic_base = ioremap_nocache(gic_base, gic_len); - gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >> - GIC_SH_CONFIG_NUMVPES_SHF; + gicconfig = read_gic_config(); + gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; + gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); + gic_shared_intrs = (gic_shared_intrs + 1) * 8; + + gic_vpes = gicconfig & GIC_CONFIG_PVPS; + gic_vpes >>= __ffs(GIC_CONFIG_PVPS); gic_vpes = gic_vpes + 1; if (cpu_has_veic) { /* Set EIC mode for all VPEs */ for_each_present_cpu(cpu) { - gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), - mips_cm_vp_id(cpu)); - gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL), - GIC_VPE_CTL_EIC_MODE_MSK); + write_gic_vl_other(mips_cm_vp_id(cpu)); + write_gic_vo_ctl(GIC_VX_CTL_EIC); } /* Always use vector 1 in EIC mode */ @@ -937,9 +723,7 @@ static void __init __gic_init(unsigned long gic_base_addr, */ if (IS_ENABLED(CONFIG_MIPS_CMP) && gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { - timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL, - GIC_VPE_TIMER_MAP)) & - GIC_MAP_MSK; + timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP; irq_set_chained_handler(MIPS_CPU_IRQ_BASE + GIC_CPU_PIN_OFFSET + timer_cpu_pin, @@ -950,17 +734,21 @@ static void __init __gic_init(unsigned long gic_base_addr, } gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + - gic_shared_intrs, irqbase, + gic_shared_intrs, 0, &gic_irq_domain_ops, NULL); - if (!gic_irq_domain) - panic("Failed to add GIC IRQ domain"); + if (!gic_irq_domain) { + pr_err("Failed to add GIC IRQ domain"); + return -ENXIO; + } gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, GIC_NUM_LOCAL_INTRS + gic_shared_intrs, node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) - panic("Failed to add GIC IPI domain"); + if (!gic_ipi_domain) { + pr_err("Failed to add GIC IPI domain"); + return -ENXIO; + } irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); @@ -975,64 +763,25 @@ static void __init __gic_init(unsigned long gic_base_addr, } bitmap_copy(ipi_available, 
ipi_resrv, GIC_MAX_INTRS); - gic_basic_init(); -} - -void __init gic_init(unsigned long gic_base_addr, - unsigned long gic_addrspace_size, - unsigned int cpu_vec, unsigned int irqbase) -{ - __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL); -} -static int __init gic_of_init(struct device_node *node, - struct device_node *parent) -{ - struct resource res; - unsigned int cpu_vec, i = 0, reserved = 0; - phys_addr_t gic_base; - size_t gic_len; + board_bind_eic_interrupt = &gic_bind_eic_interrupt; - /* Find the first available CPU vector. */ - while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", - i++, &cpu_vec)) - reserved |= BIT(cpu_vec); - for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) { - if (!(reserved & BIT(cpu_vec))) - break; - } - if (cpu_vec == 8) { - pr_err("No CPU vectors available for GIC\n"); - return -ENODEV; + /* Setup defaults */ + for (i = 0; i < gic_shared_intrs; i++) { + change_gic_pol(i, GIC_POL_ACTIVE_HIGH); + change_gic_trig(i, GIC_TRIG_LEVEL); + write_gic_rmask(i); } - if (of_address_to_resource(node, 0, &res)) { - /* - * Probe the CM for the GIC base address if not specified - * in the device-tree. - */ - if (mips_cm_present()) { - gic_base = read_gcr_gic_base() & - ~CM_GCR_GIC_BASE_GICEN_MSK; - gic_len = 0x20000; - } else { - pr_err("Failed to get GIC memory range\n"); - return -ENODEV; + for (i = 0; i < gic_vpes; i++) { + write_gic_vl_other(mips_cm_vp_id(i)); + for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { + if (!gic_local_irq_is_routable(j)) + continue; + write_gic_vo_rmask(BIT(j)); } - } else { - gic_base = res.start; - gic_len = resource_size(&res); } - if (mips_cm_present()) { - write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); - /* Ensure GIC region is enabled before trying to access it */ - __sync(); - } - gic_present = true; - - __gic_init(gic_base, gic_len, cpu_vec, 0, node); - return 0; } IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 6c44609fd83a0f266df74b80bbe91f84b3bd4a67..cd2b3c69771a24b62a7952a8e7c626fde5459c6b 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) isdn_net_local *lp; struct ippp_struct *is; int proto; - unsigned char protobuf[4]; is = file->private_data; @@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) if (!lp) printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); else { - /* - * Don't reset huptimer for - * LCP packets. (Echo requests). - */ - if (copy_from_user(protobuf, buf, 4)) - return -EFAULT; - proto = PPP_PROTOCOL(protobuf); - if (proto != PPP_LCP) - lp->huptimer = 0; + if (lp->isdn_device < 0 || lp->isdn_channel < 0) { + unsigned char protobuf[4]; + /* + * Don't reset huptimer for + * LCP packets. (Echo requests). + */ + if (copy_from_user(protobuf, buf, 4)) + return -EFAULT; + + proto = PPP_PROTOCOL(protobuf); + if (proto != PPP_LCP) + lp->huptimer = 0; - if (lp->isdn_device < 0 || lp->isdn_channel < 0) return 0; + } if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && lp->dialstate == 0 && (lp->flags & ISDN_NET_CONNECTED)) { unsigned short hl; struct sk_buff *skb; + unsigned char *cpy_buf; /* * we need to reserve enough space in front of * sk_buff. 
old call to dev_alloc_skb only reserved @@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) return count; } skb_reserve(skb, hl); - if (copy_from_user(skb_put(skb, count), buf, count)) + cpy_buf = skb_put(skb, count); + if (copy_from_user(cpy_buf, buf, count)) { kfree_skb(skb); return -EFAULT; } + + /* + * Don't reset huptimer for + * LCP packets. (Echo requests). + */ + proto = PPP_PROTOCOL(cpy_buf); + if (proto != PPP_LCP) + lp->huptimer = 0; + if (is->debug & 0x40) { printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len); isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c index bbbbe08982332a53fdad499cd5643f85b97e5898..9a257f9693009c7ec3a163dc83e7c0b2e5839dde 100644 --- a/drivers/leds/leds-as3645a.c +++ b/drivers/leds/leds-as3645a.c @@ -112,6 +112,10 @@ #define AS_PEAK_mA_TO_REG(a) \ ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250) +/* LED numbers for Devicetree */ +#define AS_LED_FLASH 0 +#define AS_LED_INDICATOR 1 + enum as_mode { AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, @@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash, struct device_node *node) { struct as3645a_config *cfg = &flash->cfg; + struct device_node *child; const char *name; int rval; - flash->flash_node = of_get_child_by_name(node, "flash"); + for_each_child_of_node(node, child) { + u32 id = 0; + + of_property_read_u32(child, "reg", &id); + + switch (id) { + case AS_LED_FLASH: + flash->flash_node = of_node_get(child); + break; + case AS_LED_INDICATOR: + flash->indicator_node = of_node_get(child); + break; + default: + dev_warn(&flash->client->dev, + "unknown LED %u encountered, ignoring\n", id); + break; + } + } + if (!flash->flash_node) { dev_err(&flash->client->dev, "can't find flash node\n"); return -ENODEV; @@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash, of_property_read_u32(flash->flash_node, "voltage-reference", &cfg->voltage_reference); - of_property_read_u32(flash->flash_node, "peak-current-limit", + of_property_read_u32(flash->flash_node, "ams,input-max-microamp", &cfg->peak); cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak); - flash->indicator_node = of_get_child_by_name(node, "indicator"); if (!flash->indicator_node) { dev_warn(&flash->client->dev, "can't find indicator node\n"); @@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client) as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); v4l2_flash_release(flash->vf); + v4l2_flash_release(flash->vfind); led_classdev_flash_unregister(&flash->fled); led_classdev_unregister(&flash->iled_cdev); diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 0f9ed1ea0e891d90b3c44141b9d38ae20603c30c..492789f56896d5a88047b9c7aceb72ecf2009c6c 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id) * detected as working, but in reality it is not) as low as * possible. 
*/ -static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = { +static const struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = { { .callback = clevo_mail_led_dmi_callback, .ident = "Clevo D410J", diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 732eb86bc1a5ca2307971ed4e9eb0cf80227e6a2..a9db8674cd026ee00d53f85f5ea1bb5b1a0e6ea8 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection"); * detected as working, but in reality it is not) as low as * possible. */ -static struct dmi_system_id nas_led_whitelist[] __initdata = { +static const struct dmi_system_id nas_led_whitelist[] __initconst = { { .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9601225e0ae9add198ed874598fb91121e6c0ede..d216a8f7bc224c815c383cc588dd24b04c94ea1c 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -63,6 +63,12 @@ #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1) #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1)) +/* + * Align buffer writes to this boundary. + * Tests show that SSDs have the highest IOPS when using 4k writes. + */ +#define DM_BUFIO_WRITE_ALIGN 4096 + /* * dm_buffer->list_mode */ @@ -149,6 +155,10 @@ struct dm_buffer { blk_status_t write_error; unsigned long state; unsigned long last_accessed; + unsigned dirty_start; + unsigned dirty_end; + unsigned write_start; + unsigned write_end; struct dm_bufio_client *c; struct list_head write_list; struct bio bio; @@ -560,7 +570,7 @@ static void dmio_complete(unsigned long error, void *context) } static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, - unsigned n_sectors, bio_end_io_t *end_io) + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) { int r; struct dm_io_request io_req = { @@ -578,10 +588,10 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, if (b->data_mode != DATA_MODE_VMALLOC) { io_req.mem.type = DM_IO_KMEM; - io_req.mem.ptr.addr = b->data; + io_req.mem.ptr.addr = (char *)b->data + offset; } else { io_req.mem.type = DM_IO_VMA; - io_req.mem.ptr.vma = b->data; + io_req.mem.ptr.vma = (char *)b->data + offset; } b->bio.bi_end_io = end_io; @@ -609,10 +619,10 @@ static void inline_endio(struct bio *bio) } static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, - unsigned n_sectors, bio_end_io_t *end_io) + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) { char *ptr; - int len; + unsigned len; bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); b->bio.bi_iter.bi_sector = sector; @@ -625,29 +635,20 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, b->bio.bi_private = end_io; bio_set_op_attrs(&b->bio, rw, 0); - /* - * We assume that if len >= PAGE_SIZE ptr is page-aligned. - * If len < PAGE_SIZE the buffer doesn't cross page boundary. - */ - ptr = b->data; + ptr = (char *)b->data + offset; len = n_sectors << SECTOR_SHIFT; - if (len >= PAGE_SIZE) - BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1)); - else - BUG_ON((unsigned long)ptr & (len - 1)); - do { - if (!bio_add_page(&b->bio, virt_to_page(ptr), - len < PAGE_SIZE ? 
len : PAGE_SIZE, + unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len); + if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step, offset_in_page(ptr))) { BUG_ON(b->c->block_size <= PAGE_SIZE); - use_dmio(b, rw, sector, n_sectors, end_io); + use_dmio(b, rw, sector, n_sectors, offset, end_io); return; } - len -= PAGE_SIZE; - ptr += PAGE_SIZE; + len -= this_step; + ptr += this_step; } while (len > 0); submit_bio(&b->bio); @@ -657,18 +658,33 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io) { unsigned n_sectors; sector_t sector; - - if (rw == WRITE && b->c->write_callback) - b->c->write_callback(b); + unsigned offset, end; sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; - n_sectors = 1 << b->c->sectors_per_block_bits; + + if (rw != WRITE) { + n_sectors = 1 << b->c->sectors_per_block_bits; + offset = 0; + } else { + if (b->c->write_callback) + b->c->write_callback(b); + offset = b->write_start; + end = b->write_end; + offset &= -DM_BUFIO_WRITE_ALIGN; + end += DM_BUFIO_WRITE_ALIGN - 1; + end &= -DM_BUFIO_WRITE_ALIGN; + if (unlikely(end > b->c->block_size)) + end = b->c->block_size; + + sector += offset >> SECTOR_SHIFT; + n_sectors = (end - offset) >> SECTOR_SHIFT; + } if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) && b->data_mode != DATA_MODE_VMALLOC) - use_inline_bio(b, rw, sector, n_sectors, end_io); + use_inline_bio(b, rw, sector, n_sectors, offset, end_io); else - use_dmio(b, rw, sector, n_sectors, end_io); + use_dmio(b, rw, sector, n_sectors, offset, end_io); } /*---------------------------------------------------------------- @@ -720,6 +736,9 @@ static void __write_dirty_buffer(struct dm_buffer *b, clear_bit(B_DIRTY, &b->state); wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); + b->write_start = b->dirty_start; + b->write_end = b->dirty_end; + if (!write_list) submit_io(b, WRITE, write_endio); else @@ -1221,19 +1240,37 @@ void dm_bufio_release(struct dm_buffer *b) } EXPORT_SYMBOL_GPL(dm_bufio_release); -void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end) { struct dm_bufio_client *c = b->c; + BUG_ON(start >= end); + BUG_ON(end > b->c->block_size); + dm_bufio_lock(c); BUG_ON(test_bit(B_READING, &b->state)); - if (!test_and_set_bit(B_DIRTY, &b->state)) + if (!test_and_set_bit(B_DIRTY, &b->state)) { + b->dirty_start = start; + b->dirty_end = end; __relink_lru(b, LIST_DIRTY); + } else { + if (start < b->dirty_start) + b->dirty_start = start; + if (end > b->dirty_end) + b->dirty_end = end; + } dm_bufio_unlock(c); } +EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); + +void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) +{ + dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); +} EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) @@ -1398,6 +1435,8 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); set_bit(B_DIRTY, &b->state); + b->dirty_start = 0; + b->dirty_end = c->block_size; __unlink_buffer(b); __link_buffer(b, new_block, LIST_DIRTY); } else { diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h index b6d8f53ec15b440c025c6d37c64b88025abd0772..be732d3f86119a0698f6ce32e6c4951997898e9f 100644 --- a/drivers/md/dm-bufio.h +++ b/drivers/md/dm-bufio.h @@ -93,6 +93,15 @@ void dm_bufio_release(struct dm_buffer *b); */ void 
dm_bufio_mark_buffer_dirty(struct dm_buffer *b); +/* + * Mark a part of the buffer dirty. + * + * The specified part of the buffer is scheduled to be written. dm-bufio may + * write the specified part of the buffer or it may write a larger superset. + */ +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end); + /* * Initiate writing of dirty buffers, without waiting for completion. */ diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index dcac25c2be7a25ef6ba0d67f24d69362e1abe79b..8785134c9f1f1aee8db79506a3f0384d6cf92bfa 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2306,7 +2306,7 @@ static void init_features(struct cache_features *cf) static int parse_features(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 2, "Invalid number of cache feature arguments"}, }; @@ -2348,7 +2348,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as, static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, char **error) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "Invalid number of policy arguments"}, }; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 54aef8ed97db22fb1b28f47d4be95721aea24d0d..a55ffd4f5933fc1247b6729dcc0344964c9f01b8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2529,7 +2529,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar { struct crypt_config *cc = ti->private; struct dm_arg_set as; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, }; unsigned int opt_params, val; diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 7146c2d9762dfdc14f9815b651d59b992c0583e0..b82cb1ab1eaa338a67800bfdb0c38d36f7913ef2 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -51,7 +51,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, unsigned argc; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 6, "Invalid number of feature args"}, {1, UINT_MAX, "Invalid corrupt bio byte"}, {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, @@ -178,7 +178,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, */ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, UINT_MAX, "Invalid up interval"}, {0, UINT_MAX, "Invalid down interval"}, }; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 27c0f223f8ea8f6164293283f2da8ad50d2d034d..096fe9b66c50749cb841bcbafc4bc6cb6ca63be7 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -225,6 +225,8 @@ struct dm_integrity_c { struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; struct alg_spec journal_mac_alg; + + atomic64_t number_of_mismatches; }; struct dm_integrity_range { @@ -298,7 +300,7 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...) 
/* * DM Integrity profile, protection is performed layer above (dm-crypt) */ -static struct blk_integrity_profile dm_integrity_profile = { +static const struct blk_integrity_profile dm_integrity_profile = { .name = "DM-DIF-EXT-TAG", .generate_fn = NULL, .verify_fn = NULL, @@ -310,6 +312,8 @@ static void dm_integrity_dtr(struct dm_target *ti); static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) { + if (err == -EILSEQ) + atomic64_inc(&ic->number_of_mismatches); if (!cmpxchg(&ic->failed, 0, err)) DMERR("Error on %s: %d", msg, err); } @@ -770,13 +774,13 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi unsigned i; io_comp.ic = ic; - io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp); + init_completion(&io_comp.comp); if (commit_start + commit_sections <= ic->journal_sections) { io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (ic->journal_io) { crypt_comp_1.ic = ic; - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + init_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); wait_for_completion_io(&crypt_comp_1.comp); @@ -792,18 +796,18 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi to_end = ic->journal_sections - commit_start; if (ic->journal_io) { crypt_comp_1.ic = ic; - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + init_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); if (try_wait_for_completion(&crypt_comp_1.comp)) { rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); + reinit_completion(&crypt_comp_1.comp); crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); wait_for_completion_io(&crypt_comp_1.comp); } else { crypt_comp_2.ic = ic; - crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp); + init_completion(&crypt_comp_2.comp); crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); wait_for_completion_io(&crypt_comp_1.comp); @@ -1041,7 +1045,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se memcpy(tag, dp, to_copy); } else if (op == TAG_WRITE) { memcpy(dp, tag, to_copy); - dm_bufio_mark_buffer_dirty(b); + dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); } else { /* e.g.: op == TAG_CMP */ if (unlikely(memcmp(dp, tag, to_copy))) { @@ -1275,6 +1279,7 @@ static void integrity_metadata(struct work_struct *w) DMERR("Checksum failed at sector 0x%llx", (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); r = -EILSEQ; + atomic64_inc(&ic->number_of_mismatches); } if (likely(checksums != checksums_onstack)) kfree(checksums); @@ -1676,7 +1681,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map dio->in_flight = (atomic_t)ATOMIC_INIT(2); if (need_sync_io) { - read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp); + init_completion(&read_comp); dio->completion = &read_comp; } else dio->completion = NULL; @@ -1700,7 +1705,11 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map if (need_sync_io) { wait_for_completion_io(&read_comp); - 
integrity_metadata(&dio->work); + if (likely(!bio->bi_status)) + integrity_metadata(&dio->work); + else + dec_in_flight(dio); + } else { INIT_WORK(&dio->work, integrity_metadata); queue_work(ic->metadata_wq, &dio->work); @@ -1834,7 +1843,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, comp.ic = ic; comp.in_flight = (atomic_t)ATOMIC_INIT(1); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); i = write_start; for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { @@ -2061,7 +2070,7 @@ static void replay_journal(struct dm_integrity_c *ic) if (ic->journal_io) { struct journal_completion crypt_comp; crypt_comp.ic = ic; - crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp); + init_completion(&crypt_comp.comp); crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); wait_for_completion(&crypt_comp.comp); @@ -2233,7 +2242,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: - result[0] = '\0'; + DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches)); break; case STATUSTYPE_TABLE: { @@ -2634,7 +2643,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) memset(iv, 0x00, ivsize); skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) wait_for_completion(&comp.comp); @@ -2691,7 +2700,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) sg_init_one(&sg, crypt_data, crypt_len); skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); + init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) wait_for_completion(&comp.comp); @@ -2778,7 +2787,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) int r; unsigned extra_args; struct dm_arg_set as; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 9, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; @@ -2806,6 +2815,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) bio_list_init(&ic->flush_bio_list); init_waitqueue_head(&ic->copy_to_journal_wait); init_completion(&ic->crypto_backoff); + atomic64_set(&ic->number_of_mismatches, 0); r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); if (r) { @@ -3202,7 +3212,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e06f0ef7d2ecf2b12beb1d13b6ae2f35580be885..8756a6850431d007f756079a595f5ae30d51a8df 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1629,7 +1629,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para *---------------------------------------------------------------*/ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) { - static struct { + static const struct { int cmd; int flags; ioctl_fn 
fn; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 405eca206d67c3e8b877f61140bd89a5b7f33320..d5f8eff7c11d88a066d1dd83fe4707b33264cfae 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } -static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, - size_t size) -{ - struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) - return; - dax_flush(dax_dev, pgoff, addr, size); -} - static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, @@ -212,7 +198,6 @@ static struct target_type linear_target = { .iterate_devices = linear_iterate_devices, .direct_access = linear_dax_direct_access, .dax_copy_from_iter = linear_dax_copy_from_iter, - .dax_flush = linear_dax_flush, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 534a254eb977381cd6589921e33953f29cfa91fb..8b80a9ce9ea9c17ad9b6797aa51e6763f7f9fbc3 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -100,6 +100,7 @@ struct log_writes_c { struct dm_dev *logdev; u64 logged_entries; u32 sectorsize; + u32 sectorshift; atomic_t io_blocks; atomic_t pending_blocks; sector_t next_sector; @@ -128,6 +129,18 @@ struct per_bio_data { struct pending_block *block; }; +static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc, + sector_t sectors) +{ + return sectors >> (lc->sectorshift - SECTOR_SHIFT); +} + +static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc, + sector_t sectors) +{ + return sectors << (lc->sectorshift - SECTOR_SHIFT); +} + static void put_pending_block(struct log_writes_c *lc) { if (atomic_dec_and_test(&lc->pending_blocks)) { @@ -253,7 +266,7 @@ static int log_one_block(struct log_writes_c *lc, if (!block->vec_cnt) goto out; - sector++; + sector += dev_to_bio_sectors(lc, 1); atomic_inc(&lc->io_blocks); bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); @@ -354,10 +367,9 @@ static int log_writes_kthread(void *arg) goto next; sector = lc->next_sector; - if (block->flags & LOG_DISCARD_FLAG) - lc->next_sector++; - else - lc->next_sector += block->nr_sectors + 1; + if (!(block->flags & LOG_DISCARD_FLAG)) + lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); + lc->next_sector += dev_to_bio_sectors(lc, 1); /* * Apparently the size of the device may not be known @@ -399,7 +411,7 @@ static int log_writes_kthread(void *arg) if (!try_to_freeze()) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop() && - !atomic_read(&lc->pending_blocks)) + list_empty(&lc->logging_blocks)) schedule(); __set_current_state(TASK_RUNNING); } @@ -435,7 +447,6 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) INIT_LIST_HEAD(&lc->unflushed_blocks); INIT_LIST_HEAD(&lc->logging_blocks); init_waitqueue_head(&lc->wait); - lc->sectorsize = 1 << SECTOR_SHIFT; atomic_set(&lc->io_blocks, 0); atomic_set(&lc->pending_blocks, 0); @@ -455,6 +466,8 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } + lc->sectorsize = bdev_logical_block_size(lc->dev->bdev); + lc->sectorshift = ilog2(lc->sectorsize); 
lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); if (IS_ERR(lc->log_kthread)) { ret = PTR_ERR(lc->log_kthread); @@ -464,8 +477,12 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - /* We put the super at sector 0, start logging at sector 1 */ - lc->next_sector = 1; + /* + * next_sector is in 512b sectors to correspond to what bi_sector expects. + * The super starts at sector 0, and the next_sector is the next logical + * one based on the sectorsize of the device. + */ + lc->next_sector = lc->sectorsize >> SECTOR_SHIFT; lc->logging_enabled = true; lc->end_sector = logdev_last_sector(lc); lc->device_supports_discard = true; @@ -599,8 +616,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) if (discard_bio) block->flags |= LOG_DISCARD_FLAG; - block->sector = bio->bi_iter.bi_sector; - block->nr_sectors = bio_sectors(bio); + block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector); + block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); /* We don't need the data, just submit */ if (discard_bio) { @@ -767,9 +784,12 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit if (!q || !blk_queue_discard(q)) { lc->device_supports_discard = false; - limits->discard_granularity = 1 << SECTOR_SHIFT; + limits->discard_granularity = lc->sectorsize; limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); } + limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); + limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); + limits->io_min = limits->physical_block_size; } static struct target_type log_writes_target = { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 96aedaac2c644df49ccb7dbe590446763738ac26..11f273d2f018e722b1e9480c5a5eb0e59b0f0048 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -632,6 +632,10 @@ static void process_queued_bios(struct work_struct *work) case DM_MAPIO_REMAPPED: generic_make_request(bio); break; + case 0: + break; + default: + WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r); } } blk_finish_plug(&plug); @@ -698,7 +702,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, struct path_selector_type *pst; unsigned ps_argc; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of path selector args"}, }; @@ -822,7 +826,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps static struct priority_group *parse_priority_group(struct dm_arg_set *as, struct multipath *m) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; @@ -898,7 +902,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) int ret; struct dm_target *ti = m->ti; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of hardware handler args"}, }; @@ -950,7 +954,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) struct dm_target *ti = m->ti; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 8, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, @@ -1019,7 +1023,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) static int multipath_ctr(struct 
dm_target *ti, unsigned argc, char **argv) { /* target arguments */ - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; @@ -1379,6 +1383,7 @@ static void pg_init_done(void *data, int errors) case SCSI_DH_RETRY: /* Wait before retrying. */ delay_retry = 1; + /* fall through */ case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5bfe285ea9d1c815ae8014064a29e4a08566d374..1ac58c5651b7f8086ccba297547ff13823804706 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio) if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) return DM_MAPIO_REQUEUE; - mddev->pers->make_request(mddev, bio); + md_handle_request(mddev, bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index c6ebc5b1e00eb8be80e94ea2f31c190613fb4244..eadfcfd106ffffa4773421d5b61ae77414e36d6d 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -117,9 +117,9 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_clone_bio_info *info = container_of(clone, struct dm_rq_clone_bio_info, clone); struct dm_rq_target_io *tio = info->tio; - struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; blk_status_t error = clone->bi_status; + bool is_last = !clone->bi_next; bio_put(clone); @@ -137,28 +137,23 @@ static void end_clone_bio(struct bio *clone) * when the request is completed. */ tio->error = error; - return; + goto exit; } /* * I/O for the bio successfully completed. * Notice the data completion to the upper layer. */ - - /* - * bios are processed from the head of the list. - * So the completing bio should always be rq->bio. - * If it's not, something wrong is happening. - */ - if (tio->orig->bio != bio) - DMERR("bio completion is going in the middle of the request"); + tio->completed += nr_bytes; /* * Update the original request. * Do not use blk_end_request() here, because it may complete * the original request before the clone, and break the ordering. */ - blk_update_request(tio->orig, BLK_STS_OK, nr_bytes); + if (is_last) + exit: + blk_update_request(tio->orig, BLK_STS_OK, tio->completed); } static struct dm_rq_target_io *tio_from_request(struct request *rq) @@ -237,14 +232,14 @@ static void dm_end_request(struct request *clone, blk_status_t error) /* * Requeue the original request of a clone. */ -static void dm_old_requeue_request(struct request *rq) +static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms) { struct request_queue *q = rq->q; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, rq); - blk_run_queue_async(q); + blk_delay_queue(q, delay_ms); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -270,6 +265,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ struct mapped_device *md = tio->md; struct request *rq = tio->orig; int rw = rq_data_dir(rq); + unsigned long delay_ms = delay_requeue ? 100 : 0; rq_end_stats(md, rq); if (tio->clone) { @@ -278,9 +274,9 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ } if (!rq->q->mq_ops) - dm_old_requeue_request(rq); + dm_old_requeue_request(rq, delay_ms); else - dm_mq_delay_requeue_request(rq, delay_requeue ? 
100/*ms*/ : 0); + dm_mq_delay_requeue_request(rq, delay_ms); rq_completed(md, rw, false); } @@ -455,6 +451,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq, tio->clone = NULL; tio->orig = rq; tio->error = 0; + tio->completed = 0; /* * Avoid initializing info for blk-mq; it passes * target-specific data through info.ptr diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index 9813922e4fe583f64c16bdf5b9c51ab856e7c2b7..f43c45460aaccc291af6a4af5a36708b9310a5ca 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h @@ -29,6 +29,7 @@ struct dm_rq_target_io { struct dm_stats_aux stats_aux; unsigned long duration_jiffies; unsigned n_sectors; + unsigned completed; }; /* diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index ab50d7c4377f8fd95fe86f875e23c70d22919fb6..b5e892149c542ba76f0cad8d819cb30c442075e9 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } -static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, - size_t size) -{ - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; - struct block_device *bdev; - uint32_t stripe; - - stripe_map_sector(sc, sector, &stripe, &dev_sector); - dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; - bdev = sc->stripe[stripe].dev->bdev; - - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) - return; - dax_flush(dax_dev, pgoff, addr, size); -} - /* * Stripe status: * @@ -489,7 +470,6 @@ static struct target_type stripe_target = { .io_hints = stripe_io_hints, .direct_access = stripe_dax_direct_access, .dax_copy_from_iter = stripe_dax_copy_from_iter, - .dax_flush = stripe_dax_flush, }; int __init dm_stripe_init(void) diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 2dcea4c56f37f7cf0c0fd9c5b8eea8f68b229707..4c8de1ff78cac8ad8a575fcbd42c81a93d6bd79e 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -251,7 +251,7 @@ static void switch_dtr(struct dm_target *ti) */ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv) { - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"}, {1, UINT_MAX, "Invalid region size"}, {0, 0, "Invalid number of optional args"}, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 28a4071cdf85e8ae15e8741d8770199a689de6b2..ef7b8f201f73ad9777cf336344fe59b11914cc73 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -806,7 +806,8 @@ int dm_table_add_target(struct dm_table *t, const char *type, /* * Target argument parsing helpers. 
*/ -static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, +static int validate_next_arg(const struct dm_arg *arg, + struct dm_arg_set *arg_set, unsigned *value, char **error, unsigned grouped) { const char *arg_str = dm_shift_arg(arg_set); @@ -824,14 +825,14 @@ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, return 0; } -int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 0); } EXPORT_SYMBOL(dm_read_arg); -int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, +int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 1); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 69d88aee30554d017a6f7164d343bb17ee13f716..1e25705209c27fbb8e62f5d096c9dbe157d82c77 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3041,7 +3041,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, unsigned argc; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, 4, "Invalid number of pool feature arguments"}, }; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 1c5b6185c79d049e6621a495ba27e76bbafc5916..bda3caca23ca69af2fe97592aa817a29b87851d6 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -839,7 +839,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) struct dm_target *ti = v->ti; const char *arg_name; - static struct dm_arg _args[] = { + static const struct dm_arg _args[] = { {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"}, }; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 04ae795e8a5f4d5843260772d9f624d2734f3d31..6e54145969c5ce30184cf162283db0f01796f1ca 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -987,24 +987,6 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } -static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, - size_t size) -{ - struct mapped_device *md = dax_get_private(dax_dev); - sector_t sector = pgoff * PAGE_SECTORS; - struct dm_target *ti; - int srcu_idx; - - ti = dm_dax_get_live_target(md, sector, &srcu_idx); - - if (!ti) - goto out; - if (ti->type->dax_flush) - ti->type->dax_flush(ti, pgoff, addr, size); - out: - dm_put_live_table(md, srcu_idx); -} - /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH. @@ -2992,7 +2974,6 @@ static const struct block_device_operations dm_blk_dops = { static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .copy_from_iter = dm_dax_copy_from_iter, - .flush = dm_dax_flush, }; /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 08fcaebc61bdb52b92f8ba37ccd4d2b3dac41779..0ff1bbf6c90e5cebc782268313ce39f8c66f7270 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock); * call has finished, the bio has been linked into some internal structure * and so is visible to ->quiesce(), so we don't need the refcount any more. 
*/ +void md_handle_request(struct mddev *mddev, struct bio *bio) +{ +check_suspended: + rcu_read_lock(); + if (mddev->suspended) { + DEFINE_WAIT(__wait); + for (;;) { + prepare_to_wait(&mddev->sb_wait, &__wait, + TASK_UNINTERRUPTIBLE); + if (!mddev->suspended) + break; + rcu_read_unlock(); + schedule(); + rcu_read_lock(); + } + finish_wait(&mddev->sb_wait, &__wait); + } + atomic_inc(&mddev->active_io); + rcu_read_unlock(); + + if (!mddev->pers->make_request(mddev, bio)) { + atomic_dec(&mddev->active_io); + wake_up(&mddev->sb_wait); + goto check_suspended; + } + + if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) + wake_up(&mddev->sb_wait); +} +EXPORT_SYMBOL(md_handle_request); + static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) { const int rw = bio_data_dir(bio); @@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) bio_endio(bio); return BLK_QC_T_NONE; } -check_suspended: - rcu_read_lock(); - if (mddev->suspended) { - DEFINE_WAIT(__wait); - for (;;) { - prepare_to_wait(&mddev->sb_wait, &__wait, - TASK_UNINTERRUPTIBLE); - if (!mddev->suspended) - break; - rcu_read_unlock(); - schedule(); - rcu_read_lock(); - } - finish_wait(&mddev->sb_wait, &__wait); - } - atomic_inc(&mddev->active_io); - rcu_read_unlock(); /* * save the sectors now since our bio can @@ -310,20 +324,14 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) sectors = bio_sectors(bio); /* bio could be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; - if (!mddev->pers->make_request(mddev, bio)) { - atomic_dec(&mddev->active_io); - wake_up(&mddev->sb_wait); - goto check_suspended; - } + + md_handle_request(mddev, bio); cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); part_stat_unlock(); - if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) - wake_up(&mddev->sb_wait); - return BLK_QC_T_NONE; } @@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws) struct mddev *mddev = container_of(ws, struct mddev, flush_work); struct bio *bio = mddev->flush_bio; + /* + * must reset flush_bio before calling into md_handle_request to avoid a + * deadlock, because other bios passed md_handle_request suspend check + * could wait for this and below md_handle_request could wait for those + * bios because of suspend check + */ + mddev->flush_bio = NULL; + wake_up(&mddev->sb_wait); + if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ bio_endio(bio); else { bio->bi_opf &= ~REQ_PREFLUSH; - mddev->pers->make_request(mddev, bio); + md_handle_request(mddev, bio); } - - mddev->flush_bio = NULL; - wake_up(&mddev->sb_wait); } void md_flush_request(struct mddev *mddev, struct bio *bio) diff --git a/drivers/md/md.h b/drivers/md/md.h index 561d22b9a9a8acc9479cabc389948753ffe703eb..d8287d3cd1bf81b048e90166d91afe76566202b3 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev); extern int md_rdev_init(struct md_rdev *rdev); extern void md_rdev_clear(struct md_rdev *rdev); +extern void md_handle_request(struct mddev *mddev, struct bio *bio); extern void mddev_suspend(struct mddev *mddev); extern void mddev_resume(struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4188a488114814aadf7df7b4c2ba07240b8e0408..928e24a071338ab6e1fe7668c8caa42b2803ccfe 
100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh spin_unlock(&head->batch_head->batch_lock); goto unlock_out; } + /* + * We must assign batch_head of this stripe within the + * batch_lock, otherwise clear_batch_ready of batch head + * stripe could clear BATCH_READY bit of this stripe and + * this stripe->batch_head doesn't get assigned, which + * could confuse clear_batch_ready for this stripe + */ + sh->batch_head = head->batch_head; /* * at this point, head's BATCH_READY could be cleared, but we @@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh */ list_add(&sh->batch_list, &head->batch_list); spin_unlock(&head->batch_head->batch_lock); - - sh->batch_head = head->batch_head; } else { head->batch_head = head; sh->batch_head = head->batch_head; @@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | (1 << STRIPE_PREREAD_ACTIVE) | - (1 << STRIPE_DEGRADED)), + (1 << STRIPE_DEGRADED) | + (1 << STRIPE_ON_UNPLUG_LIST)), head_sh->state & (1 << STRIPE_INSYNC)); sh->check_state = head_sh->check_state; @@ -6568,14 +6575,17 @@ static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) { struct r5conf *conf; - unsigned long new; + unsigned int new; int err; struct r5worker_group *new_groups, *old_groups; int group_cnt, worker_cnt_per_group; if (len >= PAGE_SIZE) return -EINVAL; - if (kstrtoul(page, 10, &new)) + if (kstrtouint(page, 10, &new)) + return -EINVAL; + /* 8192 should be big enough */ + if (new > 8192) return -EINVAL; err = mddev_lock(mddev); diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index dd769e40416f7a6f05c73b4e0dc9f2c2db3beafe..eed6c397d8400b0a25c57feb6ef23dc49eac71df 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -181,7 +181,10 @@ static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg) { static const struct cec_event ev_lost_msgs = { .event = CEC_EVENT_LOST_MSGS, - .lost_msgs.lost_msgs = 1, + .flags = 0, + { + .lost_msgs = { 1 }, + }, }; struct cec_msg_entry *entry; diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c index b94eb1c0023d810c9ed7fcb50b9c88d4b222b5fe..ada26d4acfb4087074dd6af618270eb52235fa20 100644 --- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c +++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c @@ -277,7 +277,7 @@ static int cx25821_get_audio_data(struct cx25821_dev *dev, p = (char *)dev->_audiodata_buf_virt_addr + frame_offset; for (i = 0; i < dev->_audio_lines_count; i++) { - int n = kernel_read(file, file_offset, mybuf, AUDIO_LINE_SIZE); + int n = kernel_read(file, mybuf, AUDIO_LINE_SIZE, &file_offset); if (n < AUDIO_LINE_SIZE) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); @@ -290,7 +290,6 @@ static int cx25821_get_audio_data(struct cx25821_dev *dev, memcpy(p, mybuf, n); p += n; } - file_offset += n; } dev->_audioframe_count++; fput(file); @@ -318,7 +317,7 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, { char *p = (void *)dev->_audiodata_buf_virt_addr; struct file *file; - loff_t offset; + loff_t file_offset = 0; int i, j; file = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); @@ -328,11 +327,11 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, return 
PTR_ERR(file); } - for (j = 0, offset = 0; j < NUM_AUDIO_FRAMES; j++) { + for (j = 0; j < NUM_AUDIO_FRAMES; j++) { for (i = 0; i < dev->_audio_lines_count; i++) { char buf[AUDIO_LINE_SIZE]; - int n = kernel_read(file, offset, buf, - AUDIO_LINE_SIZE); + loff_t offset = file_offset; + int n = kernel_read(file, buf, AUDIO_LINE_SIZE, &file_offset); if (n < AUDIO_LINE_SIZE) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", @@ -344,8 +343,6 @@ static int cx25821_openfile_audio(struct cx25821_dev *dev, if (p) memcpy(p + offset, buf, n); - - offset += n; } dev->_audioframe_count++; } diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index 895f655780a749ddd15b23bb90605c0255c6af3e..55d824b3a808e2376f55cac05ce84be6c0c4171b 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -494,7 +494,7 @@ static struct platform_driver kempld_driver = { .remove = kempld_remove, }; -static struct dmi_system_id kempld_dmi_table[] __initdata = { +static const struct dmi_system_id kempld_dmi_table[] __initconst = { { .ident = "BBD6", .matches = { diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index d18b3d9292fdb718fa87a64542168d5f904aa5e8..3ba04f371380d24af78a99750f548aecfad7a34c 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1279,7 +1279,7 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, } /* use bounce buffer for copy */ - tbuf = (void *)__get_free_page(GFP_TEMPORARY); + tbuf = (void *)__get_free_page(GFP_KERNEL); if (!tbuf) return -ENOMEM; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index affa7370ba827917a78cff55c4e503220ab1ebaa..74c663b1c0a7418c53ef0d7fa4ae27e2499d1ff7 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -242,6 +242,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; + /* + * mmc_init_request() depends on card->bouncesz so it must be calculated + * before blk_init_allocated_queue() starts allocating requests. + */ + card->bouncesz = mmc_queue_calc_bouncesz(host); + mq->card = card; mq->queue = blk_alloc_queue(GFP_KERNEL); if (!mq->queue) @@ -265,7 +271,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); - card->bouncesz = mmc_queue_calc_bouncesz(host); if (card->bouncesz) { blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); blk_queue_max_segments(mq->queue, card->bouncesz / 512); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 02179ed2a40d7a87d0d1ae7ed43e95388ef36a72..8c15637178ff3cec73b637633710f84e4dd93a00 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -5,7 +5,7 @@ comment "MMC/SD/SDIO Host Controller Drivers" config MMC_DEBUG - bool "MMC host drivers debugginG" + bool "MMC host drivers debugging" depends on MMC != n help This is an option for use by developers; most people should diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index b9cc9599879978972b4c8c96f9dbddc26caebb2f..eee08d81b24214c3ea0c5555fa1f1f419249f9d6 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -7,6 +7,7 @@ * * Copyright (C) 2016 Cavium Inc. 
*/ +#include #include #include #include @@ -149,8 +150,11 @@ static int thunder_mmc_probe(struct pci_dev *pdev, for (i = 0; i < CAVIUM_MAX_MMC; i++) { if (host->slot[i]) cvm_mmc_of_slot_remove(host->slot[i]); - if (host->slot_pdev[i]) + if (host->slot_pdev[i]) { + get_device(&host->slot_pdev[i]->dev); of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); + put_device(&host->slot_pdev[i]->dev); + } } clk_disable_unprepare(host->clk); return ret; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index bbaddf18a1b3c4022592f631878441ad19a1fca2..d0ccc6729fd29e734996b28538cd6e8e1745bf2b 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { enum { INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, INTEL_DSM_DRV_STRENGTH = 9, INTEL_DSM_D3_RETUNE = 10, }; @@ -557,6 +558,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, sdhci_writel(host, val, INTEL_HS400_ES_REG); } +static void sdhci_intel_voltage_switch(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + struct intel_host *intel_host = sdhci_pci_priv(slot); + struct device *dev = &slot->chip->pdev->dev; + u32 result = 0; + int err; + + err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result); + pr_debug("%s: %s DSM error %d result %u\n", + mmc_hostname(host->mmc), __func__, err, result); +} + static const struct sdhci_ops sdhci_intel_byt_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_intel_set_power, @@ -565,6 +579,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = { .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, + .voltage_switch = sdhci_intel_voltage_switch, }; static void byt_read_dsm(struct sdhci_pci_slot *slot) diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 12cf8288d6635eafef4677630d669c1a60238b45..a7293e186e03fc44ccb271405e99fbfaef06d770 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -129,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host) #define CMDREQ_TIMEOUT 5000 -#ifdef CONFIG_MMC_DEBUG - -#define STATUS_TO_TEXT(a, status, i) \ - do { \ - if ((status) & TMIO_STAT_##a) { \ - if ((i)++) \ - printk(KERN_DEBUG " | "); \ - printk(KERN_DEBUG #a); \ - } \ - } while (0) - -static void pr_debug_status(u32 status) -{ - int i = 0; - - pr_debug("status: %08x = ", status); - STATUS_TO_TEXT(CARD_REMOVE, status, i); - STATUS_TO_TEXT(CARD_INSERT, status, i); - STATUS_TO_TEXT(SIGSTATE, status, i); - STATUS_TO_TEXT(WRPROTECT, status, i); - STATUS_TO_TEXT(CARD_REMOVE_A, status, i); - STATUS_TO_TEXT(CARD_INSERT_A, status, i); - STATUS_TO_TEXT(SIGSTATE_A, status, i); - STATUS_TO_TEXT(CMD_IDX_ERR, status, i); - STATUS_TO_TEXT(STOPBIT_ERR, status, i); - STATUS_TO_TEXT(ILL_FUNC, status, i); - STATUS_TO_TEXT(CMD_BUSY, status, i); - STATUS_TO_TEXT(CMDRESPEND, status, i); - STATUS_TO_TEXT(DATAEND, status, i); - STATUS_TO_TEXT(CRCFAIL, status, i); - STATUS_TO_TEXT(DATATIMEOUT, status, i); - STATUS_TO_TEXT(CMDTIMEOUT, status, i); - STATUS_TO_TEXT(RXOVERFLOW, status, i); - STATUS_TO_TEXT(TXUNDERRUN, status, i); - STATUS_TO_TEXT(RXRDY, status, i); - STATUS_TO_TEXT(TXRQ, status, i); - STATUS_TO_TEXT(ILL_ACCESS, status, i); - printk("\n"); -} - -#else -#define pr_debug_status(s) do { } while (0) -#endif - static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct tmio_mmc_host *host = 
mmc_priv(mmc); @@ -762,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; - pr_debug_status(status); - pr_debug_status(ireg); - /* Clear the status except the interrupt status */ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index 3e33ab66eb24f2e4ddb8da5afab4691a038d03f2..77b1d8013295fb4e8e9c457a10ed07fbd63b16a1 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c @@ -114,12 +114,6 @@ ltq_mtd_probe(struct platform_device *pdev) struct cfi_private *cfi; int err; - if (of_machine_is_compatible("lantiq,falcon") && - (ltq_boot_select() != BS_FLASH)) { - dev_err(&pdev->dev, "invalid bootstrap options\n"); - return -ENODEV; - } - ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL); if (!ltq_mtd) return -ENOMEM; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 5736b0c90b339b6bc3e8e1ae3189341910b958f2..a308e707392d595902b77a03e2078ffe8da97d9e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, slave->mtd.erasesize = parent->erasesize; } + /* + * Slave erasesize might differ from the master one if the master + * exposes several regions with different erasesize. Adjust + * wr_alignment accordingly. + */ + if (!(slave->mtd.flags & MTD_NO_ERASE)) + wr_alignment = slave->mtd.erasesize; + tmp = slave->offset; remainder = do_div(tmp, wr_alignment); if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 146af82183145d9d5164665aeaccdf789c4cca98..8268636675efc8b3d81959f0f1911a145c42fcc1 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, size += (req->ecc.strength + 1) * sizeof(u16); /* Reserve space for mu, dmu and delta. 
*/ size = ALIGN(size, sizeof(s32)); - size += (req->ecc.strength + 1) * sizeof(s32); + size += (req->ecc.strength + 1) * sizeof(s32) * 3; user = kzalloc(size, GFP_KERNEL); if (!user) diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index c3bb358ef01eee5b7c7dc0b0ad72b9a3e47f5220..5796468db653450494876bcf6c5da772f7f6bd3e 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) } res = clk_prepare_enable(host->clk); if (res) - goto err_exit1; + goto err_put_clk; nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; nand_chip->dev_ready = lpc32xx_nand_device_ready; @@ -814,6 +814,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) dma_release_channel(host->dma_chan); err_exit2: clk_disable_unprepare(host->clk); +err_put_clk: clk_put(host->clk); err_exit1: lpc32xx_wp_enable(host); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index bcc8cef1c615c5b0336f5310f56ee29a9c44e88a..12edaae17d81f2228eefcc50b9d5de7433119987 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { - int chipnr, realpage, page, blockmask, column; + int chipnr, realpage, page, column; struct nand_chip *chip = mtd_to_nand(mtd); uint32_t writelen = ops->len; @@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, realpage = (int)(to >> chip->page_shift); page = realpage & chip->pagemask; - blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; /* Invalidate the page cache, when we write to the cached page */ if (to <= ((loff_t)chip->pagebuf << chip->page_shift) && diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index fec613221958c435ab3343e5e16ee02bb2174093..246b4393118e4df5c645b3e4eb338a70958ba4b9 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -1356,7 +1356,7 @@ static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_ if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); - tx = kernel_read(file, pos, buf, count); + tx = kernel_read(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); put_pages(ns); return tx; @@ -1372,7 +1372,7 @@ static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); - tx = kernel_write(file, buf, count, pos); + tx = kernel_write(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); put_pages(ns); return tx; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index cf1d4a15e10a63394b0410f4349e711f602c6c5d..19c000722cbc86f3246c0b84055eca92db331424 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, * @nor: pointer to a 'struct spi_nor' * @addr: offset in the SFDP area to start reading data from * @len: number of bytes to read - * @buf: buffer where the SFDP data are copied into + * @buf: buffer where the SFDP data are copied into (dma-safe memory) * * Whatever the actual numbers of bytes for address and dummy cycles are * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always @@ -1829,6 +1829,36 @@ static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr, return 
ret; } +/** + * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters. + * @nor: pointer to a 'struct spi_nor' + * @addr: offset in the SFDP area to start reading data from + * @len: number of bytes to read + * @buf: buffer where the SFDP data are copied into + * + * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not + * guaranteed to be dma-safe. + * + * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp() + * otherwise. + */ +static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr, + size_t len, void *buf) +{ + void *dma_safe_buf; + int ret; + + dma_safe_buf = kmalloc(len, GFP_KERNEL); + if (!dma_safe_buf) + return -ENOMEM; + + ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf); + memcpy(buf, dma_safe_buf, len); + kfree(dma_safe_buf); + + return ret; +} + struct sfdp_parameter_header { u8 id_lsb; u8 minor; @@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, bfpt_header->length * sizeof(u32)); addr = SFDP_PARAM_HEADER_PTP(bfpt_header); memset(&bfpt, 0, sizeof(bfpt)); - err = spi_nor_read_sfdp(nor, addr, len, &bfpt); + err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt); if (err < 0) return err; @@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, params->size = bfpt.dwords[BFPT_DWORD(2)]; if (params->size & BIT(31)) { params->size &= ~BIT(31); + + /* + * Prevent overflows on params->size. Anyway, a NOR of 2^64 + * bits is unlikely to exist so this error probably means + * the BFPT we are reading is corrupted/wrong. + */ + if (params->size > 63) + return -EINVAL; + params->size = 1ULL << params->size; } else { params->size++; @@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, int i, err; /* Get the SFDP header. 
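 * (Illustrative note, not part of the original patch: both the sfdp_header
 * read here and the bfpt read above land in plain on-stack structures, which
 * is why these call sites use the kmalloc-based bounce wrapper
 * spi_nor_read_sfdp_dma_unsafe() introduced above rather than calling
 * spi_nor_read_sfdp() directly.)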
*/ - err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header); + err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header); if (err < 0) return err; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index c3963f88044818d15a3b6a45d7a940f56d1972da..b210fdb31c983164af6cae9324498d03628a5b9d 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Initialize the gendisk of this ubiblock device */ gd = alloc_disk(1); if (!gd) { - pr_err("UBI: block: alloc_disk failed"); + pr_err("UBI: block: alloc_disk failed\n"); ret = -ENODEV; goto out_free_dev; } @@ -607,7 +607,7 @@ static void __init ubiblock_create_from_param(void) desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); if (IS_ERR(desc)) { pr_err( - "UBI: block: can't open volume on ubi%d_%d, err=%ld", + "UBI: block: can't open volume on ubi%d_%d, err=%ld\n", p->ubi_num, p->vol_id, PTR_ERR(desc)); continue; } @@ -618,7 +618,7 @@ static void __init ubiblock_create_from_param(void) ret = ubiblock_create(&vi); if (ret) { pr_err( - "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d", + "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n", vi.name, p->ubi_num, p->vol_id, ret); continue; } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index d854521962ef0921d917a03989ec7b1938da5fd8..842550b5712ae48a18e24c2c4945095b0c596ccf 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -825,7 +825,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, for (i = 0; i < UBI_MAX_DEVICES; i++) { ubi = ubi_devices[i]; if (ubi && mtd->index == ubi->mtd->index) { - pr_err("ubi: mtd%d is already attached to ubi%d", + pr_err("ubi: mtd%d is already attached to ubi%d\n", mtd->index, i); return -EEXIST; } @@ -840,7 +840,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, * no sense to attach emulated MTD devices, so we prohibit this. */ if (mtd->type == MTD_UBIVOLUME) { - pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI", + pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n", mtd->index); return -EINVAL; } @@ -851,7 +851,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, if (!ubi_devices[ubi_num]) break; if (ubi_num == UBI_MAX_DEVICES) { - pr_err("ubi: only %d UBI devices may be created", + pr_err("ubi: only %d UBI devices may be created\n", UBI_MAX_DEVICES); return -ENFILE; } @@ -861,7 +861,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, /* Make sure ubi_num is not busy */ if (ubi_devices[ubi_num]) { - pr_err("ubi: ubi%i already exists", ubi_num); + pr_err("ubi: ubi%i already exists\n", ubi_num); return -EEXIST; } } @@ -1166,7 +1166,7 @@ static int __init ubi_init(void) BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); if (mtd_devs > UBI_MAX_DEVICES) { - pr_err("UBI error: too many MTD devices, maximum is %d", + pr_err("UBI error: too many MTD devices, maximum is %d\n", UBI_MAX_DEVICES); return -EINVAL; } @@ -1178,7 +1178,7 @@ static int __init ubi_init(void) err = misc_register(&ubi_ctrl_cdev); if (err) { - pr_err("UBI error: cannot register device"); + pr_err("UBI error: cannot register device\n"); goto out; } @@ -1205,7 +1205,7 @@ static int __init ubi_init(void) mtd = open_mtd_device(p->name); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); - pr_err("UBI error: cannot open mtd %s, error %d", + pr_err("UBI error: cannot open mtd %s, error %d\n", p->name, err); /* See comment below re-ubi_is_module(). 
*/ if (ubi_is_module()) @@ -1218,7 +1218,7 @@ static int __init ubi_init(void) p->vid_hdr_offs, p->max_beb_per1024); mutex_unlock(&ubi_devices_mutex); if (err < 0) { - pr_err("UBI error: cannot attach mtd%d", + pr_err("UBI error: cannot attach mtd%d\n", mtd->index); put_mtd_device(mtd); @@ -1242,7 +1242,7 @@ static int __init ubi_init(void) err = ubiblock_init(); if (err) { - pr_err("UBI error: block: cannot initialize, error %d", err); + pr_err("UBI error: block: cannot initialize, error %d\n", err); /* See comment above re-ubi_is_module(). */ if (ubi_is_module()) @@ -1265,7 +1265,7 @@ static int __init ubi_init(void) misc_deregister(&ubi_ctrl_cdev); out: class_unregister(&ubi_class); - pr_err("UBI error: cannot initialize UBI, error %d", err); + pr_err("UBI error: cannot initialize UBI, error %d\n", err); return err; } late_initcall(ubi_init); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index b44c8d348e78ec0b19c884178e54c83a0967dc90..5a832bc79b1be0032735d2b16ac6add4eb54d872 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1667,7 +1667,7 @@ int ubi_update_fastmap(struct ubi_device *ubi) ret = invalidate_fastmap(ubi); if (ret < 0) { - ubi_err(ubi, "Unable to invalidiate current fastmap!"); + ubi_err(ubi, "Unable to invalidate current fastmap!"); ubi_ro_mode(ubi); } else { return_fm_pebs(ubi, old_fm); diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 22ed3f627506c4e2b4daee328b97fa8dbfce3896..bfceae5a890eb477a3b0fc74cb4af993c473486e 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -229,7 +229,7 @@ struct ubi_ec_hdr { * copy. UBI also calculates data CRC when the data is moved and stores it at * the @data_crc field of the copy (P1). So when UBI needs to pick one physical * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is - * examined. If it is cleared, the situation* is simple and the newer one is + * examined. If it is cleared, the situation is simple and the newer one is * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC * checksum is correct, this physical eraseblock is selected (P1). Otherwise * the older one (P) is selected. @@ -389,7 +389,7 @@ struct ubi_vtbl_record { #define UBI_FM_POOL_MAGIC 0x67AF4D08 #define UBI_FM_EBA_MAGIC 0xf0c040a8 -/* A fastmap supber block can be located between PEB 0 and +/* A fastmap super block can be located between PEB 0 and * UBI_FM_MAX_START */ #define UBI_FM_MAX_START 64 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fc63992ab0e0ad3300aa9a8d991fc107788ae780..c99dc59d729b30dd3ffc4a673deae38630fb0a84 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4289,7 +4289,7 @@ static int bond_check_params(struct bond_params *params) int bond_mode = BOND_MODE_ROUNDROBIN; int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; int lacp_fast = 0; - int tlb_dynamic_lb = 0; + int tlb_dynamic_lb; /* Convert string parameters. 
*/ if (mode) { @@ -4601,16 +4601,13 @@ static int bond_check_params(struct bond_params *params) } ad_user_port_key = valptr->value; - if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { - bond_opt_initstr(&newval, "default"); - valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), - &newval); - if (!valptr) { - pr_err("Error: No tlb_dynamic_lb default value"); - return -EINVAL; - } - tlb_dynamic_lb = valptr->value; + bond_opt_initstr(&newval, "default"); + valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval); + if (!valptr) { + pr_err("Error: No tlb_dynamic_lb default value"); + return -EINVAL; } + tlb_dynamic_lb = valptr->value; if (lp_interval == 0) { pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n", diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index a12d603d41c6616ed3cba926b204f94492678ff3..5931aa2fe9974c1d20934be9d9ade6451af0cfcc 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -754,6 +754,9 @@ static int bond_option_mode_set(struct bonding *bond, bond->params.miimon); } + if (newval->value == BOND_MODE_ALB) + bond->params.tlb_dynamic_lb = 1; + /* don't cache arp_validate between modes */ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; bond->params.mode = newval->value; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a6572b51435aef4fd630a3868b546a35b7e500cf..83eec9a8c27511b40e39d18b072dc0afc9aacff4 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); } +static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, + u64 *tx_bytes, u64 *tx_packets) +{ + struct bcm_sysport_tx_ring *ring; + u64 bytes = 0, packets = 0; + unsigned int start; + unsigned int q; + + for (q = 0; q < priv->netdev->num_tx_queues; q++) { + ring = &priv->tx_rings[q]; + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + bytes = ring->bytes; + packets = ring->packets; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + + *tx_bytes += bytes; + *tx_packets += packets; + } +} + static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct u64_stats_sync *syncp = &priv->syncp; struct bcm_sysport_tx_ring *ring; + u64 tx_bytes = 0, tx_packets = 0; unsigned int start; int i, j; - if (netif_running(dev)) + if (netif_running(dev)) { bcm_sysport_update_mib_counters(priv); + bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets); + stats64->tx_bytes = tx_bytes; + stats64->tx_packets = tx_packets; + } for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { const struct bcm_sysport_stats *s; @@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev, continue; p += s->stat_offset; - if (s->stat_sizeof == sizeof(u64)) + if (s->stat_sizeof == sizeof(u64) && + s->type == BCM_SYSPORT_STAT_NETDEV64) { do { start = u64_stats_fetch_begin_irq(syncp); data[i] = *(u64 *)p; } while (u64_stats_fetch_retry_irq(syncp, start)); - else + } else data[i] = *(u32 *)p; j++; } @@ -1716,30 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev, { struct bcm_sysport_priv *priv = 
netdev_priv(dev); struct bcm_sysport_stats64 *stats64 = &priv->stats64; - struct bcm_sysport_tx_ring *ring; - u64 tx_packets = 0, tx_bytes = 0; unsigned int start; - unsigned int q; netdev_stats_to_stats64(stats, &dev->stats); - for (q = 0; q < dev->num_tx_queues; q++) { - ring = &priv->tx_rings[q]; - do { - start = u64_stats_fetch_begin_irq(&priv->syncp); - tx_bytes = ring->bytes; - tx_packets = ring->packets; - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); - - stats->tx_bytes += tx_bytes; - stats->tx_packets += tx_packets; - } - - /* lockless update tx_bytes and tx_packets */ - u64_stats_update_begin(&priv->syncp); - stats64->tx_bytes = stats->tx_bytes; - stats64->tx_packets = stats->tx_packets; - u64_stats_update_end(&priv->syncp); + bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, + &stats->tx_packets); do { start = u64_stats_fetch_begin_irq(&priv->syncp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index ccd699fb2d706ec51252998f7957ae875cd812ea..7dd3d131043a76ba4b83b43f755d2b782c07139e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, { int rc = 0; + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) + return -EOPNOTSUPP; + switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index af33dc15c55f98e518c222ed4e1a3d1a551bcdcb..656e6af70f0a03c0cf55f317784da0e1e7153994 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11536,11 +11536,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_napi_enable(tp); for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; err = tg3_request_irq(tp, i); if (err) { for (i--; i >= 0; i--) { - tnapi = &tp->napi[i]; + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); } goto out_napi_fini; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 674cf9d13b9841de0176aaf5d11e67d2c37eb452..8984c49388810aa8bfcc9ced97f70c433ace8db9 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -930,6 +930,14 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb) return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; } +static inline bool is_ipv6_ext_hdr(struct sk_buff *skb) +{ + if (ip_hdr(skb)->version == 6) + return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr); + else + return false; +} + #define be_error_recovering(adapter) \ (adapter->flags & BE_FLAGS_TRY_RECOVERY) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 319eee36649bedb17717c8b21d908903555708e3..0e3d9f39a80756b42542d60ff1a8df7846cb837a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5089,6 +5089,20 @@ static netdev_features_t be_features_check(struct sk_buff *skb, struct be_adapter *adapter = netdev_priv(dev); u8 l4_hdr = 0; + if (skb_is_gso(skb)) { + /* IPv6 TSO requests with extension hdrs are a problem + * to Lancer and BE3 HW. Disable TSO6 feature. 
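 * (Illustrative note, not part of the original patch: ipv6_ext_hdr() matches
 * any recognised extension header, e.g. Hop-by-Hop, Routing or Fragment, so
 * clearing NETIF_F_TSO6 for such an skb makes the core networking stack fall
 * back to software GSO for that packet on non-Skyhawk adapters.)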
+ */ + if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb)) + features &= ~NETIF_F_TSO6; + + /* Lancer cannot handle the packet with MSS less than 256. + * Disable the GSO support in such cases + */ + if (lancer_chip(adapter) && skb_shinfo(skb)->gso_size < 256) + features &= ~NETIF_F_GSO_MASK; + } + /* The code below restricts offload features for some tunneled and * Q-in-Q packets. * Offload features for normal (non tunnel) packets are unchanged. diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 38c7b21e5d63faaeae25ad45715aecb5a3469578..ede1876a9a191c2c1a63ae5ff3f839deb826d54b 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -374,8 +374,8 @@ struct bufdesc_ex { #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) -#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) +#define FEC_NAPI_IMASK FEC_ENET_MII #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* ENET interrupt coalescing macro define */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 56f56d6ada9cfdfa6f719eb6d9793efb3a614ccd..3dc2d771a222147c89a3814a34d736e0bcf0d5c4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) if (int_events == 0) return false; - if (int_events & FEC_ENET_RXF) + if (int_events & FEC_ENET_RXF_0) fep->work_rx |= (1 << 2); if (int_events & FEC_ENET_RXF_1) fep->work_rx |= (1 << 0); if (int_events & FEC_ENET_RXF_2) fep->work_rx |= (1 << 1); - if (int_events & FEC_ENET_TXF) + if (int_events & FEC_ENET_TXF_0) fep->work_tx |= (1 << 2); if (int_events & FEC_ENET_TXF_1) fep->work_tx |= (1 << 0); @@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id) } if (fep->ptp_clock) - fec_ptp_check_pps_event(fep); - + if (fec_ptp_check_pps_event(fep)) + ret = IRQ_HANDLED; return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 59efbd605416fd5eed5f7987e163b8cf92e6889a..5bcb2238acb2b11368635ee70042130b02f0d7ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, } static int hnae3_match_n_instantiate(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, - bool is_reg, bool *matched) + struct hnae3_ae_dev *ae_dev, bool is_reg) { int ret; - *matched = false; - /* check if this client matches the type of ae_dev */ if (!(hnae3_client_match(client->type, ae_dev->dev_type) && hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } - /* there is a match of client and dev */ - *matched = true; /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { @@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client) { struct hnae3_client *client_tmp; struct hnae3_ae_dev *ae_dev; - bool matched; int ret = 0; mutex_lock(&hnae3_common_lock); @@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_match_n_instantiate(client, ae_dev, true, 
&matched); + ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed for port\n"); @@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client); void hnae3_unregister_client(struct hnae3_client *client) { struct hnae3_ae_dev *ae_dev; - bool matched; mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false, &matched); + hnae3_match_n_instantiate(client, ae_dev, false); } list_del(&client->node); @@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; - bool matched; int ret = 0; mutex_lock(&hnae3_common_lock); @@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true, - &matched); + ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed\n"); - if (matched) - break; } } @@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; - bool matched; mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ @@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) /* check the client list for the match with this ae_dev type and * un-initialize the figure out client instance */ - list_for_each_entry(client, &hnae3_client_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false, - &matched); - if (matched) - break; - } + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); @@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - bool matched; int ret = 0; mutex_lock(&hnae3_common_lock); @@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true, - &matched); + ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed\n"); - if (matched) - break; } out_err: @@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - bool matched; mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ @@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) if (!id) continue; - list_for_each_entry(client, &hnae3_client_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false, - &matched); - if (matched) - break; - } + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 
b2f28ae81273d71a6e31e3a70575570c0aae9ea2..1a01cadfe5f3035d21b35602ccf8c1c016854574 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -49,7 +49,17 @@ #define HNAE3_CLASS_NAME_SIZE 16 #define HNAE3_DEV_INITED_B 0x0 -#define HNAE_DEV_SUPPORT_ROCE_B 0x1 +#define HNAE3_DEV_SUPPORT_ROCE_B 0x1 +#define HNAE3_DEV_SUPPORT_DCB_B 0x2 + +#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ + BIT(HNAE3_DEV_SUPPORT_ROCE_B)) + +#define hnae3_dev_roce_supported(hdev) \ + hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + +#define hnae3_dev_dcb_supported(hdev) \ + hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) @@ -366,12 +376,12 @@ struct hnae3_ae_algo { struct hnae3_tc_info { u16 tqp_offset; /* TQP offset from base TQP */ u16 tqp_count; /* Total TQPs */ - u8 up; /* user priority */ u8 tc; /* TC index */ bool enable; /* If this TC is enable or not */ }; #define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ @@ -379,6 +389,7 @@ struct hnae3_knic_private_info { u16 num_desc; u8 num_tc; /* Total number of enabled TCs */ + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ u16 num_tqps; /* total number of TQPs in this handle */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 91ae0135ee503848ec1ed8d3d691632485ad2170..758cf394813126450ce190b24d16af5e41f7b92b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -238,7 +238,7 @@ struct hclge_tqp_map { u8 rsv[18]; }; -#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11 +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10 enum hclge_int_type { HCLGE_INT_TX, @@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain { #define HCLGE_INT_TYPE_S 0 #define HCLGE_INT_TYPE_M 0x3 #define HCLGE_TQP_ID_S 2 -#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S) +#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S) +#define HCLGE_INT_GL_IDX_S 13 +#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S) __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; + u8 rsv; }; #define HCLGE_TC_NUM 8 @@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc { struct hclge_rx_priv_buff { __le16 buf_num[HCLGE_TC_NUM]; - u8 rsv[8]; + __le16 shared_buf; + u8 rsv[6]; }; struct hclge_query_version { @@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue { #define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ +#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bb45365fb817eb648dcaa160e2173257e027ad2a..e0685e630afec795db1e5f1d3221919cd2766ba8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, {PCI_VDEVICE(HUAWEI, 
HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, - /* Required last entry */ - {0, } -}; - -static const struct pci_device_id roce_pci_tbl[] = { - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, - /* Required last entry */ + /* required last entry */ {0, } }; @@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_tqps = __le16_to_cpu(req->tqp_num); hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; - if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) { + if (hnae3_dev_roce_supported(hdev)) { hdev->num_roce_msix = hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); @@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->base_tqp_pid = 0; hdev->rss_size_max = 1; hdev->rx_buf_len = cfg.rx_buf_len; - for (i = 0; i < ETH_ALEN; i++) - hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i]; + ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; + hdev->hw.mac.phy_addr = cfg.phy_addr; hdev->num_desc = cfg.tqp_desc_num; hdev->tm_info.num_pg = 1; hdev->tm_info.num_tc = cfg.tc_num; @@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) tc_num = hclge_get_tc_num(hdev); pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); - shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + if (hnae3_dev_dcb_supported(hdev)) + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + else + shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; + shared_buf_tc = pfc_enable_num * hdev->mps + (tc_num - pfc_enable_num) * hdev->mps / 2 + hdev->mps; @@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) struct hclge_priv_buf *priv; int i; + /* When DCB is not supported, rx private + * buffer is not allocated. 
+ */ + if (!hnae3_dev_dcb_supported(hdev)) { + if (!hclge_is_rx_buf_ok(hdev, rx_all)) + return -ENOMEM; + + return 0; + } + /* step 1, try to alloc private buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { priv = &hdev->priv_buf[i]; @@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) priv->wl.high = 2 * hdev->mps; priv->buf_size = priv->wl.high; } + } else { + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; } } @@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { priv = &hdev->priv_buf[i]; - if (hdev->hw_tc_map & BIT(i)) - priv->enable = 1; + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = 128; @@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); } + req->shared_buf = + cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | + (1 << HCLGE_TC0_PRI_BUF_EN_B)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev) return ret; } - ret = hclge_rx_priv_wl_config(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "could not configure rx private waterline %d\n", ret); - return ret; - } + if (hnae3_dev_dcb_supported(hdev)) { + ret = hclge_rx_priv_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", + ret); + return ret; + } - ret = hclge_common_thrd_config(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "could not configure common threshold %d\n", ret); - return ret; + ret = hclge_common_thrd_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", + ret); + return ret; + } } ret = hclge_common_wl_config(hdev); @@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) u16 tc_valid[HCLGE_MAX_TC_NUM]; u16 tc_size[HCLGE_MAX_TC_NUM]; u32 *rss_indir = NULL; + u16 rss_size = 0, roundup_size; const u8 *key; int i, ret, j; @@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { vport[j].rss_indirection_tbl[i] = - i % hdev->rss_size_max; + i % vport[j].alloc_rss_size; + + /* vport 0 is for PF */ + if (j != 0) + continue; + + rss_size = vport[j].alloc_rss_size; rss_indir[i] = vport[j].rss_indirection_tbl[i]; } } @@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) if (ret) goto err; + /* Each TC have the same queue size, and tc_size set to hardware is + * the log2 of roundup power of two of rss_size, the acutal queue + * size is limited by indirection table. 
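 * (Worked example, illustrative only: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so a tc_size of 5 is
 * programmed while the number of queues actually used per TC stays bounded
 * by the indirection table entries filled in above.)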
+ */ + if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %d\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - if (hdev->hw_tc_map & BIT(i)) - tc_valid[i] = 1; - else - tc_valid[i] = 0; + tc_valid[i] = 0; - switch (hdev->rss_size_max) { - case HCLGE_RSS_TC_SIZE_0: - tc_size[i] = 0; - break; - case HCLGE_RSS_TC_SIZE_1: - tc_size[i] = 1; - break; - case HCLGE_RSS_TC_SIZE_2: - tc_size[i] = 2; - break; - case HCLGE_RSS_TC_SIZE_3: - tc_size[i] = 3; - break; - case HCLGE_RSS_TC_SIZE_4: - tc_size[i] = 4; - break; - case HCLGE_RSS_TC_SIZE_5: - tc_size[i] = 5; - break; - case HCLGE_RSS_TC_SIZE_6: - tc_size[i] = 6; - break; - case HCLGE_RSS_TC_SIZE_7: - tc_size[i] = 7; - break; - default: - break; - } - tc_offset[i] = hdev->rss_size_max * i; + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = rss_size * i; } + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); err: @@ -2679,7 +2699,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->vfid = vport->vport_id; if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -2763,8 +2787,12 @@ static int hclge_unmap_ring_from_vector( hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, node->tqp_index); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->vfid = vport->vport_id; if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -2778,7 +2806,7 @@ static int hclge_unmap_ring_from_vector( } i = 0; hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_ADD_RING_TO_VECTOR, + HCLGE_OPC_DEL_RING_TO_VECTOR, false); req->int_vector_id = vector_id; } @@ -3665,6 +3693,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_VLAN_TYPE_VF_TABLE 0 #define HCLGE_VLAN_TYPE_PORT_TABLE 1 + struct hnae3_handle *handle; int ret; ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, @@ -3674,8 +3703,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, true); + if (ret) + return ret; - return ret; + handle = &hdev->vport[0].nic; + return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) @@ -3920,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, goto err; if (hdev->roce_client && - hnae_get_bit(hdev->ae_dev->flag, - HNAE_DEV_SUPPORT_ROCE_B)) { + hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; ret = hclge_init_roce_base_info(vport); @@ -3944,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, break; case HNAE3_CLIENT_ROCE: - if (hnae_get_bit(hdev->ae_dev->flag, - 
HNAE_DEV_SUPPORT_ROCE_B)) { + if (hnae3_dev_roce_supported(hdev)) { hdev->roce_client = client; vport->roce.client = client; } @@ -4057,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; - const struct pci_device_id *id; struct hclge_dev *hdev; int ret; @@ -4072,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->ae_dev = ae_dev; ae_dev->priv = hdev; - id = pci_match_id(roce_pci_tbl, ae_dev->pdev); - if (id) - hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1); - ret = hclge_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI init failed\n"); @@ -4138,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_rss_init_hw(hdev); - if (ret) { - dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); - return ret; - } - ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -4156,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + setup_timer(&hdev->service_timer, hclge_service_timer, (unsigned long)hdev); INIT_WORK(&hdev->service_task, hclge_service_task); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index edb10ad075eb2520a3f671d8062f043ddb1c8280..9fcfd9395424538870f4b837ad9ade21476867ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -176,7 +176,6 @@ struct hclge_pg_info { struct hclge_tc_info { u8 tc_id; u8 tc_sch_mode; /* 0: sp; 1: dwrr */ - u8 up; u8 pgid; u32 bw_limit; }; @@ -197,6 +196,7 @@ struct hclge_tm_info { u8 num_tc; u8 num_pg; /* It must be 1 if vNET-Base schd */ u8 pg_dwrr[HCLGE_PG_NUM]; + u8 prio_tc[HNAE3_MAX_USER_PRIO]; struct hclge_pg_info pg_info[HCLGE_PG_NUM]; struct hclge_tc_info tc_info[HNAE3_MAX_TC]; enum hclge_fc_mode fc_mode; @@ -477,6 +477,7 @@ struct hclge_vport { u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ /* User configured lookup table entries */ u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + u16 alloc_rss_size; u16 qs_offset; u16 bw_limit; /* VSI BW Limit (0 = disabled) */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1c577d268f008b4d39b35164b9205ba35eaefdaa..73a75d7cc5517819b8135324dbc481a0aa993423 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) { u8 tc; - for (tc = 0; tc < hdev->tm_info.num_tc; tc++) - if (hdev->tm_info.tc_info[tc].up == pri_id) - break; + tc = hdev->tm_info.prio_tc[pri_id]; if (tc >= hdev->tm_info.num_tc) return -EINVAL; @@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); - for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) { + for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) { ret = hclge_fill_pri_array(hdev, pri, pri_id); if (ret) return ret; @@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pg_id = pg_id; - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, 
ir_b); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); - hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); + hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pri_id = pri_id; - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); - hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); + hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps / kinfo->num_tc); vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; vport->dwrr = 100; /* 100 percent as init */ + vport->alloc_rss_size = kinfo->rss_size; for (i = 0; i < kinfo->num_tc; i++) { if (hdev->hw_tc_map & BIT(i)) { @@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; kinfo->tc_info[i].tqp_count = kinfo->rss_size; kinfo->tc_info[i].tc = i; - kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up; } else { /* Set to default queue if TC is disable */ kinfo->tc_info[i].enable = false; kinfo->tc_info[i].tqp_offset = 0; kinfo->tc_info[i].tqp_count = 1; kinfo->tc_info[i].tc = 0; - kinfo->tc_info[i].up = 0; } } + + memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, + FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); } static void hclge_tm_vport_info_update(struct hclge_dev *hdev) @@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; - hdev->tm_info.tc_info[i].up = i; hdev->tm_info.tc_info[i].pgid = 0; hdev->tm_info.tc_info[i].bw_limit = hdev->tm_info.pg_info[0].bw_limit; } + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = + (i >= hdev->tm_info.num_tc) ? 
0 : i; + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; } @@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (ret) return ret; + /* Only DCB-supported dev supports qset back pressure setting */ + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + for (i = 0; i < hdev->tm_info.num_tc; i++) { ret = hclge_tm_qs_bp_cfg(hdev, i); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 7e67337dfaf2c36e8c0ecc79f68aaa3f7bc7a32c..85158b0d73fe395ffa90de916c789e5afca5d15a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd { u32 rsvd1; }; -#define hclge_tm_set_feild(dest, string, val) \ +#define hclge_tm_set_field(dest, string, val) \ hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH), val) -#define hclge_tm_get_feild(src, string) \ +#define hclge_tm_get_field(src, string) \ hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 1c3e29447891644f48214aee3edceb9efd0ba1ef..35369e1c8036f3abbade76b63eed2a103170b4c7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -41,11 +41,16 @@ static struct hnae3_client client; static const struct pci_device_id hns3_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, /* required last entry */ {0, } }; @@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } ae_dev->pdev = pdev; + ae_dev->flag = ent->driver_data; ae_dev->dev_type = HNAE3_DEV_KNIC; pci_set_drvdata(pdev, ae_dev); @@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev) eth_hw_addr_random(netdev); dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); - /* Also copy this new MAC address into hdev */ - if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); } + + if (h->ae_algo->ops->set_mac_addr) + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + } static void hns3_nic_set_priv_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 2c74baa2398a016fb9468f8a1b4b3aa5d0dc2a05..fff09dcf9e3463f8b10800d2754a4631770d2d63 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget) unsigned long flags; MAL_DBG2(mal, "poll(%d)" NL, budget); - again: + /* Process TX skbs 
*/ list_for_each(l, &mal->poll_list) { struct mal_commac *mc = @@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget) spin_lock_irqsave(&mal->lock, flags); mal_disable_eob_irq(mal); spin_unlock_irqrestore(&mal->lock, flags); - goto again; } mc->ops->poll_tx(mc->dev); } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 8a835e82256a286a26080b36e6c77528ed5b0127..eef35bf3e8490f3832e62270d129d12bf098fae3 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4193,7 +4193,7 @@ static struct pci_driver skge_driver = { .driver.pm = SKGE_PM_OPS, }; -static struct dmi_system_id skge_32bit_dma_boards[] = { +static const struct dmi_system_id skge_32bit_dma_boards[] = { { .ident = "Gigabyte nForce boards", .matches = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ed7cd6c48019ad1526a01050e5e648034b451a1a..696b99e65a5a60e6e74a55a0e9cc6e7c9c1e2348 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -575,15 +575,14 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_span_entry * -mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port) +mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) { - struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; int i; for (i = 0; i < mlxsw_sp->span.entries_count; i++) { struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; - if (curr->used && curr->local_port == port->local_port) + if (curr->used && curr->local_port == local_port) return curr; } return NULL; @@ -594,7 +593,8 @@ static struct mlxsw_sp_span_entry { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find(port); + span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp, + port->local_port); if (span_entry) { /* Already exists, just take a reference */ span_entry->ref_count++; @@ -783,12 +783,13 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, } static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, - struct mlxsw_sp_port *to, + u8 destination_port, enum mlxsw_sp_span_type type) { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find(to); + span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp, + destination_port); if (!span_entry) { netdev_err(from->dev, "no span entry found\n"); return; @@ -1563,14 +1564,12 @@ static void mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_port_mall_mirror_tc_entry *mirror) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; enum mlxsw_sp_span_type span_type; - struct mlxsw_sp_port *to_port; - to_port = mlxsw_sp->ports[mirror->to_local_port]; span_type = mirror->ingress ? 
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port, + span_type); } static int @@ -2545,7 +2544,9 @@ static int mlxsw_sp_flash_device(struct net_device *dev, return err; } -#define MLXSW_SP_QSFP_I2C_ADDR 0x50 +#define MLXSW_SP_I2C_ADDR_LOW 0x50 +#define MLXSW_SP_I2C_ADDR_HIGH 0x51 +#define MLXSW_SP_EEPROM_PAGE_LENGTH 256 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, u16 offset, u16 size, void *data, @@ -2554,12 +2555,25 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; + u16 i2c_addr; int status; int err; size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); + + if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && + offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; + + i2c_addr = MLXSW_SP_I2C_ADDR_LOW; + if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { + i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; + offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; + } + mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, - 0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR); + 0, 0, offset, size, i2c_addr); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0fb898533fbdf64c56594d7984ddc9ff874a6e9..2cfb3f5d092dbf80acaea7481248ecf11ebdd7bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4868,7 +4868,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; - if (!net_eq(info->net, &init_net)) + if (!net_eq(info->net, &init_net) || + (info->family != AF_INET && info->family != AF_INET6)) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index d396183108f76dc25f0d28724c5bee42b1948666..a18b4d2b1d3ea2d86352c7f0d07f0a894e600ad2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -44,6 +44,16 @@ #include "../nfp_net.h" #include "../nfp_port.h" +#define NFP_FLOWER_WHITELIST_DISSECTOR \ + (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_BASIC) | \ + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT(FLOW_DISSECTOR_KEY_IP)) + static int nfp_flower_xmit_flow(struct net_device *netdev, struct nfp_fl_payload *nfp_flow, u8 mtype) @@ -112,6 +122,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, u8 key_layer; int key_size; + if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) + return -EOPNOTSUPP; + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_dissector_key_control *mask_enc_ctl = diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index f055b1774d65312a492010dd92b790559435c71c..f8fa63b66739dfcc081b623ff7de717bb5928445 100644 
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -74,6 +74,45 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +static bool nfp_board_ready(struct nfp_pf *pf) +{ + const char *cp; + long state; + int err; + + cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); + if (!cp) + return false; + + err = kstrtol(cp, 0, &state); + if (err < 0) + return false; + + return state == 15; +} + +static int nfp_pf_board_state_wait(struct nfp_pf *pf) +{ + const unsigned long wait_until = jiffies + 10 * HZ; + + while (!nfp_board_ready(pf)) { + if (time_is_before_eq_jiffies(wait_until)) { + nfp_err(pf->cpp, "NFP board initialization timeout\n"); + return -EINVAL; + } + + nfp_info(pf->cpp, "waiting for board initialization\n"); + if (msleep_interruptible(500)) + return -ERESTARTSYS; + + /* Refresh cached information */ + kfree(pf->hwinfo); + pf->hwinfo = nfp_hwinfo_read(pf->cpp); + } + + return 0; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { int err; @@ -312,6 +351,10 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 30); + if (err) + return err; + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { err = PTR_ERR(nsp); @@ -425,6 +468,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"), nfp_hwinfo_lookup(pf->hwinfo, "cpld.version")); + err = nfp_pf_board_state_wait(pf); + if (err) + goto err_hwinfo_free; + err = devlink_register(devlink, &pdev->dev); if (err) goto err_hwinfo_free; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 5abb9ba31e7d23dd2f52a697c5249a1c63595436..ff373acd28f3fcf72bd17bc207486eaf9739337c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -64,23 +64,6 @@ #define NFP_PF_CSR_SLICE_SIZE (32 * 1024) -static int nfp_is_ready(struct nfp_pf *pf) -{ - const char *cp; - long state; - int err; - - cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); - if (!cp) - return 0; - - err = kstrtol(cp, 0, &state); - if (err < 0) - return 0; - - return state == 15; -} - /** * nfp_net_get_mac_addr() - Get the MAC address. * @pf: NFP PF handle @@ -725,12 +708,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics); - /* Verify that the board has completed initialization */ - if (!nfp_is_ready(pf)) { - nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); - return -EINVAL; - } - if (!pf->rtbl) { nfp_err(pf->cpp, "No %s, giving up.\n", pf->fw_loaded ? 
"symbol table" : "firmware found"); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 1a8d04a1e11327f8ec4411c258469bca728ffb32..3ce51f03126fa76f69c007ed88e76e9ee77bb174 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -97,6 +97,8 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); void nfp_resource_release(struct nfp_resource *res); +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs); + u32 nfp_resource_cpp_id(struct nfp_resource *res); const char *nfp_resource_name(struct nfp_resource *res); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index 072612263dabe92e76481a14cacebfa0cf513b4a..b1dd13ff282bba0c2cce9b666d4cf822e5160ce1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -249,6 +249,51 @@ void nfp_resource_release(struct nfp_resource *res) kfree(res); } +/** + * nfp_resource_wait() - Wait for resource to appear + * @cpp: NFP CPP handle + * @name: Name of the resource + * @secs: Number of seconds to wait + * + * Wait for resource to appear in the resource table, grab and release + * its lock. The wait is jiffies-based, don't expect fine granularity. + * + * Return: 0 on success, errno otherwise. + */ +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs) +{ + unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ; + unsigned long err_at = jiffies + secs * HZ; + struct nfp_resource *res; + + while (true) { + res = nfp_resource_acquire(cpp, name); + if (!IS_ERR(res)) { + nfp_resource_release(res); + return 0; + } + + if (PTR_ERR(res) != -ENOENT) { + nfp_err(cpp, "error waiting for resource %s: %ld\n", + name, PTR_ERR(res)); + return PTR_ERR(res); + } + if (time_is_before_eq_jiffies(err_at)) { + nfp_err(cpp, "timeout waiting for resource %s\n", name); + return -ETIMEDOUT; + } + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; + nfp_info(cpp, "waiting for NFP resource %s\n", name); + } + if (msleep_interruptible(10)) { + nfp_err(cpp, "wait for resource %s interrupted\n", + name); + return -ERESTARTSYS; + } + } +} + /** * nfp_resource_cpp_id() - Return the cpp_id of a resource handle * @res: NFP Resource handle diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 89ab786da25f7545353ef9c1911cb9916dad3b57..4a67c55aa9f14ac61706562bc8c3908de63053df 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index eaca4578435d2f8b17a0ed5f68ce892e54e22e1c..8f6ccc0c39e5e8682aa7ac677f576b4cdd970d6f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1244,7 +1244,6 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, if (!dcbx_info) return -ENOMEM; - memset(dcbx_info, 0, sizeof(*dcbx_info)); rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); if (rc) { kfree(dcbx_info); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index 
bbe24639aa5a6a85d2c96130eb89bf9a599be635..c8c6231b87f3305ae570d6c5277d91415ff696e8 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data) static int emac_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { + case ETH_SS_PRIV_FLAGS: + return 1; case ETH_SS_STATS: return EMAC_STATS_LEN; default: @@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) unsigned int i; switch (stringset) { + case ETH_SS_PRIV_FLAGS: + strcpy(data, "single-pause-mode"); + break; + case ETH_SS_STATS: for (i = 0; i < EMAC_STATS_LEN; i++) { strlcpy(data, emac_ethtool_stat_strings[i], @@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev) return EMAC_MAX_REG_SIZE * sizeof(u32); } +#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0) + +static int emac_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE); + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + +static u32 emac_get_priv_flags(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0; +} + static const struct ethtool_ops emac_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, @@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_regs_len = emac_get_regs_len, .get_regs = emac_get_regs, + + .set_priv_flags = emac_set_priv_flags, + .get_priv_flags = emac_get_priv_flags, }; void emac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index bcd4708b374574fb06faf28d9b0a6cc90bc9c56d..0ea3ca09c68980bd2624102f3e40cc257e23e174 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt) mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | DEBUG_MODE | SINGLE_PAUSE_MODE); + /* Enable single-pause-frame mode if requested. + * + * If enabled, the EMAC will send a single pause frame when the RX + * queue is full. This normally leads to packet loss because + * the pause frame disables the remote MAC only for 33ms (the quanta), + * and then the remote MAC continues sending packets even though + * the RX queue is still full. + * + * If disabled, the EMAC sends a pause frame every 31ms until the RX + * queue is no longer full. Normally, this is the preferred + * method of operation. However, when the system is hung (e.g. + * cores are halted), the EMAC interrupt handler is never called + * and so the RX queue fills up quickly and stays full. The resulting + * non-stop "flood" of pause frames sometimes has the effect of + * disabling nearby switches. In some cases, other nearby switches + * are also affected, shutting down the entire network. + * + * The user can enable or disable single-pause-frame mode + * via ethtool. + */ + mac |= adpt->single_pause_mode ? 
SINGLE_PAUSE_MODE : 0; + writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1); writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 60850bfa3d32e5d574bddacda871cb75da2076fd..759543512117cbfcc628697a54dd7af4ab600c61 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt) /* default to automatic flow control */ adpt->automatic = true; + + /* Disable single-pause-frame mode by default */ + adpt->single_pause_mode = false; } /* Get the clock */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 8ee4ec6aef2e4379060f0726a75ab8fd8ad1690c..d7c9f44209d499cbd4f7ee7be32ca692f580b12c 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h @@ -363,6 +363,9 @@ struct emac_adapter { bool tx_flow_control; bool rx_flow_control; + /* True == use single-pause-frame mode. */ + bool single_pause_mode; + /* Ring parameter */ u8 tpd_burst; u8 rfd_burst; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 557c9bf1a46907f672110c3c00472ad81f8fdbcd..86b8c758f94e1fb19cc7d56e52b6b59260ce8565 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -84,6 +84,10 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) if (((int)skb->len - (int)packet_len) < 0) return NULL; + /* Some hardware can send us empty frames. Catch them */ + if (ntohs(maph->pkt_len) == 0) + return NULL; + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); if (!skbn) return NULL; @@ -94,11 +98,5 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) memcpy(skbn->data, skb->data, packet_len); skb_pull(skb, packet_len); - /* Some hardware can send us empty frames. Catch them */ - if (ntohs(maph->pkt_len) == 0) { - kfree_skb(skb); - return NULL; - } - return skbn; } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ca22f2898664617656f7fa6e4e98df1e7aa3bc26..d24b47b8e0b27e0f44243f5a1011779c0ebd09f9 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) if (likely(RTL_R16(IntrStatus) & RxAckBits)) work_done += rtl8139_rx(dev, tp, budget); - if (work_done < budget && napi_complete_done(napi, work_done)) { + if (work_done < budget) { unsigned long flags; spin_lock_irqsave(&tp->lock, flags); - RTL_W16_F(IntrMask, rtl8139_intr_mask); + if (napi_complete_done(napi, work_done)) + RTL_W16_F(IntrMask, rtl8139_intr_mask); spin_unlock_irqrestore(&tp->lock, flags); } spin_unlock(&tp->rx_lock); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 0b6a39b003a4e188a2bdcbeab52d4761b6ec49fc..012fb66eed8dd618d63fbeaad184accb0c08fc39 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2595,6 +2595,11 @@ static int smsc911x_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct smsc911x_data *pdata = netdev_priv(ndev); + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + /* enable wake on LAN, energy detection and the external PME * signal. 
*/ smsc911x_reg_write(pdata, PMT_CTRL, @@ -2628,7 +2633,15 @@ static int smsc911x_resume(struct device *dev) while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) udelay(1000); - return (to == 0) ? -EIO : 0; + if (to == 0) + return -EIO; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; } static const struct dev_pm_ops smsc911x_pm_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index a366b3747eeb58aebf730fde9c44c0a94af70491..8a280b48e3a9fd0c7ada914f2ceea3618e9e34cf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, { .compatible = "allwinner,sun8i-h3-emac" }, { .compatible = "allwinner,sun8i-v3s-emac" }, { .compatible = "allwinner,sun50i-a64-emac" }, + {}, }; /* If phy-handle property is passed from DT, use it as the PHY */ diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index acd29d60174ad9770f27cfd3d3fb73198df102eb..83e6f76eb9654ee2c0ed3ab9a97b91e3cba7ffb5 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2598,7 +2598,7 @@ static struct platform_driver rhine_driver_platform = { } }; -static struct dmi_system_id rhine_dmi_table[] __initdata = { +static const struct dmi_system_id rhine_dmi_table[] __initconst = { { .ident = "EPIA-M", .matches = { diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ec546da86683296bfa120caa47be5030f793f168..5176be76ca7d57771d82829f8bd08b64dbdc7773 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -150,6 +150,8 @@ struct netvsc_device_info { u32 num_chn; u32 send_sections; u32 recv_sections; + u32 send_section_size; + u32 recv_section_size; }; enum rndis_device_state { @@ -204,6 +206,8 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); + +void rndis_set_subchannel(struct work_struct *w); bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); @@ -782,6 +786,7 @@ struct netvsc_device { u32 num_chn; atomic_t open_chn; + struct work_struct subchan_work; wait_queue_head_t subchan_open; struct rndis_device *extension; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0062b802676fb5c4318b0e14f96133b849b66bc2..8d5077fb04929cb362e7266b45b04c1369ed1284 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -76,11 +76,9 @@ static struct netvsc_device *alloc_net_device(void) net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; - net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE; - net_device->send_section_size = NETVSC_SEND_SECTION_SIZE; - init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); + INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); return net_device; } @@ -261,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device, int ret = 0; /* Get receive buffer area. 
*/ - buf_size = device_info->recv_sections * net_device->recv_section_size; + buf_size = device_info->recv_sections * device_info->recv_section_size; buf_size = roundup(buf_size, PAGE_SIZE); net_device->recv_buf = vzalloc(buf_size); @@ -343,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device, goto cleanup; /* Now setup the send buffer. */ - buf_size = device_info->send_sections * net_device->send_section_size; + buf_size = device_info->send_sections * device_info->send_section_size; buf_size = round_up(buf_size, PAGE_SIZE); net_device->send_buf = vzalloc(buf_size); @@ -557,6 +555,8 @@ void netvsc_device_remove(struct hv_device *device) = rtnl_dereference(net_device_ctx->nvdev); int i; + cancel_work_sync(&net_device->subchan_work); + netvsc_disconnect_vsp(device); RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 165ba4b3b4232a796e704c40c580204fdb9d601b..a32ae02e1b6cb6fc2975b0e124aacb42370c0c5d 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -49,7 +49,7 @@ #define NETVSC_MIN_TX_SECTIONS 10 #define NETVSC_DEFAULT_TX 192 /* ~1M */ #define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ -#define NETVSC_DEFAULT_RX 2048 /* ~4M */ +#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) @@ -848,15 +848,14 @@ static int netvsc_set_channels(struct net_device *net, device_info.num_chn = count; device_info.ring_size = ring_size; device_info.send_sections = nvdev->send_section_cnt; + device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = nvdev->recv_section_cnt; + device_info.recv_section_size = nvdev->recv_section_size; rndis_filter_device_remove(dev, nvdev); nvdev = rndis_filter_device_add(dev, &device_info); - if (!IS_ERR(nvdev)) { - netif_set_real_num_tx_queues(net, nvdev->num_chn); - netif_set_real_num_rx_queues(net, nvdev->num_chn); - } else { + if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); device_info.num_chn = orig; nvdev = rndis_filter_device_add(dev, &device_info); @@ -966,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; device_info.send_sections = nvdev->send_section_cnt; + device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = nvdev->recv_section_cnt; + device_info.recv_section_size = nvdev->recv_section_size; rndis_filter_device_remove(hdev, nvdev); @@ -1488,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, device_info.num_chn = nvdev->num_chn; device_info.ring_size = ring_size; device_info.send_sections = new_tx; + device_info.send_section_size = nvdev->send_section_size; device_info.recv_sections = new_rx; + device_info.recv_section_size = nvdev->recv_section_size; netif_device_detach(ndev); was_opened = rndis_filter_opened(nvdev); @@ -1937,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev, device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; device_info.send_sections = NETVSC_DEFAULT_TX; + device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; device_info.recv_sections = NETVSC_DEFAULT_RX; + device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; nvdev = rndis_filter_device_add(dev, &device_info); if (IS_ERR(nvdev)) { @@ -1954,9 +1959,6 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; - 
netif_set_real_num_tx_queues(net, nvdev->num_chn); - netif_set_real_num_rx_queues(net, nvdev->num_chn); - netdev_lockdep_set_classes(net); /* MTU range: 68 - 1500 or 65521 */ @@ -2012,9 +2014,10 @@ static int netvsc_remove(struct hv_device *dev) if (vf_netdev) netvsc_unregister_vf(vf_netdev); + unregister_netdevice(net); + rndis_filter_device_remove(dev, rtnl_dereference(ndev_ctx->nvdev)); - unregister_netdevice(net); rtnl_unlock(); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 69c40b8fccc384512c0b04f2de0e7a8a00e9a178..065b204d8e17f6bde17be931ff8d1758f529e6cc 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1039,8 +1039,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) /* Set the channel before opening.*/ nvchan->channel = new_sc; - netif_napi_add(ndev, &nvchan->napi, - netvsc_poll, NAPI_POLL_WEIGHT); ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, @@ -1048,10 +1046,86 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) if (ret == 0) napi_enable(&nvchan->napi); else - netif_napi_del(&nvchan->napi); + netdev_notice(ndev, "sub channel open failed: %d\n", ret); - atomic_inc(&nvscdev->open_chn); - wake_up(&nvscdev->subchan_open); + if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) + wake_up(&nvscdev->subchan_open); +} + +/* Open sub-channels after completing the handling of the device probe. + * This breaks overlap of processing the host message for the + * new primary channel with the initialization of sub-channels. + */ +void rndis_set_subchannel(struct work_struct *w) +{ + struct netvsc_device *nvdev + = container_of(w, struct netvsc_device, subchan_work); + struct nvsp_message *init_packet = &nvdev->channel_init_pkt; + struct net_device_context *ndev_ctx; + struct rndis_device *rdev; + struct net_device *ndev; + struct hv_device *hv_dev; + int i, ret; + + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (!rdev) + goto unlock; /* device was removed */ + + ndev = rdev->ndev; + ndev_ctx = netdev_priv(ndev); + hv_dev = ndev_ctx->device_ctx; + + memset(init_packet, 0, sizeof(struct nvsp_message)); + init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; + init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; + init_packet->msg.v5_msg.subchn_req.num_subchannels = + nvdev->num_chn - 1; + ret = vmbus_sendpacket(hv_dev->channel, init_packet, + sizeof(struct nvsp_message), + (unsigned long)init_packet, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) { + netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); + goto failed; + } + + wait_for_completion(&nvdev->channel_init_wait); + if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { + netdev_err(ndev, "sub channel request failed\n"); + goto failed; + } + + nvdev->num_chn = 1 + + init_packet->msg.v5_msg.subchn_comp.num_subchannels; + + /* wait for all sub channels to open */ + wait_event(nvdev->subchan_open, + atomic_read(&nvdev->open_chn) == nvdev->num_chn); + + /* ignore failures from setting rss parameters, still have channels */ + rndis_filter_set_rss_param(rdev, netvsc_hash_key); + + netif_set_real_num_tx_queues(ndev, nvdev->num_chn); + netif_set_real_num_rx_queues(ndev, nvdev->num_chn); + + rtnl_unlock(); + return; + +failed: + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + 
netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; +unlock: + rtnl_unlock(); } struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -1063,7 +1137,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct rndis_device *rndis_device; struct ndis_offload hwcaps; struct ndis_offload_params offloads; - struct nvsp_message *init_packet; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); unsigned int gso_max_size = GSO_MAX_SIZE; @@ -1215,9 +1288,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, net_device->num_chn); atomic_set(&net_device->open_chn, 1); - - if (net_device->num_chn == 1) - return net_device; + vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); for (i = 1; i < net_device->num_chn; i++) { ret = netvsc_alloc_recv_comp_ring(net_device, i); @@ -1228,38 +1299,15 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, } } - vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); + for (i = 1; i < net_device->num_chn; i++) + netif_napi_add(net, &net_device->chan_table[i].napi, + netvsc_poll, NAPI_POLL_WEIGHT); - init_packet = &net_device->channel_init_pkt; - memset(init_packet, 0, sizeof(struct nvsp_message)); - init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; - init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; - init_packet->msg.v5_msg.subchn_req.num_subchannels = - net_device->num_chn - 1; - ret = vmbus_sendpacket(dev->channel, init_packet, - sizeof(struct nvsp_message), - (unsigned long)init_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) - goto out; - - wait_for_completion(&net_device->channel_init_wait); - if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { - ret = -ENODEV; - goto out; - } + if (net_device->num_chn > 1) + schedule_work(&net_device->subchan_work); - net_device->num_chn = 1 + - init_packet->msg.v5_msg.subchn_comp.num_subchannels; - - /* wait for all sub channels to open */ - wait_event(net_device->subchan_open, - atomic_read(&net_device->open_chn) == net_device->num_chn); - - /* ignore failues from setting rss parameters, still have channels */ - rndis_filter_set_rss_param(rndis_device, netvsc_hash_key); out: + /* if unavailable, just proceed with one queue */ if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; @@ -1280,10 +1328,10 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); - kfree(rndis_dev); net_dev->extension = NULL; netvsc_device_remove(dev); + kfree(rndis_dev); } int rndis_filter_open(struct netvsc_device *nvdev) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index a9d16a3af514ead8f96a34db3d05478785cdf6d6..cd931cf9dcc262d3a1ccb4d03b5022b2180f1266 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -160,15 +160,6 @@ config MDIO_XGENE endif -menuconfig PHYLIB - tristate "PHY Device support and infrastructure" - depends on NETDEVICES - select MDIO_DEVICE - help - Ethernet controllers are usually attached to PHY - devices. This option provides infrastructure for - managing PHY devices. - config PHYLINK tristate depends on NETDEVICES @@ -179,6 +170,15 @@ config PHYLINK configuration links, PHYs, and Serdes links with MAC level autonegotiation modes. 
+menuconfig PHYLIB + tristate "PHY Device support and infrastructure" + depends on NETDEVICES + select MDIO_DEVICE + help + Ethernet controllers are usually attached to PHY + devices. This option provides infrastructure for + managing PHY devices. + if PHYLIB config SWPHY diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e842d2cd1ee750f8930028370c1c7fac5a52dc77..2b1e67bc1e736ceb33f7afa8462f5a4858b522df 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, cmd->base.port = PORT_BNC; else cmd->base.port = PORT_MII; - + cmd->base.transceiver = phy_is_internal(phydev) ? + XCVR_INTERNAL : XCVR_EXTERNAL; cmd->base.phy_address = phydev->mdio.addr; cmd->base.autoneg = phydev->autoneg; cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8cf0c5901f95870fc613edba0289594a5d40640a..67f25ac29025c53903cc724fac62efdd94828510 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) { const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; char *irq_str; - char irq_num[4]; + char irq_num[8]; switch(phydev->irq) { case PHY_POLL: diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index d15dd3938ba82624fd693b5a6f77301bb6d26bde..2e5150b0b8d52c5dd784a3df1818962d64972898 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) priv->phy_drv->read_status(phydev); val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); - val &= XILINX_GMII2RGMII_SPEED_MASK; + val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) val |= BMCR_SPEED1000; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index b99a7fb09f8e31827a725151b415967699cdfa27..0161f77641fac8eafc1284a5457a7ccb273efc39 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret) + return ret; ee->magic = LAN78XX_EEPROM_MAGIC; - return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); + ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); + + usb_autopm_put_interface(dev->intf); + + return ret; } static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret) + return ret; - /* Allow entire eeprom update only */ - if ((ee->magic == LAN78XX_EEPROM_MAGIC) && - (ee->offset == 0) && - (ee->len == 512) && - (data[0] == EEPROM_INDICATOR)) - return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + /* Invalid EEPROM_INDICATOR at offset zero will result in a failure + * to load data from EEPROM + */ + if (ee->magic == LAN78XX_EEPROM_MAGIC) + ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); else if ((ee->magic == LAN78XX_OTP_MAGIC) && (ee->offset == 0) && (ee->len == 512) && (data[0] == OTP_INDICATOR_1)) - return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); + ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); + + 
usb_autopm_put_interface(dev->intf); - return -EINVAL; + return ret; } static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, @@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; - buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_read_reg(dev, MAC_TX, &buf); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 340c13484e5cc7dd5001577b7522d5a4318bd5b6..309b88acd3d0b6ca1528dde7b27a23926f9be952 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -526,7 +526,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev) static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { - u32 flow, afc_cfg = 0; + u32 flow = 0, afc_cfg; int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); if (ret < 0) @@ -537,20 +537,19 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, if (cap & FLOW_CTRL_RX) flow = 0xFFFF0002; - else - flow = 0; - if (cap & FLOW_CTRL_TX) + if (cap & FLOW_CTRL_TX) { afc_cfg |= 0xF; - else + flow |= 0xFFFF0000; + } else { afc_cfg &= ~0xF; + } netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n", cap & FLOW_CTRL_RX ? "enabled" : "disabled", cap & FLOW_CTRL_TX ? "enabled" : "disabled"); } else { netif_dbg(dev, link, dev->net, "half duplex\n"); - flow = 0; afc_cfg |= 0xF; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7e19051f32303195dc7d4ed9707f237e387a6e29..9b243e6f3008bb5319844412cd49db1cd7bce594 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -957,12 +957,12 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, { const struct ipv6hdr *iph = ipv6_hdr(skb); struct flowi6 fl6 = { + .flowi6_iif = ifindex, + .flowi6_mark = skb->mark, + .flowi6_proto = iph->nexthdr, .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), - .flowi6_mark = skb->mark, - .flowi6_proto = iph->nexthdr, - .flowi6_iif = ifindex, }; struct net *net = dev_net(vrf_dev); struct rt6_info *rt6; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 1427a386a033e72f1f8ec7b4fe02471e9d5d92fb..3e4d1e7998dacb13df8638763eb4fa0181e73357 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev) struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nd_namespace_index *nsindex; + /* + * If any of the DIMMs do not support labels the only + * possible BTT format is v1. 
+ */ + if (!ndd) { + loop_bitmask = 0; + break; + } + nsindex = to_namespace_index(ndd, ndd->ns_current); if (nsindex == NULL) loop_bitmask |= 1; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index e9aa453da50c5a8c3d4ce9eac8cb7b7345d9ddcc..39dfd7affa319a3aa0599e6a1bfa6ade7ec3c5f1 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -262,16 +262,9 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return copy_from_iter_flushcache(addr, bytes, i); } -static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t size) -{ - arch_wb_cache_pmem(addr, size); -} - static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, .copy_from_iter = pmem_copy_from_iter, - .flush = pmem_dax_flush, }; static const struct attribute_group *pmem_attribute_groups[] = { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index acc816b67582f30524ad19f66843b071dfcef6ae..bb2aad07863736c44adbcad9f43f0efb14d17bb7 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req) return false; if (nvme_req(req)->status & NVME_SC_DNR) return false; - if (jiffies - req->start_time >= req->timeout) - return false; if (nvme_req(req)->retries >= nvme_max_retries) return false; return true; @@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); spin_lock_irq(&ctrl->lock); - while (ctrl->event_limit > 0) { + while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) { int aer_idx = --ctrl->event_limit; spin_unlock_irq(&ctrl->lock); @@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, /*FALLTHRU*/ case NVME_SC_ABORT_REQ: ++ctrl->event_limit; - queue_work(nvme_wq, &ctrl->async_event_work); + if (ctrl->state == NVME_CTRL_LIVE) + queue_work(nvme_wq, &ctrl->async_event_work); break; default: break; @@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, nvme_queue_scan(ctrl); break; case NVME_AER_NOTICE_FW_ACT_STARTING: - schedule_work(&ctrl->fw_act_work); + queue_work(nvme_wq, &ctrl->fw_act_work); break; default: dev_warn(ctrl->device, "async event result %08x\n", result); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 47307752dc65d3b9e78f7ab736cfc33831c6522b..555c976cc2ee7aae729c6465003b5cdffa11d594 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->queue_size = NVMF_DEF_QUEUE_SIZE; opts->nr_io_queues = num_online_cpus(); opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; + opts->kato = NVME_DEFAULT_KATO; options = o = kstrdup(buf, GFP_KERNEL); if (!options) @@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, goto out; } - if (opts->discovery_nqn) { - pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n"); - ret = -EINVAL; - goto out; - } - if (token < 0) { pr_err("Invalid keep_alive_tmo %d\n", token); ret = -EINVAL; goto out; - } else if (token == 0) { + } else if (token == 0 && !opts->discovery_nqn) { /* Allowed for debug */ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); } opts->kato = token; + + if (opts->discovery_nqn && opts->kato) { + pr_err("Discovery controllers cannot accept KATO != 0\n"); + ret = -EINVAL; + goto out; + } + break; case 
NVMF_OPT_CTRL_LOSS_TMO: if (match_int(args, &token)) { @@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, uuid_copy(&opts->host->id, &hostid); out: - if (!opts->discovery_nqn && !opts->kato) - opts->kato = NVME_DEFAULT_KATO; kfree(options); return ret; } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d2e882c0f4968e40eb256a3b0f08c8c200611810..af075e9989449e97d7b32038d8e30a1cf0875ad6 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); else if (freq->status) - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); /* * For the linux implementation, if we have an unsuccesful @@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) */ if (freq->transferred_length != be32_to_cpu(op->cmd_iu.data_len)) { - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } result.u64 = 0; @@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) freq->transferred_length || op->rsp_iu.status_code || sqe->common.command_id != cqe->command_id)) { - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } result = cqe->result; @@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) break; default: - status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); + status = cpu_to_le16(NVME_SC_INTERNAL << 1); goto done; } @@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, * as well as those by FC-NVME spec. */ WARN_ON_ONCE(sqe->common.metadata); - WARN_ON_ONCE(sqe->common.dptr.prp1); - WARN_ON_ONCE(sqe->common.dptr.prp2); sqe->common.flags |= NVME_CMD_SGL_METABUF; /* - * format SQE DPTR field per FC-NVME rules - * type=data block descr; subtype=offset; - * offset is currently 0. 
+ * format SQE DPTR field per FC-NVME rules: + * type=0x5 Transport SGL Data Block Descriptor + * subtype=0xA Transport-specific value + * address=0 + * length=length of the data series */ - sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; + sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | + NVME_SGL_FMT_TRANSPORT_A; sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); sqe->rw.dptr.sgl.addr = 0; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4a2121335f48a0b4af31b413e04af8f9a6a52a2e..cb73bc8cad3bd2c332bdf3c7c4903e1b0c0f035c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) } #endif +static void nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " + "dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) return BLK_STS_OK; bad_sgl: - if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", - blk_rq_payload_bytes(req), iod->nents)) { - for_each_sg(iod->sg, sg, iod->nents, i) { - dma_addr_t phys = sg_phys(sg); - pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " - "dma_address:%pad dma_length:%d\n", i, &phys, - sg->offset, sg->length, - &sg_dma_address(sg), - sg_dma_len(sg)); - } - } + WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), + "Invalid SGL for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), iod->nents); return BLK_STS_IOERR; - } static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, @@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) if (result < 0) goto release_cq; + nvme_init_queue(nvmeq, qid); result = queue_request_irq(nvmeq); if (result < 0) goto release_sq; - nvme_init_queue(nvmeq, qid); return result; release_sq: @@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) return result; nvmeq->cq_vector = 0; + nvme_init_queue(nvmeq, 0); result = queue_request_irq(nvmeq); if (result) { nvmeq->cq_vector = -1; @@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; - nvme_init_queue(dev->queues[0], 0); result = nvme_alloc_admin_tags(dev); if (result) goto out; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 58983000964be4c2ce3280ca9f4dda768979ed76..92a03ff5fb4d4392c8726a8cd556d9476fc5c0a1 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -942,7 +942,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); - WARN_ON_ONCE(!changed); + if (!changed) { + /* state change failure is ok if we're in DELETING state */ + WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); + return; + } + ctrl->ctrl.nr_reconnects = 0; nvme_start_ctrl(&ctrl->ctrl); @@ -962,7 +967,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, err_work); - nvme_stop_ctrl(&ctrl->ctrl); + 
nvme_stop_keep_alive(&ctrl->ctrl); if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 7c23eaf8e5639c14b5824fad26bdd4d6642e45b1..1b208beeef5097c6e4a2bd38e112543b8818af19 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) if (status) nvmet_set_status(req, status); - /* XXX: need to fill in something useful for sq_head */ - req->rsp->sq_head = 0; - if (likely(req->sq)) /* may happen during early failure */ - req->rsp->sq_id = cpu_to_le16(req->sq->qid); + if (req->sq->size) + req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; + req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); + req->rsp->sq_id = cpu_to_le16(req->sq->qid); req->rsp->command_id = req->cmd->common.command_id; if (req->ns) @@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size) { + sq->sqhd = 0; sq->qid = qid; sq->size = size; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 859a66725291d62bbf6f12a06b8486a9c5edbcc2..db3bf6b8bf9ee63581661310aa98820a7341445b 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue already connected!\n"); return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; } + if (!sqsize) { + pr_warn("queue size zero!\n"); + return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + } - nvmet_cq_setup(ctrl, req->cq, qid, sqsize); - nvmet_sq_setup(ctrl, req->sq, qid, sqsize); + /* note: convert queue size from 0's-based value to 1's-based value */ + nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); + nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); return 0; } diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 421e43bf1dd78f5d15f1ffb7cf88d36575ec3503..58e010bdda3ea5c155da5c268f542a6939e5c786 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc { u32 a_id; struct nvmet_fc_tgtport *tgtport; struct list_head a_list; - struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; }; @@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, unsigned long flags; int ret; - if (qid >= NVMET_NR_QUEUES) + if (qid > NVMET_NR_QUEUES) return NULL; queue = kzalloc((sizeof(*queue) + @@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, u16 qid = nvmet_fc_getqueueid(connection_id); unsigned long flags; + if (qid > NVMET_NR_QUEUES) + return NULL; + spin_lock_irqsave(&tgtport->lock, flags); list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { @@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) int i; spin_lock_irqsave(&tgtport->lock, flags); - for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { + for (i = NVMET_NR_QUEUES; i >= 0; i--) { queue = assoc->queues[i]; if (queue) { if (!nvmet_fc_tgt_q_get(queue)) @@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, spin_lock_irqsave(&fod->flock, flags); fod->writedataactive = false; spin_unlock_irqrestore(&fod->flock, flags); - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + 
nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { fcpreq->fcp_error = ret; fcpreq->transferred_length = 0; @@ -1929,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) /* if in the middle of an io and we need to tear down */ if (abort) { if (fcpreq->op == NVMET_FCOP_WRITEDATA) { - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return true; } @@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) fod->abort = true; spin_unlock(&fod->flock); - nvmet_req_complete(&fod->req, - NVME_SC_FC_TRANSPORT_ERROR); + nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; } @@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_tgtport *tgtport = port->priv; unsigned long flags; + bool matched = false; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); if (tgtport->port == port) { - nvmet_fc_tgtport_put(tgtport); + matched = true; tgtport->port = NULL; } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (matched) + nvmet_fc_tgtport_put(tgtport); } static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 1cb9847ec261182dd5fdc6da836d14fa1503ad9b..7b75d9de55ab0d33939bf314a81ee01e26a6d1b1 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -224,8 +224,6 @@ struct fcloop_nport { struct fcloop_lport *lport; struct list_head nport_list; struct kref ref; - struct completion rport_unreg_done; - struct completion tport_unreg_done; u64 node_name; u64 port_name; u32 port_role; @@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, tfcp_req->aborted = true; spin_unlock(&tfcp_req->reqlock); - tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; + tfcp_req->status = NVME_SC_INTERNAL; /* * nothing more to do. 
If io wasn't active, the transport should @@ -630,6 +628,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, schedule_work(&inireq->iniwork); } +static void +fcloop_nport_free(struct kref *ref) +{ + struct fcloop_nport *nport = + container_of(ref, struct fcloop_nport, ref); + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(nport); +} + +static void +fcloop_nport_put(struct fcloop_nport *nport) +{ + kref_put(&nport->ref, fcloop_nport_free); +} + +static int +fcloop_nport_get(struct fcloop_nport *nport) +{ + return kref_get_unless_zero(&nport->ref); +} + static void fcloop_localport_delete(struct nvme_fc_local_port *localport) { @@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; - /* release any threads waiting for the unreg to complete */ - complete(&rport->nport->rport_unreg_done); + fcloop_nport_put(rport->nport); } static void @@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; - /* release any threads waiting for the unreg to complete */ - complete(&tport->nport->tport_unreg_done); + fcloop_nport_put(tport->nport); } #define FCLOOP_HW_QUEUES 4 @@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, goto out_free_opts; } + memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = opts->wwnn; pinfo.port_name = opts->wwpn; pinfo.port_role = opts->roles; @@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, return ret ? ret : count; } -static void -fcloop_nport_free(struct kref *ref) -{ - struct fcloop_nport *nport = - container_of(ref, struct fcloop_nport, ref); - unsigned long flags; - - spin_lock_irqsave(&fcloop_lock, flags); - list_del(&nport->nport_list); - spin_unlock_irqrestore(&fcloop_lock, flags); - - kfree(nport); -} - -static void -fcloop_nport_put(struct fcloop_nport *nport) -{ - kref_put(&nport->ref, fcloop_nport_free); -} - -static int -fcloop_nport_get(struct fcloop_nport *nport) -{ - return kref_get_unless_zero(&nport->ref); -} - static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { @@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, if (!nport) return -EIO; + memset(&pinfo, 0, sizeof(pinfo)); pinfo.node_name = nport->node_name; pinfo.port_name = nport->port_name; pinfo.port_role = nport->port_role; @@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport) } static int -__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) +__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { - int ret; - if (!rport) return -EALREADY; - init_completion(&nport->rport_unreg_done); - - ret = nvme_fc_unregister_remoteport(rport->remoteport); - if (ret) - return ret; - - wait_for_completion(&nport->rport_unreg_done); - - fcloop_nport_put(nport); - - return ret; + return nvme_fc_unregister_remoteport(rport->remoteport); } static ssize_t @@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, if (!nport) return -ENOENT; - ret = __wait_remoteport_unreg(nport, rport); + ret = __remoteport_unreg(nport, rport); return ret ? 
ret : count; } @@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport) } static int -__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) +__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { - int ret; - if (!tport) return -EALREADY; - init_completion(&nport->tport_unreg_done); - - ret = nvmet_fc_unregister_targetport(tport->targetport); - if (ret) - return ret; - - wait_for_completion(&nport->tport_unreg_done); - - fcloop_nport_put(nport); - - return ret; + return nvmet_fc_unregister_targetport(tport->targetport); } static ssize_t @@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, if (!nport) return -ENOENT; - ret = __wait_targetport_unreg(nport, tport); + ret = __targetport_unreg(nport, tport); return ret ? ret : count; } @@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __wait_targetport_unreg(nport, tport); + ret = __targetport_unreg(nport, tport); if (ret) pr_warn("%s: Failed deleting target port\n", __func__); - ret = __wait_remoteport_unreg(nport, rport); + ret = __remoteport_unreg(nport, rport); if (ret) pr_warn("%s: Failed deleting remote port\n", __func__); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7d261ab894f47b56addb93fe95beee8fda37b8d3..7b8e20adf76029f293569688e8a5c71013c6393d 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -74,6 +74,7 @@ struct nvmet_sq { struct percpu_ref ref; u16 qid; u16 size; + u16 sqhd; struct completion free_done; struct completion confirm_done; }; diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 4ddc6e8f9fe7431ae24b7a2068ca2eb8a90e0b0e..f9308c2f22e6754d0b50fc627c11a96cf44af8f4 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -251,9 +251,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) return ret; } -static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) +static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq) { - u8 irq; u8 msi_count; struct pci_epf *epf = epf_test->epf; struct pci_epc *epc = epf->epc; @@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) reg->status |= STATUS_IRQ_RAISED; msi_count = pci_epc_get_msi(epc); - irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; if (irq > msi_count || msi_count <= 0) pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); else @@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->command = 0; reg->status = 0; + irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; + if (command & COMMAND_RAISE_LEGACY_IRQ) { reg->status = STATUS_IRQ_RAISED; pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); @@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->status |= STATUS_WRITE_FAIL; else reg->status |= STATUS_WRITE_SUCCESS; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } @@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->status |= STATUS_READ_SUCCESS; else reg->status |= STATUS_READ_FAIL; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } @@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->status |= STATUS_COPY_SUCCESS; else reg->status |= 
STATUS_COPY_FAIL; - pci_epf_test_raise_irq(epf_test); + pci_epf_test_raise_irq(epf_test, irq); goto reset_handler; } if (command & COMMAND_RAISE_MSI_IRQ) { msi_count = pci_epc_get_msi(epc); - irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; if (irq > msi_count || msi_count <= 0) goto reset_handler; reg->status = STATUS_IRQ_RAISED; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 1eecfa301f7fb2efbfd025f70d39b7d159a42dde..8e075ea2743ef41b4ed08065f502527cbe804c82 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; /* We need to keep extra room for a newline */ if (count >= (PAGE_SIZE - 1)) @@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); + ssize_t len; - return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_lock(dev); + len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b0002daa50f3bb98dd0c0e40488e033f00a2ee0a..6078dfc11b112a88aec1c58eb911de9f996b6644 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -52,7 +52,6 @@ static void pci_pme_list_scan(struct work_struct *work); static LIST_HEAD(pci_pme_list); static DEFINE_MUTEX(pci_pme_list_mutex); static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); -static DEFINE_MUTEX(pci_bridge_mutex); struct pci_pme_device { struct list_head list; @@ -1351,16 +1350,10 @@ static void pci_enable_bridge(struct pci_dev *dev) if (bridge) pci_enable_bridge(bridge); - /* - * Hold pci_bridge_mutex to prevent a race when enabling two - * devices below the bridge simultaneously. The race may cause a - * PCI_COMMAND_MEMORY update to be lost (see changelog). 
- */ - mutex_lock(&pci_bridge_mutex); if (pci_is_enabled(dev)) { if (!dev->is_busmaster) pci_set_master(dev); - goto end; + return; } retval = pci_enable_device(dev); @@ -1368,8 +1361,6 @@ static void pci_enable_bridge(struct pci_dev *dev) dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval); pci_set_master(dev); -end: - mutex_unlock(&pci_bridge_mutex); } static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) @@ -1394,7 +1385,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) return 0; /* already enabled */ bridge = pci_upstream_bridge(dev); - if (bridge && !pci_is_enabled(bridge)) + if (bridge) pci_enable_bridge(bridge); /* only skip sriov related */ diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index be635f01775616c44fcca8c57e378df6446d4595..083276e03c38d924886a9e1d386deaee5b9e06e5 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -260,7 +260,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { +static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = { /* * Boxes that should not use MSI for PCIe PME signaling. */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a2afb44fad100ac77dfd9560a08243fe0f70561a..a4d33619a7bb67157f128f0009a58f9d21c7c727 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1707,7 +1707,7 @@ static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id boot_interrupt_dmi_table[] = { +static const struct dmi_system_id boot_interrupt_dmi_table[] = { /* * Systems to exclude from boot interrupt reroute quirks */ diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 944674ee34644fa2583f2193ad738a4b5cbe4912..19e17829f515ac5d317bc3dc124c9353e1e981ed 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -131,22 +131,27 @@ static irqreturn_t db1000_pcmcia_stschgirq(int irq, void *data) return IRQ_HANDLED; } +/* Db/Pb1200 have separate per-socket insertion and ejection + * interrupts which stay asserted as long as the card is + * inserted/missing. The one which caused us to be called + * needs to be disabled and the other one enabled. + */ static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data) +{ + disable_irq_nosync(irq); + return IRQ_WAKE_THREAD; +} + +static irqreturn_t db1200_pcmcia_cdirq_fn(int irq, void *data) { struct db1x_pcmcia_sock *sock = data; - /* Db/Pb1200 have separate per-socket insertion and ejection - * interrupts which stay asserted as long as the card is - * inserted/missing. The one which caused us to be called - * needs to be disabled and the other one enabled. - */ - if (irq == sock->insert_irq) { - disable_irq_nosync(sock->insert_irq); + /* Wait a bit for the signals to stop bouncing. 
*/ + msleep(100); + if (irq == sock->insert_irq) enable_irq(sock->eject_irq); - } else { - disable_irq_nosync(sock->eject_irq); + else enable_irq(sock->insert_irq); - } pcmcia_parse_events(&sock->socket, SS_DETECT); @@ -172,13 +177,13 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock) */ if ((sock->board_type == BOARD_TYPE_DB1200) || (sock->board_type == BOARD_TYPE_DB1300)) { - ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, - 0, "pcmcia_insert", sock); + ret = request_threaded_irq(sock->insert_irq, db1200_pcmcia_cdirq, + db1200_pcmcia_cdirq_fn, 0, "pcmcia_insert", sock); if (ret) goto out1; - ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, - 0, "pcmcia_eject", sock); + ret = request_threaded_irq(sock->eject_irq, db1200_pcmcia_cdirq, + db1200_pcmcia_cdirq_fn, 0, "pcmcia_eject", sock); if (ret) { free_irq(sock->insert_irq, sock); goto out1; diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 0a9b78705ee810c9e18c6fa1f46551fc27374287..3303dd8d8eb5718f96de5326949ba89a60e9f32e 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c @@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) ret = armpmu_register(pmu); if (ret) { pr_warn("Failed to register PMU for CPU%d\n", cpu); + kfree(pmu->name); return ret; } } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 441912c10b82bbeb664b53067ed0e17815a594df..5c8d452e35e2ea58251ba3160d6b9fef18870f3c 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -44,6 +44,7 @@ source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" source "drivers/phy/hisilicon/Kconfig" +source "drivers/phy/lantiq/Kconfig" source "drivers/phy/marvell/Kconfig" source "drivers/phy/mediatek/Kconfig" source "drivers/phy/motorola/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 06f3c500030d53866603fb4771057d50d5d2a893..3a52dcb09566822dfeea95988b4811a8d79c9982 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -6,9 +6,9 @@ obj-$(CONFIG_GENERIC_PHY) += phy-core.o obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o - obj-$(CONFIG_ARCH_SUNXI) += allwinner/ obj-$(CONFIG_ARCH_MESON) += amlogic/ +obj-$(CONFIG_LANTIQ) += lantiq/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_RENESAS) += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ diff --git a/drivers/phy/lantiq/Kconfig b/drivers/phy/lantiq/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..326d88a6417dc92ca65d48cccd8483bf9d56c588 --- /dev/null +++ b/drivers/phy/lantiq/Kconfig @@ -0,0 +1,9 @@ +# +# Phy drivers for Lantiq / Intel platforms +# +config PHY_LANTIQ_RCU_USB2 + tristate "Lantiq XWAY SoC RCU based USB PHY" + depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST) + select GENERIC_PHY + help + Support for the USB PHY(s) on the Lantiq / Intel XWAY family SoCs. 
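
For context on how the new Lantiq RCU USB2 PHY is consumed: the generic PHY framework maps a controller driver's phy_init()/phy_power_on() calls onto the phy_ops that the phy-lantiq-rcu-usb2.c driver added below registers. The sketch that follows is illustrative only and is not part of this series; the consumer device and the "usb2-phy" lookup name are assumptions made for the example, not something the patch defines.

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/* Hypothetical consumer (e.g. a USB host controller) of the PHY below. */
static int example_usb_host_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* "usb2-phy" is an assumed phy-names entry in the consumer's DT node */
	phy = devm_phy_get(&pdev->dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* Invokes ltq_rcu_usb2_phy_init(): host mode and endianness setup */
	ret = phy_init(phy);
	if (ret)
		return ret;

	/* Invokes ltq_rcu_usb2_phy_power_on(): deassert PHY reset, enable gate clock */
	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);

	return ret;
}

On teardown such a consumer would call phy_power_off() followed by phy_exit(), mirroring the reset-assert and clk_disable_unprepare() path in ltq_rcu_usb2_phy_power_off() below.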
diff --git a/drivers/phy/lantiq/Makefile b/drivers/phy/lantiq/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f73eb56a5416c2c59d2dd39c9ccb1f13af4edbf8 --- /dev/null +++ b/drivers/phy/lantiq/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c new file mode 100644 index 0000000000000000000000000000000000000000..986224fca9e913ca521695376bdb818cbec28612 --- /dev/null +++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c @@ -0,0 +1,254 @@ +/* + * Lantiq XWAY SoC RCU module based USB 1.1/2.0 PHY driver + * + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Transmitter HS Pre-Emphasis Enable */ +#define RCU_CFG1_TX_PEE BIT(0) +/* Disconnect Threshold */ +#define RCU_CFG1_DIS_THR_MASK 0x00038000 +#define RCU_CFG1_DIS_THR_SHIFT 15 + +struct ltq_rcu_usb2_bits { + u8 hostmode; + u8 slave_endianness; + u8 host_endianness; + bool have_ana_cfg; +}; + +struct ltq_rcu_usb2_priv { + struct regmap *regmap; + unsigned int phy_reg_offset; + unsigned int ana_cfg1_reg_offset; + const struct ltq_rcu_usb2_bits *reg_bits; + struct device *dev; + struct phy *phy; + struct clk *phy_gate_clk; + struct reset_control *ctrl_reset; + struct reset_control *phy_reset; +}; + +static const struct ltq_rcu_usb2_bits xway_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 9, + .host_endianness = 10, + .have_ana_cfg = false, +}; + +static const struct ltq_rcu_usb2_bits xrx100_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 17, + .host_endianness = 10, + .have_ana_cfg = false, +}; + +static const struct ltq_rcu_usb2_bits xrx200_rcu_usb2_reg_bits = { + .hostmode = 11, + .slave_endianness = 9, + .host_endianness = 10, + .have_ana_cfg = true, +}; + +static const struct of_device_id ltq_rcu_usb2_phy_of_match[] = { + { + .compatible = "lantiq,ase-usb2-phy", + .data = &xway_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,danube-usb2-phy", + .data = &xway_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx100-usb2-phy", + .data = &xrx100_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx200-usb2-phy", + .data = &xrx200_rcu_usb2_reg_bits, + }, + { + .compatible = "lantiq,xrx300-usb2-phy", + .data = &xrx200_rcu_usb2_reg_bits, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, ltq_rcu_usb2_phy_of_match); + +static int ltq_rcu_usb2_phy_init(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + + if (priv->reg_bits->have_ana_cfg) { + regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset, + RCU_CFG1_TX_PEE, RCU_CFG1_TX_PEE); + regmap_update_bits(priv->regmap, priv->ana_cfg1_reg_offset, + RCU_CFG1_DIS_THR_MASK, 7 << RCU_CFG1_DIS_THR_SHIFT); + } + + /* Configure core to host mode */ + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->hostmode), 0); + + /* Select DMA endianness (Host-endian: big-endian) */ + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->slave_endianness), 0); + regmap_update_bits(priv->regmap, priv->phy_reg_offset, + BIT(priv->reg_bits->host_endianness), + BIT(priv->reg_bits->host_endianness)); + + return 0; +} + 
+static int ltq_rcu_usb2_phy_power_on(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + struct device *dev = priv->dev; + int ret; + + reset_control_deassert(priv->phy_reset); + + ret = clk_prepare_enable(priv->phy_gate_clk); + if (ret) + dev_err(dev, "failed to enable PHY gate\n"); + + return ret; +} + +static int ltq_rcu_usb2_phy_power_off(struct phy *phy) +{ + struct ltq_rcu_usb2_priv *priv = phy_get_drvdata(phy); + + reset_control_assert(priv->phy_reset); + + clk_disable_unprepare(priv->phy_gate_clk); + + return 0; +} + +static struct phy_ops ltq_rcu_usb2_phy_ops = { + .init = ltq_rcu_usb2_phy_init, + .power_on = ltq_rcu_usb2_phy_power_on, + .power_off = ltq_rcu_usb2_phy_power_off, + .owner = THIS_MODULE, +}; + +static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv, + struct platform_device *pdev) +{ + struct device *dev = priv->dev; + const __be32 *offset; + int ret; + + priv->reg_bits = of_device_get_match_data(dev); + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) { + dev_err(dev, "Failed to lookup RCU regmap\n"); + return PTR_ERR(priv->regmap); + } + + offset = of_get_address(dev->of_node, 0, NULL, NULL); + if (!offset) { + dev_err(dev, "Failed to get RCU PHY reg offset\n"); + return -ENOENT; + } + priv->phy_reg_offset = __be32_to_cpu(*offset); + + if (priv->reg_bits->have_ana_cfg) { + offset = of_get_address(dev->of_node, 1, NULL, NULL); + if (!offset) { + dev_err(dev, "Failed to get RCU ANA CFG1 reg offset\n"); + return -ENOENT; + } + priv->ana_cfg1_reg_offset = __be32_to_cpu(*offset); + } + + priv->phy_gate_clk = devm_clk_get(dev, "phy"); + if (IS_ERR(priv->phy_gate_clk)) { + dev_err(dev, "Unable to get USB phy gate clk\n"); + return PTR_ERR(priv->phy_gate_clk); + } + + priv->ctrl_reset = devm_reset_control_get_shared(dev, "ctrl"); + if (IS_ERR(priv->ctrl_reset)) { + if (PTR_ERR(priv->ctrl_reset) != -EPROBE_DEFER) + dev_err(dev, "failed to get 'ctrl' reset\n"); + return PTR_ERR(priv->ctrl_reset); + } + + priv->phy_reset = devm_reset_control_get_optional(dev, "phy"); + if (IS_ERR(priv->phy_reset)) + return PTR_ERR(priv->phy_reset); + + return 0; +} + +static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ltq_rcu_usb2_priv *priv; + struct phy_provider *provider; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + + ret = ltq_rcu_usb2_of_parse(priv, pdev); + if (ret) + return ret; + + /* Reset USB core through reset controller */ + reset_control_deassert(priv->ctrl_reset); + + reset_control_assert(priv->phy_reset); + + priv->phy = devm_phy_create(dev, dev->of_node, <q_rcu_usb2_phy_ops); + if (IS_ERR(priv->phy)) { + dev_err(dev, "failed to create PHY\n"); + return PTR_ERR(priv->phy); + } + + phy_set_drvdata(priv->phy, priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return PTR_ERR(provider); + + dev_set_drvdata(priv->dev, priv); + return 0; +} + +static struct platform_driver ltq_rcu_usb2_phy_driver = { + .probe = ltq_rcu_usb2_phy_probe, + .driver = { + .name = "lantiq-rcu-usb2-phy", + .of_match_table = ltq_rcu_usb2_phy_of_match, + } +}; +module_platform_driver(ltq_rcu_usb2_phy_driver); + +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Lantiq XWAY USB2 PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 
b8b6ab072cd0702f1368645798a3b766d51f2425..71b944748304e2d14b2633b8e4c22b9760f10d4f 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -550,9 +550,9 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); if (on) - val |= d->mask; + val |= (BIT(d->hwirq % GPIO_PER_REG)); else - val &= ~d->mask; + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); writel(val, info->base + reg); spin_unlock_irqrestore(&info->irq_lock, flags); @@ -571,10 +571,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) val = readl(info->base + reg); switch (type) { case IRQ_TYPE_EDGE_RISING: - val &= ~d->mask; + val &= ~(BIT(d->hwirq % GPIO_PER_REG)); break; case IRQ_TYPE_EDGE_FALLING: - val |= d->mask; + val |= (BIT(d->hwirq % GPIO_PER_REG)); break; default: spin_unlock_irqrestore(&info->irq_lock, flags); @@ -624,11 +624,27 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } +static unsigned int armada_37xx_irq_startup(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + int irq = d->hwirq - chip->irq_base; + /* + * The mask field is a "precomputed bitmask for accessing the + * chip registers" which was introduced for the generic + * irqchip framework. As we don't use this framework, we can + * reuse this field for our own usage. + */ + d->mask = BIT(irq % GPIO_PER_REG); + + armada_37xx_irq_unmask(d); + + return 0; +} + static int armada_37xx_irqchip_register(struct platform_device *pdev, struct armada_37xx_pinctrl *info) { struct device_node *np = info->dev->of_node; - int nrirqs = info->data->nr_pins; struct gpio_chip *gc = &info->gpio_chip; struct irq_chip *irqchip = &info->irq_chip; struct resource res; @@ -666,8 +682,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, irqchip->irq_unmask = armada_37xx_irq_unmask; irqchip->irq_set_wake = armada_37xx_irq_set_wake; irqchip->irq_set_type = armada_37xx_irq_set_type; + irqchip->irq_startup = armada_37xx_irq_startup; irqchip->name = info->data->name; - ret = gpiochip_irqchip_add(gc, irqchip, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { @@ -680,19 +696,6 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, * controller. But we do not take advantage of this and use * the chained irq with all of them. */ - for (i = 0; i < nrirqs; i++) { - struct irq_data *d = irq_get_irq_data(gc->irq_base + i); - - /* - * The mask field is a "precomputed bitmask for - * accessing the chip registers" which was introduced - * for the generic irqchip framework. As we don't use - * this framework, we can reuse this field for our own - * usage. 
- */ - d->mask = BIT(i % GPIO_PER_REG); - } - for (i = 0; i < nr_irq_parent; i++) { int irq = irq_of_parse_and_map(np, i); diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 38af1ec2df0c2b9c172915906f0a63864b65f1b5..3f6b34febbf11249cf2a30e400647038f4a66f33 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -36,6 +36,7 @@ #include #include +#include "core.h" #include "pinctrl-utils.h" #include "pinctrl-amd.h" @@ -725,6 +726,69 @@ static const struct pinconf_ops amd_pinconf_ops = { .pin_config_group_set = amd_pinconf_group_set, }; +#ifdef CONFIG_PM_SLEEP +static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) +{ + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); + + if (!pd) + return false; + + /* + * Only restore the pin if it is actually in use by the kernel (or + * by userspace). + */ + if (pd->mux_owner || pd->gpio_owner || + gpiochip_line_is_irq(&gpio_dev->gc, pin)) + return true; + + return false; +} + +int amd_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + int i; + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + + if (!amd_gpio_should_save(gpio_dev, pin)) + continue; + + gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4); + } + + return 0; +} + +int amd_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev); + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + int i; + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + + if (!amd_gpio_should_save(gpio_dev, pin)) + continue; + + writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4); + } + + return 0; +} + +static const struct dev_pm_ops amd_gpio_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend, + amd_gpio_resume) +}; +#endif + static struct pinctrl_desc amd_pinctrl_desc = { .pins = kerncz_pins, .npins = ARRAY_SIZE(kerncz_pins), @@ -764,6 +828,14 @@ static int amd_gpio_probe(struct platform_device *pdev) return irq_base; } +#ifdef CONFIG_PM_SLEEP + gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins, + sizeof(*gpio_dev->saved_regs), + GFP_KERNEL); + if (!gpio_dev->saved_regs) + return -ENOMEM; +#endif + gpio_dev->pdev = pdev; gpio_dev->gc.direction_input = amd_gpio_direction_input; gpio_dev->gc.direction_output = amd_gpio_direction_output; @@ -853,6 +925,9 @@ static struct platform_driver amd_gpio_driver = { .driver = { .name = "amd_gpio", .acpi_match_table = ACPI_PTR(amd_gpio_acpi_match), +#ifdef CONFIG_PM_SLEEP + .pm = &amd_gpio_pm_ops, +#endif }, .probe = amd_gpio_probe, .remove = amd_gpio_remove, diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 5b1cb965c767038869a4df99919043c60b26f553..8fa453a59da5e657a3cd7732171a639f6fd57d19 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -97,6 +97,7 @@ struct amd_gpio { unsigned int hwbank_num; struct resource *res; struct platform_device *pdev; + u32 *saved_regs; }; /* KERNCZ configuration*/ diff --git a/drivers/pinctrl/sprd/Kconfig b/drivers/pinctrl/sprd/Kconfig index 6f4a7f9ac6fd65d689d69dee216a165aeee35766..bc7f3fab22f1c43142e260313d99ebb2be6f4320 100644 --- a/drivers/pinctrl/sprd/Kconfig +++ b/drivers/pinctrl/sprd/Kconfig @@ -4,6 +4,8 @@ config PINCTRL_SPRD bool "Spreadtrum pinctrl driver" + 
depends on OF + depends on ARCH_SPRD || COMPILE_TEST select PINMUX select PINCONF select GENERIC_PINCONF @@ -13,5 +15,6 @@ config PINCTRL_SPRD config PINCTRL_SPRD_SC9860 bool "Spreadtrum SC9860 pinctrl driver" + depends on PINCTRL_SPRD help Say Y here to enable Spreadtrum SC9860 pinctrl driver diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c index 7e7b9ac7e83612dbef6a8817d9667cafc8b00efe..63529911445c7815bd5b455db7d73fe03c328a77 100644 --- a/drivers/pinctrl/sprd/pinctrl-sprd.c +++ b/drivers/pinctrl/sprd/pinctrl-sprd.c @@ -353,13 +353,13 @@ static const struct pinctrl_ops sprd_pctrl_ops = { .dt_free_map = pinctrl_utils_free_map, }; -int sprd_pmx_get_function_count(struct pinctrl_dev *pctldev) +static int sprd_pmx_get_function_count(struct pinctrl_dev *pctldev) { return PIN_FUNC_MAX; } -const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, - unsigned int selector) +static const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, + unsigned int selector) { switch (selector) { case PIN_FUNC_1: @@ -375,10 +375,10 @@ const char *sprd_pmx_get_function_name(struct pinctrl_dev *pctldev, } } -int sprd_pmx_get_function_groups(struct pinctrl_dev *pctldev, - unsigned int selector, - const char * const **groups, - unsigned int * const num_groups) +static int sprd_pmx_get_function_groups(struct pinctrl_dev *pctldev, + unsigned int selector, + const char * const **groups, + unsigned int * const num_groups) { struct sprd_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sprd_pinctrl_soc_info *info = pctl->info; @@ -400,7 +400,7 @@ static int sprd_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned long reg; unsigned int val = 0; - if (group_selector > info->ngroups) + if (group_selector >= info->ngroups) return -EINVAL; switch (func_selector) { @@ -734,7 +734,7 @@ static int sprd_pinconf_group_get(struct pinctrl_dev *pctldev, struct sprd_pin_group *grp; unsigned int pin_id; - if (selector > info->ngroups) + if (selector >= info->ngroups) return -EINVAL; grp = &info->groups[selector]; @@ -753,7 +753,7 @@ static int sprd_pinconf_group_set(struct pinctrl_dev *pctldev, struct sprd_pin_group *grp; int ret, i; - if (selector > info->ngroups) + if (selector >= info->ngroups) return -EINVAL; grp = &info->groups[selector]; @@ -813,7 +813,7 @@ static void sprd_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, const char *name; int i, ret; - if (selector > info->ngroups) + if (selector >= info->ngroups) return; grp = &info->groups[selector]; @@ -1100,12 +1100,16 @@ int sprd_pinctrl_remove(struct platform_device *pdev) void sprd_pinctrl_shutdown(struct platform_device *pdev) { - struct pinctrl *pinctl = devm_pinctrl_get(&pdev->dev); + struct pinctrl *pinctl; struct pinctrl_state *state; + pinctl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(pinctl)) + return; state = pinctrl_lookup_state(pinctl, "shutdown"); - if (!IS_ERR(state)) - pinctrl_select_state(pinctl, state); + if (IS_ERR(state)) + return; + pinctrl_select_state(pinctl, state); } MODULE_DESCRIPTION("SPREADTRUM Pin Controller Driver"); diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h index c075ecb8e5dbf6967d84aa5559a7bfc333cb6af9..0a3d2ac275033245fc2649db03e5956a4ff862fb 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h @@ -17,7 +17,7 @@ #define __PINCTRL_UNIPHIER_H__ #include -#include +#include #include #include diff --git a/drivers/platform/chrome/chromeos_laptop.c 
b/drivers/platform/chrome/chromeos_laptop.c index e8a44a9bc916d9ff5a335bf74c9b0e979b26a90c..d8599736a41a268eba0eeeabb594d4d21499901f 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -518,7 +518,7 @@ static struct chromeos_laptop cr48 = { .callback = chromeos_laptop_dmi_matched, \ .driver_data = (void *)&board_ -static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = { { .ident = "Samsung Series 5 550", .matches = { diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index 308a853ac4f19ac3a126821b34d10f46ddb8776d..b0693fdec8c6a700acdfdc1f704bea97c3abfd68 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -14,7 +14,7 @@ #include #include -static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = { +static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 2b6436d1b6a47f447146b805f72a698ceabd492b..1baf720faf690abda2b769a338b6bd73782738c8 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -329,7 +329,7 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids); -static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { +static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { { /* * Today all Chromebooks/boxes ship with Google_* as version and diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index a8e4a539e704e2f85c8adb2193b12fd457575a84..6bcb750e18652d9c0cd95d9b9691eea959f023b0 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -805,7 +805,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata compal_dmi_table[] = { +static const struct dmi_system_id compal_dmi_table[] __initconst = { { .ident = "FL90/IFL90", .matches = { diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 85de30f93a9cc45c1fe0d634c7f687947e1a39bb..56a8195096a229c6975d3f78746ecbc4c6169660 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b) { struct acpi_device *device = bl_get_data(b); - if (b->props.power == FB_BLANK_POWERDOWN) - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); - else - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); + if (fext) { + if (b->props.power == FB_BLANK_POWERDOWN) + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); + else + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); + } return set_lcd_level(device, b->props.brightness); } diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 458e6c948c11e4b6d6e24b67535020e0047d6f21..c26baf77938eaba1657f9cda929f13aadf777822 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id) "ThinkPad T42p", so the order of the entries matters. If your ThinkPad is not recognized, please update to latest BIOS. 
This is especially the case for some R52 ThinkPads. */ -static struct dmi_system_id __initdata hdaps_whitelist[] = { +static const struct dmi_system_id hdaps_whitelist[] __initconst = { HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"), HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"), diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 610ac8391caa3f14d0ad66583d965099c5ccf28c..18d55cee5bcde5d5f1cd0248373598ccae9e8713 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) { } -static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = { +static const struct dmi_system_id ibm_rtl_dmi_table[] __initconst = { { \ .matches = { \ DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c index 6aa33c4a809fedb28e727d00cf599defd2156e23..5747f63c8d9f490c51edb31f5fc836ee89dab33f 100644 --- a/drivers/platform/x86/intel_oaktrail.c +++ b/drivers/platform/x86/intel_oaktrail.c @@ -299,7 +299,7 @@ static int dmi_check_cb(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id __initdata oaktrail_dmi_table[] = { +static const struct dmi_system_id oaktrail_dmi_table[] __initconst = { { .ident = "OakTrail platform", .matches = { diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8f98c211b440c0a7658d54980daa5fc46a195da3..4f3de2a8c4dfe4fd594a44d9fa9081a9157b9069 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -247,7 +247,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) return 1; }; -static struct dmi_system_id mlxplat_dmi_table[] __initdata = { +static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { .callback = mlxplat_dmi_default_matched, .matches = { diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 61b9014d26106707a65b651ddf3ba313f96b9c1c..d5bfcc602090a33dc6c360c671f11b9a54f33b04 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -605,7 +605,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi) return 1; } -static struct dmi_system_id __initdata msi_dmi_table[] = { +static const struct dmi_system_id msi_dmi_table[] __initconst = { { .ident = "MSI S270", .matches = { diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 0c703feaeb88feb80b5c904481d25222a6b7d801..d3cb26f6df736026631e86e91ac5b7dd3b77c49b 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id __initdata samsung_dmi_table[] = { +static const struct dmi_system_id samsung_dmi_table[] __initconst = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index e6aac725a0af75af40952aa542bc610fc5ce6d0d..a2fb7fbc327314c90409903cfa64403e1eebe579 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id __initdata samsungq10_dmi_table[] = { +static const struct dmi_system_id samsungq10_dmi_table[] __initconst = { { .ident = "Samsung Q10", 
.matches = { diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index bfae79534f44afd9615c7506e79b8052f64117aa..a16cea2be9c34a0c83d17a6ce7a35a1704956682 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -4880,7 +4880,7 @@ static struct acpi_driver sony_pic_driver = { .drv.pm = &sony_pic_pm, }; -static struct dmi_system_id __initdata sonypi_dmi_table[] = { +static const struct dmi_system_id sonypi_dmi_table[] __initconst = { { .ident = "Sony Vaio", .matches = { diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c index 4405286761709a159db314fa30c336c3c5a188ac..03d7620cd6d7c55019897948f877f7729d6546e5 100644 --- a/drivers/platform/x86/toshiba-wmi.c +++ b/drivers/platform/x86/toshiba-wmi.c @@ -64,7 +64,7 @@ static void toshiba_wmi_notify(u32 value, void *context) kfree(response.pointer); } -static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = { +static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = { { .ident = "Toshiba laptop", .matches = { diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 0ced908e7aa8b39a7a56f19762737d6178ca309d..e681140b85d8f8f2811d10311e901b95e73cb859 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -495,7 +495,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id pnpbios_dmi_table[] __initdata = { +static const struct dmi_system_id pnpbios_dmi_table[] __initconst = { { /* PnPBIOS GPF on boot */ .callback = exploding_pnp_bios, .ident = "Higraded P14H", diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 52d5251660b9b51c3c55710d38c90716c2c47976..e0c393214264adad04a08fb6e48fc34266144e54 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -47,6 +47,12 @@ config RESET_IMX7 help This enables the reset controller driver for i.MX7 SoCs. +config RESET_LANTIQ + bool "Lantiq XWAY Reset Driver" if COMPILE_TEST + default SOC_TYPE_XWAY + help + This enables the reset controller driver for Lantiq / Intel XWAY SoCs. + config RESET_LPC18XX bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST default ARCH_LPC18XX diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index b62783f50fe5b0c0b29b6152c2ddbd3f5e06cc21..d368367110e55780c5154995011cd193c709ab49 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o +obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o diff --git a/drivers/reset/reset-lantiq.c b/drivers/reset/reset-lantiq.c new file mode 100644 index 0000000000000000000000000000000000000000..11a582e50d3076dc5b10b8c9427d4a27a5b2379f --- /dev/null +++ b/drivers/reset/reset-lantiq.c @@ -0,0 +1,212 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define LANTIQ_RCU_RESET_TIMEOUT 10000 + +struct lantiq_rcu_reset_priv { + struct reset_controller_dev rcdev; + struct device *dev; + struct regmap *regmap; + u32 reset_offset; + u32 status_offset; +}; + +static struct lantiq_rcu_reset_priv *to_lantiq_rcu_reset_priv( + struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct lantiq_rcu_reset_priv, rcdev); +} + +static int lantiq_rcu_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev); + unsigned int status = (id >> 8) & 0x1f; + u32 val; + int ret; + + ret = regmap_read(priv->regmap, priv->status_offset, &val); + if (ret) + return ret; + + return !!(val & BIT(status)); +} + +static int lantiq_rcu_reset_status_timeout(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + int ret; + int retry = LANTIQ_RCU_RESET_TIMEOUT; + + do { + ret = lantiq_rcu_reset_status(rcdev, id); + if (ret < 0) + return ret; + if (ret == assert) + return 0; + usleep_range(20, 40); + } while (--retry); + + return -ETIMEDOUT; +} + +static int lantiq_rcu_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev); + unsigned int set = id & 0x1f; + u32 val = assert ? BIT(set) : 0; + int ret; + + ret = regmap_update_bits(priv->regmap, priv->reset_offset, BIT(set), + val); + if (ret) { + dev_err(priv->dev, "Failed to set reset bit %u\n", set); + return ret; + } + + + ret = lantiq_rcu_reset_status_timeout(rcdev, id, assert); + if (ret) + dev_err(priv->dev, "Failed to %s bit %u\n", + assert ? 
"assert" : "deassert", set); + + return ret; +} + +static int lantiq_rcu_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return lantiq_rcu_reset_update(rcdev, id, true); +} + +static int lantiq_rcu_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return lantiq_rcu_reset_update(rcdev, id, false); +} + +static int lantiq_rcu_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = lantiq_rcu_reset_assert(rcdev, id); + if (ret) + return ret; + + return lantiq_rcu_reset_deassert(rcdev, id); +} + +static const struct reset_control_ops lantiq_rcu_reset_ops = { + .assert = lantiq_rcu_reset_assert, + .deassert = lantiq_rcu_reset_deassert, + .status = lantiq_rcu_reset_status, + .reset = lantiq_rcu_reset_reset, +}; + +static int lantiq_rcu_reset_of_parse(struct platform_device *pdev, + struct lantiq_rcu_reset_priv *priv) +{ + struct device *dev = &pdev->dev; + const __be32 *offset; + + priv->regmap = syscon_node_to_regmap(dev->of_node->parent); + if (IS_ERR(priv->regmap)) { + dev_err(&pdev->dev, "Failed to lookup RCU regmap\n"); + return PTR_ERR(priv->regmap); + } + + offset = of_get_address(dev->of_node, 0, NULL, NULL); + if (!offset) { + dev_err(&pdev->dev, "Failed to get RCU reset offset\n"); + return -ENOENT; + } + priv->reset_offset = __be32_to_cpu(*offset); + + offset = of_get_address(dev->of_node, 1, NULL, NULL); + if (!offset) { + dev_err(&pdev->dev, "Failed to get RCU status offset\n"); + return -ENOENT; + } + priv->status_offset = __be32_to_cpu(*offset); + + return 0; +} + +static int lantiq_rcu_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + unsigned int status, set; + + set = reset_spec->args[0]; + status = reset_spec->args[1]; + + if (set >= rcdev->nr_resets || status >= rcdev->nr_resets) + return -EINVAL; + + return (status << 8) | set; +} + +static int lantiq_rcu_reset_probe(struct platform_device *pdev) +{ + struct lantiq_rcu_reset_priv *priv; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + platform_set_drvdata(pdev, priv); + + err = lantiq_rcu_reset_of_parse(pdev, priv); + if (err) + return err; + + priv->rcdev.ops = &lantiq_rcu_reset_ops; + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.of_node = pdev->dev.of_node; + priv->rcdev.nr_resets = 32; + priv->rcdev.of_xlate = lantiq_rcu_reset_xlate; + priv->rcdev.of_reset_n_cells = 2; + + return reset_controller_register(&priv->rcdev); +} + +static const struct of_device_id lantiq_rcu_reset_dt_ids[] = { + { .compatible = "lantiq,danube-reset", }, + { .compatible = "lantiq,xrx200-reset", }, + { }, +}; +MODULE_DEVICE_TABLE(of, lantiq_rcu_reset_dt_ids); + +static struct platform_driver lantiq_rcu_reset_driver = { + .probe = lantiq_rcu_reset_probe, + .driver = { + .name = "lantiq-reset", + .of_match_table = lantiq_rcu_reset_dt_ids, + }, +}; +module_platform_driver(lantiq_rcu_reset_driver); + +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Lantiq XWAY RCU Reset Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index ea19b4ff87a2f00837abd83c8e22a9e21016ff21..29f35e29d4801f83aa74b0a590bdb4412a9caa7c 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device) dasd_schedule_device_bh(device); if (device->block) { dasd_schedule_block_bh(device->block); - 
blk_mq_run_hw_queues(device->block->request_queue, true); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); } } EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); @@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device) dasd_schedule_device_bh(device); if (device->block) { dasd_schedule_block_bh(device->block); - blk_mq_run_hw_queues(device->block->request_queue, true); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); } if (!device->stopped) @@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev) if (device->block) { dasd_schedule_block_bh(device->block); - blk_mq_run_hw_queues(device->block->request_queue, true); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, + true); } clear_bit(DASD_FLAG_SUSPENDED, &device->flags); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 2e7fd966c515107714a4e612aaec5f36a57747b8..eb51893c74a4ba4053fe8d15e064fbf42bed9845 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq) static void scm_request_finish(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; - int *error; + blk_status_t *error; int i; for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { @@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) static void scm_blk_request_done(struct request *req) { - int *error = blk_mq_rq_to_pdu(req); + blk_status_t *error = blk_mq_rq_to_pdu(req); blk_mq_end_request(req, *error); } @@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) atomic_set(&bdev->queued_reqs, 0); bdev->tag_set.ops = &scm_mq_ops; - bdev->tag_set.cmd_size = sizeof(int); + bdev->tag_set.cmd_size = sizeof(blk_status_t); bdev->tag_set.nr_hw_queues = nr_requests; bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 489b583f263d2806db03a909b1c5d92002e7b848..e5c32f4b5287ebc4da569d403475d5624bac43ef 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev) static int recovery_check(struct device *dev, void *data) { struct ccw_device *cdev = to_ccwdev(dev); + struct subchannel *sch; int *redo = data; spin_lock_irq(cdev->ccwlock); switch (cdev->private->state) { + case DEV_STATE_ONLINE: + sch = to_subchannel(cdev->dev.parent); + if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) + break; + /* fall through */ case DEV_STATE_DISCONNECTED: CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", cdev->private->dev_id.ssid, @@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused) } spin_unlock_irq(&recovery_lock); } else - CIO_MSG_EVENT(4, "recovery: end\n"); + CIO_MSG_EVENT(3, "recovery: end\n"); } static DECLARE_WORK(recovery_work, recovery_work_func); @@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data) schedule_work(&recovery_work); } -static void ccw_device_schedule_recovery(void) +void ccw_device_schedule_recovery(void) { unsigned long flags; - CIO_MSG_EVENT(4, "recovery: schedule\n"); + CIO_MSG_EVENT(3, "recovery: schedule\n"); spin_lock_irqsave(&recovery_lock, flags); if (!timer_pending(&recovery_timer) || (recovery_phase != 
0)) { recovery_phase = 0; diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index ec497af99dd8acfb74c388f990201214ee02713e..69cb70f080a59a46d0c71dbd06a003b527e850d4 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev); void ccw_device_set_notoper(struct ccw_device *cdev); void ccw_device_set_timeout(struct ccw_device *, int); +void ccw_device_schedule_recovery(void); /* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 12016e32e5193796b471eb5430281bea5b6423f7..f98ea674c3d8054390a5486802fedd482f0dbe16 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type) } } +static void ccw_device_handle_broken_paths(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; + + if (broken_paths && (cdev->private->path_broken_mask != broken_paths)) + ccw_device_schedule_recovery(); + + cdev->private->path_broken_mask = broken_paths; +} + void ccw_device_verify_done(struct ccw_device *cdev, int err) { struct subchannel *sch; @@ -508,6 +519,7 @@ void ccw_device_verify_done(struct ccw_device *cdev, int err) memset(&cdev->private->irb, 0, sizeof(struct irb)); } ccw_device_report_path_events(cdev); + ccw_device_handle_broken_paths(cdev); break; case -ETIME: case -EUSERS: diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 220f49145b2f9bf48e29f92dc06f94ab7a9a0ef2..9a1b56b2df3e5e3eac8e2ad35b5def3bb149b44c 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -131,6 +131,8 @@ struct ccw_device_private { not operable */ u8 path_gone_mask; /* mask of paths, that became unavailable */ u8 path_new_mask; /* mask of paths, that became available */ + u8 path_broken_mask; /* mask of paths, which were found to be + unusable */ struct { unsigned int fast:1; /* post with "channel end" */ unsigned int repall:1; /* report every interrupt status */ diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a64285ab0728f14c870db3d72b19190d516d44f3..af3e4d3f9735fdc3430eea0ea05cc1a78e2fa306 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - if (!(fibptr->dev->supplement_adapter_info.supported_options2 & - AAC_OPTION_VARIABLE_BLOCK_SIZE)) + if (!aac_supports_2T(fibptr->dev)) { dresp->mnt[0].capacityhigh = 0; - if ((le32_to_cpu(dresp->status) != ST_OK) || - (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { - _aac_probe_container2(context, fibptr); - return; + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { + _aac_probe_container2(context, fibptr); + return; + } } scsicmd = (struct scsi_cmnd *) context; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 92fabf2b0c24c4c24f79ae8626d80ef09853816d..403a639574e5ea10c5c8500141204ebc513bd7c1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev) return 0; } +static inline int aac_supports_2T(struct aac_dev *dev) +{ + return (dev->adapter_info.options & 
AAC_OPT_NEW_COMM_64); +} + char * get_container_type(unsigned type); extern int numacb; extern char aac_driver_version[]; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 87cc4a93e637e6db517c12b0c01bb212eb2e8445..62beb259646692c26ea9a05a72cfe26019d24eed 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); - info = &aac->hba_map[bus][cid]; - if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || - info->devtype != AAC_DEVTYPE_NATIVE_RAW) + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) return FAILED; - if (info->reset_state > 0) + info = &aac->hba_map[bus][cid]; + + if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && + info->reset_state > 0) return FAILED; pr_err("%s: Host adapter reset request. SCSI hang ?\n", @@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) bus = aac_logical_to_phys(scmd_channel(cmd)); cid = scmd_id(cmd); - info = &aac->hba_map[bus][cid]; - if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || - info->devtype != AAC_DEVTYPE_NATIVE_RAW) + + if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) return FAILED; - if (info->reset_state > 0) + info = &aac->hba_map[bus][cid]; + + if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && + info->reset_state > 0) return FAILED; pr_err("%s: Host adapter reset request. SCSI hang ?\n", diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 48c2b2b34b7222ae656ab4f0c9113d59a0d58d24..0c9361c87ec8de8b853f6ccaa6132663a4b982bd 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev) aac_set_intx_mode(dev); src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); + + msleep(5000); } static void aac_send_hardware_soft_reset(struct aac_dev *dev) diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 690816f3c6af9a67ff38e801a4e9c440b763f114..421fe869a11ef0cbdb8130aa7b8ebdffbc775236 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) * Params : SCpnt - command causing reset * Returns : one of SCSI_RESET_ macros */ -int acornscsi_host_reset(struct Scsi_Host *shpnt) +int acornscsi_host_reset(struct scsi_cmnd *SCpnt) { - AS_Host *host = (AS_Host *)shpnt->hostdata; + AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; struct scsi_cmnd *SCptr; host->stats.resets += 1; @@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt) printk(KERN_WARNING "acornscsi_reset: "); print_sbic_status(asr, ssr, host->scsi.phase); - for (devidx = 0; devidx < 9; devidx ++) { + for (devidx = 0; devidx < 9; devidx++) acornscsi_dumplog(host, devidx); } #endif diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7e7ae786121b6e8458f7a7aa8954141c77e223a5..100bc4c8798d76852adb9224edc763f70f0741ff 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) "Extents and RPI headers enabled.\n"); } mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; goto out_free_bsmbx; } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 79ba3ce063a4f847ce8187d3290ecf5a309f62b9..23bdb1ca106e459355aed0fa7627d2665a7312a3 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -884,7 
+884,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, wcqe->total_data_placed); nCmd->transferred_length = 0; nCmd->rcv_rsplen = 0; - nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; + nCmd->status = NVME_SC_INTERNAL; } } diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 1f59e7a74c7b7b9dc7db660e6114420bc90ce8b7..6b33a1f24f56169a3a17803e5a6952da2e6445a6 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res) goto rel; if (unlikely(res == QLA_FUNCTION_FAILED)) - fd->status = NVME_SC_FC_TRANSPORT_ERROR; + fd->status = NVME_SC_INTERNAL; else fd->status = 0; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 38942050b2656bd104abb465a79b29c4e7039d43..dab876c6547392c0ccb75047c4cb767640407834 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd) if (sshdr.asc == 0x20 || /* Invalid command operation code */ sshdr.asc == 0x21 || /* Logical block address out of range */ sshdr.asc == 0x24 || /* Invalid field in cdb */ - sshdr.asc == 0x26) { /* Parameter value invalid */ + sshdr.asc == 0x26 || /* Parameter value invalid */ + sshdr.asc == 0x27) { /* Write protected */ set_host_byte(scmd, DID_TARGET_FAILURE); } return SUCCESS; diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 3c6bc0081fcbe34afcfc5c398a5637a2d31ae3ab..cbd4495d0ff9dedb92852d029845418e1a173072 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, list_for_each_entry(rport, &fc_host->rports, peers) { - if ((rport->port_state == FC_PORTSTATE_BLOCKED) && + if ((rport->port_state == FC_PORTSTATE_BLOCKED || + rport->port_state == FC_PORTSTATE_NOTPRESENT) && (rport->channel == channel)) { switch (fc_host->tgtid_bind_type) { @@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; - rport->roles = ids->roles; rport->port_state = FC_PORTSTATE_ONLINE; rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; @@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, fci->f->dd_fcrport_size); spin_unlock_irqrestore(shost->host_lock, flags); - if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { - scsi_target_unblock(&rport->dev, SDEV_RUNNING); - - /* initiate a scan of the target */ - spin_lock_irqsave(shost->host_lock, flags); - rport->flags |= FC_RPORT_SCAN_PENDING; - scsi_queue_work(shost, &rport->scan_work); - spin_unlock_irqrestore(shost->host_lock, flags); - } + fc_remote_port_rolechg(rport, ids->roles); return rport; } } @@ -3571,7 +3563,7 @@ fc_vport_sched_delete(struct work_struct *work) static enum blk_eh_timer_return fc_bsg_job_timeout(struct request *req) { - struct bsg_job *job = (void *) req->special; + struct bsg_job *job = blk_mq_rq_to_pdu(req); struct Scsi_Host *shost = fc_bsg_to_shost(job); struct fc_rport *rport = fc_bsg_to_rport(job); struct fc_internal *i = to_fc_internal(shost->transportt); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 8934f19bce8ea815d696189e7bb075c43cffaa5c..0190aeff5f7fde124da9e5d621a8e20d1f0b3eae 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3689,7 +3689,7 @@ iscsi_if_rx(struct sk_buff *skb) 
uint32_t group; nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || skb->len < nlh->nlmsg_len) { break; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 11c1738c21000862718ec4f60a663409a11c0b6a..fb9f8b5f4673c0883eefbd91242441e37d3d7189 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2915,8 +2915,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sd_config_discard(sdkp, SD_LBP_WS16); else if (sdkp->lbpws10) sd_config_discard(sdkp, SD_LBP_WS10); - else if (sdkp->lbpu && sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); else sd_config_discard(sdkp, SD_LBP_DISABLE); } diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cf0e71db9e5146068294ab46fb1db1fa69addbc9..0419c2298eabdb8f80897357a04be66e85e9a4ac 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q) return max_sectors << 9; } +static void +sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) +{ + Sg_request *srp; + int val; + unsigned int ms; + + val = 0; + list_for_each_entry(srp, &sfp->rq_list, entry) { + if (val > SG_MAX_QUEUE) + break; + rinfo[val].req_state = srp->done + 1; + rinfo[val].problem = + srp->header.masked_status & + srp->header.host_status & + srp->header.driver_status; + if (srp->done) + rinfo[val].duration = + srp->header.duration; + else { + ms = jiffies_to_msecs(jiffies); + rinfo[val].duration = + (ms > srp->header.duration) ? + (ms - srp->header.duration) : 0; + } + rinfo[val].orphan = srp->orphan; + rinfo[val].sg_io_owned = srp->sg_io_owned; + rinfo[val].pack_id = srp->header.pack_id; + rinfo[val].usr_ptr = srp->header.usr_ptr; + val++; + } +} + static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { @@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -EFAULT; else { sg_req_info_t *rinfo; - unsigned int ms; - rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, - GFP_KERNEL); + rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, + GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); - val = 0; - list_for_each_entry(srp, &sfp->rq_list, entry) { - if (val >= SG_MAX_QUEUE) - break; - memset(&rinfo[val], 0, SZ_SG_REQ_INFO); - rinfo[val].req_state = srp->done + 1; - rinfo[val].problem = - srp->header.masked_status & - srp->header.host_status & - srp->header.driver_status; - if (srp->done) - rinfo[val].duration = - srp->header.duration; - else { - ms = jiffies_to_msecs(jiffies); - rinfo[val].duration = - (ms > srp->header.duration) ? 
- (ms - srp->header.duration) : 0; - } - rinfo[val].orphan = srp->orphan; - rinfo[val].sg_io_owned = srp->sg_io_owned; - rinfo[val].pack_id = srp->header.pack_id; - rinfo[val].usr_ptr = srp->header.usr_ptr; - val++; - } + sg_fill_request_table(sfp, rinfo); read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 280a6a91a9e26f1f8aa07b510d07ae698fde07b4..2fcaff8645846ca19202306ecc91f5b37c1bf441 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ obj-$(CONFIG_ARCH_MXC) += imx/ +obj-$(CONFIG_SOC_XWAY) += lantiq/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_MESON) += amlogic/ obj-$(CONFIG_ARCH_QCOM) += qcom/ diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..be9e866d53e5022f59d73737cbdc4503c32f3f83 --- /dev/null +++ b/drivers/soc/lantiq/Makefile @@ -0,0 +1,2 @@ +obj-y += fpi-bus.o +obj-$(CONFIG_XRX200_PHY_FW) += gphy.o diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c new file mode 100644 index 0000000000000000000000000000000000000000..a671c9984c4cbf721441ee75fc17978d8464ad9b --- /dev/null +++ b/drivers/soc/lantiq/fpi-bus.c @@ -0,0 +1,87 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2011-2015 John Crispin + * Copyright (C) 2015 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define XBAR_ALWAYS_LAST 0x430 +#define XBAR_FPI_BURST_EN BIT(1) +#define XBAR_AHB_BURST_EN BIT(2) + +#define RCU_VR9_BE_AHB1S 0x00000008 + +static int ltq_fpi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res_xbar; + struct regmap *rcu_regmap; + void __iomem *xbar_membase; + u32 rcu_ahb_endianness_reg_offset; + int ret; + + res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xbar_membase = devm_ioremap_resource(dev, res_xbar); + if (IS_ERR(xbar_membase)) + return PTR_ERR(xbar_membase); + + /* RCU configuration is optional */ + rcu_regmap = syscon_regmap_lookup_by_phandle(np, "lantiq,rcu"); + if (IS_ERR(rcu_regmap)) + return PTR_ERR(rcu_regmap); + + ret = device_property_read_u32(dev, "lantiq,offset-endianness", + &rcu_ahb_endianness_reg_offset); + if (ret) { + dev_err(&pdev->dev, "Failed to get RCU reg offset\n"); + return ret; + } + + ret = regmap_update_bits(rcu_regmap, rcu_ahb_endianness_reg_offset, + RCU_VR9_BE_AHB1S, RCU_VR9_BE_AHB1S); + if (ret) { + dev_warn(&pdev->dev, + "Failed to configure RCU AHB endianness\n"); + return ret; + } + + /* disable fpi burst */ + ltq_w32_mask(XBAR_FPI_BURST_EN, 0, xbar_membase + XBAR_ALWAYS_LAST); + + return of_platform_populate(dev->of_node, NULL, NULL, dev); +} + +static const struct of_device_id ltq_fpi_match[] = { + { .compatible = "lantiq,xrx200-fpi" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ltq_fpi_match); + +static struct platform_driver ltq_fpi_driver = { + .probe = ltq_fpi_probe, + .driver = { + .name = "fpi-xway", + .of_match_table = ltq_fpi_match, + }, +}; + +module_platform_driver(ltq_fpi_driver); + +MODULE_DESCRIPTION("Lantiq FPI bus 
driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c new file mode 100644 index 0000000000000000000000000000000000000000..8d8659463b3e85f4e1513007dde398ced1bc3e6a --- /dev/null +++ b/drivers/soc/lantiq/gphy.c @@ -0,0 +1,260 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2012 John Crispin + * Copyright (C) 2016 Martin Blumenstingl + * Copyright (C) 2017 Hauke Mehrtens + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define XRX200_GPHY_FW_ALIGN (16 * 1024) + +struct xway_gphy_priv { + struct clk *gphy_clk_gate; + struct reset_control *gphy_reset; + struct reset_control *gphy_reset2; + struct notifier_block gphy_reboot_nb; + void __iomem *membase; + char *fw_name; +}; + +struct xway_gphy_match_data { + char *fe_firmware_name; + char *ge_firmware_name; +}; + +static const struct xway_gphy_match_data xrx200a1x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", +}; + +static const struct xway_gphy_match_data xrx200a2x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", +}; + +static const struct xway_gphy_match_data xrx300_gphy_data = { + .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", + .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", +}; + +static const struct of_device_id xway_gphy_match[] = { + { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data }, + { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data }, + { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data }, + { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, xway_gphy_match); + +static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) +{ + return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); +} + +static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, + unsigned long code, void *unused) +{ + struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); + + if (priv) { + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + } + + return NOTIFY_DONE; +} + +static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, + dma_addr_t *dev_addr) +{ + const struct firmware *fw; + void *fw_addr; + dma_addr_t dma_addr; + size_t size; + int ret; + + ret = request_firmware(&fw, priv->fw_name, dev); + if (ret) { + dev_err(dev, "failed to load firmware: %s, error: %i\n", + priv->fw_name, ret); + return ret; + } + + /* + * GPHY cores need the firmware code in a persistent and contiguous + * memory area with a 16 kB boundary aligned start address. 
+ */ + size = fw->size + XRX200_GPHY_FW_ALIGN; + + fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); + if (fw_addr) { + fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); + *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); + memcpy(fw_addr, fw->data, fw->size); + } else { + dev_err(dev, "failed to alloc firmware memory\n"); + ret = -ENOMEM; + } + + release_firmware(fw); + + return ret; +} + +static int xway_gphy_of_probe(struct platform_device *pdev, + struct xway_gphy_priv *priv) +{ + struct device *dev = &pdev->dev; + const struct xway_gphy_match_data *gphy_fw_name_cfg; + u32 gphy_mode; + int ret; + struct resource *res_gphy; + + gphy_fw_name_cfg = of_device_get_match_data(dev); + + priv->gphy_clk_gate = devm_clk_get(dev, NULL); + if (IS_ERR(priv->gphy_clk_gate)) { + dev_err(dev, "Failed to lookup gate clock\n"); + return PTR_ERR(priv->gphy_clk_gate); + } + + res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->membase = devm_ioremap_resource(dev, res_gphy); + if (IS_ERR(priv->membase)) + return PTR_ERR(priv->membase); + + priv->gphy_reset = devm_reset_control_get(dev, "gphy"); + if (IS_ERR(priv->gphy_reset)) { + if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER) + dev_err(dev, "Failed to lookup gphy reset\n"); + return PTR_ERR(priv->gphy_reset); + } + + priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2"); + if (IS_ERR(priv->gphy_reset2)) + return PTR_ERR(priv->gphy_reset2); + + ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode); + /* Default to GE mode */ + if (ret) + gphy_mode = GPHY_MODE_GE; + + switch (gphy_mode) { + case GPHY_MODE_FE: + priv->fw_name = gphy_fw_name_cfg->fe_firmware_name; + break; + case GPHY_MODE_GE: + priv->fw_name = gphy_fw_name_cfg->ge_firmware_name; + break; + default: + dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); + return -EINVAL; + } + + return 0; +} + +static int xway_gphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xway_gphy_priv *priv; + dma_addr_t fw_addr = 0; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = xway_gphy_of_probe(pdev, priv); + if (ret) + return ret; + + ret = clk_prepare_enable(priv->gphy_clk_gate); + if (ret) + return ret; + + ret = xway_gphy_load(dev, priv, &fw_addr); + if (ret) { + clk_disable_unprepare(priv->gphy_clk_gate); + return ret; + } + + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + + iowrite32be(fw_addr, priv->membase); + + reset_control_deassert(priv->gphy_reset); + reset_control_deassert(priv->gphy_reset2); + + /* assert the gphy reset because it can hang after a reboot: */ + priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; + priv->gphy_reboot_nb.priority = -1; + + ret = register_reboot_notifier(&priv->gphy_reboot_nb); + if (ret) + dev_warn(dev, "Failed to register reboot notifier\n"); + + platform_set_drvdata(pdev, priv); + + return ret; +} + +static int xway_gphy_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct xway_gphy_priv *priv = platform_get_drvdata(pdev); + int ret; + + reset_control_assert(priv->gphy_reset); + reset_control_assert(priv->gphy_reset2); + + iowrite32be(0, priv->membase); + + clk_disable_unprepare(priv->gphy_clk_gate); + + ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); + if (ret) + dev_warn(dev, "Failed to unregister reboot notifier\n"); + + return 0; +} + +static struct platform_driver xway_gphy_driver = { + .probe = xway_gphy_probe, + .remove = 
xway_gphy_remove, + .driver = { + .name = "xway-rcu-gphy", + .of_match_table = xway_gphy_match, + }, +}; + +module_platform_driver(xway_gphy_driver); + +MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_DESCRIPTION("Lantiq XWAY GPHY Firmware Loader"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 6ba270e0494ddc689eb7be7a12eecc819bc1ac7b..0f695df14c9d8f0a5c0a95ea8c28dc075c2aa36c 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -294,19 +294,9 @@ static int ashmem_release(struct inode *ignored, struct file *file) return 0; } -/** - * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file - * @file: The associated backing file. - * @buf: The buffer of data being written to - * @len: The number of bytes being read - * @pos: The position of the first byte to read. - * - * Return: 0 if successful, or another return code if not. - */ -static ssize_t ashmem_read(struct file *file, char __user *buf, - size_t len, loff_t *pos) +static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter) { - struct ashmem_area *asma = file->private_data; + struct ashmem_area *asma = iocb->ki_filp->private_data; int ret = 0; mutex_lock(&ashmem_mutex); @@ -320,20 +310,17 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, goto out_unlock; } - mutex_unlock(&ashmem_mutex); - /* * asma and asma->file are used outside the lock here. We assume * once asma->file is set it will never be changed, and will not * be destroyed until all references to the file are dropped and * ashmem_release is called. 
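
For context on this ashmem hunk: the driver moves from ->read plus __vfs_read() to ->read_iter plus vfs_iter_read(), taking the file position from iocb->ki_pos and describing the user buffer with an iov_iter, so the __user force-casts disappear. A minimal, hypothetical sketch of that forwarding pattern follows; the fwd_ctx container and its setup are made up for the example and stand in for however a driver records its backing file.

```c
/* Hypothetical ->read_iter forwarding sketch, not taken from the patch. */
#include <linux/fs.h>
#include <linux/uio.h>

struct fwd_ctx {
	struct file *backing;	/* set once at open time, never changed afterwards */
};

static ssize_t fwd_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct fwd_ctx *ctx = iocb->ki_filp->private_data;
	ssize_t ret;

	ret = vfs_iter_read(ctx->backing, to, &iocb->ki_pos, 0);
	if (ret > 0)
		ctx->backing->f_pos = iocb->ki_pos;	/* keep the backing cursor in sync */
	return ret;
}
```
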
*/ - ret = __vfs_read(asma->file, buf, len, pos); - if (ret >= 0) - /** Update backing file pos, since f_ops->read() doesn't */ - asma->file->f_pos = *pos; - return ret; - + mutex_unlock(&ashmem_mutex); + ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0); + mutex_lock(&ashmem_mutex); + if (ret > 0) + asma->file->f_pos = iocb->ki_pos; out_unlock: mutex_unlock(&ashmem_mutex); return ret; @@ -834,7 +821,7 @@ static const struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, .release = ashmem_release, - .read = ashmem_read, + .read_iter = ashmem_read_iter, .llseek = ashmem_llseek, .mmap = ashmem_mmap, .unlocked_ioctl = ashmem_ioctl, diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c index 0d33e520f6350f599d8e2d3b7a6a4a2e561ce960..cc18e25103ca5976dd914de4b2021db28c10b6e6 100644 --- a/drivers/staging/comedi/drivers/serial2002.c +++ b/drivers/staging/comedi/drivers/serial2002.c @@ -106,16 +106,8 @@ static long serial2002_tty_ioctl(struct file *f, unsigned int op, static int serial2002_tty_write(struct file *f, unsigned char *buf, int count) { - const char __user *p = (__force const char __user *)buf; - int result; - loff_t offset = 0; - mm_segment_t oldfs; - - oldfs = get_fs(); - set_fs(KERNEL_DS); - result = __vfs_write(f, p, count, &offset); - set_fs(oldfs); - return result; + loff_t pos = 0; + return kernel_write(f, buf, count, &pos); } static void serial2002_tty_read_poll_wait(struct file *f, int timeout) @@ -148,19 +140,14 @@ static int serial2002_tty_read(struct file *f, int timeout) { unsigned char ch; int result; + loff_t pos = 0; result = -1; if (!IS_ERR(f)) { - mm_segment_t oldfs; - char __user *p = (__force char __user *)&ch; - loff_t offset = 0; - - oldfs = get_fs(); - set_fs(KERNEL_DS); if (f->f_op->poll) { serial2002_tty_read_poll_wait(f, timeout); - if (__vfs_read(f, p, 1, &offset) == 1) + if (kernel_read(f, &ch, 1, &pos) == 1) result = ch; } else { /* Device does not support poll, busy wait */ @@ -171,14 +158,13 @@ static int serial2002_tty_read(struct file *f, int timeout) if (retries >= timeout) break; - if (__vfs_read(f, p, 1, &offset) == 1) { + if (kernel_read(f, &ch, 1, &pos) == 1) { result = ch; break; } usleep_range(100, 1000); } } - set_fs(oldfs); } return result; } diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 68f283a2744c307d01fedf4531e0ddf0490a7246..f916b475e767be6c5770010fc92196a6bc17957d 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -731,8 +731,7 @@ int cfs_tracefile_dump_all_pages(char *filename) __LASSERT_TAGE_INVARIANT(tage); buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &filp->f_pos); + rc = kernel_write(filp, buf, tage->used, &filp->f_pos); kunmap(tage->page); if (rc != (int)tage->used) { @@ -976,7 +975,6 @@ static int tracefiled(void *arg) struct tracefiled_ctl *tctl = arg; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - mm_segment_t __oldfs; struct file *filp; char *buf; int last_loop = 0; @@ -1014,8 +1012,6 @@ static int tracefiled(void *arg) __LASSERT(list_empty(&pc.pc_pages)); goto end_loop; } - __oldfs = get_fs(); - set_fs(get_ds()); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; @@ -1028,8 +1024,7 @@ static int tracefiled(void *arg) f_pos = i_size_read(file_inode(filp)); buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char 
__user *)buf, - tage->used, &f_pos); + rc = kernel_write(filp, buf, tage->used, &f_pos); kunmap(tage->page); if (rc != (int)tage->used) { @@ -1040,7 +1035,6 @@ static int tracefiled(void *arg) break; } } - set_fs(__oldfs); filp_close(filp, NULL); put_pages_on_daemon_list(&pc); diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index d855129768f8ff8ed99bde2cd365b3f125422f18..25393e3a0fe89b012273381ff4916c77b8c8668e 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -210,7 +210,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, data->ocd_ibits_known = MDS_INODELOCK_FULL; data->ocd_version = LUSTRE_VERSION_CODE; - if (sb->s_flags & MS_RDONLY) + if (sb_rdonly(sb)) data->ocd_connect_flags |= OBD_CONNECT_RDONLY; if (sbi->ll_flags & LL_SBI_USER_XATTR) data->ocd_connect_flags |= OBD_CONNECT_XATTR; @@ -2031,7 +2031,7 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data) int err; __u32 read_only; - if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { + if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) { read_only = *flags & MS_RDONLY; err = obd_set_info_async(NULL, sbi->ll_md_exp, sizeof(KEY_READ_ONLY), diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 4897dbd3286d4c818d890b6a17532d1a12edec4d..5cc2b325520767ec77c106ec276f3e0fa71bb056 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -561,8 +561,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, } } - if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && - dentry->d_sb->s_flags & MS_RDONLY) + if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && sb_rdonly(dentry->d_sb)) return ERR_PTR(-EROFS); if (it->it_op & IT_CREAT) diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c index 8f0707a27a836df5cff134edc4f14def4b36fb8b..4f0a42633d5af99c43a4edf3a6b25c328825edcd 100644 --- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c +++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c @@ -52,7 +52,6 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) struct kuc_hdr *kuch = (struct kuc_hdr *)payload; ssize_t count = kuch->kuc_msglen; loff_t offset = 0; - mm_segment_t fs; int rc = -ENXIO; if (IS_ERR_OR_NULL(filp)) @@ -63,18 +62,14 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) return rc; } - fs = get_fs(); - set_fs(KERNEL_DS); while (count > 0) { - rc = vfs_write(filp, (void __force __user *)payload, - count, &offset); + rc = kernel_write(filp, payload, count, &offset); if (rc < 0) break; count -= rc; payload += rc; rc = 0; } - set_fs(fs); if (rc < 0) CWARN("message send failed (%d)\n", rc); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index a91b7c25ffd411abba6932d0c5e6ce9556cce657..928127642574b2d4b90592dfcd3688477a7d7a96 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -896,13 +896,14 @@ static int core_alua_write_tpg_metadata( u32 md_buf_len) { struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600); + loff_t pos = 0; int ret; if (IS_ERR(file)) { pr_err("filp_open(%s) for ALUA metadata failed\n", path); return -ENODEV; } - ret = kernel_write(file, md_buf, md_buf_len, 0); + ret = kernel_write(file, md_buf, md_buf_len, &pos); if (ret < 0) 
pr_err("Error writing ALUA metadata file: %s\n", path); fput(file); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 24cf11d9e50a5b457aae518e5e2c045705786a69..c629817a8854bea49a18c6b3f93f3f8923f99079 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -443,7 +443,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, for (prot = 0; prot < prot_length;) { sector_t len = min_t(sector_t, bufsize, prot_length - prot); - ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot); + ssize_t ret = kernel_write(prot_fd, buf, len, &pos); if (ret != len) { pr_err("vfs_write to prot file failed: %zd\n", ret); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 6d5def64db61d98be33effdacb9deaa55c2147a0..dd2cd8048582ce7520661b5ec3477a8e431b0f86 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1974,6 +1974,7 @@ static int __core_scsi3_write_aptpl_to_file( char path[512]; u32 pr_aptpl_buf_len; int ret; + loff_t pos = 0; memset(path, 0, 512); @@ -1993,7 +1994,7 @@ static int __core_scsi3_write_aptpl_to_file( pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */ - ret = kernel_write(file, buf, pr_aptpl_buf_len, 0); + ret = kernel_write(file, buf, pr_aptpl_buf_len, &pos); if (ret < 0) pr_debug("Error writing APTPL metadata file: %s\n", path); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index ae8cfc81ffc5880d067369009d2925330fbb3fcd..d9123f9957058f4870008f2a5e1cf57368b377aa 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -371,7 +371,7 @@ static const struct file_operations port_regs_ops = { }; #endif /* CONFIG_DEBUG_FS */ -static struct dmi_system_id pch_uart_dmi_table[] = { +static const struct dmi_system_id pch_uart_dmi_table[] = { { .ident = "CM-iTC", { diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index f95bddd6513f7e88e4d511508afef2b9c84c063b..d6bd0244b008aa93f91e63049fe96fbc0b9eccfe 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -686,9 +686,8 @@ static int do_read(struct fsg_common *common) /* Perform the read */ file_offset_tmp = file_offset; - nread = vfs_read(curlun->filp, - (char __user *)bh->buf, - amount, &file_offset_tmp); + nread = kernel_read(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long)file_offset, (int)nread); if (signal_pending(current)) @@ -883,8 +882,8 @@ static int do_write(struct fsg_common *common) /* Perform the write */ file_offset_tmp = file_offset; - nwritten = vfs_write(curlun->filp, (char __user *)bh->buf, - amount, &file_offset_tmp); + nwritten = kernel_write(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, (unsigned long long)file_offset, (int)nwritten); if (signal_pending(current)) @@ -1021,9 +1020,8 @@ static int do_verify(struct fsg_common *common) /* Perform the read */ file_offset_tmp = file_offset; - nread = vfs_read(curlun->filp, - (char __user *) bh->buf, - amount, &file_offset_tmp); + nread = kernel_read(curlun->filp, bh->buf, amount, + &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); @@ -2453,13 +2451,6 @@ static int fsg_main_thread(void *common_) /* Allow the thread to be frozen */ set_freezable(); - /* 
- * Arrange for userspace references to be interpreted as kernel - * pointers. That way we can pass a kernel pointer to a routine - * that expects a __user pointer and it will work okay. - */ - set_fs(get_ds()); - /* The main loop */ while (common->state != FSG_STATE_TERMINATED) { if (exception_in_progress(common) || signal_pending(current)) { diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 84a110a719cbd73256c326b13fe19619b6cfde74..96312c3afc079ce0bc13c7d8039c40f857edeedc 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 -static struct dmi_system_id kb3886bl_device_table[] __initdata = { +static const struct dmi_system_id kb3886bl_device_table[] __initconst = { { .ident = "Sahara Touch-iT", .matches = { diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 2111d06f8c81a5260235befb029d59901e7bde28..7f1f1fbcef9e269d1b6e48b25bdeb51b0e68426a 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -117,7 +117,7 @@ config DUMMY_CONSOLE_ROWS Select 25 if you use a 640x480 resolution by default. config FRAMEBUFFER_CONSOLE - tristate "Framebuffer Console support" + bool "Framebuffer Console support" depends on FB && !UML select VT_HW_CONSOLE_BINDING select CRC32 diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile index 43bfa485db9687411411e1fecdef4c29565c7ab6..eb2cbec5264340300078fa3b74de31ce28885aaa 100644 --- a/drivers/video/console/Makefile +++ b/drivers/video/console/Makefile @@ -7,13 +7,5 @@ obj-$(CONFIG_SGI_NEWPORT_CONSOLE) += newport_con.o obj-$(CONFIG_STI_CONSOLE) += sticon.o sticore.o obj-$(CONFIG_VGA_CONSOLE) += vgacon.o obj-$(CONFIG_MDA_CONSOLE) += mdacon.o -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon.o bitblit.o softcursor.o -ifeq ($(CONFIG_FB_TILEBLITTING),y) -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += tileblit.o -endif -ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION),y) -obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \ - fbcon_ccw.o -endif obj-$(CONFIG_FB_STI) += sticore.o diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index dc06cb6a15dc094b4d107c843320f6e97c4e0103..445b1dc5d441a1863ce64cf3807f03d77ae662c7 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -398,9 +398,8 @@ static const char *vgacon_startup(void) #endif } - /* boot_params.screen_info initialized? */ - if ((screen_info.orig_video_mode == 0) && - (screen_info.orig_video_lines == 0) && + /* boot_params.screen_info reasonably initialized? 
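
Several hunks above (comedi's serial2002, lustre's tracefile and kernelcomm, the target metadata writers, f_mass_storage) replace the get_fs()/set_fs(KERNEL_DS) dance around vfs_read()/vfs_write() with kernel_read()/kernel_write(), which accept a kernel buffer and an explicit loff_t cursor directly. A small sketch of a write loop on that interface; the helper name is illustrative and not taken from the patch.

```c
/* Illustrative helper: write a kernel buffer out via kernel_write(). */
#include <linux/fs.h>

static int write_blob(struct file *filp, const void *data, size_t len)
{
	const char *buf = data;
	loff_t pos = 0;

	while (len > 0) {
		ssize_t ret = kernel_write(filp, buf, len, &pos);	/* pos advances */

		if (ret < 0)
			return ret;	/* propagate the error */
		if (ret == 0)
			return -EIO;	/* avoid spinning on a stuck file */
		buf += ret;
		len -= ret;
	}
	return 0;
}
```
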
*/ + if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c index c0c6b88d383915fe6e5924bd49434f6a13170278..d48e96088f76fcf90c3b53a628952dcc1e8b90e2 100644 --- a/drivers/video/fbdev/68328fb.c +++ b/drivers/video/fbdev/68328fb.c @@ -72,7 +72,7 @@ static struct fb_var_screeninfo mc68x328fb_default __initdata = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo mc68x328fb_fix __initdata = { +static const struct fb_fix_screeninfo mc68x328fb_fix __initconst = { .id = "68328fb", .type = FB_TYPE_PACKED_PIXELS, .xpanstep = 1, diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 5c6696bb56da89d1ca86f00f16c3c408303ca5ad..5e58f5ec0a28e449afa8813a652b1aa3469e0721 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2173,7 +2173,7 @@ config FB_PS3_DEFAULT_SIZE_M config FB_XILINX tristate "Xilinx frame buffer support" - depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ) + depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index ffc2c33c6cef520a9aec6473b1b5fd1af96039fd..36d25190b48c80991dcabc0c5480c897f60e34eb 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -1035,7 +1035,7 @@ static struct clcd_vendor_data vendor_nomadik = { .init_panel = nomadik_clcd_init_panel, }; -static struct amba_id clcdfb_id_table[] = { +static const struct amba_id clcdfb_id_table[] = { { .id = 0x00041110, .mask = 0x000ffffe, diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index 6a317de7082c9874d23c1fa5b4dfaa10dacbecda..13ba371e70aa3630795164af9f6608c4fb90753e 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -1157,7 +1157,7 @@ static int ark_pci_resume (struct pci_dev* dev) /* List of boards that we are trying to support */ -static struct pci_device_id ark_devices[] = { +static const struct pci_device_id ark_devices[] = { {PCI_DEVICE(0xEDD8, 0xA099)}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index 91eea4583382dfa12a20204eb907789d28e4d4ba..ea31054a28ca80205c50d6b0a97f7a71fb177297 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -592,7 +592,7 @@ static void asiliantfb_remove(struct pci_dev *dp) framebuffer_release(p); } -static struct pci_device_id asiliantfb_pci_tbl[] = { +static const struct pci_device_id asiliantfb_pci_tbl[] = { { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 669ecc755fa9852f97479a5819bc71ca11314c07..e06358da4b99698525fa57d3dcca6666757d23dd 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -320,7 +320,7 @@ static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int } } -static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = { +static const struct fb_fix_screeninfo atmel_lcdfb_fix __initconst = { .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index fa07242a78d2910966db4492618b7e5d5ca3282f..db18474607c9c9c701353b8c347baf3e39c5ba63 100644 --- 
a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -116,7 +116,7 @@ static const struct fb_var_screeninfo default_var = { /* default modedb mode */ /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { .refresh = 60, .xres = 640, .yres = 480, @@ -166,7 +166,7 @@ static int aty128_pci_resume(struct pci_dev *pdev); static int aty128_do_resume(struct pci_dev *pdev); /* supported Rage128 chipsets */ -static struct pci_device_id aty128_pci_tbl[] = { +static const struct pci_device_id aty128_pci_tbl[] = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index b55fdac9c9f51a7b0b6d9fe5d2c804b3046f2f05..3ec72f19114badf5cb26fe1cb530c04916b9d8c0 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -274,7 +274,7 @@ static struct fb_var_screeninfo default_var = { 0, FB_VMODE_NONINTERLACED }; -static struct fb_videomode defmode = { +static const struct fb_videomode defmode = { /* 640x480 @ 60 Hz, 31.5 kHz hsync */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED @@ -1855,7 +1855,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT) case ATYIO_CLKR: if (M64_HAS(INTEGRATED)) { - struct atyclk clk; + struct atyclk clk = { 0 }; union aty_pll *pll = &par->pll; u32 dsp_config = pll->ct.dsp_config; u32 dsp_on_off = pll->ct.dsp_on_off; @@ -3756,7 +3756,7 @@ static void atyfb_pci_remove(struct pci_dev *pdev) atyfb_remove(info); } -static struct pci_device_id atyfb_pci_tbl[] = { +static const struct pci_device_id atyfb_pci_tbl[] = { #ifdef CONFIG_FB_ATY_GX { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) }, diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 6b4c7872b37519f9fe6238d4fa505d76e14d4c92..1e2ec360f8c16da91b8ee9e921452ef4bda77834 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -96,7 +96,7 @@ #define CHIP_DEF(id, family, flags) \ { PCI_VENDOR_ID_ATI, id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (flags) | (CHIP_FAMILY_##family) } -static struct pci_device_id radeonfb_pci_table[] = { +static const struct pci_device_id radeonfb_pci_table[] = { /* Radeon Xpress 200m */ CHIP_DEF(PCI_CHIP_RS480_5955, RS480, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY), CHIP_DEF(PCI_CHIP_RS482_5975, RS480, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY), @@ -2241,7 +2241,7 @@ static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj, return radeon_show_one_edid(buf, off, count, rinfo->mon2_EDID); } -static struct bin_attribute edid1_attr = { +static const struct bin_attribute edid1_attr = { .attr = { .name = "edid1", .mode = 0444, @@ -2250,7 +2250,7 @@ static struct bin_attribute edid1_attr = { .read = radeon_show_edid1, }; -static struct bin_attribute edid2_attr = { +static const struct bin_attribute edid2_attr = { .attr = { .name = "edid2", .mode = 0444, diff --git a/drivers/video/fbdev/bfin-lq035q1-fb.c b/drivers/video/fbdev/bfin-lq035q1-fb.c index b594a58ff21d3e63e5c7a6dd29bc6c86919cf491..b459354ad9400bcc386d188197c92574ec618558 100644 --- a/drivers/video/fbdev/bfin-lq035q1-fb.c +++ b/drivers/video/fbdev/bfin-lq035q1-fb.c 
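
Many of the fbdev hunks in this part of the series only constify driver ID and mode tables (pci_device_id, amba_id, zorro_device_id, fb_videomode and friends) so they land in .rodata; MODULE_DEVICE_TABLE() and the bus matching code accept const tables, so behaviour is unchanged. A minimal example of such a table, with placeholder vendor/device IDs:

```c
/* Illustrative const PCI ID table; the IDs are placeholders. */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_ids);
```
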
@@ -841,7 +841,7 @@ static int bfin_lq035q1_resume(struct device *dev) return 0; } -static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = { +static const struct dev_pm_ops bfin_lq035q1_dev_pm_ops = { .suspend = bfin_lq035q1_suspend, .resume = bfin_lq035q1_resume, }; diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c index 8c5b281f0b299c864664c3bba78ec3b0b3ef0599..7aa9720723577ece2bf536e2776b21e28b047c08 100644 --- a/drivers/video/fbdev/bw2.c +++ b/drivers/video/fbdev/bw2.c @@ -333,8 +333,8 @@ static int bw2_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: bwtwo at %lx:%lx\n", - dp->full_name, par->which_io, info->fix.smem_start); + printk(KERN_INFO "%pOF: bwtwo at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index 43e915eaf606f31bd5c4dee0367a45fb0647bf71..8de88b129b62244c11e44092687020c2a7b0257f 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -553,8 +553,8 @@ static int cg14_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n", - dp->full_name, + printk(KERN_INFO "%pOF: cgfourteen at %lx:%lx, %dMB\n", + dp, par->iospace, info->fix.smem_start, par->ramsize >> 20); diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index 716391f22e75f97ac55751cd3b20b4d1f3e24496..6c334260cf532faca8b36f9e8e05cb76d167fcf7 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -412,8 +412,8 @@ static int cg3_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: cg3 at %lx:%lx\n", - dp->full_name, par->which_io, info->fix.smem_start); + printk(KERN_INFO "%pOF: cg3 at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/cg6.c b/drivers/video/fbdev/cg6.c index bdf901ed52910e1fc8f08dde72eff1f22d312190..0296c21acc78a2bc5207694b4821c9c3f30e1750 100644 --- a/drivers/video/fbdev/cg6.c +++ b/drivers/video/fbdev/cg6.c @@ -810,8 +810,8 @@ static int cg6_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n", - dp->full_name, info->fix.id, + printk(KERN_INFO "%pOF: CGsix [%s] at %lx:%lx\n", + dp, info->fix.id, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 59abdc6a97f668ed1b67e6bd9e958b74479bd99e..f103665cad431c449422b87bea145934444a46e4 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -292,7 +292,7 @@ static void chips_hw_init(void) write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); } -static struct fb_fix_screeninfo chipsfb_fix = { +static const struct fb_fix_screeninfo chipsfb_fix = { .id = "C&T 65550", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -309,7 +309,7 @@ static struct fb_fix_screeninfo chipsfb_fix = { .smem_len = 0x100000, /* 1MB */ }; -static struct fb_var_screeninfo chipsfb_var = { +static const struct fb_var_screeninfo chipsfb_var = { .xres = 800, .yres = 600, .xres_virtual = 800, diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c index 9da90bd242f4e4a0c36af5fd4e7d34254e9e6ecb..0ef633e278a1b782ed9a903eb5d0f0848d3d7953 100644 --- a/drivers/video/fbdev/cobalt_lcdfb.c +++ b/drivers/video/fbdev/cobalt_lcdfb.c @@ -126,7 +126,7 @@ static void lcd_clear(struct fb_info *info) lcd_write_control(info, LCD_RESET); } -static struct 
fb_fix_screeninfo cobalt_lcdfb_fix = { +static const struct fb_fix_screeninfo cobalt_lcdfb_fix = { .id = "cobalt-lcd", .type = FB_TYPE_TEXT, .type_aux = FB_AUX_TEXT_MDA, diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile index 9e3ddf225393e697ce0993814eb37f1dbe6f087a..73493bbd7a15a6278708b55315563cdbd08e3c9d 100644 --- a/drivers/video/fbdev/core/Makefile +++ b/drivers/video/fbdev/core/Makefile @@ -4,6 +4,20 @@ obj-$(CONFIG_FB) += fb.o fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ modedb.o fbcvt.o fb-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o + +ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE),y) +fb-y += fbcon.o bitblit.o softcursor.o +ifeq ($(CONFIG_FB_TILEBLITTING),y) +fb-y += tileblit.o +endif +ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE_ROTATION),y) +fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \ + fbcon_ccw.o +endif +ifeq ($(CONFIG_DMI),y) +fb-y += fbcon_dmi_quirks.o +endif +endif fb-objs := $(fb-y) obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o diff --git a/drivers/video/console/bitblit.c b/drivers/video/fbdev/core/bitblit.c similarity index 98% rename from drivers/video/console/bitblit.c rename to drivers/video/fbdev/core/bitblit.c index dbfe4eecf12e56d328478dff9a84064c591bfe85..790900d646c03cf2c96871e9373c367a58edc83e 100644 --- a/drivers/video/console/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -203,7 +203,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, } static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -213,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bs = info->var.yres - bh; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -416,7 +416,3 @@ void fbcon_set_bitops(struct fbcon_ops *ops) EXPORT_SYMBOL(fbcon_set_bitops); -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Bit Blitting Operation"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/video/console/fbcon.c b/drivers/video/fbdev/core/fbcon.c similarity index 99% rename from drivers/video/console/fbcon.c rename to drivers/video/fbdev/core/fbcon.c index 12ded23f1aaff70a136114e78d58b3efdfaddc37..04612f938bab1fc3eecf457d070fe501b8a1a3de 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include @@ -135,8 +136,9 @@ static char fontname[40]; static int info_idx = -1; /* console rotation */ -static int initial_rotation; +static int initial_rotation = -1; static int fbcon_has_sysfs; +static int margin_color; static const struct consw fb_con; @@ -491,6 +493,13 @@ static int __init fb_console_setup(char *this_opt) initial_rotation = 0; continue; } + + if (!strncmp(options, "margin:", 7)) { + options += 7; + if (*options) + margin_color = simple_strtoul(options, &options, 0); + continue; + } } return 1; } @@ -563,7 +572,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, unsigned short *save = NULL, *r, *q; int logo_height; - if (info->flags & FBINFO_MODULE) { + if (info->fbops->owner) { logo_shown = FBCON_LOGO_DONTSHOW; return; } @@ -954,7 +963,10 @@ static const char *fbcon_startup(void) ops->cur_rotate = -1; ops->cur_blink_jiffies = HZ / 5; info->fbcon_par = ops; - p->con_rotate = initial_rotation; + if (initial_rotation != -1) + p->con_rotate = initial_rotation; + else + p->con_rotate = 
fbcon_platform_get_rotate(info); set_blitting_type(vc, info); if (info->fix.type != FB_TYPE_TEXT) { @@ -1091,7 +1103,10 @@ static void fbcon_init(struct vc_data *vc, int init) ops = info->fbcon_par; ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); - p->con_rotate = initial_rotation; + if (initial_rotation != -1) + p->con_rotate = initial_rotation; + else + p->con_rotate = fbcon_platform_get_rotate(info); set_blitting_type(vc, info); cols = vc->vc_cols; @@ -1299,7 +1314,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only) struct fbcon_ops *ops = info->fbcon_par; if (!fbcon_is_inactive(vc, info)) - ops->clear_margins(vc, info, bottom_only); + ops->clear_margins(vc, info, margin_color, bottom_only); } static void fbcon_cursor(struct vc_data *vc, int mode) @@ -3606,7 +3621,7 @@ static void fbcon_exit(void) fbcon_has_exited = 1; } -static int __init fb_console_init(void) +void __init fb_console_init(void) { int i; @@ -3628,11 +3643,8 @@ static int __init fb_console_init(void) console_unlock(); fbcon_start(); - return 0; } -fs_initcall(fb_console_init); - #ifdef MODULE static void __exit fbcon_deinit_device(void) @@ -3647,7 +3659,7 @@ static void __exit fbcon_deinit_device(void) } } -static void __exit fb_console_exit(void) +void __exit fb_console_exit(void) { console_lock(); fb_unregister_client(&fbcon_event_notifier); @@ -3657,9 +3669,4 @@ static void __exit fb_console_exit(void) do_unregister_con_driver(&fb_con); console_unlock(); } - -module_exit(fb_console_exit); - #endif - -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon.h b/drivers/video/fbdev/core/fbcon.h similarity index 97% rename from drivers/video/console/fbcon.h rename to drivers/video/fbdev/core/fbcon.h index 7aaa4eabbba0557af34afa2414560efb8b74804b..18f3ac14423706adc006f37bee55b1241627f99d 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -60,7 +60,7 @@ struct fbcon_ops { const unsigned short *s, int count, int yy, int xx, int fg, int bg); void (*clear_margins)(struct vc_data *vc, struct fb_info *info, - int bottom_only); + int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg); int (*update_start)(struct fb_info *info); @@ -261,5 +261,10 @@ extern void fbcon_set_rotate(struct fbcon_ops *ops); #define fbcon_set_rotate(x) do {} while(0) #endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */ -#endif /* _VIDEO_FBCON_H */ +#ifdef CONFIG_DMI +int fbcon_platform_get_rotate(struct fb_info *info); +#else +#define fbcon_platform_get_rotate(i) FB_ROTATE_UR +#endif /* CONFIG_DMI */ +#endif /* _VIDEO_FBCON_H */ diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c similarity index 98% rename from drivers/video/console/fbcon_ccw.c rename to drivers/video/fbdev/core/fbcon_ccw.c index 5a3cbf6dff4d944ff5a0ac3fb1fd62c2fba3106a..37a8b0b225663780e6e28dd4ba239a6be9581071 100644 --- a/drivers/video/console/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -189,7 +189,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, } static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -198,7 +198,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bs = vc->vc_rows*ch; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && 
!bottom_only) { @@ -418,7 +418,3 @@ void fbcon_rotate_ccw(struct fbcon_ops *ops) ops->update_start = ccw_update_start; } EXPORT_SYMBOL(fbcon_rotate_ccw); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation (270 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c similarity index 98% rename from drivers/video/console/fbcon_cw.c rename to drivers/video/fbdev/core/fbcon_cw.c index e7ee44db4e98b1318cf8e9f679843354e6b84862..1888f8c866e82ecc35571fe9ad35493f0feedef2 100644 --- a/drivers/video/console/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -172,7 +172,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, } static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -181,7 +181,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int rs = info->var.yres - rw; struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -401,7 +401,3 @@ void fbcon_rotate_cw(struct fbcon_ops *ops) ops->update_start = cw_update_start; } EXPORT_SYMBOL(fbcon_rotate_cw); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation (90 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbcon_dmi_quirks.c b/drivers/video/fbdev/core/fbcon_dmi_quirks.c new file mode 100644 index 0000000000000000000000000000000000000000..6904e47d1e51b02be92a06c1b31c356fde7ed5ad --- /dev/null +++ b/drivers/video/fbdev/core/fbcon_dmi_quirks.c @@ -0,0 +1,145 @@ +/* + * fbcon_dmi_quirks.c -- DMI based quirk detection for fbcon + * + * Copyright (C) 2017 Hans de Goede + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include "fbcon.h" + +/* + * Some x86 clamshell design devices use portrait tablet screens and a display + * engine which cannot rotate in hardware, so we need to rotate the fbcon to + * compensate. Unfortunately these (cheap) devices also typically have quite + * generic DMI data, so we match on a combination of DMI data, screen resolution + * and a list of known BIOS dates to avoid false positives. 
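
The new fbcon_dmi_quirks.c, whose body follows, implements exactly the strategy described in the comment above. As a compact illustration of the dmi_first_match() iteration idiom it is built on, walking every matching entry rather than stopping at the first hit, here is a sketch with hypothetical match strings and rotation value:

```c
/* Sketch of DMI quirk lookup; vendor/product strings are hypothetical. */
#include <linux/dmi.h>

static const int example_rotation_cw = 1;	/* stand-in for a rotation constant */

static const struct dmi_system_id quirks[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
		},
		.driver_data = (void *)&example_rotation_cw,
	},
	{}
};

static int lookup_rotation_quirk(void)
{
	const struct dmi_system_id *m;

	/* Walk all matching entries, not just the first one. */
	for (m = dmi_first_match(quirks); m; m = dmi_first_match(m + 1)) {
		const int *rot = m->driver_data;

		/* Real code also checks resolution and BIOS date here. */
		if (rot)
			return *rot;
	}

	return 0;	/* no quirk: leave the console unrotated */
}
```
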
+ */ + +struct fbcon_dmi_rotate_data { + int width; + int height; + const char * const *bios_dates; + int rotate; +}; + +static const struct fbcon_dmi_rotate_data rotate_data_asus_t100ha = { + .width = 800, + .height = 1280, + .rotate = FB_ROTATE_CCW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_gpd_pocket = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "05/26/2017", "06/28/2017", + "07/05/2017", "08/07/2017", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_gpd_win = { + .width = 720, + .height = 1280, + .bios_dates = (const char * const []){ + "10/25/2016", "11/18/2016", "12/23/2016", "12/26/2016", + "02/21/2017", "03/20/2017", "05/25/2017", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_itworks_tw891 = { + .width = 800, + .height = 1280, + .bios_dates = (const char * const []){ "10/16/2015", NULL }, + .rotate = FB_ROTATE_CW, +}; + +static const struct fbcon_dmi_rotate_data rotate_data_vios_lth17 = { + .width = 800, + .height = 1280, + .rotate = FB_ROTATE_CW, +}; + +static const struct dmi_system_id rotate_data[] = { + { /* Asus T100HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), + }, + .driver_data = (void *)&rotate_data_asus_t100ha, + }, { /* + * GPD Pocket, note that the the DMI data is less generic then + * it seems, devices with a board-vendor of "AMI Corporation" + * are quite rare, as are devices which have both board- *and* + * product-id set to "Default String" + */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&rotate_data_gpd_pocket, + }, { /* GPD Win (same note on DMI match as GPD Pocket) */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&rotate_data_gpd_win, + }, { /* I.T.Works TW891 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TW891"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"), + }, + .driver_data = (void *)&rotate_data_itworks_tw891, + }, { /* VIOS LTH17 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "VIOS"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "LTH17"), + }, + .driver_data = (void *)&rotate_data_vios_lth17, + }, + {} +}; + +int fbcon_platform_get_rotate(struct fb_info *info) +{ + const struct dmi_system_id *match; + const struct fbcon_dmi_rotate_data *data; + const char *bios_date; + int i; + + for (match = dmi_first_match(rotate_data); + match; + match = dmi_first_match(match + 1)) { + data = match->driver_data; + + if (data->width != info->var.xres || + data->height != info->var.yres) + continue; + + if (!data->bios_dates) + return data->rotate; + + bios_date = dmi_get_system_info(DMI_BIOS_DATE); + if (!bios_date) + continue; + + for (i = 0; data->bios_dates[i]; i++) { + if (!strcmp(data->bios_dates[i], bios_date)) + return data->rotate; + } + } + + return FB_ROTATE_UR; +} diff --git 
a/drivers/video/console/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c similarity index 95% rename from drivers/video/console/fbcon_rotate.c rename to drivers/video/fbdev/core/fbcon_rotate.c index db6528f2d3f29506cd33c55b48ee5ece6fef4a37..8a51e4d95cc5062ae9efa6c14ac7433de9e668bd 100644 --- a/drivers/video/console/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -110,7 +110,3 @@ void fbcon_set_rotate(struct fbcon_ops *ops) } } EXPORT_SYMBOL(fbcon_set_rotate); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h similarity index 100% rename from drivers/video/console/fbcon_rotate.h rename to drivers/video/fbdev/core/fbcon_rotate.h diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c similarity index 98% rename from drivers/video/console/fbcon_ud.c rename to drivers/video/fbdev/core/fbcon_ud.c index 19e3714abfe8fb765eaaea2ebeb9a21c5c2725de..f98eee263597b08a9677dc8f0072fc62a2cba022 100644 --- a/drivers/video/console/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -220,7 +220,7 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, } static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = vc->vc_font.height; @@ -228,7 +228,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, unsigned int bh = info->var.yres - (vc->vc_rows*ch); struct fb_fillrect region; - region.color = 0; + region.color = color; region.rop = ROP_COPY; if (rw && !bottom_only) { @@ -446,7 +446,3 @@ void fbcon_rotate_ud(struct fbcon_ops *ops) ops->update_start = ud_update_start; } EXPORT_SYMBOL(fbcon_rotate_ud); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Console Rotation (180 degrees) Support"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 25e862c487f643f97353abb607caf23ea336a757..f741ba8df01b8e28f4331f988d2b04219bb103b6 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -316,7 +317,7 @@ static void fb_set_logo(struct fb_info *info, for (i = 0; i < logo->height; i++) { for (j = 0; j < logo->width; src++) { d = *src ^ xor; - for (k = 7; k >= 0; k--) { + for (k = 7; k >= 0 && j < logo->width; k--) { *dst++ = ((d >> k) & 1) ? 
fg : 0; j++; } @@ -463,7 +464,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, /* Return if the frame buffer is not mapped or suspended */ if (logo == NULL || info->state != FBINFO_STATE_RUNNING || - info->flags & FBINFO_MODULE) + info->fbops->owner) return 0; image.depth = 8; @@ -601,7 +602,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate) memset(&fb_logo, 0, sizeof(struct logo_data)); if (info->flags & FBINFO_MISC_TILEBLITTING || - info->flags & FBINFO_MODULE) + info->fbops->owner) return 0; if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -1892,6 +1893,9 @@ fbmem_init(void) fb_class = NULL; goto err_class; } + + fb_console_init(); + return 0; err_class: @@ -1906,6 +1910,8 @@ module_init(fbmem_init); static void __exit fbmem_exit(void) { + fb_console_exit(); + remove_proc_entry("fb", NULL); class_destroy(fb_class); unregister_chrdev(FB_MAJOR, "fb"); diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 41d7979d81c53df3d459d39939798659386444a8..2b2d673285148bc4dba1eb24c8d0d18f4d8b6a01 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1479,8 +1479,8 @@ int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb, if (ret) return ret; - pr_debug("%s: got %dx%d display mode from %s\n", - of_node_full_name(np), vm.hactive, vm.vactive, np->name); + pr_debug("%pOF: got %dx%d display mode from %s\n", + np, vm.hactive, vm.vactive, np->name); dump_fb_videomode(fb); return 0; diff --git a/drivers/video/console/softcursor.c b/drivers/video/fbdev/core/softcursor.c similarity index 93% rename from drivers/video/console/softcursor.c rename to drivers/video/fbdev/core/softcursor.c index 46dd8f5d2e9e1c899921893f314ce590ea998d10..fc93f254498e2aa5b0566d1e08052ac31d6ebb93 100644 --- a/drivers/video/console/softcursor.c +++ b/drivers/video/fbdev/core/softcursor.c @@ -76,7 +76,3 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) } EXPORT_SYMBOL(soft_cursor); - -MODULE_AUTHOR("James Simmons "); -MODULE_DESCRIPTION("Generic software cursor"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/console/tileblit.c b/drivers/video/fbdev/core/tileblit.c similarity index 96% rename from drivers/video/console/tileblit.c rename to drivers/video/fbdev/core/tileblit.c index 15e8e1a89c45def0d7f9ebc0ab8fbdaa1563ff8b..93390312957ffa3d89627deffc8eb6643cfb74f5 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -74,7 +74,7 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info, } static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, - int bottom_only) + int color, int bottom_only) { return; } @@ -152,8 +152,3 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) } EXPORT_SYMBOL(fbcon_set_tileops); - -MODULE_AUTHOR("Antonino Daplas "); -MODULE_DESCRIPTION("Tile Blitting Operation"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c index 99acf538a8b812a98ada497fb6430c6ae0f02f25..9a5751cb4e1656bb7b0b2932cf8baae9e0fb61d0 100644 --- a/drivers/video/fbdev/cyber2000fb.c +++ b/drivers/video/fbdev/cyber2000fb.c @@ -1336,7 +1336,7 @@ static void cyber2000fb_i2c_unregister(struct cfb_info *cfb) * These parameters give * 640x480, hsync 31.5kHz, vsync 60Hz */ -static struct fb_videomode cyber2000fb_default_mode = { +static const struct fb_videomode cyber2000fb_default_mode = { .refresh = 60, .xres = 640, .yres = 480, diff --git a/drivers/video/fbdev/da8xx-fb.c 
b/drivers/video/fbdev/da8xx-fb.c index c229b1a0d13b350bce029d2fcd78ef60bd4c65b1..a74096c53cb50516df5e1415af95e1373f0602b0 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -1341,7 +1341,7 @@ static int fb_probe(struct platform_device *device) { struct da8xx_lcdc_platform_data *fb_pdata = dev_get_platdata(&device->dev); - static struct resource *lcdc_regs; + struct resource *lcdc_regs; struct lcd_ctrl_config *lcd_cfg; struct fb_videomode *lcdc_info; struct fb_info *da8xx_fb_info; diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c index 3526899da61b93e2c40532b6edb337cc5be53300..7b1492d34e989ab92a1a4819192afc00294af4d7 100644 --- a/drivers/video/fbdev/dnfb.c +++ b/drivers/video/fbdev/dnfb.c @@ -126,7 +126,7 @@ struct fb_var_screeninfo dnfb_var = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo dnfb_fix = { +static const struct fb_fix_screeninfo dnfb_fix = { .id = "Apollo Mono", .smem_start = (FRAME_BUFFER_START + IO_BASE), .smem_len = FRAME_BUFFER_LEN, diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c index 88fa2e70a0bb1683fe04e81b3f32227b7d2f6789..d9e816d53531324b2ec03a33ebea709936489c53 100644 --- a/drivers/video/fbdev/fb-puv3.c +++ b/drivers/video/fbdev/fb-puv3.c @@ -69,7 +69,7 @@ static const struct fb_videomode unifb_modes[] = { 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; -static struct fb_var_screeninfo unifb_default = { +static const struct fb_var_screeninfo unifb_default = { .xres = 640, .yres = 480, .xres_virtual = 640, diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c index dda31e0a45af282a4b72d4ad19f33cdda1da7efc..6b1915872af1658c06f43d057f387303f6e69386 100644 --- a/drivers/video/fbdev/ffb.c +++ b/drivers/video/fbdev/ffb.c @@ -997,9 +997,9 @@ static int ffb_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: %s at %016lx, type %d, " + printk(KERN_INFO "%pOF: %s at %016lx, type %d, " "DAC pnum[%x] rev[%d] manuf_rev[%d]\n", - dp->full_name, + dp, ((par->flags & FFB_FLAG_AFB) ? 
"AFB" : "FFB"), par->physbase, par->board_type, dac_pnum, dac_rev, dac_mrev); diff --git a/drivers/video/fbdev/fm2fb.c b/drivers/video/fbdev/fm2fb.c index e69d47af9932a0c813f8402129b646ed8f98b5aa..ac7a4ebfd390e8f589e8a433ce47397b490bd1dd 100644 --- a/drivers/video/fbdev/fm2fb.c +++ b/drivers/video/fbdev/fm2fb.c @@ -213,7 +213,7 @@ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id); -static struct zorro_device_id fm2fb_devices[] = { +static const struct zorro_device_id fm2fb_devices[] = { { ZORRO_PROD_BSC_FRAMEMASTER_II }, { ZORRO_PROD_HELFRICH_RAINBOW_II }, { 0 } diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index ec9fc9ac23decf320408e66f15ae5e5cf0bb8bea..f4f76373b2a871ae26532afe8729282939988942 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ -474,7 +474,7 @@ static void gxfb_remove(struct pci_dev *pdev) framebuffer_release(info); } -static struct pci_device_id gxfb_id_table[] = { +static const struct pci_device_id gxfb_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO) }, { 0, } }; diff --git a/drivers/video/fbdev/grvga.c b/drivers/video/fbdev/grvga.c index b471f92969b1bbd5de8c16b592d4ddb18064e5cb..8fc8f46dadeb5c597942773f12c4f97e47bf0c5b 100644 --- a/drivers/video/fbdev/grvga.c +++ b/drivers/video/fbdev/grvga.c @@ -70,7 +70,7 @@ static const struct fb_videomode grvga_modedb[] = { } }; -static struct fb_fix_screeninfo grvga_fix = { +static const struct fb_fix_screeninfo grvga_fix = { .id = "AG SVGACTRL", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index 2488baab7c89253b4606faf3b5ae3877939b9201..d18f7b31932c832d3a00b6e0475c8512b2db80a0 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -107,7 +107,7 @@ static const char * const i810_pci_list[] = { "Intel(R) 815 (Internal Graphics with AGP) Framebuffer Device" }; -static struct pci_device_id i810fb_pci_tbl[] = { +static const struct pci_device_id i810fb_pci_tbl[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, @@ -1542,7 +1542,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor) return 0; } -static struct fb_ops i810fb_ops = { +static const struct fb_ops i810fb_ops = { .owner = THIS_MODULE, .fb_open = i810fb_open, .fb_release = i810fb_release, diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 4363c64d74e8c1481842735aa2fc0d1831344b05..ecdcf358ad5eac2d76f20439add5818625ff64e2 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1318,7 +1318,7 @@ imsttfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) } } -static struct pci_device_id imsttfb_pci_tbl[] = { +static const struct pci_device_id imsttfb_pci_tbl[] = { { PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT128, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IBM }, { PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT3D, diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index ffc391208b27152ffcea7ce1a6842ecceb754597..d7463a2a5d83f2f11ae00662aab3edc7b1682cba 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -173,7 +173,7 @@ static int intelfb_set_fbinfo(struct intelfb_info 
*dinfo); #define INTELFB_CLASS_MASK 0 #endif -static struct pci_device_id intelfb_pci_table[] = { +static const struct pci_device_id intelfb_pci_table[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_830M, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_830M }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM }, diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index f77478fb3d14a3dcc8d013824994a378ba541b4d..a7bd9f25911b534d10e23ca1eae7ccac6ba99288 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -633,7 +633,7 @@ static int kyrofb_ioctl(struct fb_info *info, return 0; } -static struct pci_device_id kyrofb_pci_tbl[] = { +static const struct pci_device_id kyrofb_pci_tbl[] = { { PCI_VENDOR_ID_ST, PCI_DEVICE_ID_STG4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c index 62e59dc90ee64ad100980dd99eea4756d0d086b3..71862188f5285953104c62744d3216bfb8a59d10 100644 --- a/drivers/video/fbdev/leo.c +++ b/drivers/video/fbdev/leo.c @@ -619,8 +619,8 @@ static int leo_probe(struct platform_device *op) dev_set_drvdata(&op->dev, info); - printk(KERN_INFO "%s: leo at %lx:%lx\n", - dp->full_name, + printk(KERN_INFO "%pOF: leo at %lx:%lx\n", + dp, par->which_io, info->fix.smem_start); return 0; diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index f6a0b9af97a90b3fa3989c480b3934d9bec0ed9b..b9b284d79631d35b11f65536d516314b6157b956 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1198,7 +1198,7 @@ static int matroxfb_blank(int blank, struct fb_info *info) return 0; } -static struct fb_ops matroxfb_ops = { +static const struct fb_ops matroxfb_ops = { .owner = THIS_MODULE, .fb_open = matroxfb_open, .fb_release = matroxfb_release, @@ -1573,14 +1573,14 @@ static struct board { NULL}}; #ifndef MODULE -static struct fb_videomode defaultmode = { +static const struct fb_videomode defaultmode = { /* 640x480 @ 60Hz, 31.5 kHz */ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }; -#endif /* !MODULE */ static int hotplug = 0; +#endif /* !MODULE */ static void setDefaultOutputs(struct matrox_fb_info *minfo) { @@ -1623,7 +1623,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) unsigned int memsize; int err; - static struct pci_device_id intel_82437[] = { + static const struct pci_device_id intel_82437[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437) }, { }, }; @@ -1794,9 +1794,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) minfo->fbops = matroxfb_ops; minfo->fbcon.fbops = &minfo->fbops; minfo->fbcon.pseudo_palette = minfo->cmap; - /* after __init time we are like module... no logo */ - minfo->fbcon.flags = hotplug ? 
FBINFO_FLAG_MODULE : FBINFO_FLAG_DEFAULT; - minfo->fbcon.flags |= FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */ + minfo->fbcon.flags = FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */ FBINFO_HWACCEL_COPYAREA | /* We have hw-assisted bmove */ FBINFO_HWACCEL_FILLRECT | /* And fillrect */ FBINFO_HWACCEL_IMAGEBLIT | /* And imageblit */ @@ -2116,7 +2114,7 @@ static void pci_remove_matrox(struct pci_dev* pdev) { matroxfb_remove(minfo, 1); } -static struct pci_device_id matroxfb_devices[] = { +static const struct pci_device_id matroxfb_devices[] = { #ifdef CONFIG_FB_MATROX_MILLENIUM {PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_MIL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c index cab7333208eab44e38b1ac8f33bfd1a3c230dcfc..5bb1b5c308a775d4f35c83b2ed243e0996e7146e 100644 --- a/drivers/video/fbdev/maxinefb.c +++ b/drivers/video/fbdev/maxinefb.c @@ -39,7 +39,7 @@ static struct fb_info fb_info; -static struct fb_var_screeninfo maxinefb_defined = { +static const struct fb_var_screeninfo maxinefb_defined = { .xres = 1024, .yres = 768, .xres_virtual = 1024, diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index f9ec5c0484fabbd8d6f2cc5b5e5897c003e07b10..cd372527c9e4b750def373bc1dfa1192f5315744 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -982,7 +982,7 @@ static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par) #define CHIP_ID(id) \ { PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) } -static struct pci_device_id mb862xx_pci_tbl[] = { +static const struct pci_device_id mb862xx_pci_tbl[] = { /* MB86295/MB86296 */ CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP), CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA), diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 698df9543e30c0a0c13f78a0d9e70bc73753ed39..539b85da08973b9dda4261436e7811fcbf76f7ed 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -79,7 +79,7 @@ struct mbxfb_info { }; -static struct fb_var_screeninfo mbxfb_default = { +static const struct fb_var_screeninfo mbxfb_default = { .xres = 640, .yres = 480, .xres_virtual = 640, @@ -102,7 +102,7 @@ static struct fb_var_screeninfo mbxfb_default = { .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; -static struct fb_fix_screeninfo mbxfb_fix = { +static const struct fb_fix_screeninfo mbxfb_fix = { .id = "MBX", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index db023a97d1eaedca6dc1d1c8929330c4a993f390..5d3a444083f74c713b784a474586f64c69db1642 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -2138,7 +2138,7 @@ static void neofb_remove(struct pci_dev *dev) } } -static struct pci_device_id neofb_devices[] = { +static const struct pci_device_id neofb_devices[] = { {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070}, diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index ce7dab7299fe2e66846171bdc60e073da57c0581..418a2d0d06a95b7005472983f386a75bc4426abf 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -55,7 +55,7 @@ /* HW cursor parameters */ #define MAX_CURS 32 -static struct pci_device_id nvidiafb_pci_tbl[] = { +static const struct pci_device_id nvidiafb_pci_tbl[] = { 
{PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0}, { 0, } diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 9be884b0c778b562a699d4ae620d136fa3ae4e2c..90d38de344797884fbf9d092e8095851dad21412 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -383,7 +383,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } -static void __init offb_init_fb(const char *name, const char *full_name, +static void __init offb_init_fb(const char *name, int width, int height, int depth, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) @@ -402,14 +402,13 @@ static void __init offb_init_fb(const char *name, const char *full_name, "Using unsupported %dx%d %s at %lx, depth=%d, pitch=%d\n", width, height, name, address, depth, pitch); if (depth != 8 && depth != 15 && depth != 16 && depth != 32) { - printk(KERN_ERR "%s: can't use depth = %d\n", full_name, - depth); + printk(KERN_ERR "%pOF: can't use depth = %d\n", dp, depth); release_mem_region(res_start, res_size); return; } info = framebuffer_alloc(sizeof(u32) * 16, NULL); - + if (info == 0) { release_mem_region(res_start, res_size); return; @@ -515,7 +514,7 @@ static void __init offb_init_fb(const char *name, const char *full_name, if (register_framebuffer(info) < 0) goto out_err; - fb_info(info, "Open Firmware frame buffer device on %s\n", full_name); + fb_info(info, "Open Firmware frame buffer device on %pOF\n", dp); return; out_err: @@ -644,7 +643,6 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; offb_init_fb(no_real_node ? "bootx" : dp->name, - no_real_node ? "display" : dp->full_name, width, height, depth, pitch, address, foreign_endian, no_real_node ? NULL : dp); } diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c index df9e6ebcfad5f8d37048e3db8901406e96177c1a..e3a85432f9266943886e6e2504e5415b66f359d4 100644 --- a/drivers/video/fbdev/omap/lcd_mipid.c +++ b/drivers/video/fbdev/omap/lcd_mipid.c @@ -496,7 +496,7 @@ static void mipid_cleanup(struct lcd_panel *panel) mipid_esd_stop_check(md); } -static struct lcd_panel mipid_panel = { +static const struct lcd_panel mipid_panel = { .config = OMAP_LCDC_PANEL_TFT, .bpp = 16, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index f14691ce8d020f850a8df3b86b9ccc9d538afb4b..6cd759c0103754b3378915eecf4f6ce7da5314d3 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -18,7 +18,7 @@ #include